SOURCES/kvm-iothread-replace-init_done_cond-with-a-semaphore.patch

From 9db6f24509ee8a28818693d6a15257b873d9422a Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Thu, 15 Aug 2019 13:23:11 +0100
Subject: [PATCH 09/10] iothread: replace init_done_cond with a semaphore

RH-Author: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: <20190815132311.22027-3-stefanha@redhat.com>
Patchwork-id: 89996
O-Subject: [RHEL-8.1.0 qemu-kvm PATCH v2 2/2] iothread: replace init_done_cond with a semaphore
Bugzilla: 1687541
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Markus Armbruster <armbru@redhat.com>

From: Peter Xu <peterx@redhat.com>

Only sending an init-done message using lock+cond seems like overkill, so
replace it with a simpler semaphore.

Meanwhile, initialize the semaphore unconditionally so that it can also be
destroyed unconditionally in finalize, which is cleaner.

Signed-off-by: Peter Xu <peterx@redhat.com>
Message-id: 20190306115532.23025-2-peterx@redhat.com
Message-Id: <20190306115532.23025-2-peterx@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 21c4d15b4708b7d30c450041a560df670f36cac8)
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 include/sysemu/iothread.h |  3 +--
 iothread.c                | 17 ++++-------------
 2 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 8a7ac2c..50411ba 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -27,8 +27,7 @@ typedef struct {
     GMainContext *worker_context;
     GMainLoop *main_loop;
     GOnce once;
-    QemuMutex init_done_lock;
-    QemuCond init_done_cond;    /* is thread initialization done? */
+    QemuSemaphore init_done_sem; /* is thread init done? */
     bool stopping;              /* has iothread_stop() been called? */
     bool running;               /* should iothread_run() continue? */
     int thread_id;
diff --git a/iothread.c b/iothread.c
index 2fb1cdf..b92232f 100644
--- a/iothread.c
+++ b/iothread.c
@@ -55,10 +55,8 @@ static void *iothread_run(void *opaque)
     rcu_register_thread();
 
     my_iothread = iothread;
-    qemu_mutex_lock(&iothread->init_done_lock);
     iothread->thread_id = qemu_get_thread_id();
-    qemu_cond_signal(&iothread->init_done_cond);
-    qemu_mutex_unlock(&iothread->init_done_lock);
+    qemu_sem_post(&iothread->init_done_sem);
 
     while (iothread->running) {
         aio_poll(iothread->ctx, true);
@@ -111,6 +109,7 @@ static void iothread_instance_init(Object *obj)
 
     iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
     iothread->thread_id = -1;
+    qemu_sem_init(&iothread->init_done_sem, 0);
 }
 
 static void iothread_instance_finalize(Object *obj)
@@ -119,10 +118,6 @@ static void iothread_instance_finalize(Object *obj)
 
     iothread_stop(iothread);
 
-    if (iothread->thread_id != -1) {
-        qemu_cond_destroy(&iothread->init_done_cond);
-        qemu_mutex_destroy(&iothread->init_done_lock);
-    }
     /*
      * Before glib2 2.33.10, there is a glib2 bug that GSource context
      * pointer may not be cleared even if the context has already been
@@ -141,6 +136,7 @@ static void iothread_instance_finalize(Object *obj)
         g_main_context_unref(iothread->worker_context);
         iothread->worker_context = NULL;
     }
+    qemu_sem_destroy(&iothread->init_done_sem);
 }
 
 static void iothread_complete(UserCreatable *obj, Error **errp)
@@ -169,8 +165,6 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
         return;
     }
 
-    qemu_mutex_init(&iothread->init_done_lock);
-    qemu_cond_init(&iothread->init_done_cond);
     iothread->once = (GOnce) G_ONCE_INIT;
 
     /* This assumes we are called from a thread with useful CPU affinity for us
@@ -184,12 +178,9 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
     g_free(name);
 
     /* Wait for initialization to complete */
-    qemu_mutex_lock(&iothread->init_done_lock);
     while (iothread->thread_id == -1) {
-        qemu_cond_wait(&iothread->init_done_cond,
-                       &iothread->init_done_lock);
+        qemu_sem_wait(&iothread->init_done_sem);
     }
-    qemu_mutex_unlock(&iothread->init_done_lock);
 }
 
 typedef struct {
-- 
1.8.3.1
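
Editor's note: for readers outside the QEMU tree, the standalone sketch below shows the init-done handshake this patch switches to. It is not part of the patch; it uses plain POSIX pthreads/sem_t and Linux's SYS_gettid instead of QEMU's QemuSemaphore and qemu_get_thread_id(), and the Worker type and worker_run() are illustrative names only.

/*
 * Standalone illustration of the pattern applied by this patch:
 * the creating thread waits for a worker's initialization with a
 * semaphore rather than a mutex + condition variable.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct {
    sem_t init_done_sem;   /* posted once the worker has initialized */
    int   thread_id;       /* -1 until the worker publishes its id */
} Worker;

static void *worker_run(void *opaque)
{
    Worker *w = opaque;

    /* ...per-thread setup would go here... */
    w->thread_id = (int)syscall(SYS_gettid);
    sem_post(&w->init_done_sem);   /* one post replaces lock+signal+unlock */

    /* ...a real iothread would now enter its event loop... */
    return NULL;
}

int main(void)
{
    Worker w = { .thread_id = -1 };
    pthread_t tid;

    /* Initialized unconditionally, so it can be destroyed unconditionally. */
    sem_init(&w.init_done_sem, 0, 0);
    pthread_create(&tid, NULL, worker_run, &w);

    /* Wait for initialization to complete, as iothread_complete() does. */
    while (w.thread_id == -1) {
        sem_wait(&w.init_done_sem);
    }
    printf("worker thread %d is initialized\n", w.thread_id);

    pthread_join(tid, NULL);
    sem_destroy(&w.init_done_sem);
    return 0;
}

Build on Linux with something like "gcc -pthread example.c" (the file name is arbitrary). The sketch also shows why the patch can drop the "if (thread_id != -1)" guard around teardown: the semaphore is created in instance_init() and destroyed in instance_finalize(), so the two calls always pair up whether or not the thread was ever started.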