From 0bf037188149824a8ae161ad9dea7d650b6f7346 Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Tue, 21 Jan 2014 11:25:16 -0500
Subject: [PATCH 5/6] QEMUBH: make AioContext's bh re-entrant

Message-id: <1390303517-20167-2-git-send-email-kraxel@redhat.com>
Patchwork-id: 56866
O-Subject: [RHEL-7 qemu-kvm PATCH 1/2] QEMUBH: make AioContext's bh re-entrant
Bugzilla: 1009297
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>

From: Liu Ping Fan <qemulist@gmail.com>

BH will be used outside the big lock, so introduce a lock to protect
the writers, i.e. bh's adders and deleters, from one another. The lock
only affects the writers; bh's callbacks do not take this extra lock.
Note that for the same AioContext, aio_bh_poll() cannot yet run in
parallel.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit dcc772e2f2b7c2a68644133fea2b874f6751a57b)
---
 async.c             | 33 +++++++++++++++++++++++++++++++--
 include/block/aio.h |  7 +++++++
 2 files changed, 38 insertions(+), 2 deletions(-)

Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 async.c             | 33 +++++++++++++++++++++++++++++++--
 include/block/aio.h |  7 +++++++
 2 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/async.c b/async.c
index 90fe906..5ce3633 100644
--- a/async.c
+++ b/async.c
@@ -47,11 +47,16 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
     bh->ctx = ctx;
     bh->cb = cb;
     bh->opaque = opaque;
+    qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
     ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
     return bh;
 }
 
+/* Multiple invocations of aio_bh_poll() must not run concurrently */
 int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
@@ -61,9 +66,15 @@ int aio_bh_poll(AioContext *ctx)
 
     ret = 0;
     for (bh = ctx->first_bh; bh; bh = next) {
+        /* Make sure that fetching bh happens before accessing its members */
+        smp_read_barrier_depends();
         next = bh->next;
         if (!bh->deleted && bh->scheduled) {
             bh->scheduled = 0;
+            /* Paired with write barrier in bh schedule to ensure reading for
+             * idle & callbacks coming after bh's scheduling.
+             */
+            smp_rmb();
             if (!bh->idle)
                 ret = 1;
             bh->idle = 0;
@@ -75,6 +86,7 @@ int aio_bh_poll(AioContext *ctx)
 
     /* remove deleted bhs */
     if (!ctx->walking_bh) {
+        qemu_mutex_lock(&ctx->bh_lock);
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
@@ -85,6 +97,7 @@ int aio_bh_poll(AioContext *ctx)
                 bhp = &bh->next;
             }
         }
+        qemu_mutex_unlock(&ctx->bh_lock);
     }
 
     return ret;
@@ -94,24 +107,38 @@ void qemu_bh_schedule_idle(QEMUBH *bh)
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 1;
+    /* Make sure that idle & any writes needed by the callback are done
+     * before the locations are read in the aio_bh_poll.
+     */
+    smp_wmb();
+    bh->scheduled = 1;
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 0;
+    /* Make sure that idle & any writes needed by the callback are done
+     * before the locations are read in the aio_bh_poll.
+     */
+    smp_wmb();
+    bh->scheduled = 1;
     aio_notify(bh->ctx);
 }
 
+
+/* This function is asynchronous: it only clears the scheduled flag.
+ */
 void qemu_bh_cancel(QEMUBH *bh)
 {
     bh->scheduled = 0;
 }
 
+/* This function is asynchronous. The actual deletion is deferred and
+ * performed later from aio_bh_poll().
+ */
 void qemu_bh_delete(QEMUBH *bh)
 {
     bh->scheduled = 0;
@@ -176,6 +203,7 @@ aio_ctx_finalize(GSource     *source)
     thread_pool_free(ctx->thread_pool);
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
+    qemu_mutex_destroy(&ctx->bh_lock);
     g_array_free(ctx->pollfds, TRUE);
 }
 
@@ -211,6 +239,7 @@ AioContext *aio_context_new(void)
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     ctx->thread_pool = NULL;
+    qemu_mutex_init(&ctx->bh_lock);
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier, 
                            (EventNotifierHandler *)
diff --git a/include/block/aio.h b/include/block/aio.h
index 1836793..cc77771 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -17,6 +17,7 @@
 #include "qemu-common.h"
 #include "qemu/queue.h"
 #include "qemu/event_notifier.h"
+#include "qemu/thread.h"
 
 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
@@ -53,6 +54,8 @@ typedef struct AioContext {
      */
     int walking_handlers;
 
+    /* lock to protect between bh's adders and deleter */
+    QemuMutex bh_lock;
     /* Anchor of the list of Bottom Halves belonging to the context */
     struct QEMUBH *first_bh;
 
@@ -127,6 +130,8 @@ void aio_notify(AioContext *ctx);
  * aio_bh_poll: Poll bottom halves for an AioContext.
  *
  * These are internal functions used by the QEMU main loop.
+ * Note that multiple invocations of aio_bh_poll() must not run
+ * concurrently.
  */
 int aio_bh_poll(AioContext *ctx);
 
@@ -163,6 +168,8 @@ void qemu_bh_cancel(QEMUBH *bh);
  * Deleting a bottom half frees the memory that was allocated for it by
  * qemu_bh_new.  It also implies canceling the bottom half if it was
  * scheduled.
+ * This function is asynchronous. The actual deletion is deferred and
+ * performed later from aio_bh_poll().
  *
  * @bh: The bottom half to be deleted.
  */
-- 
1.8.3.1
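
The patch above confines bh_lock to the writers (aio_bh_new and the
deleted-bh sweep inside aio_bh_poll) and lets the reader walk the list
without any lock, relying on the smp_wmb()/smp_rmb() pairing around the
scheduled flag. The stand-alone sketch below models that pattern outside
QEMU; pthread mutexes and GCC __atomic builtins stand in for qemu_mutex_*
and the smp_*() barriers, and the names (Ctx, bh_new, bh_schedule,
bh_poll) are illustrative, not QEMU's actual API.

/*
 * Simplified model: writers (add/schedule) may run in any thread and
 * serialize on a mutex, while the poller walks the list lock-free.
 * Compile with: gcc -pthread bh_model.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef void BHFunc(void *opaque);

typedef struct BH {
    struct BH *next;
    BHFunc *cb;
    void *opaque;
    int scheduled;
    int deleted;
} BH;

typedef struct Ctx {
    BH *first_bh;
    pthread_mutex_t bh_lock;    /* protects the writers only, never the poller */
} Ctx;

/* Writer: the release store plays the role of smp_wmb() in aio_bh_new(),
 * publishing a fully initialized bh before it becomes visible in the list. */
static BH *bh_new(Ctx *ctx, BHFunc *cb, void *opaque)
{
    BH *bh = calloc(1, sizeof(*bh));
    bh->cb = cb;
    bh->opaque = opaque;
    pthread_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    __atomic_store_n(&ctx->first_bh, bh, __ATOMIC_RELEASE);
    pthread_mutex_unlock(&ctx->bh_lock);
    return bh;
}

/* Writer: ordering the callback's data before the scheduled flag mirrors
 * the smp_wmb() added to qemu_bh_schedule(). */
static void bh_schedule(BH *bh)
{
    __atomic_store_n(&bh->scheduled, 1, __ATOMIC_RELEASE);
}

/* Reader: only the event-loop thread calls this, and never concurrently
 * with itself.  The acquire operations mirror smp_read_barrier_depends()
 * and smp_rmb() in aio_bh_poll(); no mutex is taken on this path. */
static int bh_poll(Ctx *ctx)
{
    int ret = 0;
    for (BH *bh = __atomic_load_n(&ctx->first_bh, __ATOMIC_ACQUIRE);
         bh != NULL; bh = bh->next) {
        if (!bh->deleted &&
            __atomic_exchange_n(&bh->scheduled, 0, __ATOMIC_ACQUIRE)) {
            bh->cb(bh->opaque);
            ret = 1;
        }
    }
    return ret;
}

static void say_hello(void *opaque)
{
    printf("bh ran: %s\n", (const char *)opaque);
}

int main(void)
{
    Ctx ctx = { .first_bh = NULL, .bh_lock = PTHREAD_MUTEX_INITIALIZER };
    BH *bh = bh_new(&ctx, say_hello, "hello");
    bh_schedule(bh);   /* in QEMU this may now happen outside the big lock */
    bh_poll(&ctx);     /* the event loop drains scheduled bottom halves */
    free(bh);
    return 0;
}

Acquire/release is slightly stronger than the data-dependency barrier the
real code uses on the read side, but it keeps the sketch portable and makes
the pairing with the writers' barriers explicit.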