From 54d30086f066f1094871c4886f3a6dee51263d76 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 21 Sep 2018 12:46:29 +0200
Subject: [PATCH 2/3] block: Use a single global AioWait

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20180921124630.29036-3-kwolf@redhat.com>
Patchwork-id: 82231
O-Subject: [RHV-7.6 qemu-kvm-rhev PATCH 2/3] block: Use a single global AioWait
Bugzilla: 1618584
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Eric Blake <eblake@redhat.com>

When draining a block node, we recurse to its parent and for subtree
drains also to its children. A single AIO_WAIT_WHILE() is then used to
wait for bdrv_drain_poll() to become true, which depends on all of the
nodes we recursed to. However, if the respective child or parent becomes
quiescent and calls bdrv_wakeup(), only the AioWait of the child/parent
is checked, while AIO_WAIT_WHILE() depends on the AioWait of the
original node.

Fix this by using a single AioWait for all callers of AIO_WAIT_WHILE().

This may mean that the draining thread gets a few more unnecessary
wakeups because an unrelated operation got completed, but we already
wake it up when something _could_ have changed rather than only if it
has certainly changed.

Apart from that, drain is a slow path anyway. In theory it would be
possible to use wakeups more selectively and still correctly, but the
gains are likely not worth the additional complexity. In fact, this
patch is a nice simplification for some places in the code.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 block.c                   |  5 -----
 block/block-backend.c     | 11 ++++-------
 block/io.c                |  7 ++-----
 blockjob.c                | 13 +------------
 include/block/aio-wait.h  | 11 +++++------
 include/block/block.h     |  6 +-----
 include/block/block_int.h |  3 ---
 include/block/blockjob.h  | 10 ----------
 job.c                     |  3 +--
 util/aio-wait.c           | 11 ++++++-----
 10 files changed, 20 insertions(+), 60 deletions(-)

diff --git a/block.c b/block.c
index e89b5e3..fbd569c 100644
--- a/block.c
+++ b/block.c
@@ -4847,11 +4847,6 @@ AioContext *bdrv_get_aio_context(BlockDriverState *bs)
     return bs ? bs->aio_context : qemu_get_aio_context();
 }
 
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs)
-{
-    return bs ? &bs->wait : NULL;
-}
-
 void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co)
 {
     aio_co_enter(bdrv_get_aio_context(bs), co);
diff --git a/block/block-backend.c b/block/block-backend.c
index 466bc27..36922d1 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -88,7 +88,6 @@ struct BlockBackend {
      * Accessed with atomic ops.
      */
     unsigned int in_flight;
-    AioWait wait;
 };
 
 typedef struct BlockBackendAIOCB {
@@ -1300,7 +1299,7 @@ static void blk_inc_in_flight(BlockBackend *blk)
 static void blk_dec_in_flight(BlockBackend *blk)
 {
     atomic_dec(&blk->in_flight);
-    aio_wait_kick(&blk->wait);
+    aio_wait_kick();
 }
 
 static void error_callback_bh(void *opaque)
@@ -1609,9 +1608,8 @@ void blk_drain(BlockBackend *blk)
     }
 
     /* We may have -ENOMEDIUM completions in flight */
-    AIO_WAIT_WHILE(&blk->wait,
-            blk_get_aio_context(blk),
-            atomic_mb_read(&blk->in_flight) > 0);
+    AIO_WAIT_WHILE(blk_get_aio_context(blk),
+                   atomic_mb_read(&blk->in_flight) > 0);
 
     if (bs) {
         bdrv_drained_end(bs);
@@ -1630,8 +1628,7 @@ void blk_drain_all(void)
         aio_context_acquire(ctx);
 
         /* We may have -ENOMEDIUM completions in flight */
-        AIO_WAIT_WHILE(&blk->wait, ctx,
-                atomic_mb_read(&blk->in_flight) > 0);
+        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
 
         aio_context_release(ctx);
     }
diff --git a/block/io.c b/block/io.c
index 3313958..7a99f7b 100644
--- a/block/io.c
+++ b/block/io.c
@@ -38,8 +38,6 @@
 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
 
-static AioWait drain_all_aio_wait;
-
 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
     int64_t offset, int bytes, BdrvRequestFlags flags);
 
@@ -555,7 +553,7 @@ void bdrv_drain_all_begin(void)
     }
 
     /* Now poll the in-flight requests */
-    AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
+    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
 
     while ((bs = bdrv_next_all_states(bs))) {
         bdrv_drain_assert_idle(bs);
@@ -709,8 +707,7 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
 
 void bdrv_wakeup(BlockDriverState *bs)
 {
-    aio_wait_kick(bdrv_get_aio_wait(bs));
-    aio_wait_kick(&drain_all_aio_wait);
+    aio_wait_kick();
 }
 
 void bdrv_dec_in_flight(BlockDriverState *bs)
diff --git a/blockjob.c b/blockjob.c
index 617d86f..06f2429 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -221,20 +221,9 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
     return 0;
 }
 
-void block_job_wakeup_all_bdrv(BlockJob *job)
-{
-    GSList *l;
-
-    for (l = job->nodes; l; l = l->next) {
-        BdrvChild *c = l->data;
-        bdrv_wakeup(c->bs);
-    }
-}
-
 static void block_job_on_idle(Notifier *n, void *opaque)
 {
-    BlockJob *job = opaque;
-    block_job_wakeup_all_bdrv(job);
+    aio_wait_kick();
 }
 
 bool block_job_is_internal(BlockJob *job)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 600fad1..46f86f9 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -54,9 +54,10 @@ typedef struct {
     unsigned num_waiters;
 } AioWait;
 
+extern AioWait global_aio_wait;
+
 /**
  * AIO_WAIT_WHILE:
- * @wait: the aio wait object
  * @ctx: the aio context, or NULL if multiple aio contexts (for which the
  *       caller does not hold a lock) are involved in the polling condition.
  * @cond: wait while this conditional expression is true
@@ -72,9 +73,9 @@ typedef struct {
 * wait on conditions between two IOThreads since that could lead to deadlock,
 * go via the main loop instead.
 */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+#define AIO_WAIT_WHILE(ctx, cond) ({                               \
     bool waited_ = false;                                          \
-    AioWait *wait_ = (wait);                                       \
+    AioWait *wait_ = &global_aio_wait;                             \
     AioContext *ctx_ = (ctx);                                      \
     /* Increment wait_->num_waiters before evaluating cond. */     \
     atomic_inc(&wait_->num_waiters);                               \
@@ -102,14 +103,12 @@ typedef struct {
 
 /**
  * aio_wait_kick:
- * @wait: the aio wait object that should re-evaluate its condition
- *
  * Wake up the main thread if it is waiting on AIO_WAIT_WHILE().  During
 * synchronous operations performed in an IOThread, the main thread lets the
 * IOThread's event loop run, waiting for the operation to complete.  A
 * aio_wait_kick() call will wake up the main thread.
 */
-void aio_wait_kick(AioWait *wait);
+void aio_wait_kick(void);
 
 /**
  * aio_wait_bh_oneshot:
diff --git a/include/block/block.h b/include/block/block.h
index 356712c..8e78daf 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -406,13 +406,9 @@ void bdrv_drain_all_begin(void);
 void bdrv_drain_all_end(void);
 void bdrv_drain_all(void);
 
-/* Returns NULL when bs == NULL */
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs);
-
 #define BDRV_POLL_WHILE(bs, cond) ({                       \
     BlockDriverState *bs_ = (bs);                          \
-    AIO_WAIT_WHILE(bdrv_get_aio_wait(bs_),                 \
-                   bdrv_get_aio_context(bs_),              \
+    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                    cond); })
 
 int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index b7806e3..ff923b7 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -782,9 +782,6 @@ struct BlockDriverState {
     unsigned int in_flight;
     unsigned int serialising_in_flight;
 
-    /* Kicked to signal main loop when a request completes. */
-    AioWait wait;
-
     /* counter for nested bdrv_io_plug.
      * Accessed with atomic ops.
     */
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 2290bbb..ede0bd8 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -122,16 +122,6 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
 void block_job_remove_all_bdrv(BlockJob *job);
 
 /**
- * block_job_wakeup_all_bdrv:
- * @job: The block job
- *
- * Calls bdrv_wakeup() for all BlockDriverStates that have been added to the
- * job. This function is to be called whenever child_job_drained_poll() would
- * go from true to false to notify waiting drain requests.
- */
-void block_job_wakeup_all_bdrv(BlockJob *job);
-
-/**
  * block_job_set_speed:
  * @job: The job to set the speed for.
  * @speed: The new value
diff --git a/job.c b/job.c
index 5b53e43..3a7db59 100644
--- a/job.c
+++ b/job.c
@@ -973,7 +973,6 @@ void job_complete(Job *job, Error **errp)
 int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
 {
     Error *local_err = NULL;
-    AioWait dummy_wait = {};
     int ret;
 
     job_ref(job);
@@ -987,7 +986,7 @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
         return -EBUSY;
     }
 
-    AIO_WAIT_WHILE(&dummy_wait, job->aio_context,
+    AIO_WAIT_WHILE(job->aio_context,
                    (job_drain(job), !job_is_completed(job)));
 
     ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
diff --git a/util/aio-wait.c b/util/aio-wait.c
index b8a8f86..b487749 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -26,21 +26,22 @@
 #include "qemu/main-loop.h"
 #include "block/aio-wait.h"
 
+AioWait global_aio_wait;
+
 static void dummy_bh_cb(void *opaque)
 {
     /* The point is to make AIO_WAIT_WHILE()'s aio_poll() return */
 }
 
-void aio_wait_kick(AioWait *wait)
+void aio_wait_kick(void)
 {
     /* The barrier (or an atomic op) is in the caller.  */
-    if (atomic_read(&wait->num_waiters)) {
+    if (atomic_read(&global_aio_wait.num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
 
 typedef struct {
-    AioWait wait;
     bool done;
     QEMUBHFunc *cb;
     void *opaque;
@@ -54,7 +55,7 @@ static void aio_wait_bh(void *opaque)
     data->cb(data->opaque);
 
     data->done = true;
-    aio_wait_kick(&data->wait);
+    aio_wait_kick();
 }
 
 void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
@@ -67,5 +68,5 @@ void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
 
     aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);
-    AIO_WAIT_WHILE(&data.wait, ctx, !data.done);
+    AIO_WAIT_WHILE(ctx, !data.done);
 }
-- 
1.8.3.1