From 3009e49f242ab371ffad35bb29c2c26ddfac75d4 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 18 Nov 2022 18:40:59 +0100
Subject: [PATCH 17/31] block: Remove drained_end_counter

RH-Author: Stefano Garzarella <sgarzare@redhat.com>
RH-MergeRequest: 135: block: Simplify drain to prevent QEMU from crashing during snapshot
RH-Bugzilla: 2155112
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-Acked-by: Hanna Czenczek <hreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
RH-Commit: [5/16] 5589e3f05dece5394a05641f7f42096e8dc62bdb (sgarzarella/qemu-kvm-c-9-s)

drained_end_counter is unused now; nobody changes its value any more. It
can be removed.

Where we previously had two almost identical functions that differed only
in whether the caller passes a drained_end_counter, or whether they poll
a local drained_end_counter down to 0, these now become a single
function.
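
To make the shape of the change easier to see, here is a condensed
before/after sketch of the parent drained_end path, assembled from the
hunks below (QEMU types and macros assumed, surrounding code trimmed):

    /* Before: drained_end callbacks reported scheduled background work
     * through *drained_end_counter, and callers had to poll it to 0. */
    void (*drained_end)(BdrvChild *child, int *drained_end_counter);

    /* After: the counter is gone, so the polling wrapper and the
     * _no_poll variant collapse into a single function. */
    void (*drained_end)(BdrvChild *child);

    void bdrv_parent_drained_end_single(BdrvChild *c)
    {
        IO_OR_GS_CODE();

        assert(c->parent_quiesce_counter > 0);
        c->parent_quiesce_counter--;
        if (c->klass->drained_end) {
            c->klass->drained_end(c);
        }
    }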

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20221118174110.55183-5-kwolf@redhat.com>
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 2f65df6e16dea2d6e7212fa675f4779d9281e26f)
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
---
 block.c                          |  5 +-
 block/block-backend.c            |  4 +-
 block/io.c                       | 98 ++++++++------------------------
 blockjob.c                       |  2 +-
 include/block/block-io.h         | 24 --------
 include/block/block_int-common.h |  6 +-
 6 files changed, 30 insertions(+), 109 deletions(-)

diff --git a/block.c b/block.c
index 16a62a329c..7999fd08c5 100644
--- a/block.c
+++ b/block.c
@@ -1235,11 +1235,10 @@ static bool bdrv_child_cb_drained_poll(BdrvChild *child)
     return bdrv_drain_poll(bs, false, NULL, false);
 }
 
-static void bdrv_child_cb_drained_end(BdrvChild *child,
-                                      int *drained_end_counter)
+static void bdrv_child_cb_drained_end(BdrvChild *child)
 {
     BlockDriverState *bs = child->opaque;
-    bdrv_drained_end_no_poll(bs, drained_end_counter);
+    bdrv_drained_end(bs);
 }
 
 static int bdrv_child_cb_inactivate(BdrvChild *child)
diff --git a/block/block-backend.c b/block/block-backend.c
index d98a96ff37..feaf2181fa 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -129,7 +129,7 @@ static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
 }
 static void blk_root_drained_begin(BdrvChild *child);
 static bool blk_root_drained_poll(BdrvChild *child);
-static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);
+static void blk_root_drained_end(BdrvChild *child);
 
 static void blk_root_change_media(BdrvChild *child, bool load);
 static void blk_root_resize(BdrvChild *child);
@@ -2556,7 +2556,7 @@ static bool blk_root_drained_poll(BdrvChild *child)
     return busy || !!blk->in_flight;
 }
 
-static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
+static void blk_root_drained_end(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
     assert(blk->quiesce_counter);
diff --git a/block/io.c b/block/io.c
index c2ed4b2af9..f4ca62b034 100644
--- a/block/io.c
+++ b/block/io.c
@@ -58,28 +58,19 @@ static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
     }
 }
 
-static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
-                                                   int *drained_end_counter)
+void bdrv_parent_drained_end_single(BdrvChild *c)
 {
+    IO_OR_GS_CODE();
+
     assert(c->parent_quiesce_counter > 0);
     c->parent_quiesce_counter--;
     if (c->klass->drained_end) {
-        c->klass->drained_end(c, drained_end_counter);
+        c->klass->drained_end(c);
     }
 }
 
-void bdrv_parent_drained_end_single(BdrvChild *c)
-{
-    int drained_end_counter = 0;
-    AioContext *ctx = bdrv_child_get_parent_aio_context(c);
-    IO_OR_GS_CODE();
-    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
-    AIO_WAIT_WHILE(ctx, qatomic_read(&drained_end_counter) > 0);
-}
-
 static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
-                                    bool ignore_bds_parents,
-                                    int *drained_end_counter)
+                                    bool ignore_bds_parents)
 {
     BdrvChild *c;
 
@@ -87,7 +78,7 @@ static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
         if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
             continue;
         }
-        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
+        bdrv_parent_drained_end_single(c);
     }
 }
 
@@ -249,12 +240,10 @@ typedef struct {
     bool poll;
     BdrvChild *parent;
     bool ignore_bds_parents;
-    int *drained_end_counter;
 } BdrvCoDrainData;
 
 /* Recursively call BlockDriver.bdrv_drain_begin/end callbacks */
-static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
-                              int *drained_end_counter)
+static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
 {
     if (!bs->drv || (begin && !bs->drv->bdrv_drain_begin) ||
             (!begin && !bs->drv->bdrv_drain_end)) {
@@ -305,8 +294,7 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                   BdrvChild *parent, bool ignore_bds_parents,
                                   bool poll);
 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
-                                BdrvChild *parent, bool ignore_bds_parents,
-                                int *drained_end_counter);
+                                BdrvChild *parent, bool ignore_bds_parents);
 
 static void bdrv_co_drain_bh_cb(void *opaque)
 {
@@ -319,14 +307,12 @@ static void bdrv_co_drain_bh_cb(void *opaque)
         aio_context_acquire(ctx);
         bdrv_dec_in_flight(bs);
         if (data->begin) {
-            assert(!data->drained_end_counter);
             bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                   data->ignore_bds_parents, data->poll);
         } else {
             assert(!data->poll);
             bdrv_do_drained_end(bs, data->recursive, data->parent,
-                                data->ignore_bds_parents,
-                                data->drained_end_counter);
+                                data->ignore_bds_parents);
         }
         aio_context_release(ctx);
     } else {
@@ -342,8 +328,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                 bool begin, bool recursive,
                                                 BdrvChild *parent,
                                                 bool ignore_bds_parents,
-                                                bool poll,
-                                                int *drained_end_counter)
+                                                bool poll)
 {
     BdrvCoDrainData data;
     Coroutine *self = qemu_coroutine_self();
@@ -363,7 +348,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
         .parent = parent,
         .ignore_bds_parents = ignore_bds_parents,
         .poll = poll,
-        .drained_end_counter = drained_end_counter,
     };
 
     if (bs) {
@@ -406,7 +390,7 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
     }
 
     bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
-    bdrv_drain_invoke(bs, true, NULL);
+    bdrv_drain_invoke(bs, true);
 }
 
 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
@@ -417,7 +401,7 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
 
     if (qemu_in_coroutine()) {
         bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
-                               poll, NULL);
+                               poll);
         return;
     }
 
@@ -461,38 +445,24 @@ void bdrv_subtree_drained_begin(BlockDriverState *bs)
 
 /**
  * This function does not poll, nor must any of its recursively called
- * functions.  The *drained_end_counter pointee will be incremented
- * once for every background operation scheduled, and decremented once
- * the operation settles.  Therefore, the pointer must remain valid
- * until the pointee reaches 0.  That implies that whoever sets up the
- * pointee has to poll until it is 0.
- *
- * We use atomic operations to access *drained_end_counter, because
- * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
- *     @bs may contain nodes in different AioContexts,
- * (2) bdrv_drain_all_end() uses the same counter for all nodes,
- *     regardless of which AioContext they are in.
+ * functions.
  */
 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
-                                BdrvChild *parent, bool ignore_bds_parents,
-                                int *drained_end_counter)
+                                BdrvChild *parent, bool ignore_bds_parents)
 {
     BdrvChild *child;
     int old_quiesce_counter;
 
-    assert(drained_end_counter != NULL);
-
     if (qemu_in_coroutine()) {
         bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
-                               false, drained_end_counter);
+                               false);
         return;
     }
     assert(bs->quiesce_counter > 0);
 
     /* Re-enable things in child-to-parent order */
-    bdrv_drain_invoke(bs, false, drained_end_counter);
-    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
-                            drained_end_counter);
+    bdrv_drain_invoke(bs, false);
+    bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
 
     old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
     if (old_quiesce_counter == 1) {
@@ -503,32 +473,21 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
         assert(!ignore_bds_parents);
         bs->recursive_quiesce_counter--;
         QLIST_FOREACH(child, &bs->children, next) {
-            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
-                                drained_end_counter);
+            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents);
         }
     }
 }
 
 void bdrv_drained_end(BlockDriverState *bs)
 {
-    int drained_end_counter = 0;
     IO_OR_GS_CODE();
-    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
-    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
-}
-
-void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
-{
-    IO_CODE();
-    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
+    bdrv_do_drained_end(bs, false, NULL, false);
 }
 
 void bdrv_subtree_drained_end(BlockDriverState *bs)
 {
-    int drained_end_counter = 0;
     IO_OR_GS_CODE();
-    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
-    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
+    bdrv_do_drained_end(bs, true, NULL, false);
 }
 
 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
@@ -543,16 +502,12 @@ void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
 
 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
 {
-    int drained_end_counter = 0;
     int i;
     IO_OR_GS_CODE();
 
     for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
-        bdrv_do_drained_end(child->bs, true, child, false,
-                            &drained_end_counter);
+        bdrv_do_drained_end(child->bs, true, child, false);
     }
-
-    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
 }
 
 void bdrv_drain(BlockDriverState *bs)
@@ -610,7 +565,7 @@ void bdrv_drain_all_begin(void)
     GLOBAL_STATE_CODE();
 
     if (qemu_in_coroutine()) {
-        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
+        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
         return;
     }
 
@@ -649,22 +604,19 @@ void bdrv_drain_all_begin(void)
 
 void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
 {
-    int drained_end_counter = 0;
     GLOBAL_STATE_CODE();
 
     g_assert(bs->quiesce_counter > 0);
     g_assert(!bs->refcnt);
 
     while (bs->quiesce_counter) {
-        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
+        bdrv_do_drained_end(bs, false, NULL, true);
     }
-    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
 }
 
 void bdrv_drain_all_end(void)
 {
     BlockDriverState *bs = NULL;
-    int drained_end_counter = 0;
     GLOBAL_STATE_CODE();
 
     /*
@@ -680,13 +632,11 @@ void bdrv_drain_all_end(void)
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
         aio_context_acquire(aio_context);
-        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
+        bdrv_do_drained_end(bs, false, NULL, true);
         aio_context_release(aio_context);
     }
 
     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
-    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);
-
     assert(bdrv_drain_all_count > 0);
     bdrv_drain_all_count--;
 }
diff --git a/blockjob.c b/blockjob.c
index f51d4e18f3..0ab721e139 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -120,7 +120,7 @@ static bool child_job_drained_poll(BdrvChild *c)
     }
 }
 
-static void child_job_drained_end(BdrvChild *c, int *drained_end_counter)
+static void child_job_drained_end(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
     job_resume(&job->job);
diff --git a/include/block/block-io.h b/include/block/block-io.h
index b099d7db45..054e964c9b 100644
--- a/include/block/block-io.h
+++ b/include/block/block-io.h
@@ -237,21 +237,6 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                     int64_t bytes, BdrvRequestFlags read_flags,
                                     BdrvRequestFlags write_flags);
 
-/**
- * bdrv_drained_end_no_poll:
- *
- * Same as bdrv_drained_end(), but do not poll for the subgraph to
- * actually become unquiesced.  Therefore, no graph changes will occur
- * with this function.
- *
- * *drained_end_counter is incremented for every background operation
- * that is scheduled, and will be decremented for every operation once
- * it settles.  The caller must poll until it reaches 0.  The counter
- * should be accessed using atomic operations only.
- */
-void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
-
-
 /*
  * "I/O or GS" API functions. These functions can run without
  * the BQL, but only in one specific iothread/main loop.
@@ -311,9 +296,6 @@ void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);
 * bdrv_parent_drained_end_single:
 *
 * End a quiesced section for the parent of @c.
- *
- * This polls @bs's AioContext until all scheduled sub-drained_ends
- * have settled, which may result in graph changes.
 */
 void bdrv_parent_drained_end_single(BdrvChild *c);
 
@@ -361,12 +343,6 @@ void bdrv_subtree_drained_begin(BlockDriverState *bs);
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
- *
- * This polls @bs's AioContext until all scheduled sub-drained_ends
- * have settled.  On one hand, that may result in graph changes.  On
- * the other, this requires that the caller either runs in the main
- * loop; or that all involved nodes (@bs and all of its parents) are
- * in the caller's AioContext.
 */
 void bdrv_drained_end(BlockDriverState *bs);
 
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index 40d646d1ed..2b97576f6d 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -939,15 +939,11 @@ struct BdrvChildClass {
     * These functions must not change the graph (and therefore also must not
     * call aio_poll(), which could change the graph indirectly).
     *
-     * If drained_end() schedules background operations, it must atomically
-     * increment *drained_end_counter for each such operation and atomically
-     * decrement it once the operation has settled.
-     *
     * Note that this can be nested. If drained_begin() was called twice, new
     * I/O is allowed only after drained_end() was called twice, too.
     */
    void (*drained_begin)(BdrvChild *child);
-    void (*drained_end)(BdrvChild *child, int *drained_end_counter);
+    void (*drained_end)(BdrvChild *child);
 
    /*
     * Returns whether the parent has pending requests for the child. This
-- 
2.31.1