From 1a6556bc1317af4669d058e6df70bc1c036d37a5 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 14 Sep 2018 10:55:04 +0200
Subject: [PATCH 13/49] block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20180914105540.18077-7-kwolf@redhat.com>
Patchwork-id: 82158
O-Subject: [RHV-7.6 qemu-kvm-rhev PATCH 06/42] block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()
Bugzilla: 1601212
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>

Commit 91af091f923 added an additional aio_poll() to BDRV_POLL_WHILE()
in order to make sure that all pending BHs are executed on drain. This
was the wrong place to make the fix, as it is useless overhead for all
other users of the macro and unnecessarily complicates the mechanism.

This patch effectively reverts said commit (the context has changed a
bit and the code has moved to AIO_WAIT_WHILE()) and instead polls in the
loop condition for drain.
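
To illustrate the resulting shape, here is a minimal, self-contained C
sketch of the pattern (a toy model only: ToyCtx, toy_poll() and
toy_drain_poll() are invented stand-ins, not QEMU APIs). Pending BHs are
flushed with non-blocking polls inside the loop condition, and the wait
loop itself only blocks while the condition still holds:

    /* Toy model of the drain pattern; not QEMU code. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        int pending_bhs;   /* deferred callbacks ("bottom halves") */
        int in_flight;     /* requests still in progress */
    } ToyCtx;

    /* Non-blocking poll: run one pending BH and report whether any
     * progress was made (stands in for aio_poll(ctx, false)). */
    static bool toy_poll(ToyCtx *ctx)
    {
        if (ctx->pending_bhs == 0) {
            return false;
        }
        ctx->pending_bhs--;
        if (ctx->in_flight > 0) {
            ctx->in_flight--;           /* a BH may complete a request */
        }
        return true;
    }

    /* Loop condition for drain (stands in for bdrv_drain_poll()):
     * flush all pending BHs first, then report whether in-flight
     * requests still force us to wait. */
    static bool toy_drain_poll(ToyCtx *ctx)
    {
        while (toy_poll(ctx)) {
            /* keep flushing BHs */
        }
        return ctx->in_flight > 0;
    }

    int main(void)
    {
        ToyCtx ctx = { .pending_bhs = 2, .in_flight = 3 };

        /* The wait loop stays trivial, like the slimmed-down
         * AIO_WAIT_WHILE(): it only blocks while the condition holds. */
        while (toy_drain_poll(&ctx)) {
            ctx.in_flight--;    /* pretend a blocking poll completed one */
        }

        printf("drained: in_flight=%d, pending_bhs=%d\n",
               ctx.in_flight, ctx.pending_bhs);
        return 0;
    }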

The effect is probably hard to measure in any real-world use case
because actual I/O will dominate, but if I run only the initialisation
part of 'qemu-img convert', where it calls bdrv_block_status() for the
whole image to find out how much data there is to copy, this phase
actually needs only roughly half the time after this patch.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 1cc8e54ada97f7ac479554e15ca9e426c895b158)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 block/io.c               | 11 ++++++++++-
 include/block/aio-wait.h | 22 ++++++++--------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/block/io.c b/block/io.c
index e5fc42c..4d332c3 100644
--- a/block/io.c
+++ b/block/io.c
@@ -181,13 +181,22 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
     BDRV_POLL_WHILE(bs, !data.done);
 }
 
+/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
+static bool bdrv_drain_poll(BlockDriverState *bs)
+{
+    /* Execute pending BHs first and check everything else only after the BHs
+     * have executed. */
+    while (aio_poll(bs->aio_context, false));
+    return atomic_read(&bs->in_flight);
+}
+
 static bool bdrv_drain_recurse(BlockDriverState *bs)
 {
     BdrvChild *child, *tmp;
     bool waited;
 
     /* Wait for drained requests to finish */
-    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
+    waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll(bs));
 
     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
         BlockDriverState *bs = child->bs;
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 8c90a2e..783d367 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -73,29 +73,23 @@ typedef struct {
  */
 #define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
     bool waited_ = false;                                          \
-    bool busy_ = true;                                             \
     AioWait *wait_ = (wait);                                       \
     AioContext *ctx_ = (ctx);                                      \
     if (in_aio_context_home_thread(ctx_)) {                        \
-        while ((cond) || busy_) {                                  \
-            busy_ = aio_poll(ctx_, (cond));                        \
-            waited_ |= !!(cond) | busy_;                           \
+        while ((cond)) {                                           \
+            aio_poll(ctx_, true);                                  \
+            waited_ = true;                                        \
         }                                                          \
     } else {                                                       \
         assert(qemu_get_current_aio_context() ==                   \
                qemu_get_aio_context());                            \
         /* Increment wait_->num_waiters before evaluating cond. */ \
         atomic_inc(&wait_->num_waiters);                           \
-        while (busy_) {                                            \
-            if ((cond)) {                                          \
-                waited_ = busy_ = true;                            \
-                aio_context_release(ctx_);                         \
-                aio_poll(qemu_get_aio_context(), true);            \
-                aio_context_acquire(ctx_);                         \
-            } else {                                               \
-                busy_ = aio_poll(ctx_, false);                     \
-                waited_ |= busy_;                                  \
-            }                                                      \
+        while ((cond)) {                                           \
+            aio_context_release(ctx_);                             \
+            aio_poll(qemu_get_aio_context(), true);                \
+            aio_context_acquire(ctx_);                             \
+            waited_ = true;                                        \
         }                                                          \
         atomic_dec(&wait_->num_waiters);                           \
     }                                                              \
-- 
1.8.3.1