From 1a6556bc1317af4669d058e6df70bc1c036d37a5 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 14 Sep 2018 10:55:04 +0200
Subject: [PATCH 13/49] block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20180914105540.18077-7-kwolf@redhat.com>
Patchwork-id: 82158
O-Subject: [RHV-7.6 qemu-kvm-rhev PATCH 06/42] block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()
Bugzilla: 1601212
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>

Commit 91af091f923 added an additional aio_poll() to BDRV_POLL_WHILE()
in order to make sure that all pending BHs are executed on drain. This
was the wrong place to make the fix, as it is useless overhead for all
other users of the macro and unnecessarily complicates the mechanism.

This patch effectively reverts said commit (the context has changed a
bit and the code has moved to AIO_WAIT_WHILE()) and instead polls in the
loop condition for drain.
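
For illustration, a distilled, self-contained model of the scheme (the
names aio_poll_model/drain_poll_model are invented for this sketch and
are not QEMU code; the real hunks follow below): the drain condition
first flushes all ready BHs with non-blocking polls and only then
reports whether requests are still in flight, so the wait loop itself
only ever blocks when there is real work left.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the event-loop state. */
    static int pending_bhs = 3;   /* bottom halves queued on the context */
    static int in_flight   = 2;   /* requests still in flight            */

    /* Model of aio_poll(ctx, blocking): returns true if it made
     * progress; the non-blocking form only dispatches ready BHs. */
    static bool aio_poll_model(bool blocking)
    {
        if (pending_bhs > 0) {
            pending_bhs--;            /* dispatch one bottom half */
            return true;
        }
        if (blocking && in_flight > 0) {
            in_flight--;              /* wait for a request to complete */
            return true;
        }
        return false;
    }

    /* The pattern this patch introduces: flush every ready BH first,
     * then decide whether the caller's loop needs to block at all. */
    static bool drain_poll_model(void)
    {
        while (aio_poll_model(false)) {
            /* keep going until no BH is ready */
        }
        return in_flight > 0;
    }

    int main(void)
    {
        bool waited = false;

        /* Simplified AIO_WAIT_WHILE(): block only while work remains. */
        while (drain_poll_model()) {
            aio_poll_model(true);
            waited = true;
        }
        printf("waited=%d in_flight=%d\n", waited, in_flight);
        return 0;
    }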

The effect is probably hard to measure in any real-world use case
because actual I/O will dominate, but if I run only the initialisation
part of 'qemu-img convert' where it calls bdrv_block_status() for the
whole image to find out how much data there is to copy, this phase
actually needs only roughly half the time after this patch.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 1cc8e54ada97f7ac479554e15ca9e426c895b158)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
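
(Illustrative note, not part of the commit message: with the extra
non-blocking aio_poll() gone from the macro, a user of AIO_WAIT_WHILE()
that needs pending BHs flushed must now do so in its condition, as
bdrv_drain_poll() below does. A typical wait then reads simply:

    /* Hypothetical caller in the home AioContext; wait/ctx are the
     * caller's AioWait and AioContext. */
    AIO_WAIT_WHILE(wait, ctx, atomic_read(&bs->in_flight) > 0);

The home-thread branch of the macro reduces to a plain blocking loop.)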

 block/io.c               | 11 ++++++++++-
 include/block/aio-wait.h | 22 ++++++++--------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/block/io.c b/block/io.c
index e5fc42c..4d332c3 100644
--- a/block/io.c
+++ b/block/io.c
@@ -181,13 +181,22 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
     BDRV_POLL_WHILE(bs, !data.done);
 }
 
+/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
+static bool bdrv_drain_poll(BlockDriverState *bs)
+{
+    /* Execute pending BHs first and check everything else only after the BHs
+     * have executed. */
+    while (aio_poll(bs->aio_context, false));
+    return atomic_read(&bs->in_flight);
+}
+
 static bool bdrv_drain_recurse(BlockDriverState *bs)
 {
     BdrvChild *child, *tmp;
     bool waited;
 
     /* Wait for drained requests to finish */
-    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
+    waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll(bs));
 
     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
         BlockDriverState *bs = child->bs;
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 8c90a2e..783d367 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -73,29 +73,23 @@ typedef struct {
  */
 #define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
     bool waited_ = false;                                          \
-    bool busy_ = true;                                             \
     AioWait *wait_ = (wait);                                       \
     AioContext *ctx_ = (ctx);                                      \
     if (in_aio_context_home_thread(ctx_)) {                        \
-        while ((cond) || busy_) {                                  \
-            busy_ = aio_poll(ctx_, (cond));                        \
-            waited_ |= !!(cond) | busy_;                           \
+        while ((cond)) {                                           \
+            aio_poll(ctx_, true);                                  \
+            waited_ = true;                                        \
         }                                                          \
     } else {                                                       \
         assert(qemu_get_current_aio_context() ==                   \
                qemu_get_aio_context());                            \
         /* Increment wait_->num_waiters before evaluating cond. */ \
         atomic_inc(&wait_->num_waiters);                           \
-        while (busy_) {                                            \
-            if ((cond)) {                                          \
-                waited_ = busy_ = true;                            \
-                aio_context_release(ctx_);                         \
-                aio_poll(qemu_get_aio_context(), true);            \
-                aio_context_acquire(ctx_);                         \
-            } else {                                               \
-                busy_ = aio_poll(ctx_, false);                     \
-                waited_ |= busy_;                                  \
-            }                                                      \
+        while ((cond)) {                                           \
+            aio_context_release(ctx_);                             \
+            aio_poll(qemu_get_aio_context(), true);                \
+            aio_context_acquire(ctx_);                             \
+            waited_ = true;                                        \
         }                                                          \
         atomic_dec(&wait_->num_waiters);                           \
     }                                                              \
-- 
1.8.3.1