From 54d30086f066f1094871c4886f3a6dee51263d76 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 21 Sep 2018 12:46:29 +0200
Subject: [PATCH 2/3] block: Use a single global AioWait

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20180921124630.29036-3-kwolf@redhat.com>
Patchwork-id: 82231
O-Subject: [RHV-7.6 qemu-kvm-rhev PATCH 2/3] block: Use a single global AioWait
Bugzilla: 1618584
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Eric Blake <eblake@redhat.com>

When draining a block node, we recurse to its parent and for subtree
drains also to its children. A single AIO_WAIT_WHILE() is then used to
wait for bdrv_drain_poll() to become false, which depends on all of the
nodes we recursed to. However, if the respective child or parent becomes
quiescent and calls bdrv_wakeup(), only the AioWait of the child/parent
is checked, while AIO_WAIT_WHILE() depends on the AioWait of the
original node.

Fix this by using a single AioWait for all callers of AIO_WAIT_WHILE().

This may mean that the draining thread gets a few more unnecessary
wakeups because an unrelated operation got completed, but we already
wake it up when something _could_ have changed rather than only if it
has certainly changed.

Apart from that, drain is a slow path anyway. In theory it would be
possible to use wakeups more selectively and still correctly, but the
gains are likely not worth the additional complexity. In fact, this
patch is a nice simplification for some places in the code.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
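For illustration, the calling convention after this patch boils down to
the following sketch (the example_* function names are made up for this
note and do not exist in the tree; everything else is taken from the
code below, assuming block/block-backend.c's includes and types):

    /* Completion side: there is no per-object AioWait to pass any more,
     * a completing request simply kicks the one global waiter. */
    static void example_dec_in_flight(BlockBackend *blk)
    {
        atomic_dec(&blk->in_flight);
        aio_wait_kick();
    }

    /* Waiting side: AIO_WAIT_WHILE() now takes only the AioContext and
     * the condition; it implicitly uses global_aio_wait, so a wakeup
     * from any completion path re-evaluates the condition. */
    static void example_drain(BlockBackend *blk)
    {
        AIO_WAIT_WHILE(blk_get_aio_context(blk),
                       atomic_mb_read(&blk->in_flight) > 0);
    }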

 block.c                   |  5 -----
 block/block-backend.c     | 11 ++++-------
 block/io.c                |  7 ++-----
 blockjob.c                | 13 +------------
 include/block/aio-wait.h  | 11 +++++------
 include/block/block.h     |  6 +-----
 include/block/block_int.h |  3 ---
 include/block/blockjob.h  | 10 ----------
 job.c                     |  3 +--
 util/aio-wait.c           | 11 ++++++-----
 10 files changed, 20 insertions(+), 60 deletions(-)

diff --git a/block.c b/block.c
index e89b5e3..fbd569c 100644
--- a/block.c
+++ b/block.c
@@ -4847,11 +4847,6 @@ AioContext *bdrv_get_aio_context(BlockDriverState *bs)
     return bs ? bs->aio_context : qemu_get_aio_context();
 }
 
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs)
-{
-    return bs ? &bs->wait : NULL;
-}
-
 void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co)
 {
     aio_co_enter(bdrv_get_aio_context(bs), co);
diff --git a/block/block-backend.c b/block/block-backend.c
index 466bc27..36922d1 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -88,7 +88,6 @@ struct BlockBackend {
      * Accessed with atomic ops.
      */
     unsigned int in_flight;
-    AioWait wait;
 };
 
 typedef struct BlockBackendAIOCB {
@@ -1300,7 +1299,7 @@ static void blk_inc_in_flight(BlockBackend *blk)
 static void blk_dec_in_flight(BlockBackend *blk)
 {
     atomic_dec(&blk->in_flight);
-    aio_wait_kick(&blk->wait);
+    aio_wait_kick();
 }
 
 static void error_callback_bh(void *opaque)
@@ -1609,9 +1608,8 @@ void blk_drain(BlockBackend *blk)
     }
 
     /* We may have -ENOMEDIUM completions in flight */
-    AIO_WAIT_WHILE(&blk->wait,
-            blk_get_aio_context(blk),
-            atomic_mb_read(&blk->in_flight) > 0);
+    AIO_WAIT_WHILE(blk_get_aio_context(blk),
+                   atomic_mb_read(&blk->in_flight) > 0);
 
     if (bs) {
         bdrv_drained_end(bs);
@@ -1630,8 +1628,7 @@ void blk_drain_all(void)
         aio_context_acquire(ctx);
 
         /* We may have -ENOMEDIUM completions in flight */
-        AIO_WAIT_WHILE(&blk->wait, ctx,
-                atomic_mb_read(&blk->in_flight) > 0);
+        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
 
         aio_context_release(ctx);
     }
diff --git a/block/io.c b/block/io.c
index 3313958..7a99f7b 100644
--- a/block/io.c
+++ b/block/io.c
@@ -38,8 +38,6 @@
 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
 
-static AioWait drain_all_aio_wait;
-
 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
     int64_t offset, int bytes, BdrvRequestFlags flags);
 
@@ -555,7 +553,7 @@ void bdrv_drain_all_begin(void)
     }
 
     /* Now poll the in-flight requests */
-    AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
+    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
 
     while ((bs = bdrv_next_all_states(bs))) {
         bdrv_drain_assert_idle(bs);
@@ -709,8 +707,7 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
 
 void bdrv_wakeup(BlockDriverState *bs)
 {
-    aio_wait_kick(bdrv_get_aio_wait(bs));
-    aio_wait_kick(&drain_all_aio_wait);
+    aio_wait_kick();
 }
 
 void bdrv_dec_in_flight(BlockDriverState *bs)
diff --git a/blockjob.c b/blockjob.c
index 617d86f..06f2429 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -221,20 +221,9 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
     return 0;
 }
 
-void block_job_wakeup_all_bdrv(BlockJob *job)
-{
-    GSList *l;
-
-    for (l = job->nodes; l; l = l->next) {
-        BdrvChild *c = l->data;
-        bdrv_wakeup(c->bs);
-    }
-}
-
 static void block_job_on_idle(Notifier *n, void *opaque)
 {
-    BlockJob *job = opaque;
-    block_job_wakeup_all_bdrv(job);
+    aio_wait_kick();
 }
 
 bool block_job_is_internal(BlockJob *job)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 600fad1..46f86f9 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -54,9 +54,10 @@ typedef struct {
     unsigned num_waiters;
 } AioWait;
 
+extern AioWait global_aio_wait;
+
 /**
  * AIO_WAIT_WHILE:
- * @wait: the aio wait object
  * @ctx: the aio context, or NULL if multiple aio contexts (for which the
  *       caller does not hold a lock) are involved in the polling condition.
  * @cond: wait while this conditional expression is true
@@ -72,9 +73,9 @@ typedef struct {
 * wait on conditions between two IOThreads since that could lead to deadlock,
 * go via the main loop instead.
 */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+#define AIO_WAIT_WHILE(ctx, cond) ({                               \
     bool waited_ = false;                                          \
-    AioWait *wait_ = (wait);                                       \
+    AioWait *wait_ = &global_aio_wait;                             \
     AioContext *ctx_ = (ctx);                                      \
     /* Increment wait_->num_waiters before evaluating cond. */     \
     atomic_inc(&wait_->num_waiters);                               \
@@ -102,14 +103,12 @@ typedef struct {
 
 /**
  * aio_wait_kick:
- * @wait: the aio wait object that should re-evaluate its condition
- *
  * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
 * synchronous operations performed in an IOThread, the main thread lets the
 * IOThread's event loop run, waiting for the operation to complete. A
 * aio_wait_kick() call will wake up the main thread.
 */
-void aio_wait_kick(AioWait *wait);
+void aio_wait_kick(void);
 
 /**
  * aio_wait_bh_oneshot:
diff --git a/include/block/block.h b/include/block/block.h
index 356712c..8e78daf 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -406,13 +406,9 @@ void bdrv_drain_all_begin(void);
 void bdrv_drain_all_end(void);
 void bdrv_drain_all(void);
 
-/* Returns NULL when bs == NULL */
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs);
-
 #define BDRV_POLL_WHILE(bs, cond) ({                       \
     BlockDriverState *bs_ = (bs);                          \
-    AIO_WAIT_WHILE(bdrv_get_aio_wait(bs_),                 \
-                   bdrv_get_aio_context(bs_),              \
+    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                    cond); })
 
 int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index b7806e3..ff923b7 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -782,9 +782,6 @@ struct BlockDriverState {
     unsigned int in_flight;
     unsigned int serialising_in_flight;
 
-    /* Kicked to signal main loop when a request completes. */
-    AioWait wait;
-
     /* counter for nested bdrv_io_plug.
      * Accessed with atomic ops.
      */
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 2290bbb..ede0bd8 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -122,16 +122,6 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
 void block_job_remove_all_bdrv(BlockJob *job);
 
 /**
- * block_job_wakeup_all_bdrv:
- * @job: The block job
- *
- * Calls bdrv_wakeup() for all BlockDriverStates that have been added to the
- * job. This function is to be called whenever child_job_drained_poll() would
- * go from true to false to notify waiting drain requests.
- */
-void block_job_wakeup_all_bdrv(BlockJob *job);
-
-/**
  * block_job_set_speed:
  * @job: The job to set the speed for.
  * @speed: The new value
diff --git a/job.c b/job.c
index 5b53e43..3a7db59 100644
--- a/job.c
+++ b/job.c
@@ -973,7 +973,6 @@ void job_complete(Job *job, Error **errp)
 int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
 {
     Error *local_err = NULL;
-    AioWait dummy_wait = {};
     int ret;
 
     job_ref(job);
@@ -987,7 +986,7 @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
         return -EBUSY;
     }
 
-    AIO_WAIT_WHILE(&dummy_wait, job->aio_context,
+    AIO_WAIT_WHILE(job->aio_context,
                    (job_drain(job), !job_is_completed(job)));
 
     ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
|
|
|
383d26 |
index b8a8f86..b487749 100644
|
|
|
383d26 |
--- a/util/aio-wait.c
|
|
|
383d26 |
+++ b/util/aio-wait.c
|
|
|
383d26 |
@@ -26,21 +26,22 @@
|
|
|
383d26 |
#include "qemu/main-loop.h"
|
|
|
383d26 |
#include "block/aio-wait.h"
|
|
|
383d26 |
|
|
|
383d26 |
+AioWait global_aio_wait;
|
|
|
383d26 |
+
|
|
|
383d26 |
static void dummy_bh_cb(void *opaque)
|
|
|
383d26 |
{
|
|
|
383d26 |
/* The point is to make AIO_WAIT_WHILE()'s aio_poll() return */
|
|
|
383d26 |
}
|
|
|
383d26 |
|
|
|
383d26 |
-void aio_wait_kick(AioWait *wait)
|
|
|
383d26 |
+void aio_wait_kick(void)
|
|
|
383d26 |
{
|
|
|
383d26 |
/* The barrier (or an atomic op) is in the caller. */
|
|
|
383d26 |
- if (atomic_read(&wait->num_waiters)) {
|
|
|
383d26 |
+ if (atomic_read(&global_aio_wait.num_waiters)) {
|
|
|
383d26 |
aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
|
|
|
383d26 |
}
|
|
|
383d26 |
}
|
|
|
383d26 |
|
|
|
383d26 |
typedef struct {
|
|
|
383d26 |
- AioWait wait;
|
|
|
383d26 |
bool done;
|
|
|
383d26 |
QEMUBHFunc *cb;
|
|
|
383d26 |
void *opaque;
|
|
|
383d26 |
@@ -54,7 +55,7 @@ static void aio_wait_bh(void *opaque)
|
|
|
383d26 |
data->cb(data->opaque);
|
|
|
383d26 |
|
|
|
383d26 |
data->done = true;
|
|
|
383d26 |
- aio_wait_kick(&data->wait);
|
|
|
383d26 |
+ aio_wait_kick();
|
|
|
383d26 |
}
|
|
|
383d26 |
|
|
|
383d26 |
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
|
|
|
383d26 |
@@ -67,5 +68,5 @@ void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
|
|
|
383d26 |
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
|
|
383d26 |
|
|
|
383d26 |
aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);
|
|
|
383d26 |
- AIO_WAIT_WHILE(&data.wait, ctx, !data.done);
|
|
|
383d26 |
+ AIO_WAIT_WHILE(ctx, !data.done);
|
|
|
383d26 |
}
|
|
|
383d26 |
--
|
|
|
383d26 |
1.8.3.1
|
|
|
383d26 |
|