From 78a927e7c583a7556604e55b5d27d4c4c082fb64 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 14 Sep 2018 10:55:37 +0200
Subject: [PATCH 46/49] block: Remove aio_poll() in bdrv_drain_poll variants

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20180914105540.18077-40-kwolf@redhat.com>
Patchwork-id: 82191
O-Subject: [RHV-7.6 qemu-kvm-rhev PATCH 39/42] block: Remove aio_poll() in bdrv_drain_poll variants
Bugzilla: 1601212
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>

bdrv_drain_poll_top_level() was buggy because it didn't release the
AioContext lock of the node to be drained before calling aio_poll().
This way, callbacks called by aio_poll() would possibly take the lock a
second time and run into a deadlock with a nested AIO_WAIT_WHILE() call.

However, it turns out that the aio_poll() call isn't actually needed any
more. It was introduced in commit 91af091f923, which is effectively
reverted by this patch. The cases it was supposed to fix are now covered
by bdrv_drain_poll(), which waits for block jobs to reach a quiescent
state.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 block/io.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/block/io.c b/block/io.c
index 19db35e..3313958 100644
--- a/block/io.c
+++ b/block/io.c
@@ -266,10 +266,6 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                       BdrvChild *ignore_parent)
 {
-    /* Execute pending BHs first and check everything else only after the BHs
-     * have executed. */
-    while (aio_poll(bs->aio_context, false));
-
     return bdrv_drain_poll(bs, recursive, ignore_parent, false);
 }
 
@@ -509,10 +505,6 @@ static bool bdrv_drain_all_poll(void)
     BlockDriverState *bs = NULL;
     bool result = false;
 
-    /* Execute pending BHs first (may modify the graph) and check everything
-     * else only after the BHs have executed. */
-    while (aio_poll(qemu_get_aio_context(), false));
-
     /* bdrv_drain_poll() can't make changes to the graph and we are holding the
      * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
     while ((bs = bdrv_next_all_states(bs))) {
-- 
1.8.3.1