From 67669ce3a2c163c467df63abc90e77bd9a856d34 Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:49 +0100
Subject: [PATCH 05/17] aio-posix: fix concurrent access to poll_disable_cnt

RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-4-famz@redhat.com>
Patchwork-id: 82452
O-Subject: [RHEL8/rhel qemu-kvm PATCH 3/5] aio-posix: fix concurrent access to poll_disable_cnt
Bugzilla: 1632622
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

BZ: 1632622

It is valid for an aio_set_fd_handler to happen concurrently with
aio_poll.  In that case, poll_disable_cnt can change under the heels
of aio_poll, and the assertion on poll_disable_cnt can fail in
run_poll_handlers.

Therefore, this patch simply checks the counter on every polling
iteration.  There are no particular needs for ordering, since the
polling loop is terminated anyway by aio_notify at the end of
aio_set_fd_handler.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-2-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit d7be5dd19c0df7f76e1b42f0c2cbbabefa1974cb)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 util/aio-posix.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index f05d3a8..1d7cc53 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -211,6 +211,7 @@ void aio_set_fd_handler(AioContext *ctx,
     AioHandler *node;
     bool is_new = false;
     bool deleted = false;
+    int poll_disable_change;
 
     qemu_lockcnt_lock(&ctx->list_lock);
 
@@ -244,11 +245,9 @@ void aio_set_fd_handler(AioContext *ctx,
             QLIST_REMOVE(node, node);
             deleted = true;
         }
-
-        if (!node->io_poll) {
-            ctx->poll_disable_cnt--;
-        }
+        poll_disable_change = -!node->io_poll;
     } else {
+        poll_disable_change = !io_poll - (node && !node->io_poll);
         if (node == NULL) {
             /* Alloc and insert if it's not already there */
             node = g_new0(AioHandler, 1);
@@ -257,10 +256,6 @@ void aio_set_fd_handler(AioContext *ctx,
 
             g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
-
-            ctx->poll_disable_cnt += !io_poll;
-        } else {
-            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
         }
 
         /* Update handler with latest information */
@@ -274,6 +269,15 @@ void aio_set_fd_handler(AioContext *ctx,
         node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
     }
 
+    /* No need to order poll_disable_cnt writes against other updates;
+     * the counter is only used to avoid wasting time and latency on
+     * iterated polling when the system call will be ultimately necessary.
+     * Changing handlers is a rare event, and a little wasted polling until
+     * the aio_notify below is not an issue.
+     */
+    atomic_set(&ctx->poll_disable_cnt,
+               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
+
     aio_epoll_update(ctx, node, is_new);
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
@@ -525,7 +529,6 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
 
     assert(ctx->notify_me);
     assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
-    assert(ctx->poll_disable_cnt == 0);
 
     trace_run_poll_handlers_begin(ctx, max_ns);
 
@@ -533,7 +536,8 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
 
     do {
         progress = run_poll_handlers_once(ctx);
-    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
+    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
+             && !atomic_read(&ctx->poll_disable_cnt));
 
     trace_run_poll_handlers_end(ctx, progress);
 
@@ -552,7 +556,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
  */
 static bool try_poll_mode(AioContext *ctx, bool blocking)
 {
-    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
+    if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
         /* See qemu_soonest_timeout() uint64_t hack */
         int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                              (uint64_t)ctx->poll_ns);
-- 
1.8.3.1