From 67669ce3a2c163c467df63abc90e77bd9a856d34 Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:49 +0100
Subject: [PATCH 05/17] aio-posix: fix concurrent access to poll_disable_cnt

RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-4-famz@redhat.com>
Patchwork-id: 82452
O-Subject: [RHEL8/rhel qemu-kvm PATCH 3/5] aio-posix: fix concurrent access to poll_disable_cnt
Bugzilla: 1632622
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

BZ: 1632622

It is valid for an aio_set_fd_handler to happen concurrently with
aio_poll.  In that case, poll_disable_cnt can change out from under
aio_poll, and the assertion on poll_disable_cnt can fail in
run_poll_handlers.

Therefore, this patch simply checks the counter on every polling
iteration.  There is no particular need for ordering, since the
polling loop is terminated anyway by aio_notify at the end of
aio_set_fd_handler.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-2-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit d7be5dd19c0df7f76e1b42f0c2cbbabefa1974cb)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 util/aio-posix.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
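
Note on the termination argument in the commit message: the self-contained
sketch below is not QEMU code; poller, run_poll_handlers_once, poll_disable_cnt
and notified are invented stand-ins, and plain C11 atomics replace QEMU's
atomic_read/atomic_set helpers.  It only illustrates the pattern the patch
relies on: the polling loop re-reads the shared counter on every iteration, so
an update-then-notify sequence on another thread ends the loop without any
stronger memory ordering.

    /*
     * Simplified, standalone illustration of the polling-loop pattern.
     * All names are hypothetical; this is not the QEMU implementation.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    static atomic_int poll_disable_cnt;   /* > 0: some handler cannot poll  */
    static atomic_bool notified;          /* stands in for aio_notify()     */

    static bool run_poll_handlers_once(void)
    {
        return false;                     /* pretend polling makes no progress */
    }

    static void *poller(void *arg)
    {
        (void)arg;
        /* Keep polling only while no handler has disabled polling and no
         * notification has arrived, re-checking both on every iteration. */
        while (!run_poll_handlers_once() &&
               atomic_load(&poll_disable_cnt) == 0 &&
               !atomic_load(&notified)) {
            /* busy-poll */
        }
        printf("poller: falling back to blocking wait\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, poller, NULL);

        usleep(1000);                     /* let the poller spin for a while */

        /* Like adding a handler without an io_poll callback: bump the
         * counter, then notify; no ordering beyond the atomics is needed,
         * because the notify alone already ends the loop. */
        atomic_fetch_add(&poll_disable_cnt, 1);
        atomic_store(&notified, true);

        pthread_join(t, NULL);
        return 0;
    }

Built with cc -pthread, the sketch prints the fallback message shortly after
the main thread updates the counter and notifies.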

diff --git a/util/aio-posix.c b/util/aio-posix.c
index f05d3a8..1d7cc53 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -211,6 +211,7 @@ void aio_set_fd_handler(AioContext *ctx,
     AioHandler *node;
     bool is_new = false;
     bool deleted = false;
+    int poll_disable_change;
 
     qemu_lockcnt_lock(&ctx->list_lock);
 
@@ -244,11 +245,9 @@ void aio_set_fd_handler(AioContext *ctx,
             QLIST_REMOVE(node, node);
             deleted = true;
         }
-
-        if (!node->io_poll) {
-            ctx->poll_disable_cnt--;
-        }
+        poll_disable_change = -!node->io_poll;
     } else {
+        poll_disable_change = !io_poll - (node && !node->io_poll);
         if (node == NULL) {
             /* Alloc and insert if it's not already there */
             node = g_new0(AioHandler, 1);
@@ -257,10 +256,6 @@ void aio_set_fd_handler(AioContext *ctx,
 
             g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
-
-            ctx->poll_disable_cnt += !io_poll;
-        } else {
-            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
         }
 
         /* Update handler with latest information */
@@ -274,6 +269,15 @@ void aio_set_fd_handler(AioContext *ctx,
         node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
     }
 
+    /* No need to order poll_disable_cnt writes against other updates;
+     * the counter is only used to avoid wasting time and latency on
+     * iterated polling when the system call will be ultimately necessary.
+     * Changing handlers is a rare event, and a little wasted polling until
+     * the aio_notify below is not an issue.
+     */
+    atomic_set(&ctx->poll_disable_cnt,
+               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
+
     aio_epoll_update(ctx, node, is_new);
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
@@ -525,7 +529,6 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
 
     assert(ctx->notify_me);
     assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
-    assert(ctx->poll_disable_cnt == 0);
 
     trace_run_poll_handlers_begin(ctx, max_ns);
 
@@ -533,7 +536,8 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
 
     do {
         progress = run_poll_handlers_once(ctx);
-    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
+    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
+             && !atomic_read(&ctx->poll_disable_cnt));
 
     trace_run_poll_handlers_end(ctx, progress);
 
@@ -552,7 +556,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
  */
 static bool try_poll_mode(AioContext *ctx, bool blocking)
 {
-    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
+    if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
         /* See qemu_soonest_timeout() uint64_t hack */
         int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                              (uint64_t)ctx->poll_ns);
-- 
1.8.3.1
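
One line in the diff deserves a worked example: the add/update path folds the
old per-branch counter updates into poll_disable_change = !io_poll - (node &&
!node->io_poll), and the removal path uses -!node->io_poll.  The standalone
program below (invented helper names, the QEMU structures reduced to the two
booleans that matter) is only an illustration of that arithmetic: the delta is
+1 when a handler without an io_poll callback appears, -1 when one disappears,
and 0 otherwise, which matches the open-coded increments and decrements the
patch removes.

    /* Truth table for the poll_disable_change expressions; not QEMU code. */
    #include <stdbool.h>
    #include <stdio.h>

    /* Delta when a handler is added or updated.
     * old_has_io_poll is ignored when node_exists is false. */
    static int change_on_update(bool node_exists, bool old_has_io_poll,
                                bool new_has_io_poll)
    {
        /* Mirrors: !io_poll - (node && !node->io_poll) */
        return !new_has_io_poll - (node_exists && !old_has_io_poll);
    }

    /* Delta when an existing handler is removed. */
    static int change_on_remove(bool old_has_io_poll)
    {
        /* Mirrors: -!node->io_poll */
        return -!old_has_io_poll;
    }

    int main(void)
    {
        printf("update: new node, polling handler      -> %+d\n",
               change_on_update(false, false, true));
        printf("update: new node, non-polling handler  -> %+d\n",
               change_on_update(false, false, false));
        printf("update: polling -> non-polling         -> %+d\n",
               change_on_update(true, true, false));
        printf("update: non-polling -> polling         -> %+d\n",
               change_on_update(true, false, true));
        printf("update: non-polling -> non-polling     -> %+d\n",
               change_on_update(true, false, false));
        printf("remove: non-polling handler            -> %+d\n",
               change_on_remove(false));
        printf("remove: polling handler                -> %+d\n",
               change_on_remove(true));
        return 0;
    }

A non-zero counter is what makes try_poll_mode skip polling entirely and
run_poll_handlers leave its loop early.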