From b474155fdc38f86f516c14ba9a6f934616d589ef Mon Sep 17 00:00:00 2001
From: Andrew Jones <drjones@redhat.com>
Date: Wed, 4 Aug 2021 03:27:22 -0400
Subject: [PATCH 1/2] aio-wait: delegate polling of main AioContext if BQL not
 held

RH-Author: Andrew Jones <drjones@redhat.com>
Message-id: <20210729134448.4995-2-drjones@redhat.com>
Patchwork-id: 101935
O-Subject: [RHEL-8.5.0 qemu-kvm PATCH v2 1/2] aio-wait: delegate polling of main AioContext if BQL not held
Bugzilla: 1969848
RH-Acked-by: Gavin Shan <gshan@redhat.com>
RH-Acked-by: Auger Eric <eric.auger@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

Any thread that is not an iothread returns NULL for
qemu_get_current_aio_context().  As a result,
in_aio_context_home_thread(qemu_get_aio_context()) would also return
true in such a thread, causing AIO_WAIT_WHILE to invoke aio_poll()
directly.  This is incorrect if the BQL is not held, because
aio_poll() does not expect to run concurrently from multiple threads,
which can actually happen when savevm writes to the vmstate file from
the migration thread.

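For illustration, a condensed sketch of the decision AIO_WAIT_WHILE
makes (waiter accounting, memory barriers and AioContext locking are
omitted; this is not the literal macro from aio-wait.h):

    /* Condensed sketch of AIO_WAIT_WHILE(ctx, cond), details elided. */
    if (ctx && in_aio_context_home_thread(ctx)) {
        /* The caller may drive ctx's event loop itself. */
        while (cond) {
            aio_poll(ctx, true);
        }
    } else {
        /* Otherwise the caller waits in the main loop's context while
         * ctx's home thread makes progress on the condition. */
        while (cond) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

With the old in_aio_context_home_thread(), a thread without the BQL
could still take the first branch for the main AioContext and end up
running aio_poll() concurrently with the main loop.
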
Therefore, restrict in_aio_context_home_thread to return true
for the main AioContext only if the BQL is held.

The function is moved to aio-wait.h because it is mostly used
there and to avoid a circular reference between main-loop.h
and block/aio.h.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200407140746.8041-5-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 3c18a92dc4b55ca8cc37a755ed119f11c0f34099)
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 include/block/aio-wait.h | 22 ++++++++++++++++++++++
 include/block/aio.h      | 29 ++++++++++-------------------
 2 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index afeeb18f95..716d2639df 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -26,6 +26,7 @@
 #define QEMU_AIO_WAIT_H
 
 #include "block/aio.h"
+#include "qemu/main-loop.h"
 
 /**
  * AioWait:
@@ -124,4 +125,25 @@ void aio_wait_kick(void);
  */
 void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
 
+/**
+ * in_aio_context_home_thread:
+ * @ctx: the aio context
+ *
+ * Return whether we are running in the thread that normally runs @ctx.  Note
+ * that acquiring/releasing ctx does not affect the outcome, each AioContext
+ * still only has one home thread that is responsible for running it.
+ */
+static inline bool in_aio_context_home_thread(AioContext *ctx)
+{
+    if (ctx == qemu_get_current_aio_context()) {
+        return true;
+    }
+
+    if (ctx == qemu_get_aio_context()) {
+        return qemu_mutex_iothread_locked();
+    } else {
+        return false;
+    }
+}
+
 #endif /* QEMU_AIO_WAIT_H */
diff --git a/include/block/aio.h b/include/block/aio.h
index 6b0d52f732..9d28e247df 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -60,12 +60,16 @@ struct AioContext {
     QLIST_HEAD(, AioHandler) aio_handlers;
 
     /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
-     * accessed with atomic primitives.  If this field is 0, everything
-     * (file descriptors, bottom halves, timers) will be re-evaluated
-     * before the next blocking poll(), thus the event_notifier_set call
-     * can be skipped.  If it is non-zero, you may need to wake up a
-     * concurrent aio_poll or the glib main event loop, making
-     * event_notifier_set necessary.
+     * only written from the AioContext home thread, or under the BQL in
+     * the case of the main AioContext.  However, it is read from any
+     * thread so it is still accessed with atomic primitives.
+     *
+     * If this field is 0, everything (file descriptors, bottom halves,
+     * timers) will be re-evaluated before the next blocking poll() or
+     * io_uring wait; therefore, the event_notifier_set call can be
+     * skipped.  If it is non-zero, you may need to wake up a concurrent
+     * aio_poll or the glib main event loop, making event_notifier_set
+     * necessary.
      *
      * Bit 0 is reserved for GSource usage of the AioContext, and is 1
      * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
@@ -580,19 +584,6 @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
  */
 AioContext *qemu_get_current_aio_context(void);
 
-/**
- * in_aio_context_home_thread:
- * @ctx: the aio context
- *
- * Return whether we are running in the thread that normally runs @ctx.  Note
- * that acquiring/releasing ctx does not affect the outcome, each AioContext
- * still only has one home thread that is responsible for running it.
- */
-static inline bool in_aio_context_home_thread(AioContext *ctx)
-{
-    return ctx == qemu_get_current_aio_context();
-}
-
 /**
  * aio_context_setup:
  * @ctx: the aio context
-- 
2.27.0