From ea3856bb545d19499602830cdc3076d83a981e7a Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Thu, 9 Mar 2023 08:15:36 -0500
Subject: [PATCH 09/13] async: update documentation of the memory barriers

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 263: qatomic: add smp_mb__before/after_rmw()
RH-Bugzilla: 2168472
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Eric Auger <eric.auger@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Commit: [9/10] d471da2acf7a107cf75f3327c5e8d7456307160e

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2168472

commit 8dd48650b43dfde4ebea34191ac267e474bcc29e
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   Mon Mar 6 10:15:06 2023 +0100

    async: update documentation of the memory barriers

    Ever since commit 8c6b0356b539 ("util/async: make bh_aio_poll() O(1)",
    2020-02-22), synchronization between qemu_bh_schedule() and aio_bh_poll()
    is happening when the bottom half is enqueued in the bh_list; not
    when the flags are set.  Update the documentation to match.

    Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 util/async.c | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/util/async.c b/util/async.c
index 6f6717a34b..795fe699b6 100644
--- a/util/async.c
+++ b/util/async.c
@@ -71,14 +71,21 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
     unsigned old_flags;
 
     /*
-     * The memory barrier implicit in qatomic_fetch_or makes sure that:
-     * 1. idle & any writes needed by the callback are done before the
-     *    locations are read in the aio_bh_poll.
-     * 2. ctx is loaded before the callback has a chance to execute and bh
-     *    could be freed.
+     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
+     * insertion starts after BH_PENDING is set.
      */
     old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
+
     if (!(old_flags & BH_PENDING)) {
+        /*
+         * At this point the bottom half becomes visible to aio_bh_poll().
+         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
+         * aio_bh_poll(), ensuring that:
+         * 1. any writes needed by the callback are visible from the callback
+         *    after aio_bh_dequeue() returns bh.
+         * 2. ctx is loaded before the callback has a chance to execute and bh
+         *    could be freed.
+         */
         QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
     }
 
@@ -97,11 +104,8 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
     QSLIST_REMOVE_HEAD(head, next);
 
     /*
-     * The qatomic_and is paired with aio_bh_enqueue().  The implicit memory
-     * barrier ensures that the callback sees all writes done by the scheduling
-     * thread.  It also ensures that the scheduling thread sees the cleared
-     * flag before bh->cb has run, and thus will call aio_notify again if
-     * necessary.
+     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
+     * the removal finishes before BH_PENDING is reset.
      */
     *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
@@ -148,6 +152,7 @@ int aio_bh_poll(AioContext *ctx)
     BHListSlice *s;
     int ret = 0;
 
+    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue().  */
     QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
     QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
 
@@ -437,15 +442,15 @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
 void aio_notify(AioContext *ctx)
 {
     /*
-     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
-     * aio_notify_accept.
+     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
+     * smp_mb() in aio_notify_accept().
      */
     smp_wmb();
     qatomic_set(&ctx->notified, true);
 
     /*
-     * Write ctx->notified before reading ctx->notify_me.  Pairs
-     * with smp_mb in aio_ctx_prepare or aio_poll.
+     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
+     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
     if (qatomic_read(&ctx->notify_me)) {
-- 
2.37.3
