From 1fdc864f9ac927f3ea407f35f6771a4b2e8f509f Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Thu, 9 Mar 2023 08:24:36 -0500
Subject: [PATCH 04/12] qatomic: add smp_mb__before/after_rmw()

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 158: qatomic: add smp_mb__before/after_rmw()
RH-Bugzilla: 2175660
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Eric Auger <eric.auger@redhat.com>
RH-Commit: [1/9] e8d0b64670bff778d275b1fb477dcee0c109251a (eesposit/qemu-kvm)

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2175660

commit ff00bed1897c3d27adc5b0cec6f6eeb5a7d13176
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   Thu Mar 2 11:10:56 2023 +0100

    qatomic: add smp_mb__before/after_rmw()

    On ARM, seqcst loads and stores (which QEMU does not use) are compiled
    respectively as LDAR and STLR instructions.  Even though LDAR is
    also used for load-acquire operations, it also waits for all STLRs to
    leave the store buffer.  Thus, LDAR and STLR alone are load-acquire
    and store-release operations, but LDAR also provides store-against-load
    ordering as long as the previous store is a STLR.

    Compare this to ARMv7, where store-release is DMB+STR and load-acquire
    is LDR+DMB, but an additional DMB is needed between store-seqcst and
    load-seqcst (e.g. DMB+STR+DMB+LDR+DMB); or with x86, where MOV provides
    load-acquire and store-release semantics and the two can be reordered.

    Likewise, on ARM sequentially consistent read-modify-write operations only
    need to use LDAXR and STLXR respectively for the load and the store, while
    on x86 they need to use the stronger LOCK prefix.

    In a strange twist of events, however, the _stronger_ semantics
    of the ARM instructions can end up causing bugs on ARM, not on x86.
    The problems occur when seqcst atomics are mixed with relaxed atomics.

    QEMU's atomics try to bridge the Linux API (that most of the developers
    are familiar with) and the C11 API, and the two have a substantial
    difference:

    - in Linux, strongly-ordered atomics such as atomic_add_return() affect
      the global ordering of _all_ memory operations, including for example
      READ_ONCE()/WRITE_ONCE()

    - in C11, sequentially consistent atomics (except for seq-cst fences)
      only affect the ordering of sequentially consistent operations.
      In particular, since relaxed loads are done with LDR on ARM, they are
      not ordered against seqcst stores (which are done with STLR).

    QEMU implements high-level synchronization primitives with the idea that
    the primitives contain the necessary memory barriers, and the callers can
    use relaxed atomics (qatomic_read/qatomic_set) or even regular accesses.
    This is very much incompatible with the C11 view that seqcst accesses
    are only ordered against other seqcst accesses, and requires using seqcst
    fences as in the following example:

       qatomic_set(&y, 1);            qatomic_set(&x, 1);
       smp_mb();                      smp_mb();
       ... qatomic_read(&x) ...       ... qatomic_read(&y) ...

    When a qatomic_*() read-modify-write operation is used instead of one
    or both stores, developers that are more familiar with the Linux API may
    be tempted to omit the smp_mb(), which will work on x86 but not on ARM.
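
    For instance, a minimal sketch of the same two threads with the first
    store replaced by a read-modify-write operation and the barrier made
    explicit through the new macro (same x and y as above; illustration
    only, not code taken from QEMU):

       qatomic_fetch_inc(&y);         qatomic_set(&x, 1);
       smp_mb__after_rmw();           smp_mb();
       ... qatomic_read(&x) ...       ... qatomic_read(&y) ...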

    This nasty difference between Linux and C11 read-modify-write operations
    has already caused issues in util/async.c and more are being found.
    Provide something similar to Linux smp_mb__before/after_atomic(); this
    has the double function of documenting clearly why there is a memory
    barrier, and avoiding a double barrier on x86 and s390x systems.

    The new macro can already be put to use in qatomic_mb_set().
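
    As a usage sketch with a hypothetical caller (invented names, for
    illustration), the new macros bracket a read-modify-write operation
    much like Linux's smp_mb__before/after_atomic():

       qatomic_set(&s->request, 1);      /* relaxed store            */
       smp_mb__before_rmw();             /* order it before the RMW  */
       qatomic_fetch_inc(&s->pending);   /* seqcst read-modify-write */
       smp_mb__after_rmw();              /* order the RMW before ... */
       val = qatomic_read(&s->flag);     /* ... this relaxed load    */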

    Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
    Reviewed-by: David Hildenbrand <david@redhat.com>
    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 docs/devel/atomics.rst | 26 +++++++++++++++++++++-----
 include/qemu/atomic.h  | 17 ++++++++++++++++-
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst
index 52baa0736d..10fbfc58bb 100644
--- a/docs/devel/atomics.rst
+++ b/docs/devel/atomics.rst
@@ -25,7 +25,8 @@ provides macros that fall in three camps:
 
 - weak atomic access and manual memory barriers: ``qatomic_read()``,
   ``qatomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``,
-  ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``;
+  ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``,
+  ``smp_mb__before_rmw()``, ``smp_mb__after_rmw()``;
 
 - sequentially consistent atomic access: everything else.
 
@@ -470,7 +471,7 @@ and memory barriers, and the equivalents in QEMU:
   sequential consistency.
 
 - in QEMU, ``qatomic_read()`` and ``qatomic_set()`` do not participate in
-  the total ordering enforced by sequentially-consistent operations.
+  the ordering enforced by read-modify-write operations.
   This is because QEMU uses the C11 memory model.  The following example
   is correct in Linux but not in QEMU:
 
@@ -486,9 +487,24 @@ and memory barriers, and the equivalents in QEMU:
   because the read of ``y`` can be moved (by either the processor or the
   compiler) before the write of ``x``.
 
-  Fixing this requires an ``smp_mb()`` memory barrier between the write
-  of ``x`` and the read of ``y``.  In the common case where only one thread
-  writes ``x``, it is also possible to write it like this:
+  Fixing this requires a full memory barrier between the write of ``x`` and
+  the read of ``y``.  QEMU provides ``smp_mb__before_rmw()`` and
+  ``smp_mb__after_rmw()``; they act both as an optimization,
+  avoiding the memory barrier on processors where it is unnecessary,
+  and as a clarification of this corner case of the C11 memory model:
+
+      +--------------------------------+
+      | QEMU (correct)                 |
+      +================================+
+      | ::                             |
+      |                                |
+      |   a = qatomic_fetch_add(&x, 2);|
+      |   smp_mb__after_rmw();         |
+      |   b = qatomic_read(&y);        |
+      +--------------------------------+
+
+  In the common case where only one thread writes ``x``, it is also possible
+  to write it like this:
 
       +--------------------------------+
       | QEMU (correct)                 |
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 874134fd19..f85834ee8b 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -245,6 +245,20 @@
 #define smp_wmb()   smp_mb_release()
 #define smp_rmb()   smp_mb_acquire()
 
+/*
+ * SEQ_CST is weaker than the older __sync_* builtins and Linux
+ * kernel read-modify-write atomics.  Provide a macro to obtain
+ * the same semantics.
+ */
+#if !defined(QEMU_SANITIZE_THREAD) && \
+    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+# define smp_mb__before_rmw() signal_barrier()
+# define smp_mb__after_rmw() signal_barrier()
+#else
+# define smp_mb__before_rmw() smp_mb()
+# define smp_mb__after_rmw() smp_mb()
+#endif
+
 /* qatomic_mb_read/set semantics map Java volatile variables. They are
  * less expensive on some platforms (notably POWER) than fully
  * sequentially consistent operations.
@@ -259,7 +273,8 @@
 #if !defined(QEMU_SANITIZE_THREAD) && \
     (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
 /* This is more efficient than a store plus a fence. */
-# define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
+# define qatomic_mb_set(ptr, i) \
+    ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
 #else
 # define qatomic_mb_set(ptr, i) \
     ({ qatomic_store_release(ptr, i); smp_mb(); })
-- 
2.39.1