Partial backport:

Skipped elide.h since rw-locks are not backported.

commit ca6e601a9d4a72b3699cca15bad12ac1716bf49a
Author: Torvald Riegel <triegel@redhat.com>
Date:   Wed Nov 30 17:53:11 2016 +0100

    Use C11-like atomics instead of plain memory accesses in x86 lock elision.

    This uses atomic operations to access lock elision metadata that is
    accessed concurrently (i.e., the adapt_count fields).  The data is smaller
    than a word, but it is accessed only with atomic loads and stores;
    therefore, we add support for shorter-sized atomic loads and stores too.

        * include/atomic.h (__atomic_check_size_ls): New.
        (atomic_load_relaxed, atomic_load_acquire, atomic_store_relaxed,
        atomic_store_release): Use it.
        * sysdeps/x86/elide.h (ACCESS_ONCE): Remove.
        (elision_adapt, ELIDE_LOCK): Use atomics.
        * sysdeps/unix/sysv/linux/x86/elision-lock.c (__lll_lock_elision): Use
        atomics and improve code comments.
        * sysdeps/unix/sysv/linux/x86/elision-trylock.c
        (__lll_trylock_elision): Likewise.

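(The following note and sketch are editorial illustration, not part of the
patch.  The commit message's "C11-like atomics" map to the GCC __atomic
builtins, as the atomic.h hunk below shows; this standalone program, using
illustrative names, demonstrates what a relaxed load and store of a
short-sized field such as adapt_count boil down to.)

    /* sketch-relaxed.c -- gcc sketch-relaxed.c && ./a.out */
    #include <assert.h>

    int
    main (void)
    {
      short adapt_count = 3;

      /* What atomic_load_relaxed (&adapt_count) expands to.  */
      short v = __atomic_load_n (&adapt_count, __ATOMIC_RELAXED);

      /* What atomic_store_relaxed (&adapt_count, v - 1) expands to.  Note
         that a load followed by a store is not an atomic read-modify-write;
         a concurrent decrement can be lost, which the elision code below
         explicitly tolerates.  */
      __atomic_store_n (&adapt_count, v - 1, __ATOMIC_RELAXED);

      assert (adapt_count == 2);
      return 0;
    }
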
Index: glibc-2.17-c758a686/include/atomic.h
===================================================================
--- glibc-2.17-c758a686.orig/include/atomic.h
+++ glibc-2.17-c758a686/include/atomic.h
@@ -567,6 +567,20 @@ void __atomic_link_error (void);
    if (sizeof (*mem) != 4) \
      __atomic_link_error ();
 # endif
+/* We additionally provide 8b and 16b atomic loads and stores; we do not yet
+   need other atomic operations of such sizes, and restricting the support to
+   loads and stores makes this easier for archs that do not have native
+   support for atomic operations to less-than-word-sized data.  */
+# if __HAVE_64B_ATOMICS == 1
+#  define __atomic_check_size_ls(mem) \
+   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
+       && (sizeof (*mem) != 8)) \
+     __atomic_link_error ();
+# else
+#  define __atomic_check_size_ls(mem) \
+   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4) \
+     __atomic_link_error ();
+# endif
 
 # define atomic_thread_fence_acquire() \
   __atomic_thread_fence (__ATOMIC_ACQUIRE)
@@ -576,18 +590,20 @@ void __atomic_link_error (void);
   __atomic_thread_fence (__ATOMIC_SEQ_CST)
 
 # define atomic_load_relaxed(mem) \
-  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
+  ({ __atomic_check_size_ls((mem)); \
+     __atomic_load_n ((mem), __ATOMIC_RELAXED); })
 # define atomic_load_acquire(mem) \
-  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
+  ({ __atomic_check_size_ls((mem)); \
+     __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
 
 # define atomic_store_relaxed(mem, val) \
   do { \
-    __atomic_check_size((mem)); \
+    __atomic_check_size_ls((mem)); \
     __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
   } while (0)
 # define atomic_store_release(mem, val) \
   do { \
-    __atomic_check_size((mem)); \
+    __atomic_check_size_ls((mem)); \
     __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
   } while (0)
 
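(Editorial illustration, not part of the patch: how a check in the style of
__atomic_check_size_ls rejects unsupported sizes at link time.  The my_*
names below are illustrative stand-ins, not glibc API.  Because sizeof is a
compile-time constant, the compiler discards the constant-false branch for
supported sizes, so the deliberately undefined function is never referenced;
for an unsupported size the call would survive and linking would fail.
Compile with optimization so the dead branch is actually removed.)

    /* sketch-checksize.c -- gcc -O sketch-checksize.c && ./a.out */
    extern void my_link_error (void);   /* deliberately never defined */

    #define my_check_size_ls(mem)                                    \
      if ((sizeof (*(mem)) != 1) && (sizeof (*(mem)) != 2)           \
          && (sizeof (*(mem)) != 4) && (sizeof (*(mem)) != 8))       \
        my_link_error ();

    #define my_load_relaxed(mem)                                     \
      ({ my_check_size_ls (mem);                                     \
         __atomic_load_n ((mem), __ATOMIC_RELAXED); })

    int
    main (void)
    {
      short s = 2;                /* sizeof (short) == 2: check passes */
      return my_load_relaxed (&s) - 2;
    }
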
Index: glibc-2.17-c758a686/nptl/sysdeps/unix/sysv/linux/x86/elision-lock.c
===================================================================
--- glibc-2.17-c758a686.orig/nptl/sysdeps/unix/sysv/linux/x86/elision-lock.c
+++ glibc-2.17-c758a686/nptl/sysdeps/unix/sysv/linux/x86/elision-lock.c
@@ -44,7 +44,13 @@
 int
 __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
 {
-  if (*adapt_count <= 0)
+  /* adapt_count can be accessed concurrently; these accesses can be both
+     inside of transactions (if critical sections are nested and the outer
+     critical section uses lock elision) and outside of transactions.  Thus,
+     we need to use atomic accesses to avoid data races.  However, the
+     value of adapt_count is just a hint, so relaxed MO accesses are
+     sufficient.  */
+  if (atomic_load_relaxed (adapt_count) <= 0)
     {
       unsigned status;
       int try_xbegin;
@@ -70,15 +76,20 @@ __lll_lock_elision (int *futex, short *a
               && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
             {
               /* Right now we skip here.  Better would be to wait a bit
-                 and retry.  This likely needs some spinning.  */
-              if (*adapt_count != aconf.skip_lock_busy)
-                *adapt_count = aconf.skip_lock_busy;
+                 and retry.  This likely needs some spinning.  See
+                 above for why relaxed MO is sufficient.  */
+              if (atomic_load_relaxed (adapt_count)
+                  != aconf.skip_lock_busy)
+                atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
             }
           /* Internal abort.  There is no chance for retry.
              Use the normal locking and next time use lock.
-             Be careful to avoid writing to the lock.  */
-          else if (*adapt_count != aconf.skip_lock_internal_abort)
-            *adapt_count = aconf.skip_lock_internal_abort;
+             Be careful to avoid writing to the lock.  See above for why
+             relaxed MO is sufficient.  */
+          else if (atomic_load_relaxed (adapt_count)
+                   != aconf.skip_lock_internal_abort)
+            atomic_store_relaxed (adapt_count,
+                                  aconf.skip_lock_internal_abort);
           break;
         }
     }
@@ -87,7 +98,8 @@ __lll_lock_elision (int *futex, short *a
     {
       /* Use a normal lock until the threshold counter runs out.
          Lost updates possible.  */
-      (*adapt_count)--;
+      atomic_store_relaxed (adapt_count,
+                            atomic_load_relaxed (adapt_count) - 1);
     }
 
   /* Use a normal lock here.  */
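(Editorial illustration, not part of the patch: the control flow that the
elision-lock.c hunks above implement is easier to see without diff markers.
This compressed, runnable sketch stubs out the RTM transaction; try_elide,
skip_lock_busy, and main are illustrative stand-ins, not glibc code.)

    /* sketch-adapt.c -- gcc sketch-adapt.c && ./a.out */
    #include <stdio.h>

    static short adapt_count = 0;        /* shared hint; relaxed MO only */
    static const short skip_lock_busy = 3;

    static int
    try_elide (void)                     /* stand-in for the _xbegin path */
    {
      return 0;                          /* pretend the transaction aborts */
    }

    static void
    lock_elision_sketch (void)
    {
      if (__atomic_load_n (&adapt_count, __ATOMIC_RELAXED) <= 0)
        {
          if (try_elide ())
            return;                      /* elided: the lock is never written */
          /* Abort: skip elision for a while, as the patch does with
             aconf.skip_lock_busy.  Load and store stay relaxed because
             adapt_count is only a hint.  */
          if (__atomic_load_n (&adapt_count, __ATOMIC_RELAXED) != skip_lock_busy)
            __atomic_store_n (&adapt_count, skip_lock_busy, __ATOMIC_RELAXED);
        }
      else
        /* Not an atomic decrement; lost updates are tolerated.  */
        __atomic_store_n (&adapt_count,
                          __atomic_load_n (&adapt_count, __ATOMIC_RELAXED) - 1,
                          __ATOMIC_RELAXED);
      /* ...fall back to the normal lock here.  */
    }

    int
    main (void)
    {
      for (int i = 0; i < 5; i++)
        {
          lock_elision_sketch ();
          printf ("adapt_count = %d\n", adapt_count);
        }
      return 0;                          /* prints 3 2 1 0 3 */
    }
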
Index: glibc-2.17-c758a686/nptl/sysdeps/unix/sysv/linux/x86/elision-trylock.c
===================================================================
--- glibc-2.17-c758a686.orig/nptl/sysdeps/unix/sysv/linux/x86/elision-trylock.c
+++ glibc-2.17-c758a686/nptl/sysdeps/unix/sysv/linux/x86/elision-trylock.c
@@ -36,8 +36,10 @@ __lll_trylock_elision (int *futex, short
          return an error.  */
       _xabort (_ABORT_NESTED_TRYLOCK);
 
-  /* Only try a transaction if it's worth it.  */
-  if (*adapt_count <= 0)
+  /* Only try a transaction if it's worth it.  See __lll_lock_elision for
+     why we need atomic accesses.  Relaxed MO is sufficient because this is
+     just a hint.  */
+  if (atomic_load_relaxed (adapt_count) <= 0)
     {
       unsigned status;
 
@@ -55,16 +57,18 @@ __lll_trylock_elision (int *futex, short
       if (!(status & _XABORT_RETRY))
         {
           /* Internal abort.  No chance for retry.  For future
-             locks don't try speculation for some time.  */
-          if (*adapt_count != aconf.skip_trylock_internal_abort)
-            *adapt_count = aconf.skip_trylock_internal_abort;
+             locks don't try speculation for some time.  See above for MO.  */
+          if (atomic_load_relaxed (adapt_count)
+              != aconf.skip_lock_internal_abort)
+            atomic_store_relaxed (adapt_count, aconf.skip_lock_internal_abort);
         }
       /* Could do some retries here.  */
     }
   else
     {
-      /* Lost updates are possible, but harmless.  */
-      (*adapt_count)--;
+      /* Lost updates are possible but harmless (see above).  */
+      atomic_store_relaxed (adapt_count,
+                            atomic_load_relaxed (adapt_count) - 1);
     }
 
   return lll_trylock (*futex);
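(Editorial note on the decrement pattern used in both files above, with an
illustrative fragment that is not part of the patch.)  The patch replaces
(*adapt_count)-- with a relaxed load followed by a relaxed store rather than
with an atomic read-modify-write.  Both forms are data-race-free in the C11
sense, but only the read-modify-write never loses concurrent updates; since
adapt_count is just a heuristic, the patch's comments state that lost
updates are harmless, so the cheaper load/store pair is enough:

    /* Fragment for comparison; adapt_count here is illustrative.  */
    short adapt_count;

    void
    decrement_hint (void)               /* what the patch does */
    {
      __atomic_store_n (&adapt_count,
                        __atomic_load_n (&adapt_count, __ATOMIC_RELAXED) - 1,
                        __ATOMIC_RELAXED);
    }

    void
    decrement_exact (void)              /* the stricter alternative */
    {
      /* Compiles to a locked read-modify-write on x86; never loses a
         concurrent decrement, but is more expensive.  */
      __atomic_fetch_sub (&adapt_count, 1, __ATOMIC_RELAXED);
    }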