|
|
9832fd |
commit 823624bdc47f1f80109c9c52dee7939b9386d708
|
|
|
9832fd |
Author: Stefan Liebler <stli@linux.ibm.com>
|
|
|
9832fd |
Date: Thu Feb 7 15:18:36 2019 +0100
|
|
|
9832fd |
|
|
|
9832fd |
Add compiler barriers around modifications of the robust mutex list for pthread_mutex_trylock. [BZ #24180]
|
|
|
9832fd |
|
|
|
9832fd |
While debugging a kernel warning, Thomas Gleixner, Sebastian Sewior and
|
|
|
9832fd |
Heiko Carstens found a bug in pthread_mutex_trylock due to misordered
|
|
|
9832fd |
instructions:
|
|
|
9832fd |
140: a5 1b 00 01 oill %r1,1
|
|
|
9832fd |
144: e5 48 a0 f0 00 00 mvghi 240(%r10),0 <--- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
14a: e3 10 a0 e0 00 24 stg %r1,224(%r10) <--- last THREAD_SETMEM of ENQUEUE_MUTEX_PI
|
|
|
9832fd |
|
|
|
9832fd |
vs (with compiler barriers):
|
|
|
9832fd |
140: a5 1b 00 01 oill %r1,1
|
|
|
9832fd |
144: e3 10 a0 e0 00 24 stg %r1,224(%r10)
|
|
|
9832fd |
14a: e5 48 a0 f0 00 00 mvghi 240(%r10),0
|
|
|
9832fd |
|
|
|
9832fd |
Please have a look at the discussion:
|
|
|
9832fd |
"Re: WARN_ON_ONCE(!new_owner) within wake_futex_pi() triggerede"
|
|
|
9832fd |
(https://lore.kernel.org/lkml/20190202112006.GB3381@osiris/)
|
|
|
9832fd |
|
|
|
9832fd |
This patch is introducing the same compiler barriers and comments
|
|
|
9832fd |
for pthread_mutex_trylock as introduced for pthread_mutex_lock and
|
|
|
9832fd |
pthread_mutex_timedlock by commit 8f9450a0b7a9e78267e8ae1ab1000ebca08e473e
|
|
|
9832fd |
"Add compiler barriers around modifications of the robust mutex list."
|
|
|
9832fd |
|
|
|
9832fd |
ChangeLog:
|
|
|
9832fd |
|
|
|
9832fd |
[BZ #24180]
|
|
|
9832fd |
* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock):
	Add compiler barriers and comments.
|
|
|
9832fd |
|
|
|
9832fd |
|
|
|
9832fd |
diff -Nrup a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
|
|
|
9832fd |
--- a/nptl/pthread_mutex_trylock.c 2019-07-26 16:43:11.028271897 -0400
|
|
|
9832fd |
+++ b/nptl/pthread_mutex_trylock.c 2019-07-26 17:06:48.708748979 -0400
|
|
|
9832fd |
@@ -95,6 +95,9 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
9832fd |
&mutex->__data.__list.__next);
|
|
|
9832fd |
+ /* We need to set op_pending before starting the operation. Also
|
|
|
9832fd |
+ see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
|
|
|
9832fd |
oldval = mutex->__data.__lock;
|
|
|
9832fd |
do
|
|
|
9832fd |
@@ -120,7 +123,12 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
/* But it is inconsistent unless marked otherwise. */
|
|
|
9832fd |
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
|
|
|
9832fd |
|
|
|
9832fd |
+ /* We must not enqueue the mutex before we have acquired it.
|
|
|
9832fd |
+ Also see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
ENQUEUE_MUTEX (mutex);
|
|
|
9832fd |
+ /* We need to clear op_pending after we enqueue the mutex. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
/* Note that we deliberately exist here. If we fall
|
|
|
9832fd |
@@ -136,6 +144,8 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
int kind = PTHREAD_MUTEX_TYPE (mutex);
|
|
|
9832fd |
if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We do not need to ensure ordering wrt another memory
|
|
|
9832fd |
+ access. Also see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
9832fd |
NULL);
|
|
|
9832fd |
return EDEADLK;
|
|
|
9832fd |
@@ -143,6 +153,8 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
|
|
|
9832fd |
if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We do not need to ensure ordering wrt another memory
|
|
|
9832fd |
+ access. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
9832fd |
NULL);
|
|
|
9832fd |
|
|
|
9832fd |
@@ -160,6 +172,10 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
oldval = lll_robust_trylock (mutex->__data.__lock, id);
|
|
|
9832fd |
if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We haven't acquired the lock as it is already acquired by
|
|
|
9832fd |
+ another owner. We do not need to ensure ordering wrt another
|
|
|
9832fd |
+ memory access. */
|
|
|
9832fd |
+
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
return EBUSY;
|
|
|
9832fd |
@@ -173,13 +189,20 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
if (oldval == id)
|
|
|
9832fd |
lll_unlock (mutex->__data.__lock,
|
|
|
9832fd |
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
|
|
|
9832fd |
+ /* FIXME This violates the mutex destruction requirements. See
|
|
|
9832fd |
+ __pthread_mutex_unlock_full. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
return ENOTRECOVERABLE;
|
|
|
9832fd |
}
|
|
|
9832fd |
}
|
|
|
9832fd |
while ((oldval & FUTEX_OWNER_DIED) != 0);
|
|
|
9832fd |
|
|
|
9832fd |
+ /* We must not enqueue the mutex before we have acquired it.
|
|
|
9832fd |
+ Also see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
ENQUEUE_MUTEX (mutex);
|
|
|
9832fd |
+ /* We need to clear op_pending after we enqueue the mutex. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
mutex->__data.__owner = id;
|
|
|
9832fd |
@@ -201,10 +224,15 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
|
|
|
9832fd |
|
|
|
9832fd |
if (robust)
|
|
|
9832fd |
- /* Note: robust PI futexes are signaled by setting bit 0. */
|
|
|
9832fd |
- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
9832fd |
- (void *) (((uintptr_t) &mutex->__data.__list.__next)
|
|
|
9832fd |
- | 1));
|
|
|
9832fd |
+ {
|
|
|
9832fd |
+ /* Note: robust PI futexes are signaled by setting bit 0. */
|
|
|
9832fd |
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
9832fd |
+ (void *) (((uintptr_t) &mutex->__data.__list.__next)
|
|
|
9832fd |
+ | 1));
|
|
|
9832fd |
+ /* We need to set op_pending before starting the operation. Also
|
|
|
9832fd |
+ see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
+ }
|
|
|
9832fd |
|
|
|
9832fd |
oldval = mutex->__data.__lock;
|
|
|
9832fd |
|
|
|
9832fd |
@@ -213,12 +241,16 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
{
|
|
|
9832fd |
if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We do not need to ensure ordering wrt another memory
|
|
|
9832fd |
+ access. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
return EDEADLK;
|
|
|
9832fd |
}
|
|
|
9832fd |
|
|
|
9832fd |
if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We do not need to ensure ordering wrt another memory
|
|
|
9832fd |
+ access. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
/* Just bump the counter. */
|
|
|
9832fd |
@@ -240,6 +272,9 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
{
|
|
|
9832fd |
if ((oldval & FUTEX_OWNER_DIED) == 0)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We haven't acquired the lock as it is already acquired by
|
|
|
9832fd |
+ another owner. We do not need to ensure ordering wrt another
|
|
|
9832fd |
+ memory access. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
return EBUSY;
|
|
|
9832fd |
@@ -260,6 +295,9 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
if (INTERNAL_SYSCALL_ERROR_P (e, __err)
|
|
|
9832fd |
&& INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* The kernel has not yet finished the mutex owner death.
|
|
|
9832fd |
+ We do not need to ensure ordering wrt another memory
|
|
|
9832fd |
+ access. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
return EBUSY;
|
|
|
9832fd |
@@ -277,7 +315,12 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
/* But it is inconsistent unless marked otherwise. */
|
|
|
9832fd |
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
|
|
|
9832fd |
|
|
|
9832fd |
+ /* We must not enqueue the mutex before we have acquired it.
|
|
|
9832fd |
+ Also see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
ENQUEUE_MUTEX (mutex);
|
|
|
9832fd |
+ /* We need to clear op_pending after we enqueue the mutex. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
|
|
|
9832fd |
/* Note that we deliberately exit here. If we fall
|
|
|
9832fd |
@@ -300,13 +343,20 @@ __pthread_mutex_trylock (pthread_mutex_t
|
|
|
9832fd |
PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
|
|
|
9832fd |
0, 0);
|
|
|
9832fd |
|
|
|
9832fd |
+ /* To the kernel, this will be visible after the kernel has
|
|
|
9832fd |
+ acquired the mutex in the syscall. */
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
return ENOTRECOVERABLE;
|
|
|
9832fd |
}
|
|
|
9832fd |
|
|
|
9832fd |
if (robust)
|
|
|
9832fd |
{
|
|
|
9832fd |
+ /* We must not enqueue the mutex before we have acquired it.
|
|
|
9832fd |
+ Also see comments at ENQUEUE_MUTEX. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
ENQUEUE_MUTEX_PI (mutex);
|
|
|
9832fd |
+ /* We need to clear op_pending after we enqueue the mutex. */
|
|
|
9832fd |
+ __asm ("" ::: "memory");
|
|
|
9832fd |
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
9832fd |
}
|
|
|
9832fd |
|