commit 8f9450a0b7a9e78267e8ae1ab1000ebca08e473e
Author: Torvald Riegel <triegel@redhat.com>
Date:   Sat Dec 24 00:40:46 2016 +0100

    Add compiler barriers around modifications of the robust mutex list.

    Any changes to the per-thread list of currently acquired robust mutexes,
    as well as to the pending-operations entry, are not simply sequential
    code: they are effectively concurrent with whatever actions the kernel
    takes when it cleans up after a crash.  This is not quite multi-thread
    concurrency; it is closer to signal-handler concurrency.
    This patch fixes latent bugs by adding compiler barriers where necessary
    so that the kernel's crash handling is guaranteed to see consistent data.

    This is meant to be easy to backport, so we do not use C11-style signal
    fences yet.

    	* nptl/descr.h (ENQUEUE_MUTEX_BOTH, DEQUEUE_MUTEX): Add compiler
    	barriers and comments.
    	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full): Likewise.
    	* nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock): Likewise.
    	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
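A note on the idiom: __asm ("" ::: "memory") is the GCC compiler barrier
used throughout this patch.  It emits no instructions; it only forbids the
compiler from reordering memory accesses across it.  As the commit message
notes, this is signal-handler-style concurrency: the kernel reads the list
only after the thread has stopped, so compiler reordering is the hazard.
The stand-alone sketch below illustrates the publication problem the
barriers solve; struct node and list_head are hypothetical stand-ins for
the robust-list structures, not glibc internals.

struct node { struct node *next; };

/* A list head that is inspected asynchronously, e.g. by the kernel
   during robust-futex cleanup or by a signal handler.  */
static struct node *list_head;

static void
enqueue (struct node *n)
{
  /* Fully initialize the new entry first...  */
  n->next = list_head;
  /* ...and keep the compiler from sinking that store below the
     publication; otherwise an asynchronous observer could see a
     reachable but uninitialized node.  The C11 spelling, which the
     commit message defers for backporting reasons, would be
     atomic_signal_fence (memory_order_seq_cst).  */
  __asm ("" ::: "memory");
  list_head = n;	/* publish */
}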
Index: glibc-2.17-c758a686/nptl/descr.h
===================================================================
--- glibc-2.17-c758a686.orig/nptl/descr.h
+++ glibc-2.17-c758a686/nptl/descr.h
@@ -180,7 +180,16 @@ struct pthread
      but the pointer to the next/previous element of the list points
      in the middle of the object, the __next element.  Whenever
      casting to __pthread_list_t we need to adjust the pointer
-     first.  */
+     first.
+     These operations are effectively concurrent code in that the thread
+     can get killed at any point in time and the kernel takes over.  Thus,
+     the __next elements are a kind of concurrent list and we need to
+     enforce using compiler barriers that the individual operations happen
+     in such a way that the kernel always sees a consistent list.  The
+     backward links (i.e., the __prev elements) are not used by the kernel.
+     FIXME We should use relaxed MO atomic operations here and signal fences
+     because this kind of concurrency is similar to synchronizing with a
+     signal handler.  */
 # define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
 
 # define ENQUEUE_MUTEX_BOTH(mutex, val)					      \
@@ -192,6 +201,8 @@ struct pthread
     mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF,		      \
 						 robust_head.list);	      \
     mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head;	      \
+    /* Ensure that the new list entry is ready before we insert it.  */	      \
+    __asm ("" ::: "memory");						      \
     THREAD_SETMEM (THREAD_SELF, robust_head.list,			      \
 		   (void *) (((uintptr_t) &mutex->__data.__list.__next)	      \
 			     | val));					      \
@@ -206,6 +217,9 @@ struct pthread
       ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul)	      \
        - QUEUE_PTR_ADJUST);						      \
     prev->__next = mutex->__data.__list.__next;				      \
+    /* Ensure that we remove the entry from the list before we change the     \
+       __next pointer of the entry, which is read by the kernel.  */	      \
+    __asm ("" ::: "memory");						      \
     mutex->__data.__list.__prev = NULL;					      \
     mutex->__data.__list.__next = NULL;					      \
   } while (0)
@@ -220,6 +234,8 @@ struct pthread
   do {									      \
     mutex->__data.__list.__next						      \
       = THREAD_GETMEM (THREAD_SELF, robust_list.__next);		      \
+    /* Ensure that the new list entry is ready before we insert it.  */	      \
+    __asm ("" ::: "memory");						      \
     THREAD_SETMEM (THREAD_SELF, robust_list.__next,			      \
 		   (void *) (((uintptr_t) &mutex->__data.__list) | val));     \
   } while (0)
@@ -240,6 +256,9 @@ struct pthread
 	  }								      \
 									      \
 	runp->__next = next->__next;					      \
+	/* Ensure that we remove the entry from the list before we change the \
+	   __next pointer of the entry, which is read by the kernel.  */      \
+	__asm ("" ::: "memory");					      \
 	mutex->__data.__list.__next = NULL;				      \
       }									      \
   } while (0)
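The DEQUEUE side of the macros above needs the mirror-image ordering: an
entry must be unlinked before its own __next pointer is cleared, because
the kernel follows __next while walking the list.  Continuing the
hypothetical sketch from above:

static void
dequeue (struct node *prev, struct node *n)
{
  /* Unlink first; the list stays well formed for a concurrent walk.  */
  prev->next = n->next;
  /* Keep the compiler from clearing n->next before the unlink above;
     a kernel walk still passing through n would otherwise terminate
     early and miss every entry behind it.  */
  __asm ("" ::: "memory");
  n->next = NULL;
}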
Index: glibc-2.17-c758a686/nptl/pthread_mutex_lock.c
===================================================================
--- glibc-2.17-c758a686.orig/nptl/pthread_mutex_lock.c
+++ glibc-2.17-c758a686/nptl/pthread_mutex_lock.c
@@ -181,6 +181,9 @@ __pthread_mutex_lock_full (pthread_mutex
     case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 		     &mutex->__data.__list.__next);
+      /* We need to set op_pending before starting the operation.  Also
+	 see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
 
       oldval = mutex->__data.__lock;
       /* This is set to FUTEX_WAITERS iff we might have shared the
@@ -228,7 +231,12 @@ __pthread_mutex_lock_full (pthread_mutex
 	      /* But it is inconsistent unless marked otherwise.  */
 	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+	      /* We must not enqueue the mutex before we have acquired it.
+		 Also see comments at ENQUEUE_MUTEX.  */
+	      __asm ("" ::: "memory");
 	      ENQUEUE_MUTEX (mutex);
+	      /* We need to clear op_pending after we enqueue the mutex.  */
+	      __asm ("" ::: "memory");
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      /* Note that we deliberately exit here.  If we fall
@@ -250,6 +258,8 @@ __pthread_mutex_lock_full (pthread_mutex
 	      int kind = PTHREAD_MUTEX_TYPE (mutex);
 	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 		{
+		  /* We do not need to ensure ordering wrt another memory
+		     access.  Also see comments at ENQUEUE_MUTEX.  */
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 		  return EDEADLK;
@@ -257,6 +267,8 @@ __pthread_mutex_lock_full (pthread_mutex
 
 	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 		{
+		  /* We do not need to ensure ordering wrt another memory
+		     access.  */
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 
@@ -309,12 +321,19 @@ __pthread_mutex_lock_full (pthread_mutex
 	  mutex->__data.__count = 0;
 	  int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
 	  lll_unlock (mutex->__data.__lock, private);
+	  /* FIXME This violates the mutex destruction requirements.  See
+	     __pthread_mutex_unlock_full.  */
 	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	  return ENOTRECOVERABLE;
 	}
 
       mutex->__data.__count = 1;
+      /* We must not enqueue the mutex before we have acquired it.
+	 Also see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
       ENQUEUE_MUTEX (mutex);
+      /* We need to clear op_pending after we enqueue the mutex.  */
+      __asm ("" ::: "memory");
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
@@ -331,10 +350,15 @@ __pthread_mutex_lock_full (pthread_mutex
 	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
 
 	if (robust)
-	  /* Note: robust PI futexes are signaled by setting bit 0.  */
-	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
-			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
-				   | 1));
+	  {
+	    /* Note: robust PI futexes are signaled by setting bit 0.  */
+	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
+				     | 1));
+	    /* We need to set op_pending before starting the operation.  Also
+	       see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
+	  }
 
 	oldval = mutex->__data.__lock;
 
@@ -343,12 +367,16 @@ __pthread_mutex_lock_full (pthread_mutex
 	  {
 	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 	      {
+		/* We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 		return EDEADLK;
 	      }
 
 	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 	      {
+		/* We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 		/* Just bump the counter.  */
@@ -411,7 +439,12 @@ __pthread_mutex_lock_full (pthread_mutex
 	    /* But it is inconsistent unless marked otherwise.  */
 	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+	    /* We must not enqueue the mutex before we have acquired it.
+	       Also see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
 	    ENQUEUE_MUTEX_PI (mutex);
+	    /* We need to clear op_pending after we enqueue the mutex.  */
+	    __asm ("" ::: "memory");
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	    /* Note that we deliberately exit here.  If we fall
@@ -439,6 +472,8 @@ __pthread_mutex_lock_full (pthread_mutex
 						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 			      0, 0);
 
+	    /* To the kernel, this will be visible after the kernel has
+	       acquired the mutex in the syscall.  */
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	    return ENOTRECOVERABLE;
 	  }
@@ -446,7 +481,12 @@ __pthread_mutex_lock_full (pthread_mutex
 	mutex->__data.__count = 1;
 	if (robust)
 	  {
+	    /* We must not enqueue the mutex before we have acquired it.
+	       Also see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
 	    ENQUEUE_MUTEX_PI (mutex);
+	    /* We need to clear op_pending after we enqueue the mutex.  */
+	    __asm ("" ::: "memory");
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	  }
       }
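Taken together, the barriers added above pin down one publication protocol
per lock operation: announce the operation in list_op_pending, acquire the
lock, enqueue, then retract the announcement, with a compiler barrier
between each pair of steps.  The condensation below is hypothetical; the
types and names are illustrative, not glibc's internals:

struct rmutex { volatile int lock; struct rmutex *next; };
struct thread { void *list_op_pending; struct rmutex *robust_list; };

#define barrier() __asm ("" ::: "memory")

static void
robust_lock (struct thread *self, struct rmutex *m)
{
  /* Announce which mutex a crash could leave half-processed.  */
  self->list_op_pending = &m->next;
  barrier ();			/* announce before acquiring */

  while (__sync_lock_test_and_set (&m->lock, 1))	/* acquire */
    ;
  barrier ();			/* never enqueue before acquiring */

  m->next = self->robust_list;	/* ENQUEUE_MUTEX, simplified */
  barrier ();			/* link fully before publishing */
  self->robust_list = m;

  barrier ();			/* enqueue before retracting */
  self->list_op_pending = NULL;
}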
Index: glibc-2.17-c758a686/nptl/pthread_mutex_timedlock.c
===================================================================
--- glibc-2.17-c758a686.orig/nptl/pthread_mutex_timedlock.c
+++ glibc-2.17-c758a686/nptl/pthread_mutex_timedlock.c
@@ -140,6 +140,9 @@ pthread_mutex_timedlock (pthread_mutex_t
     case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 		     &mutex->__data.__list.__next);
+      /* We need to set op_pending before starting the operation.  Also
+	 see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
 
       oldval = mutex->__data.__lock;
       /* This is set to FUTEX_WAITERS iff we might have shared the
@@ -177,7 +180,12 @@ pthread_mutex_timedlock (pthread_mutex_t
 	      /* But it is inconsistent unless marked otherwise.  */
 	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+	      /* We must not enqueue the mutex before we have acquired it.
+		 Also see comments at ENQUEUE_MUTEX.  */
+	      __asm ("" ::: "memory");
 	      ENQUEUE_MUTEX (mutex);
+	      /* We need to clear op_pending after we enqueue the mutex.  */
+	      __asm ("" ::: "memory");
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      /* Note that we deliberately exit here.  If we fall
@@ -193,6 +201,8 @@ pthread_mutex_timedlock (pthread_mutex_t
 	      int kind = PTHREAD_MUTEX_TYPE (mutex);
 	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 		{
+		  /* We do not need to ensure ordering wrt another memory
+		     access.  Also see comments at ENQUEUE_MUTEX.  */
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 		  return EDEADLK;
@@ -200,6 +210,8 @@ pthread_mutex_timedlock (pthread_mutex_t
 
 	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 		{
+		  /* We do not need to ensure ordering wrt another memory
+		     access.  */
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 
@@ -294,12 +306,19 @@ pthread_mutex_timedlock (pthread_mutex_t
 	  mutex->__data.__count = 0;
 	  int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
 	  lll_unlock (mutex->__data.__lock, private);
+	  /* FIXME This violates the mutex destruction requirements.  See
+	     __pthread_mutex_unlock_full.  */
 	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	  return ENOTRECOVERABLE;
 	}
 
       mutex->__data.__count = 1;
+      /* We must not enqueue the mutex before we have acquired it.
+	 Also see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
       ENQUEUE_MUTEX (mutex);
+      /* We need to clear op_pending after we enqueue the mutex.  */
+      __asm ("" ::: "memory");
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
@@ -316,10 +335,15 @@ pthread_mutex_timedlock (pthread_mutex_t
 	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
 
 	if (robust)
-	  /* Note: robust PI futexes are signaled by setting bit 0.  */
-	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
-			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
-				   | 1));
+	  {
+	    /* Note: robust PI futexes are signaled by setting bit 0.  */
+	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
+				     | 1));
+	    /* We need to set op_pending before starting the operation.  Also
+	       see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
+	  }
 
 	oldval = mutex->__data.__lock;
 
@@ -328,12 +352,16 @@ pthread_mutex_timedlock (pthread_mutex_t
 	  {
 	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 	      {
+		/* We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 		return EDEADLK;
 	      }
 
 	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 	      {
+		/* We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 		/* Just bump the counter.  */
@@ -420,7 +448,12 @@ pthread_mutex_timedlock (pthread_mutex_t
 	    /* But it is inconsistent unless marked otherwise.  */
 	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+	    /* We must not enqueue the mutex before we have acquired it.
+	       Also see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
 	    ENQUEUE_MUTEX_PI (mutex);
+	    /* We need to clear op_pending after we enqueue the mutex.  */
+	    __asm ("" ::: "memory");
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	    /* Note that we deliberately exit here.  If we fall
@@ -443,6 +476,8 @@ pthread_mutex_timedlock (pthread_mutex_t
 						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 			      0, 0);
 
+	    /* To the kernel, this will be visible after the kernel has
+	       acquired the mutex in the syscall.  */
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	    return ENOTRECOVERABLE;
 	  }
@@ -450,7 +485,12 @@ pthread_mutex_timedlock (pthread_mutex_t
 	mutex->__data.__count = 1;
 	if (robust)
 	  {
+	    /* We must not enqueue the mutex before we have acquired it.
+	       Also see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
 	    ENQUEUE_MUTEX_PI (mutex);
+	    /* We need to clear op_pending after we enqueue the mutex.  */
+	    __asm ("" ::: "memory");
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	  }
 	}
Index: glibc-2.17-c758a686/nptl/pthread_mutex_unlock.c
===================================================================
--- glibc-2.17-c758a686.orig/nptl/pthread_mutex_unlock.c
+++ glibc-2.17-c758a686/nptl/pthread_mutex_unlock.c
@@ -143,6 +143,9 @@ __pthread_mutex_unlock_full (pthread_mut
       /* Remove mutex from the list.  */
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 		     &mutex->__data.__list.__next);
+      /* We must set op_pending before we dequeue the mutex.  Also see
+	 comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
       DEQUEUE_MUTEX (mutex);
 
       mutex->__data.__owner = newowner;
@@ -159,6 +162,14 @@ __pthread_mutex_unlock_full (pthread_mut
 			     & FUTEX_WAITERS) != 0))
 	lll_futex_wake (&mutex->__data.__lock, 1, private);
 
+      /* We must clear op_pending after we release the mutex.
+	 FIXME However, this violates the mutex destruction requirements
+	 because another thread could acquire the mutex, destroy it, and
+	 reuse the memory for something else; then, if this thread crashes,
+	 and the memory happens to have a value equal to the TID, the kernel
+	 will believe it is still related to the mutex (which has been
+	 destroyed already) and will modify some other random object.  */
+      __asm ("" ::: "memory");
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
@@ -223,6 +234,9 @@ __pthread_mutex_unlock_full (pthread_mut
 	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
 				   | 1));
+	  /* We must set op_pending before we dequeue the mutex.  Also see
+	     comments at ENQUEUE_MUTEX.  */
+	  __asm ("" ::: "memory");
 	  DEQUEUE_MUTEX (mutex);
 	}
 
@@ -247,6 +261,9 @@ __pthread_mutex_unlock_full (pthread_mut
 			    __lll_private_flag (FUTEX_UNLOCK_PI, private));
 	}
 
+      /* This happens after the kernel releases the mutex but violates the
+	 mutex destruction requirements; see comments in the code handling
+	 PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
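The unlock path above runs the same protocol in reverse, and it is where
the FIXME about the mutex destruction requirements bites.  A hypothetical
condensation, reusing the illustrative types from the sketch after the
pthread_mutex_lock.c diff:

static void
robust_unlock (struct thread *self, struct rmutex *m)
{
  self->list_op_pending = &m->next;
  barrier ();			/* announce before dequeuing */

  /* DEQUEUE_MUTEX, simplified: assume m is the list head.  */
  self->robust_list = m->next;
  barrier ();			/* unlink before poisoning __next */
  m->next = NULL;

  __sync_lock_release (&m->lock);	/* release; wake waiters here */

  /* Order the release before the retraction.  FIXME, as in the patch:
     between the release and the store below, another thread may
     acquire, destroy, and reuse *m; if this thread crashes in that
     window, the kernel still follows list_op_pending into memory that
     is no longer a mutex.  */
  barrier ();
  self->list_op_pending = NULL;
}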