commit 3d265911c2aac65d978f679101594f9071024874
Author: Andreas Schwab <schwab@suse.de>
Date:   Mon Nov 12 11:11:40 2018 +0100

    Reindent nptl/pthread_rwlock_common.c

diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c
index a290d08332b802a5..5dd534271aed6b41 100644
--- a/nptl/pthread_rwlock_common.c
+++ b/nptl/pthread_rwlock_common.c
@@ -34,7 +34,7 @@
 
    A thread is allowed to acquire a read lock recursively (i.e., have rdlock
    critical sections that overlap in sequenced-before) unless the kind of the
-   rwlock is set to PTHREAD_RWLOCK_PREFER_WRITERS_NONRECURSIVE_NP.
+   rwlock is set to PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP.
 
    This lock is built so that workloads of mostly readers can be executed with
    low runtime overheads.  This matches that the default kind of the lock is
@@ -46,7 +46,7 @@
   An uncontended write lock acquisition is as fast as for a normal
   exclusive mutex but writer contention is somewhat more costly due to
   keeping track of the exact number of writers.  If the rwlock kind requests
-   writers to be preferred (i.e., PTHREAD_RWLOCK_PREFER_WRITERS_NP or the
+   writers to be preferred (i.e., PTHREAD_RWLOCK_PREFER_WRITER_NP or the
   no-recursive-readers variant of it), then writer--to--writer lock ownership
   hand-over is fairly fast and bypasses lock acquisition attempts by readers.
   The costs of lock ownership transfer between readers and writers vary.  If
@@ -251,7 +251,7 @@ __pthread_rwlock_rdunlock (pthread_rwlock_t *rwlock)
	 the first reader's store to __wrphase_futex (or a later value) if
	 the writer observes that a write phase has been started.  */
       if (atomic_compare_exchange_weak_release (&rwlock->__data.__readers,
-	  &r, rnew))
+						&r, rnew))
	break;
       /* TODO Back-off.  */
     }
@@ -285,7 +285,7 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
   /* Make sure we are not holding the rwlock as a writer.  This is a deadlock
      situation we recognize and report.  */
   if (__glibc_unlikely (atomic_load_relaxed (&rwlock->__data.__cur_writer)
-      == THREAD_GETMEM (THREAD_SELF, tid)))
+			== THREAD_GETMEM (THREAD_SELF, tid)))
     return EDEADLK;
 
   /* If we prefer writers, recursive rdlock is disallowed, we are in a read
@@ -299,9 +299,9 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
   if (rwlock->__data.__flags == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
     {
       r = atomic_load_relaxed (&rwlock->__data.__readers);
-      while (((r & PTHREAD_RWLOCK_WRPHASE) == 0)
-	      && ((r & PTHREAD_RWLOCK_WRLOCKED) != 0)
-	      && ((r >> PTHREAD_RWLOCK_READER_SHIFT) > 0))
+      while ((r & PTHREAD_RWLOCK_WRPHASE) == 0
+	     && (r & PTHREAD_RWLOCK_WRLOCKED) != 0
+	     && (r >> PTHREAD_RWLOCK_READER_SHIFT) > 0)
	{
	  /* TODO Spin first.  */
	  /* Try setting the flag signaling that we are waiting without having
@@ -315,11 +315,11 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
		 __readers, and all threads set the flag under the same
		 conditions.  */
	      while ((atomic_load_relaxed (&rwlock->__data.__readers)
-		  & PTHREAD_RWLOCK_RWAITING) != 0)
+		      & PTHREAD_RWLOCK_RWAITING) != 0)
		{
		  int private = __pthread_rwlock_get_private (rwlock);
		  int err = futex_abstimed_wait (&rwlock->__data.__readers,
-		      r, abstime, private);
+						 r, abstime, private);
		  /* We ignore EAGAIN and EINTR.  On time-outs, we can just
		     return because we don't need to clean up anything.  */
		  if (err == ETIMEDOUT)
@@ -338,8 +338,9 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
      expected value for future operations.  Acquire MO so we synchronize with
      prior writers as well as the last reader of the previous read phase (see
      below).  */
-  r = atomic_fetch_add_acquire (&rwlock->__data.__readers,
-      (1 << PTHREAD_RWLOCK_READER_SHIFT)) + (1 << PTHREAD_RWLOCK_READER_SHIFT);
+  r = (atomic_fetch_add_acquire (&rwlock->__data.__readers,
+				 (1 << PTHREAD_RWLOCK_READER_SHIFT))
+       + (1 << PTHREAD_RWLOCK_READER_SHIFT));
 
   /* Check whether there is an overflow in the number of readers.  We assume
      that the total number of threads is less than half the maximum number
@@ -359,8 +360,9 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
       /* Relaxed MO is okay because we just want to undo our registration and
	 cannot have changed the rwlock state substantially if the CAS
	 succeeds.  */
-      if (atomic_compare_exchange_weak_relaxed (&rwlock->__data.__readers, &r,
-	  r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
+      if (atomic_compare_exchange_weak_relaxed
+	  (&rwlock->__data.__readers,
+	   &r, r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
	return EAGAIN;
     }
 
@@ -378,15 +380,15 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
   /* Otherwise, if we were in a write phase (states #6 or #8), we must wait
      for explicit hand-over of the read phase; the only exception is if we
      can start a read phase if there is no primary writer currently.  */
-  while (((r & PTHREAD_RWLOCK_WRPHASE) != 0)
-      && ((r & PTHREAD_RWLOCK_WRLOCKED) == 0))
+  while ((r & PTHREAD_RWLOCK_WRPHASE) != 0
+	 && (r & PTHREAD_RWLOCK_WRLOCKED) == 0)
     {
-       /* Try to enter a read phase: If the CAS below succeeds, we have
+      /* Try to enter a read phase: If the CAS below succeeds, we have
	 ownership; if it fails, we will simply retry and reassess the
	 situation.
	 Acquire MO so we synchronize with prior writers.  */
       if (atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers, &r,
-	  r ^ PTHREAD_RWLOCK_WRPHASE))
+						r ^ PTHREAD_RWLOCK_WRPHASE))
	{
	  /* We started the read phase, so we are also responsible for
	     updating the write-phase futex.  Relaxed MO is sufficient.
@@ -397,7 +399,7 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
	     (but we can pretend to do the setting and unsetting of WRLOCKED
	     atomically, and thus can skip this step).  */
	  if ((atomic_exchange_relaxed (&rwlock->__data.__wrphase_futex, 0)
-	      & PTHREAD_RWLOCK_FUTEX_USED) != 0)
+	       & PTHREAD_RWLOCK_FUTEX_USED) != 0)
	    {
	      int private = __pthread_rwlock_get_private (rwlock);
	      futex_wake (&rwlock->__data.__wrphase_futex, INT_MAX, private);
@@ -435,16 +437,17 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
   for (;;)
     {
       while (((wpf = atomic_load_relaxed (&rwlock->__data.__wrphase_futex))
-	  | PTHREAD_RWLOCK_FUTEX_USED) == (1 | PTHREAD_RWLOCK_FUTEX_USED))
+	      | PTHREAD_RWLOCK_FUTEX_USED) == (1 | PTHREAD_RWLOCK_FUTEX_USED))
	{
	  int private = __pthread_rwlock_get_private (rwlock);
	  if (((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0)
-	      && !atomic_compare_exchange_weak_relaxed
+	      && (!atomic_compare_exchange_weak_relaxed
		  (&rwlock->__data.__wrphase_futex,
-		   &wpf, wpf | PTHREAD_RWLOCK_FUTEX_USED))
+		   &wpf, wpf | PTHREAD_RWLOCK_FUTEX_USED)))
	    continue;
	  int err = futex_abstimed_wait (&rwlock->__data.__wrphase_futex,
-	      1 | PTHREAD_RWLOCK_FUTEX_USED, abstime, private);
+					 1 | PTHREAD_RWLOCK_FUTEX_USED,
+					 abstime, private);
	  if (err == ETIMEDOUT)
	    {
	      /* If we timed out, we need to unregister.  If no read phase
@@ -477,8 +480,8 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
		 in this case and thus make the spin-waiting we need
		 unnecessarily expensive.  */
	      while ((atomic_load_relaxed (&rwlock->__data.__wrphase_futex)
-		  | PTHREAD_RWLOCK_FUTEX_USED)
-		  == (1 | PTHREAD_RWLOCK_FUTEX_USED))
+		      | PTHREAD_RWLOCK_FUTEX_USED)
+		     == (1 | PTHREAD_RWLOCK_FUTEX_USED))
		{
		  /* TODO Back-off?  */
		}
@@ -495,7 +498,7 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
	 release of the writer, and so that we observe a recent value of
	 __wrphase_futex (see below).  */
       if ((atomic_load_acquire (&rwlock->__data.__readers)
-	  & PTHREAD_RWLOCK_WRPHASE) == 0)
+	   & PTHREAD_RWLOCK_WRPHASE) == 0)
	/* We are in a read phase now, so the least recent modification of
	   __wrphase_futex we can read from is the store by the writer
	   with value 1.  Thus, only now we can assume that if we observe
@@ -516,8 +519,9 @@ __pthread_rwlock_wrunlock (pthread_rwlock_t *rwlock)
   atomic_store_relaxed (&rwlock->__data.__cur_writer, 0);
   /* Disable waiting by writers.  We will wake up after we decided how to
      proceed.  */
-  bool wake_writers = ((atomic_exchange_relaxed
-      (&rwlock->__data.__writers_futex, 0) & PTHREAD_RWLOCK_FUTEX_USED) != 0);
+  bool wake_writers
+    = ((atomic_exchange_relaxed (&rwlock->__data.__writers_futex, 0)
+	& PTHREAD_RWLOCK_FUTEX_USED) != 0);
 
   if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
     {
@@ -529,8 +533,8 @@ __pthread_rwlock_wrunlock (pthread_rwlock_t *rwlock)
	     synchronize with us and thus can take over our view of
	     __readers (including, for example, whether we are in a write
	     phase or not).  */
-	  if (atomic_compare_exchange_weak_release (&rwlock->__data.__writers,
-	      &w, w | PTHREAD_RWLOCK_WRHANDOVER))
+	  if (atomic_compare_exchange_weak_release
+	      (&rwlock->__data.__writers, &w, w | PTHREAD_RWLOCK_WRHANDOVER))
	    /* Another writer will take over.  */
	    goto done;
	  /* TODO Back-off.  */
@@ -543,9 +547,10 @@ __pthread_rwlock_wrunlock (pthread_rwlock_t *rwlock)
   unsigned int r = atomic_load_relaxed (&rwlock->__data.__readers);
   /* Release MO so that subsequent readers or writers synchronize with us.  */
   while (!atomic_compare_exchange_weak_release
-      (&rwlock->__data.__readers, &r, (r ^ PTHREAD_RWLOCK_WRLOCKED)
-	  ^ ((r >> PTHREAD_RWLOCK_READER_SHIFT) == 0 ? 0
-	      : PTHREAD_RWLOCK_WRPHASE)))
+	 (&rwlock->__data.__readers, &r,
+	  ((r ^ PTHREAD_RWLOCK_WRLOCKED)
+	   ^ ((r >> PTHREAD_RWLOCK_READER_SHIFT) == 0 ? 0
+	      : PTHREAD_RWLOCK_WRPHASE))))
     {
       /* TODO Back-off.  */
     }
@@ -574,7 +579,7 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
   /* Make sure we are not holding the rwlock as a writer.  This is a deadlock
      situation we recognize and report.  */
   if (__glibc_unlikely (atomic_load_relaxed (&rwlock->__data.__cur_writer)
-      == THREAD_GETMEM (THREAD_SELF, tid)))
+			== THREAD_GETMEM (THREAD_SELF, tid)))
     return EDEADLK;
 
   /* First we try to acquire the role of primary writer by setting WRLOCKED;
@@ -593,12 +598,12 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
      this could be less scalable if readers arrive and leave frequently.  */
   bool may_share_futex_used_flag = false;
   unsigned int r = atomic_fetch_or_acquire (&rwlock->__data.__readers,
-      PTHREAD_RWLOCK_WRLOCKED);
+					    PTHREAD_RWLOCK_WRLOCKED);
   if (__glibc_unlikely ((r & PTHREAD_RWLOCK_WRLOCKED) != 0))
     {
       /* There is another primary writer.  */
-      bool prefer_writer =
-	  (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP);
+      bool prefer_writer
+	= (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP);
       if (prefer_writer)
	{
	  /* We register as a waiting writer, so that we can make use of
@@ -617,8 +622,7 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
	      /* Try to become the primary writer or retry.  Acquire MO as in
		 the fetch_or above.  */
	      if (atomic_compare_exchange_weak_acquire
-		  (&rwlock->__data.__readers, &r,
-		      r | PTHREAD_RWLOCK_WRLOCKED))
+		  (&rwlock->__data.__readers, &r, r | PTHREAD_RWLOCK_WRLOCKED))
		{
		  if (prefer_writer)
		    {
@@ -633,8 +637,7 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
			 __writers).
			 ??? Perhaps this is not strictly necessary for
			 reasons we do not yet know of.  */
-		      atomic_fetch_add_relaxed (&rwlock->__data.__writers,
-			  -1);
+		      atomic_fetch_add_relaxed (&rwlock->__data.__writers, -1);
		    }
		  break;
		}
@@ -646,8 +649,7 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
	     succeed, we own WRLOCKED.  */
	  if (prefer_writer)
	    {
-	      unsigned int w = atomic_load_relaxed
-		  (&rwlock->__data.__writers);
+	      unsigned int w = atomic_load_relaxed (&rwlock->__data.__writers);
	      if ((w & PTHREAD_RWLOCK_WRHANDOVER) != 0)
		{
		  /* Acquire MO is required here so that we synchronize with
@@ -677,13 +679,13 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
	  /* We did not acquire WRLOCKED nor were able to use writer--writer
	     hand-over, so we block on __writers_futex.  */
	  int private = __pthread_rwlock_get_private (rwlock);
-	  unsigned int wf = atomic_load_relaxed
-	      (&rwlock->__data.__writers_futex);
+	  unsigned int wf
+	    = atomic_load_relaxed (&rwlock->__data.__writers_futex);
	  if (((wf & ~(unsigned int) PTHREAD_RWLOCK_FUTEX_USED) != 1)
	      || ((wf != (1 | PTHREAD_RWLOCK_FUTEX_USED))
-		  && !atomic_compare_exchange_weak_relaxed
+		  && (!atomic_compare_exchange_weak_relaxed
		      (&rwlock->__data.__writers_futex, &wf,
-		       1 | PTHREAD_RWLOCK_FUTEX_USED)))
+		       1 | PTHREAD_RWLOCK_FUTEX_USED))))
	    {
	      /* If we cannot block on __writers_futex because there is no
		 primary writer, or we cannot set PTHREAD_RWLOCK_FUTEX_USED,
@@ -704,7 +706,8 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
	     in this group.  */
	  may_share_futex_used_flag = true;
	  int err = futex_abstimed_wait (&rwlock->__data.__writers_futex,
-	      1 | PTHREAD_RWLOCK_FUTEX_USED, abstime, private);
+					 1 | PTHREAD_RWLOCK_FUTEX_USED,
+					 abstime, private);
	  if (err == ETIMEDOUT)
	    {
	      if (prefer_writer)
@@ -716,10 +719,10 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
		     that this happened before the timeout; see
		     pthread_rwlock_rdlock_full for the full reasoning.)
		     Also see the similar code above.  */
-		  unsigned int w = atomic_load_relaxed
-		      (&rwlock->__data.__writers);
+		  unsigned int w
+		    = atomic_load_relaxed (&rwlock->__data.__writers);
		  while (!atomic_compare_exchange_weak_acquire
-		      (&rwlock->__data.__writers, &w,
+			 (&rwlock->__data.__writers, &w,
			  (w == PTHREAD_RWLOCK_WRHANDOVER + 1 ? 0 : w - 1)))
		    {
		      /* TODO Back-off.  */
@@ -751,7 +754,8 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
      modifications of __readers ensures that this store happens after the
      store of value 0 by the previous primary writer.  */
   atomic_store_relaxed (&rwlock->__data.__writers_futex,
-      1 | (may_share_futex_used_flag ? PTHREAD_RWLOCK_FUTEX_USED : 0));
+			1 | (may_share_futex_used_flag
+			     ? PTHREAD_RWLOCK_FUTEX_USED : 0));
 
   /* If we are in a write phase, we have acquired the lock.  */
   if ((r & PTHREAD_RWLOCK_WRPHASE) != 0)
@@ -759,15 +763,15 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
 
   /* If we are in a read phase and there are no readers, try to start a write
      phase.  */
-  while (((r & PTHREAD_RWLOCK_WRPHASE) == 0)
-      && ((r >> PTHREAD_RWLOCK_READER_SHIFT) == 0))
+  while ((r & PTHREAD_RWLOCK_WRPHASE) == 0
+	 && (r >> PTHREAD_RWLOCK_READER_SHIFT) == 0)
     {
       /* Acquire MO so that we synchronize with prior writers and do
	 not interfere with their updates to __writers_futex, as well
	 as regarding prior readers and their updates to __wrphase_futex,
	 respectively.  */
       if (atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers,
-	  &r, r | PTHREAD_RWLOCK_WRPHASE))
+						&r, r | PTHREAD_RWLOCK_WRPHASE))
	{
	  /* We have started a write phase, so need to enable readers to wait.
	     See the similar case in __pthread_rwlock_rdlock_full.  Unlike in
@@ -792,24 +796,24 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
   for (;;)
     {
       while (((wpf = atomic_load_relaxed (&rwlock->__data.__wrphase_futex))
-	  | PTHREAD_RWLOCK_FUTEX_USED) == PTHREAD_RWLOCK_FUTEX_USED)
+	      | PTHREAD_RWLOCK_FUTEX_USED) == PTHREAD_RWLOCK_FUTEX_USED)
	{
	  int private = __pthread_rwlock_get_private (rwlock);
-	  if (((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0)
-	      && !atomic_compare_exchange_weak_relaxed
+	  if ((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0
+	      && (!atomic_compare_exchange_weak_relaxed
		  (&rwlock->__data.__wrphase_futex, &wpf,
-		   PTHREAD_RWLOCK_FUTEX_USED))
+		   PTHREAD_RWLOCK_FUTEX_USED)))
	    continue;
	  int err = futex_abstimed_wait (&rwlock->__data.__wrphase_futex,
-	      PTHREAD_RWLOCK_FUTEX_USED, abstime, private);
+					 PTHREAD_RWLOCK_FUTEX_USED,
+					 abstime, private);
	  if (err == ETIMEDOUT)
	    {
-	      if (rwlock->__data.__flags
-		  != PTHREAD_RWLOCK_PREFER_READER_NP)
+	      if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
		{
		  /* We try writer--writer hand-over.  */
-		  unsigned int w = atomic_load_relaxed
-		      (&rwlock->__data.__writers);
+		  unsigned int w
+		    = atomic_load_relaxed (&rwlock->__data.__writers);
		  if (w != 0)
		    {
		      /* We are about to hand over WRLOCKED, so we must
@@ -823,13 +827,13 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
			 Release MO so that another writer that gets
			 WRLOCKED from us can take over our view of
			 __readers.  */
-		      unsigned int wf = atomic_exchange_relaxed
-			  (&rwlock->__data.__writers_futex, 0);
+		      unsigned int wf
+			= atomic_exchange_relaxed (&rwlock->__data.__writers_futex, 0);
		      while (w != 0)
			{
			  if (atomic_compare_exchange_weak_release
			      (&rwlock->__data.__writers, &w,
-				  w | PTHREAD_RWLOCK_WRHANDOVER))
+			       w | PTHREAD_RWLOCK_WRHANDOVER))
			    {
			      /* Wake other writers.  */
			      if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
@@ -844,8 +848,7 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
			 again.  Make sure we don't loose the flag that
			 signals whether there are threads waiting on
			 this futex.  */
-		      atomic_store_relaxed
-			  (&rwlock->__data.__writers_futex, wf);
+		      atomic_store_relaxed (&rwlock->__data.__writers_futex, wf);
		    }
		}
	      /* If we timed out and we are not in a write phase, we can
@@ -857,8 +860,8 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
		  /* We are about to release WRLOCKED, so we must release
		     __writers_futex too; see the handling of
		     writer--writer hand-over above.  */
-		  unsigned int wf = atomic_exchange_relaxed
-		      (&rwlock->__data.__writers_futex, 0);
+		  unsigned int wf
+		    = atomic_exchange_relaxed (&rwlock->__data.__writers_futex, 0);
		  while ((r & PTHREAD_RWLOCK_WRPHASE) == 0)
		    {
		      /* While we don't need to make anything from a
@@ -877,11 +880,11 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
			  /* Wake other writers.  */
			  if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
			    futex_wake (&rwlock->__data.__writers_futex,
-				1, private);
+					1, private);
			  /* Wake waiting readers.  */
			  if ((r & PTHREAD_RWLOCK_RWAITING) != 0)
			    futex_wake (&rwlock->__data.__readers,
-				INT_MAX, private);
+					INT_MAX, private);
			  return ETIMEDOUT;
			}
		    }
@@ -898,10 +901,9 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
	      atomic_thread_fence_acquire ();
	      /* We still need to wait for explicit hand-over, but we must
		 not use futex_wait anymore.  */
-	      while ((atomic_load_relaxed
-		  (&rwlock->__data.__wrphase_futex)
-		   | PTHREAD_RWLOCK_FUTEX_USED)
-		  == PTHREAD_RWLOCK_FUTEX_USED)
+	      while ((atomic_load_relaxed (&rwlock->__data.__wrphase_futex)
+		      | PTHREAD_RWLOCK_FUTEX_USED)
+		     == PTHREAD_RWLOCK_FUTEX_USED)
		{
		  /* TODO Back-off.  */
		}
@@ -915,12 +917,12 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
       if (ready)
	break;
       if ((atomic_load_acquire (&rwlock->__data.__readers)
-	  & PTHREAD_RWLOCK_WRPHASE) != 0)
+	   & PTHREAD_RWLOCK_WRPHASE) != 0)
	ready = true;
     }
 
  done:
   atomic_store_relaxed (&rwlock->__data.__cur_writer,
-      THREAD_GETMEM (THREAD_SELF, tid));
+			THREAD_GETMEM (THREAD_SELF, tid));
   return 0;
 }