commit fff94fa2245612191123a8015eac94eb04f001e2
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Tue May 19 06:40:37 2015 +0530

    Avoid deadlock in malloc on backtrace (BZ #16159)

    When the malloc subsystem detects some kind of memory corruption,
    depending on the configuration it prints the error, a backtrace, a
    memory map and then aborts the process.  In this process, the
    backtrace() call may result in a call to malloc, resulting in
    various kinds of problematic behavior.

    In one case, the malloc it calls may detect a corruption and call
    backtrace again, and a stack overflow may result due to the infinite
    recursion.  In another case, the malloc it calls may deadlock on an
    arena lock with the malloc (or free, realloc, etc.) that detected the
    corruption.  In yet another case, if the program is linked with
    pthreads, backtrace may do a pthread_once initialization, which
    deadlocks on itself.

    In all these cases, the program exit is not as intended.  This is
    avoidable by marking the arena that malloc detected a corruption on,
    as unusable.  The following patch does that.  Features of this patch
    are as follows:

    - A flag is added to the mstate struct of the arena to indicate if the
      arena is corrupt.

    - The flag is checked whenever malloc functions try to get a lock on
      an arena.  If the arena is unusable, a NULL is returned, causing the
      malloc to use mmap or try the next arena.

    - malloc_printerr sets the corrupt flag on the arena when it detects a
      corruption.

    - free does not concern itself with the flag at all.  It is not
      important since the backtrace workflow does not need free.  A free
      in a parallel thread may cause another corruption, but that's not
      new.

    - The flag check and set are not atomic and may race.  This is fine
      since we don't care about contention during the flag check.  We want
      to make sure that the malloc call in the backtrace does not trip on
      itself and all that action happens in the same thread and not across
      threads.

    I verified that the test case does not show any regressions due to
    this patch.  I also ran the malloc benchmarks and found an
    insignificant difference in timings (< 2%).

The follow-on test-suite fix has been folded into the patch below, but
to keep it minimal, ignore_stderr is put directly into
tst-malloc-backtrace.c.

commit 02242448bf431a69fd0b8c929ca4408a05479baa
Author: Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
Date:   Tue Jun 2 10:32:25 2015 -0300

    Avoid outputting to TTY after an expected memory corruption in testcase

    Protect TTY against an expected memory corruption from testcase
    tst-malloc-backtrace, which is expected to SIGABRT after a forced memory
    corruption.

ce426f
Index: b/malloc/arena.c
ce426f
===================================================================
ce426f
--- a/malloc/arena.c
ce426f
+++ b/malloc/arena.c
ce426f
@@ -119,7 +119,7 @@ int __malloc_initialized = -1;
ce426f
 
ce426f
 #ifdef PER_THREAD
ce426f
 # define arena_lock(ptr, size) do { \
ce426f
-  if(ptr) \
ce426f
+  if(ptr && !arena_is_corrupt (ptr)) \
ce426f
     (void)mutex_lock(&ptr->mutex); \
ce426f
   else \
ce426f
     ptr = arena_get2(ptr, (size), NULL); \
ce426f
@@ -808,7 +808,7 @@ reused_arena (mstate avoid_arena)
ce426f
   result = next_to_use;
ce426f
   do
ce426f
     {
ce426f
-      if (!mutex_trylock(&result->mutex))
ce426f
+      if (!arena_is_corrupt (result) && !mutex_trylock(&result->mutex))
ce426f
 	goto out;
ce426f
 
ce426f
       result = result->next;
ce426f
@@ -820,7 +820,21 @@ reused_arena (mstate avoid_arena)
ce426f
   if (result == avoid_arena)
ce426f
     result = result->next;
ce426f
 
ce426f
-  /* No arena available.  Wait for the next in line.  */
ce426f
+  /* Make sure that the arena we get is not corrupted.  */
ce426f
+  mstate begin = result;
ce426f
+  while (arena_is_corrupt (result) || result == avoid_arena)
ce426f
+    {
ce426f
+      result = result->next;
ce426f
+      if (result == begin)
ce426f
+	break;
ce426f
+    }
ce426f
+
ce426f
+  /* We could not find any arena that was either not corrupted or not the one
ce426f
+     we wanted to avoid.  */
ce426f
+  if (result == begin || result == avoid_arena)
ce426f
+    return NULL;
ce426f
+
ce426f
+  /* No arena available without contention.  Wait for the next in line.  */
ce426f
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
ce426f
   (void)mutex_lock(&result->mutex);
ce426f
 
ce426f
Index: b/malloc/hooks.c
ce426f
===================================================================
ce426f
--- a/malloc/hooks.c
ce426f
+++ b/malloc/hooks.c
ce426f
@@ -109,7 +109,8 @@ malloc_check_get_size(mchunkptr p)
ce426f
        size -= c) {
ce426f
     if(c<=0 || size<(c+2*SIZE_SZ)) {
ce426f
       malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
ce426f
-		      chunk2mem(p));
ce426f
+		      chunk2mem(p),
ce426f
+		      chunk_is_mmapped (p) ? NULL : arena_for_chunk (p));
ce426f
       return 0;
ce426f
     }
ce426f
   }
ce426f
@@ -221,7 +222,8 @@ top_check(void)
ce426f
     return 0;
ce426f
 
ce426f
   mutex_unlock(&main_arena);
ce426f
-  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
ce426f
+  malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
ce426f
+		   &main_arena);
ce426f
   mutex_lock(&main_arena);
ce426f
 
ce426f
   /* Try to set up a new top chunk. */
ce426f
@@ -276,7 +278,8 @@ free_check(void* mem, const void *caller
ce426f
   if(!p) {
ce426f
     (void)mutex_unlock(&main_arena.mutex);
ce426f
 
ce426f
-    malloc_printerr(check_action, "free(): invalid pointer", mem);
ce426f
+    malloc_printerr(check_action, "free(): invalid pointer", mem,
ce426f
+		    &main_arena);
ce426f
     return;
ce426f
   }
ce426f
   if (chunk_is_mmapped(p)) {
ce426f
@@ -308,7 +311,8 @@ realloc_check(void* oldmem, size_t bytes
ce426f
   const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
ce426f
   (void)mutex_unlock(&main_arena.mutex);
ce426f
   if(!oldp) {
ce426f
-    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
ce426f
+    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem,
ce426f
+		    &main_arena);
ce426f
     return malloc_check(bytes, NULL);
ce426f
   }
ce426f
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);
ce426f
Index: b/malloc/Makefile
ce426f
===================================================================
ce426f
--- a/malloc/Makefile
ce426f
+++ b/malloc/Makefile
ce426f
@@ -25,7 +25,8 @@ all:
ce426f
 dist-headers := malloc.h
ce426f
 headers := $(dist-headers) obstack.h mcheck.h
ce426f
 tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
ce426f
-	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 tst-malloc-usable
ce426f
+	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
ce426f
+	 tst-malloc-usable tst-malloc-backtrace
ce426f
 test-srcs = tst-mtrace
ce426f
 
ce426f
 routines = malloc morecore mcheck mtrace obstack
ce426f
@@ -40,6 +41,9 @@ extra-libs-others = $(extra-libs)
ce426f
 libmemusage-routines = memusage
ce426f
 libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
ce426f
 
ce426f
+$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
ce426f
+			       $(common-objpfx)nptl/libpthread_nonshared.a
ce426f
+
ce426f
 # These should be removed by `make clean'.
ce426f
 extra-objs = mcheck-init.o libmcheck.a
ce426f
 
ce426f
Index: b/malloc/malloc.c
ce426f
===================================================================
ce426f
--- a/malloc/malloc.c
ce426f
+++ b/malloc/malloc.c
ce426f
@@ -1060,7 +1060,7 @@ static void*  _int_realloc(mstate, mchun
ce426f
 static void*  _int_memalign(mstate, size_t, size_t);
ce426f
 static void*  _int_valloc(mstate, size_t);
ce426f
 static void*  _int_pvalloc(mstate, size_t);
ce426f
-static void malloc_printerr(int action, const char *str, void *ptr);
ce426f
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
ce426f
 
ce426f
 static void* internal_function mem2mem_check(void *p, size_t sz);
ce426f
 static int internal_function top_check(void);
ce426f
@@ -1430,7 +1430,8 @@ typedef struct malloc_chunk* mbinptr;
ce426f
   BK = P->bk;                                                          \
ce426f
   if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) {	       \
ce426f
     mutex_unlock(&(AV)->mutex);					       \
ce426f
-    malloc_printerr (check_action, "corrupted double-linked list", P); \
ce426f
+    malloc_printerr (check_action, "corrupted double-linked list", P,  \
ce426f
+		     AV);					       \
ce426f
     mutex_lock(&(AV)->mutex);					       \
ce426f
   } else {							       \
ce426f
     FD->bk = BK;                                                       \
ce426f
@@ -1670,6 +1671,15 @@ typedef struct malloc_chunk* mfastbinptr
ce426f
 #define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
ce426f
 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
ce426f
 
ce426f
+/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
ce426f
+   arena.  Such an arena is no longer used to allocate chunks.  Chunks
ce426f
+   allocated in that arena before detecting corruption are not freed.  */
ce426f
+
ce426f
+#define ARENA_CORRUPTION_BIT (4U)
ce426f
+
ce426f
+#define arena_is_corrupt(A)    (((A)->flags & ARENA_CORRUPTION_BIT))
ce426f
+#define set_arena_corrupt(A)   ((A)->flags |= ARENA_CORRUPTION_BIT)
ce426f
+
ce426f
 /*
ce426f
    Set value of max_fast.
ce426f
    Use impossibly small value if 0.
ce426f
@@ -2281,8 +2291,9 @@ static void* sysmalloc(INTERNAL_SIZE_T n
ce426f
     rather than expanding top.
ce426f
   */
ce426f
 
ce426f
-  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
ce426f
-      (mp_.n_mmaps < mp_.n_mmaps_max)) {
ce426f
+  if (av == NULL
ce426f
+      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
ce426f
+	  && (mp_.n_mmaps < mp_.n_mmaps_max))) {
ce426f
 
ce426f
     char* mm;             /* return value from mmap call*/
ce426f
 
ce426f
@@ -2354,6 +2365,10 @@ static void* sysmalloc(INTERNAL_SIZE_T n
ce426f
     }
ce426f
   }
ce426f
 
ce426f
+  /* There are no usable arenas and mmap also failed.  */
ce426f
+  if (av == NULL)
ce426f
+    return 0;
ce426f
+
ce426f
   /* Record incoming configuration of top */
ce426f
 
ce426f
   old_top  = av->top;
ce426f
@@ -2519,7 +2534,7 @@ static void* sysmalloc(INTERNAL_SIZE_T n
ce426f
     else if (contiguous(av) && old_size && brk < old_end) {
ce426f
       /* Oops!  Someone else killed our space..  Can't touch anything.  */
ce426f
       mutex_unlock(&av->mutex);
ce426f
-      malloc_printerr (3, "break adjusted to free malloc space", brk);
ce426f
+      malloc_printerr (3, "break adjusted to free malloc space", brk, av);
ce426f
       mutex_lock(&av->mutex);
ce426f
     }
ce426f
 
ce426f
@@ -2793,7 +2808,7 @@ munmap_chunk(mchunkptr p)
ce426f
   if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
ce426f
     {
ce426f
       malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
ce426f
-		       chunk2mem (p));
ce426f
+		       chunk2mem (p), NULL);
ce426f
       return;
ce426f
     }
ce426f
 
ce426f
@@ -2861,21 +2876,20 @@ __libc_malloc(size_t bytes)
ce426f
   if (__builtin_expect (hook != NULL, 0))
ce426f
     return (*hook)(bytes, RETURN_ADDRESS (0));
ce426f
 
ce426f
-  arena_lookup(ar_ptr);
ce426f
+  arena_get(ar_ptr, bytes);
ce426f
 
ce426f
-  arena_lock(ar_ptr, bytes);
ce426f
-  if(!ar_ptr)
ce426f
-    return 0;
ce426f
   victim = _int_malloc(ar_ptr, bytes);
ce426f
-  if(!victim) {
ce426f
+  /* Retry with another arena only if we were able to find a usable arena
ce426f
+     before.  */
ce426f
+  if (!victim && ar_ptr != NULL) {
ce426f
     LIBC_PROBE (memory_malloc_retry, 1, bytes);
ce426f
     ar_ptr = arena_get_retry(ar_ptr, bytes);
ce426f
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
ce426f
-      victim = _int_malloc(ar_ptr, bytes);
ce426f
-      (void)mutex_unlock(&ar_ptr->mutex);
ce426f
-    }
ce426f
-  } else
ce426f
+    victim = _int_malloc (ar_ptr, bytes);
ce426f
+  }
ce426f
+
ce426f
+  if (ar_ptr != NULL)
ce426f
     (void)mutex_unlock(&ar_ptr->mutex);
ce426f
+
ce426f
   assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
ce426f
 	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
ce426f
   return victim;
ce426f
@@ -2946,6 +2960,11 @@ __libc_realloc(void* oldmem, size_t byte
ce426f
   /* its size */
ce426f
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);
ce426f
 
ce426f
+  if (chunk_is_mmapped (oldp))
ce426f
+    ar_ptr = NULL;
ce426f
+  else
ce426f
+    ar_ptr = arena_for_chunk (oldp);
ce426f
+
ce426f
   /* Little security check which won't hurt performance: the
ce426f
      allocator never wrapps around at the end of the address space.
ce426f
      Therefore we can exclude some size values which might appear
ce426f
@@ -2953,7 +2972,8 @@ __libc_realloc(void* oldmem, size_t byte
ce426f
   if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
ce426f
       || __builtin_expect (misaligned_chunk (oldp), 0))
ce426f
     {
ce426f
-      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
ce426f
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
ce426f
+		       ar_ptr);
ce426f
       return NULL;
ce426f
     }
ce426f
 
ce426f
@@ -2977,7 +2997,6 @@ __libc_realloc(void* oldmem, size_t byte
ce426f
     return newmem;
ce426f
   }
ce426f
 
ce426f
-  ar_ptr = arena_for_chunk(oldp);
ce426f
 #if THREAD_STATS
ce426f
   if(!mutex_trylock(&ar_ptr->mutex))
ce426f
     ++(ar_ptr->stat_lock_direct);
ce426f
@@ -3043,18 +3062,17 @@ __libc_memalign(size_t alignment, size_t
ce426f
     }
ce426f
 
ce426f
   arena_get(ar_ptr, bytes + alignment + MINSIZE);
ce426f
-  if(!ar_ptr)
ce426f
-    return 0;
ce426f
+
ce426f
   p = _int_memalign(ar_ptr, alignment, bytes);
ce426f
-  if(!p) {
ce426f
+  if(!p && ar_ptr != NULL) {
ce426f
     LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
ce426f
     ar_ptr = arena_get_retry (ar_ptr, bytes);
ce426f
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
ce426f
-      p = _int_memalign(ar_ptr, alignment, bytes);
ce426f
-      (void)mutex_unlock(&ar_ptr->mutex);
ce426f
-    }
ce426f
-  } else
ce426f
+    p = _int_memalign (ar_ptr, alignment, bytes);
ce426f
+  }
ce426f
+
ce426f
+  if (ar_ptr != NULL)
ce426f
     (void)mutex_unlock(&ar_ptr->mutex);
ce426f
+
ce426f
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ce426f
 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
ce426f
   return p;
ce426f
@@ -3088,18 +3106,16 @@ __libc_valloc(size_t bytes)
ce426f
     return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
ce426f
 
ce426f
   arena_get(ar_ptr, bytes + pagesz + MINSIZE);
ce426f
-  if(!ar_ptr)
ce426f
-    return 0;
ce426f
   p = _int_valloc(ar_ptr, bytes);
ce426f
-  if(!p) {
ce426f
+  if(!p && ar_ptr != NULL) {
ce426f
     LIBC_PROBE (memory_valloc_retry, 1, bytes);
ce426f
     ar_ptr = arena_get_retry (ar_ptr, bytes);
ce426f
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
ce426f
-      p = _int_memalign(ar_ptr, pagesz, bytes);
ce426f
-      (void)mutex_unlock(&ar_ptr->mutex);
ce426f
-    }
ce426f
-  } else
ce426f
+    p = _int_memalign(ar_ptr, pagesz, bytes);
ce426f
+  }
ce426f
+
ce426f
+  if (ar_ptr != NULL)
ce426f
     (void)mutex_unlock (&ar_ptr->mutex);
ce426f
+
ce426f
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ce426f
 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
ce426f
 
ce426f
@@ -3134,15 +3150,15 @@ __libc_pvalloc(size_t bytes)
ce426f
 
ce426f
   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
ce426f
   p = _int_pvalloc(ar_ptr, bytes);
ce426f
-  if(!p) {
ce426f
+  if(!p && ar_ptr != NULL) {
ce426f
     LIBC_PROBE (memory_pvalloc_retry, 1, bytes);
ce426f
     ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
ce426f
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
ce426f
-      p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
ce426f
-      (void)mutex_unlock(&ar_ptr->mutex);
ce426f
-    }
ce426f
-  } else
ce426f
+    p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
ce426f
+  }
ce426f
+
ce426f
+  if (ar_ptr != NULL)
ce426f
     (void)mutex_unlock(&ar_ptr->mutex);
ce426f
+
ce426f
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ce426f
 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
ce426f
 
ce426f
@@ -3184,43 +3200,54 @@ __libc_calloc(size_t n, size_t elem_size
ce426f
   sz = bytes;
ce426f
 
ce426f
   arena_get(av, sz);
ce426f
-  if(!av)
ce426f
-    return 0;
ce426f
+  if(av)
ce426f
+    {
ce426f
 
ce426f
-  /* Check if we hand out the top chunk, in which case there may be no
ce426f
-     need to clear. */
ce426f
+      /* Check if we hand out the top chunk, in which case there may be no
ce426f
+	 need to clear. */
ce426f
 #if MORECORE_CLEARS
ce426f
-  oldtop = top(av);
ce426f
-  oldtopsize = chunksize(top(av));
ce426f
-#if MORECORE_CLEARS < 2
ce426f
-  /* Only newly allocated memory is guaranteed to be cleared.  */
ce426f
-  if (av == &main_arena &&
ce426f
-      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
ce426f
-    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
ce426f
+      oldtop = top(av);
ce426f
+      oldtopsize = chunksize(top(av));
ce426f
+# if MORECORE_CLEARS < 2
ce426f
+      /* Only newly allocated memory is guaranteed to be cleared.  */
ce426f
+      if (av == &main_arena &&
ce426f
+	  oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
ce426f
+	oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
ce426f
+# endif
ce426f
+      if (av != &main_arena)
ce426f
+	{
ce426f
+	  heap_info *heap = heap_for_ptr (oldtop);
ce426f
+	  if (oldtopsize < ((char *) heap + heap->mprotect_size -
ce426f
+			    (char *) oldtop))
ce426f
+	    oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
ce426f
+	}
ce426f
 #endif
ce426f
-  if (av != &main_arena)
ce426f
-    {
ce426f
-      heap_info *heap = heap_for_ptr (oldtop);
ce426f
-      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
ce426f
-	oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
ce426f
     }
ce426f
-#endif
ce426f
+  else
ce426f
+    {
ce426f
+      /* No usable arenas.  */
ce426f
+      oldtop = 0;
ce426f
+      oldtopsize = 0;
ce426f
+    }
ce426f
   mem = _int_malloc(av, sz);
ce426f
 
ce426f
 
ce426f
   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
ce426f
 	 av == arena_for_chunk(mem2chunk(mem)));
ce426f
 
ce426f
-  if (mem == 0) {
ce426f
+  if (mem == 0 && av != NULL) {
ce426f
     LIBC_PROBE (memory_calloc_retry, 1, sz);
ce426f
     av = arena_get_retry (av, sz);
ce426f
-    if (__builtin_expect(av != NULL, 1)) {
ce426f
-      mem = _int_malloc(av, sz);
ce426f
-      (void)mutex_unlock(&av->mutex);
ce426f
-    }
ce426f
-    if (mem == 0) return 0;
ce426f
-  } else
ce426f
+    mem = _int_malloc(av, sz);
ce426f
+  }
ce426f
+
ce426f
+  if (av != NULL)
ce426f
     (void)mutex_unlock(&av->mutex);
ce426f
+
ce426f
+  /* Allocation failed even after a retry.  */
ce426f
+  if (mem == 0)
ce426f
+    return 0;
ce426f
+
ce426f
   p = mem2chunk(mem);
ce426f
 
ce426f
   /* Two optional cases in which clearing not necessary */
ce426f
@@ -3310,6 +3337,16 @@ _int_malloc(mstate av, size_t bytes)
ce426f
 
ce426f
   checked_request2size(bytes, nb);
ce426f
 
ce426f
+  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
ce426f
+     mmap.  */
ce426f
+  if (__glibc_unlikely (av == NULL))
ce426f
+    {
ce426f
+      void *p = sysmalloc (nb, av);
ce426f
+      if (p != NULL)
ce426f
+       alloc_perturb (p, bytes);
ce426f
+      return p;
ce426f
+    }
ce426f
+
ce426f
   /*
ce426f
     If the size qualifies as a fastbin, first check corresponding bin.
ce426f
     This code is safe to execute even if av is not yet initialized, so we
ce426f
@@ -3334,7 +3371,7 @@ _int_malloc(mstate av, size_t bytes)
ce426f
 	  errstr = "malloc(): memory corruption (fast)";
ce426f
 	errout:
ce426f
 	  mutex_unlock(&av->mutex);
ce426f
-	  malloc_printerr (check_action, errstr, chunk2mem (victim));
ce426f
+	  malloc_printerr (check_action, errstr, chunk2mem (victim), av);
ce426f
 	  mutex_lock(&av->mutex);
ce426f
 	  return NULL;
ce426f
 	}
ce426f
@@ -3421,9 +3458,9 @@ _int_malloc(mstate av, size_t bytes)
ce426f
       if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
ce426f
 	  || __builtin_expect (victim->size > av->system_mem, 0))
ce426f
 	{
ce426f
-	  void *p = chunk2mem(victim);
ce426f
 	  mutex_unlock(&av->mutex);
ce426f
-	  malloc_printerr (check_action, "malloc(): memory corruption", p);
ce426f
+	  malloc_printerr (check_action, "malloc(): memory corruption",
ce426f
+			   chunk2mem (victim), av);
ce426f
 	  mutex_lock(&av->mutex);
ce426f
 	}
ce426f
       size = chunksize(victim);
ce426f
@@ -3801,7 +3838,7 @@ _int_free(mstate av, mchunkptr p, int ha
ce426f
     errout:
ce426f
       if (have_lock || locked)
ce426f
 	(void)mutex_unlock(&av->mutex);
ce426f
-      malloc_printerr (check_action, errstr, chunk2mem(p));
ce426f
+      malloc_printerr (check_action, errstr, chunk2mem(p), av);
ce426f
       if (have_lock)
ce426f
 	mutex_lock(&av->mutex);
ce426f
       return;
ce426f
@@ -4196,7 +4233,7 @@ _int_realloc(mstate av, mchunkptr oldp,
ce426f
       errstr = "realloc(): invalid old size";
ce426f
     errout:
ce426f
       mutex_unlock(&av->mutex);
ce426f
-      malloc_printerr (check_action, errstr, chunk2mem(oldp));
ce426f
+      malloc_printerr (check_action, errstr, chunk2mem(oldp), av);
ce426f
       mutex_lock(&av->mutex);
ce426f
       return NULL;
ce426f
     }
ce426f
@@ -4436,7 +4473,7 @@ static void*
ce426f
 _int_valloc(mstate av, size_t bytes)
ce426f
 {
ce426f
   /* Ensure initialization/consolidation */
ce426f
-  if (have_fastchunks(av)) malloc_consolidate(av);
ce426f
+  if (av && have_fastchunks(av)) malloc_consolidate(av);
ce426f
   return _int_memalign(av, GLRO(dl_pagesize), bytes);
ce426f
 }
ce426f
 
ce426f
@@ -4451,7 +4488,7 @@ _int_pvalloc(mstate av, size_t bytes)
ce426f
   size_t pagesz;
ce426f
 
ce426f
   /* Ensure initialization/consolidation */
ce426f
-  if (have_fastchunks(av)) malloc_consolidate(av);
ce426f
+  if (av && have_fastchunks(av)) malloc_consolidate(av);
ce426f
   pagesz = GLRO(dl_pagesize);
ce426f
   return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
ce426f
 }
ce426f
@@ -4463,6 +4500,10 @@ _int_pvalloc(mstate av, size_t bytes)
ce426f
 
ce426f
 static int mtrim(mstate av, size_t pad)
ce426f
 {
ce426f
+  /* Don't touch corrupt arenas.  */
ce426f
+  if (arena_is_corrupt (av))
ce426f
+    return 0;
ce426f
+
ce426f
   /* Ensure initialization/consolidation */
ce426f
   malloc_consolidate (av);
ce426f
 
ce426f
@@ -4956,8 +4997,14 @@ libc_hidden_def (__libc_mallopt)
ce426f
 extern char **__libc_argv attribute_hidden;
ce426f
 
ce426f
 static void
ce426f
-malloc_printerr(int action, const char *str, void *ptr)
ce426f
+malloc_printerr(int action, const char *str, void *ptr, mstate ar_ptr)
ce426f
 {
ce426f
+  /* Avoid using this arena in future.  We do not attempt to synchronize this
ce426f
+     with anything else because we minimally want to ensure that __libc_message
ce426f
+     gets its resources safely without stumbling on the current corruption.  */
ce426f
+  if (ar_ptr)
ce426f
+    set_arena_corrupt (ar_ptr);
ce426f
+
ce426f
   if ((action & 5) == 5)
ce426f
     __libc_message (action & 2, "%s\n", str);
ce426f
   else if (action & 1)
ce426f
Index: b/malloc/tst-malloc-backtrace.c
ce426f
===================================================================
ce426f
--- /dev/null
ce426f
+++ b/malloc/tst-malloc-backtrace.c
ce426f
@@ -0,0 +1,71 @@
ce426f
+/* Verify that backtrace does not deadlock on itself on memory corruption.
ce426f
+   Copyright (C) 2015 Free Software Foundation, Inc.
ce426f
+   This file is part of the GNU C Library.
ce426f
+
ce426f
+   The GNU C Library is free software; you can redistribute it and/or
ce426f
+   modify it under the terms of the GNU Lesser General Public
ce426f
+   License as published by the Free Software Foundation; either
ce426f
+   version 2.1 of the License, or (at your option) any later version.
ce426f
+
ce426f
+   The GNU C Library is distributed in the hope that it will be useful,
ce426f
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
ce426f
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
ce426f
+   Lesser General Public License for more details.
ce426f
+
ce426f
+   You should have received a copy of the GNU Lesser General Public
ce426f
+   License along with the GNU C Library; if not, see
ce426f
+   <http://www.gnu.org/licenses/>.  */
ce426f
+
ce426f
+
ce426f
+#include <fcntl.h>
ce426f
+#include <paths.h>
ce426f
+#include <stdlib.h>
ce426f
+#include <unistd.h>
ce426f
+
ce426f
+#define SIZE 4096
ce426f
+
ce426f
+/* Avoid all the buffer overflow messages on stderr.  */
ce426f
+static void
ce426f
+ignore_stderr (void)
ce426f
+{
ce426f
+  int fd = open (_PATH_DEVNULL, O_WRONLY);
ce426f
+  if (fd == -1)
ce426f
+    close (STDERR_FILENO);
ce426f
+  else
ce426f
+    {
ce426f
+      dup2 (fd, STDERR_FILENO);
ce426f
+      close (fd);
ce426f
+    }
ce426f
+  setenv ("LIBC_FATAL_STDERR_", "1", 1);
ce426f
+}
ce426f
+
ce426f
+/* Wrap free with a function to prevent gcc from optimizing it out.  */
ce426f
+static void
ce426f
+__attribute__((noinline))
ce426f
+call_free (void *ptr)
ce426f
+{
ce426f
+  free (ptr);
ce426f
+  *(size_t *)(ptr - sizeof (size_t)) = 1;
ce426f
+}
ce426f
+
ce426f
+int
ce426f
+do_test (void)
ce426f
+{
ce426f
+  void *ptr1 = malloc (SIZE);
ce426f
+  void *ptr2 = malloc (SIZE);
ce426f
+
ce426f
+  /* Avoid unwanted output to TTY after an expected memory corruption.  */
ce426f
+  ignore_stderr ();
ce426f
+
ce426f
+  call_free ((void *) ptr1);
ce426f
+  ptr1 = malloc (SIZE);
ce426f
+
ce426f
+  /* Not reached.  The return statement is to put ptr2 into use so that gcc
ce426f
+     doesn't optimize out that malloc call.  */
ce426f
+  return (ptr1 == ptr2);
ce426f
+}
ce426f
+
ce426f
+#define TEST_FUNCTION do_test ()
ce426f
+#define EXPECTED_SIGNAL SIGABRT
ce426f
+
ce426f
+#include "../test-skeleton.c"