commit fff94fa2245612191123a8015eac94eb04f001e2
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Tue May 19 06:40:37 2015 +0530

    Avoid deadlock in malloc on backtrace (BZ #16159)
    
    When the malloc subsystem detects some kind of memory corruption,
    depending on the configuration it prints the error, a backtrace, a
    memory map and then aborts the process.  In this process, the
    backtrace() call may result in a call to malloc, resulting in
    various kinds of problematic behavior.
    
    In one case, the malloc it calls may detect a corruption and call
    backtrace again, and a stack overflow may result due to the infinite
    recursion.  In another case, the malloc it calls may deadlock on an
    arena lock with the malloc (or free, realloc, etc.) that detected the
    corruption.  In yet another case, if the program is linked with
    pthreads, backtrace may do a pthread_once initialization, which
    deadlocks on itself.
    
    In all these cases, the program exit is not as intended.  This is
    avoidable by marking the arena that malloc detected a corruption on,
    as unusable.  The following patch does that.  Features of this patch
    are as follows:
    
    - A flag is added to the mstate struct of the arena to indicate if the
      arena is corrupt.
    
    - The flag is checked whenever malloc functions try to get a lock on
      an arena.  If the arena is unusable, a NULL is returned, causing the
      malloc to use mmap or try the next arena.
    
    - malloc_printerr sets the corrupt flag on the arena when it detects a
      corruption
    
    - free does not concern itself with the flag at all.  It is not
      important since the backtrace workflow does not need free.  A free
      in a parallel thread may cause another corruption, but that's not
      new
    
    - The flag check and set are not atomic and may race.  This is fine
      since we don't care about contention during the flag check.  We want
      to make sure that the malloc call in the backtrace does not trip on
      itself and all that action happens in the same thread and not across
      threads.
    
    I verified that the test case does not show any regressions due to
    this patch.  I also ran the malloc benchmarks and found an
    insignificant difference in timings (< 2%).

The follow-on test-suite fix has been folded into the patch below, but
to keep it minimal, ignore_stderr is put directly into
tst-malloc-backtrace.c.

commit 02242448bf431a69fd0b8c929ca4408a05479baa
Author: Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
Date:   Tue Jun 2 10:32:25 2015 -0300

    Avoid outputting to TTY after an expected memory corruption in testcase

    Protect TTY against an expected memory corruption from testcase
    tst-malloc-backtrace, which is expected to SIGABRT after a forced memory
    corruption.

Index: b/malloc/arena.c
===================================================================
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -119,7 +119,7 @@ int __malloc_initialized = -1;
 
 #ifdef PER_THREAD
 # define arena_lock(ptr, size) do { \
-  if(ptr) \
+  if(ptr && !arena_is_corrupt (ptr)) \
     (void)mutex_lock(&ptr->mutex); \
   else \
     ptr = arena_get2(ptr, (size), NULL); \
@@ -808,7 +808,7 @@ reused_arena (mstate avoid_arena)
   result = next_to_use;
   do
     {
-      if (!mutex_trylock(&result->mutex))
+      if (!arena_is_corrupt (result) && !mutex_trylock(&result->mutex))
	goto out;
 
       result = result->next;
@@ -820,7 +820,21 @@ reused_arena (mstate avoid_arena)
   if (result == avoid_arena)
     result = result->next;
 
-  /* No arena available.  Wait for the next in line.  */
+  /* Make sure that the arena we get is not corrupted.  */
+  mstate begin = result;
+  while (arena_is_corrupt (result) || result == avoid_arena)
+    {
+      result = result->next;
+      if (result == begin)
+	break;
+    }
+
+  /* We could not find any arena that was either not corrupted or not the one
+     we wanted to avoid.  */
+  if (result == begin || result == avoid_arena)
+    return NULL;
+
+  /* No arena available without contention.  Wait for the next in line.  */
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
   (void)mutex_lock(&result->mutex);
 
Index: b/malloc/hooks.c
===================================================================
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -109,7 +109,8 @@ malloc_check_get_size(mchunkptr p)
        size -= c) {
     if(c<=0 || size<(c+2*SIZE_SZ)) {
       malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
-		      chunk2mem(p));
+		      chunk2mem(p),
+		      chunk_is_mmapped (p) ? NULL : arena_for_chunk (p));
       return 0;
     }
   }
@@ -221,7 +222,8 @@ top_check(void)
     return 0;
 
   mutex_unlock(&main_arena);
-  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
+  malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
+		   &main_arena);
  mutex_lock(&main_arena);
 
   /* Try to set up a new top chunk. */
@@ -276,7 +278,8 @@ free_check(void* mem, const void *caller
   if(!p) {
     (void)mutex_unlock(&main_arena.mutex);
 
-    malloc_printerr(check_action, "free(): invalid pointer", mem);
+    malloc_printerr(check_action, "free(): invalid pointer", mem,
+		    &main_arena);
     return;
   }
   if (chunk_is_mmapped(p)) {
@@ -308,7 +311,8 @@ realloc_check(void* oldmem, size_t bytes
   const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
   (void)mutex_unlock(&main_arena.mutex);
   if(!oldp) {
-    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
+    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem,
+		    &main_arena);
     return malloc_check(bytes, NULL);
   }
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);
Index: b/malloc/Makefile
===================================================================
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -25,7 +25,8 @@ all:
 dist-headers := malloc.h
 headers := $(dist-headers) obstack.h mcheck.h
 tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
-	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 tst-malloc-usable
+	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
+	 tst-malloc-usable tst-malloc-backtrace
 test-srcs = tst-mtrace
 
 routines = malloc morecore mcheck mtrace obstack
@@ -40,6 +41,9 @@ extra-libs-others = $(extra-libs)
 libmemusage-routines = memusage
 libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
 
+$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
+			       $(common-objpfx)nptl/libpthread_nonshared.a
+
 # These should be removed by `make clean'.
 extra-objs = mcheck-init.o libmcheck.a
 
Index: b/malloc/malloc.c
===================================================================
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1060,7 +1060,7 @@ static void*  _int_realloc(mstate, mchun
 static void*  _int_memalign(mstate, size_t, size_t);
 static void*  _int_valloc(mstate, size_t);
 static void*  _int_pvalloc(mstate, size_t);
-static void malloc_printerr(int action, const char *str, void *ptr);
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
 
 static void* internal_function mem2mem_check(void *p, size_t sz);
 static int internal_function top_check(void);
@@ -1430,7 +1430,8 @@ typedef struct malloc_chunk* mbinptr;
   BK = P->bk;                                                          \
   if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) {	       \
     mutex_unlock(&(AV)->mutex);					       \
-    malloc_printerr (check_action, "corrupted double-linked list", P); \
+    malloc_printerr (check_action, "corrupted double-linked list", P,  \
+		     AV);					       \
     mutex_lock(&(AV)->mutex);					       \
   } else {							       \
     FD->bk = BK;                                                       \
@@ -1670,6 +1671,15 @@ typedef struct malloc_chunk* mfastbinptr
 #define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
 
+/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
+   arena.  Such an arena is no longer used to allocate chunks.  Chunks
+   allocated in that arena before detecting corruption are not freed.  */
+
+#define ARENA_CORRUPTION_BIT (4U)
+
+#define arena_is_corrupt(A)    (((A)->flags & ARENA_CORRUPTION_BIT))
+#define set_arena_corrupt(A)   ((A)->flags |= ARENA_CORRUPTION_BIT)
+
 /*
    Set value of max_fast.
   Use impossibly small value if 0.
@@ -2281,8 +2291,9 @@ static void* sysmalloc(INTERNAL_SIZE_T n
    rather than expanding top.
  */
 
-  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
-      (mp_.n_mmaps < mp_.n_mmaps_max)) {
+  if (av == NULL
+      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
+	  && (mp_.n_mmaps < mp_.n_mmaps_max))) {
 
    char* mm;             /* return value from mmap call*/
 
@@ -2354,6 +2365,10 @@ static void* sysmalloc(INTERNAL_SIZE_T n
    }
  }
 
+  /* There are no usable arenas and mmap also failed.  */
+  if (av == NULL)
+    return 0;
+
  /* Record incoming configuration of top */
 
  old_top  = av->top;
@@ -2519,7 +2534,7 @@ static void* sysmalloc(INTERNAL_SIZE_T n
    else if (contiguous(av) && old_size && brk < old_end) {
      /* Oops!  Someone else killed our space..  Can't touch anything.  */
      mutex_unlock(&av->mutex);
-      malloc_printerr (3, "break adjusted to free malloc space", brk);
+      malloc_printerr (3, "break adjusted to free malloc space", brk, av);
      mutex_lock(&av->mutex);
    }
 
@@ -2793,7 +2808,7 @@ munmap_chunk(mchunkptr p)
  if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
    {
      malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
-		       chunk2mem (p));
+		       chunk2mem (p), NULL);
      return;
    }
 
@@ -2861,21 +2876,20 @@ __libc_malloc(size_t bytes)
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(bytes, RETURN_ADDRESS (0));
 
-  arena_lookup(ar_ptr);
+  arena_get(ar_ptr, bytes);
 
-  arena_lock(ar_ptr, bytes);
-  if(!ar_ptr)
-    return 0;
  victim = _int_malloc(ar_ptr, bytes);
-  if(!victim) {
+  /* Retry with another arena only if we were able to find a usable arena
+     before.  */
+  if (!victim && ar_ptr != NULL) {
    LIBC_PROBE (memory_malloc_retry, 1, bytes);
    ar_ptr = arena_get_retry(ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      victim = _int_malloc(ar_ptr, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    victim = _int_malloc (ar_ptr, bytes);
+  }
+
+  if (ar_ptr != NULL)
    (void)mutex_unlock(&ar_ptr->mutex);
+
  assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
@@ -2946,6 +2960,11 @@ __libc_realloc(void* oldmem, size_t byte
  /* its size */
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
 
+  if (chunk_is_mmapped (oldp))
+    ar_ptr = NULL;
+  else
+    ar_ptr = arena_for_chunk (oldp);
+
  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
@@ -2953,7 +2972,8 @@ __libc_realloc(void* oldmem, size_t byte
  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
      || __builtin_expect (misaligned_chunk (oldp), 0))
    {
-      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
+		       ar_ptr);
      return NULL;
    }
 
@@ -2977,7 +2997,6 @@ __libc_realloc(void* oldmem, size_t byte
    return newmem;
  }
 
-  ar_ptr = arena_for_chunk(oldp);
 #if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
@@ -3043,18 +3062,17 @@ __libc_memalign(size_t alignment, size_t
    }
 
  arena_get(ar_ptr, bytes + alignment + MINSIZE);
-  if(!ar_ptr)
-    return 0;
+
  p = _int_memalign(ar_ptr, alignment, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
    LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
    ar_ptr = arena_get_retry (ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, alignment, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    p = _int_memalign (ar_ptr, alignment, bytes);
+  }
+
+  if (ar_ptr != NULL)
    (void)mutex_unlock(&ar_ptr->mutex);
+
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
@@ -3088,18 +3106,16 @@ __libc_valloc(size_t bytes)
    return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
 
  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
-  if(!ar_ptr)
-    return 0;
  p = _int_valloc(ar_ptr, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
    LIBC_PROBE (memory_valloc_retry, 1, bytes);
    ar_ptr = arena_get_retry (ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, pagesz, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    p = _int_memalign(ar_ptr, pagesz, bytes);
+  }
+
+  if (ar_ptr != NULL)
    (void)mutex_unlock (&ar_ptr->mutex);
+
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(p)));
 
@@ -3134,15 +3150,15 @@ __libc_pvalloc(size_t bytes)
 
  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
  p = _int_pvalloc(ar_ptr, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
    LIBC_PROBE (memory_pvalloc_retry, 1, bytes);
    ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
+  }
+
+  if (ar_ptr != NULL)
    (void)mutex_unlock(&ar_ptr->mutex);
+
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(p)));
 
@@ -3184,43 +3200,54 @@ __libc_calloc(size_t n, size_t elem_size
  sz = bytes;
 
  arena_get(av, sz);
-  if(!av)
-    return 0;
+  if(av)
+    {
 
-  /* Check if we hand out the top chunk, in which case there may be no
-     need to clear. */
+      /* Check if we hand out the top chunk, in which case there may be no
+	 need to clear. */
 #if MORECORE_CLEARS
-  oldtop = top(av);
-  oldtopsize = chunksize(top(av));
-#if MORECORE_CLEARS < 2
-  /* Only newly allocated memory is guaranteed to be cleared.  */
-  if (av == &main_arena &&
-      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
-    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+      oldtop = top(av);
+      oldtopsize = chunksize(top(av));
+# if MORECORE_CLEARS < 2
+      /* Only newly allocated memory is guaranteed to be cleared.  */
+      if (av == &main_arena &&
+	  oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
+	oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+# endif
+      if (av != &main_arena)
+	{
+	  heap_info *heap = heap_for_ptr (oldtop);
+	  if (oldtopsize < ((char *) heap + heap->mprotect_size -
+			    (char *) oldtop))
+	    oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+	}
#endif
-  if (av != &main_arena)
-    {
-      heap_info *heap = heap_for_ptr (oldtop);
-      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
-	oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
    }
-#endif
+  else
+    {
+      /* No usable arenas.  */
+      oldtop = 0;
+      oldtopsize = 0;
+    }
  mem = _int_malloc(av, sz);
 
 
  assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
	 av == arena_for_chunk(mem2chunk(mem)));
 
-  if (mem == 0) {
+  if (mem == 0 && av != NULL) {
    LIBC_PROBE (memory_calloc_retry, 1, sz);
    av = arena_get_retry (av, sz);
-    if (__builtin_expect(av != NULL, 1)) {
-      mem = _int_malloc(av, sz);
-      (void)mutex_unlock(&av->mutex);
-    }
-    if (mem == 0) return 0;
-  } else
+    mem = _int_malloc(av, sz);
+  }
+
+  if (av != NULL)
    (void)mutex_unlock(&av->mutex);
+
+  /* Allocation failed even after a retry.  */
+  if (mem == 0)
+    return 0;
+
  p = mem2chunk(mem);
 
  /* Two optional cases in which clearing not necessary */
@@ -3310,6 +3337,16 @@ _int_malloc(mstate av, size_t bytes)
 
  checked_request2size(bytes, nb);
 
+  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
+     mmap.  */
+  if (__glibc_unlikely (av == NULL))
+    {
+      void *p = sysmalloc (nb, av);
+      if (p != NULL)
+       alloc_perturb (p, bytes);
+      return p;
+    }
+
  /*
    If the size qualifies as a fastbin, first check corresponding bin.
    This code is safe to execute even if av is not yet initialized, so we
@@ -3334,7 +3371,7 @@ _int_malloc(mstate av, size_t bytes)
	  errstr = "malloc(): memory corruption (fast)";
	errout:
	  mutex_unlock(&av->mutex);
-	  malloc_printerr (check_action, errstr, chunk2mem (victim));
+	  malloc_printerr (check_action, errstr, chunk2mem (victim), av);
	  mutex_lock(&av->mutex);
	  return NULL;
	}
@@ -3421,9 +3458,9 @@ _int_malloc(mstate av, size_t bytes)
      if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
	  || __builtin_expect (victim->size > av->system_mem, 0))
	{
-	  void *p = chunk2mem(victim);
	  mutex_unlock(&av->mutex);
-	  malloc_printerr (check_action, "malloc(): memory corruption", p);
+	  malloc_printerr (check_action, "malloc(): memory corruption",
+			   chunk2mem (victim), av);
	  mutex_lock(&av->mutex);
	}
      size = chunksize(victim);
@@ -3801,7 +3838,7 @@ _int_free(mstate av, mchunkptr p, int ha
    errout:
      if (have_lock || locked)
	(void)mutex_unlock(&av->mutex);
-      malloc_printerr (check_action, errstr, chunk2mem(p));
+      malloc_printerr (check_action, errstr, chunk2mem(p), av);
      if (have_lock)
	mutex_lock(&av->mutex);
      return;
@@ -4196,7 +4233,7 @@ _int_realloc(mstate av, mchunkptr oldp,
      errstr = "realloc(): invalid old size";
    errout:
      mutex_unlock(&av->mutex);
-      malloc_printerr (check_action, errstr, chunk2mem(oldp));
+      malloc_printerr (check_action, errstr, chunk2mem(oldp), av);
      mutex_lock(&av->mutex);
      return NULL;
    }
@@ -4436,7 +4473,7 @@ static void*
 _int_valloc(mstate av, size_t bytes)
 {
  /* Ensure initialization/consolidation */
-  if (have_fastchunks(av)) malloc_consolidate(av);
+  if (av && have_fastchunks(av)) malloc_consolidate(av);
  return _int_memalign(av, GLRO(dl_pagesize), bytes);
 }
 
@@ -4451,7 +4488,7 @@ _int_pvalloc(mstate av, size_t bytes)
  size_t pagesz;
 
  /* Ensure initialization/consolidation */
-  if (have_fastchunks(av)) malloc_consolidate(av);
+  if (av && have_fastchunks(av)) malloc_consolidate(av);
  pagesz = GLRO(dl_pagesize);
  return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
 }
@@ -4463,6 +4500,10 @@ _int_pvalloc(mstate av, size_t bytes)
 
 static int mtrim(mstate av, size_t pad)
 {
+  /* Don't touch corrupt arenas.  */
+  if (arena_is_corrupt (av))
+    return 0;
+
  /* Ensure initialization/consolidation */
  malloc_consolidate (av);
 
@@ -4956,8 +4997,14 @@ libc_hidden_def (__libc_mallopt)
 extern char **__libc_argv attribute_hidden;
 
 static void
-malloc_printerr(int action, const char *str, void *ptr)
+malloc_printerr(int action, const char *str, void *ptr, mstate ar_ptr)
 {
+  /* Avoid using this arena in future.  We do not attempt to synchronize this
+     with anything else because we minimally want to ensure that __libc_message
+     gets its resources safely without stumbling on the current corruption.  */
+  if (ar_ptr)
+    set_arena_corrupt (ar_ptr);
+
  if ((action & 5) == 5)
    __libc_message (action & 2, "%s\n", str);
  else if (action & 1)
Index: b/malloc/tst-malloc-backtrace.c
===================================================================
--- /dev/null
+++ b/malloc/tst-malloc-backtrace.c
@@ -0,0 +1,71 @@
+/* Verify that backtrace does not deadlock on itself on memory corruption.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#include <fcntl.h>
+#include <paths.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#define SIZE 4096
+
+/* Avoid all the buffer overflow messages on stderr.  */
+static void
+ignore_stderr (void)
+{
+  int fd = open (_PATH_DEVNULL, O_WRONLY);
+  if (fd == -1)
+    close (STDERR_FILENO);
+  else
+    {
+      dup2 (fd, STDERR_FILENO);
+      close (fd);
+    }
+  setenv ("LIBC_FATAL_STDERR_", "1", 1);
+}
+
+/* Wrap free with a function to prevent gcc from optimizing it out.  */
+static void
+__attribute__((noinline))
+call_free (void *ptr)
+{
+  free (ptr);
+  *(size_t *)(ptr - sizeof (size_t)) = 1;
+}
+
+int
+do_test (void)
+{
+  void *ptr1 = malloc (SIZE);
+  void *ptr2 = malloc (SIZE);
+
+  /* Avoid unwanted output to TTY after an expected memory corruption.  */
+  ignore_stderr ();
+
+  call_free ((void *) ptr1);
+  ptr1 = malloc (SIZE);
+
+  /* Not reached.  The return statement is to put ptr2 into use so that gcc
+     doesn't optimize out that malloc call.  */
+  return (ptr1 == ptr2);
+}
+
+#define TEST_FUNCTION do_test ()
+#define EXPECTED_SIGNAL SIGABRT
+
+#include "../test-skeleton.c"