commit fff94fa2245612191123a8015eac94eb04f001e2
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Tue May 19 06:40:37 2015 +0530

    Avoid deadlock in malloc on backtrace (BZ #16159)

    When the malloc subsystem detects some kind of memory corruption,
    depending on the configuration it prints the error, a backtrace, a
    memory map and then aborts the process.  In this process, the
    backtrace() call may result in a call to malloc, resulting in
    various kinds of problematic behavior.

    In one case, the malloc it calls may detect a corruption and call
    backtrace again, and a stack overflow may result due to the infinite
    recursion.  In another case, the malloc it calls may deadlock on an
    arena lock with the malloc (or free, realloc, etc.) that detected the
    corruption.  In yet another case, if the program is linked with
    pthreads, backtrace may do a pthread_once initialization, which
    deadlocks on itself.

    In all these cases, the program exit is not as intended.  This is
    avoidable by marking the arena on which malloc detected the
    corruption as unusable.  The following patch does that.  The features
    of this patch are as follows:

    - A flag is added to the mstate struct of the arena to indicate if the
      arena is corrupt.

    - The flag is checked whenever malloc functions try to get a lock on
      an arena.  If the arena is unusable, NULL is returned, causing the
      malloc to use mmap or try the next arena.

    - malloc_printerr sets the corrupt flag on the arena when it detects a
      corruption.

    - free does not concern itself with the flag at all.  It is not
      important, since the backtrace workflow does not need free.  A free
      in a parallel thread may cause another corruption, but that's not
      new.

    - The flag check and set are not atomic and may race.  This is fine,
      since we don't care about contention during the flag check.  We only
      want to make sure that the malloc call in the backtrace does not
      trip on itself, and all of that action happens in the same thread
      and not across threads.  (A minimal sketch of this check-and-set
      pattern follows this commit message.)

    I verified that the test case does not show any regressions due to
    this patch.  I also ran the malloc benchmarks and found an
    insignificant difference in timings (< 2%).
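
The flag mechanism described above boils down to a plain, non-atomic
check-and-set on a per-arena flags word.  A minimal self-contained model of
that pattern (the struct below is a simplified stand-in for glibc's mstate,
not the real type):

    #include <stdio.h>

    #define ARENA_CORRUPTION_BIT (4U)

    struct arena
    {
      unsigned int flags;	/* stands in for mstate->flags */
    };

    /* The check is non-atomic: a racing reader may miss a concurrent set.
       That is acceptable here, because the error path only has to keep the
       thread that detected the corruption from tripping over itself.  */
    static int
    arena_is_corrupt (struct arena *a)
    {
      return (a->flags & ARENA_CORRUPTION_BIT) != 0;
    }

    static void
    set_arena_corrupt (struct arena *a)
    {
      a->flags |= ARENA_CORRUPTION_BIT;
    }

    int
    main (void)
    {
      struct arena a = { 0 };
      set_arena_corrupt (&a);	  /* what malloc_printerr does on error */
      if (arena_is_corrupt (&a))  /* what arena_lock checks before locking */
        puts ("arena marked unusable; allocation falls back to mmap");
      return 0;
    }
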
diff -pruN glibc-2.17-c758a686/malloc/arena.c glibc-2.17-c758a686/malloc/arena.c
--- glibc-2.17-c758a686/malloc/arena.c	2015-05-28 13:32:17.544433238 +0530
+++ glibc-2.17-c758a686/malloc/arena.c	2015-05-28 15:29:02.605120231 +0530
@@ -119,7 +119,7 @@ int __malloc_initialized = -1;
 
 #ifdef PER_THREAD
 # define arena_lock(ptr, size) do { \
-  if(ptr) \
+  if(ptr && !arena_is_corrupt (ptr)) \
     (void)mutex_lock(&ptr->mutex); \
   else \
     ptr = arena_get2(ptr, (size), NULL); \
@@ -808,7 +808,7 @@ reused_arena (mstate avoid_arena)
   result = next_to_use;
   do
     {
-      if (!mutex_trylock(&result->mutex))
+      if (!arena_is_corrupt (result) && !mutex_trylock(&result->mutex))
 	goto out;
 
       result = result->next;
@@ -820,7 +820,21 @@ reused_arena (mstate avoid_arena)
   if (result == avoid_arena)
     result = result->next;
 
-  /* No arena available.  Wait for the next in line.  */
+  /* Make sure that the arena we get is not corrupted.  */
+  mstate begin = result;
+  while (arena_is_corrupt (result) || result == avoid_arena)
+    {
+      result = result->next;
+      if (result == begin)
+	break;
+    }
+
+  /* We could not find any arena that was neither corrupted nor the one
+     we wanted to avoid.  */
+  if (result == begin || result == avoid_arena)
+    return NULL;
+
+  /* No arena available without contention.  Wait for the next in line.  */
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
   (void)mutex_lock(&result->mutex);
 
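
The second loop added to reused_arena above scans the circular arena list at
most once, remembering its starting point so that the scan terminates even
when every arena is corrupt or is the arena to avoid.  A simplified model of
that single-pass scan (illustrative types and names; the real code operates
on mstate and additionally tries each arena's lock):

    #include <stddef.h>

    #define ARENA_CORRUPTION_BIT (4U)
    #define arena_is_corrupt(a) (((a)->flags & ARENA_CORRUPTION_BIT) != 0)

    struct arena
    {
      unsigned int flags;
      struct arena *next;	/* circular singly-linked list of arenas */
    };

    /* Return the first usable arena on the ring, or NULL if one full trip
       around the ring finds none.  NULL tells the caller to fall back to
       mmap rather than wait on a corrupt arena's lock.  */
    static struct arena *
    pick_usable_arena (struct arena *start, struct arena *avoid)
    {
      struct arena *a = start;
      do
        {
          if (!arena_is_corrupt (a) && a != avoid)
            return a;
          a = a->next;
        }
      while (a != start);	/* sentinel: we wrapped around */
      return NULL;
    }
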
diff -pruN glibc-2.17-c758a686/malloc/hooks.c glibc-2.17-c758a686/malloc/hooks.c
--- glibc-2.17-c758a686/malloc/hooks.c	2015-05-28 13:32:17.379431450 +0530
+++ glibc-2.17-c758a686/malloc/hooks.c	2015-05-28 15:31:14.132551554 +0530
@@ -109,7 +109,8 @@ malloc_check_get_size(mchunkptr p)
        size -= c) {
     if(c<=0 || size<(c+2*SIZE_SZ)) {
       malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
-		      chunk2mem(p));
+		      chunk2mem(p),
+		      chunk_is_mmapped (p) ? NULL : arena_for_chunk (p));
       return 0;
     }
   }
@@ -221,7 +222,8 @@ top_check(void)
     return 0;
 
   mutex_unlock(&main_arena.mutex);
-  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
+  malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
+		   &main_arena);
   mutex_lock(&main_arena.mutex);
 
   /* Try to set up a new top chunk. */
@@ -276,7 +278,8 @@ free_check(void* mem, const void *caller
   if(!p) {
     (void)mutex_unlock(&main_arena.mutex);
 
-    malloc_printerr(check_action, "free(): invalid pointer", mem);
+    malloc_printerr(check_action, "free(): invalid pointer", mem,
+		    &main_arena);
     return;
   }
   if (chunk_is_mmapped(p)) {
@@ -308,7 +311,8 @@ realloc_check(void* oldmem, size_t bytes
   const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
   (void)mutex_unlock(&main_arena.mutex);
   if(!oldp) {
-    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
+    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem,
+		    &main_arena);
     return malloc_check(bytes, NULL);
   }
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);
diff -pruN glibc-2.17-c758a686/malloc/Makefile glibc-2.17-c758a686/malloc/Makefile
--- glibc-2.17-c758a686/malloc/Makefile	2012-12-25 08:32:13.000000000 +0530
+++ glibc-2.17-c758a686/malloc/Makefile	2015-05-28 13:34:11.967673754 +0530
@@ -25,7 +25,8 @@ all:
 dist-headers := malloc.h
 headers := $(dist-headers) obstack.h mcheck.h
 tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
-	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 tst-malloc-usable
+	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
+	 tst-malloc-usable tst-malloc-backtrace
 test-srcs = tst-mtrace
 
 routines = malloc morecore mcheck mtrace obstack
@@ -40,6 +41,9 @@ extra-libs-others = $(extra-libs)
 libmemusage-routines = memusage
 libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
 
+$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
+			       $(common-objpfx)nptl/libpthread_nonshared.a
+
 # These should be removed by `make clean'.
 extra-objs = mcheck-init.o libmcheck.a
 
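
The explicit libpthread dependency added above matters because one of the
failure modes this patch addresses, backtrace deadlocking on its own
pthread_once initialization, only arises in programs linked with pthreads;
linking the new test against libpthread exercises exactly that path.
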
diff -pruN glibc-2.17-c758a686/malloc/malloc.c glibc-2.17-c758a686/malloc/malloc.c
--- glibc-2.17-c758a686/malloc/malloc.c	2015-05-28 13:32:17.848436534 +0530
+++ glibc-2.17-c758a686/malloc/malloc.c	2015-05-28 15:53:16.694991702 +0530
@@ -1060,7 +1060,7 @@ static void*  _int_realloc(mstate, mchun
 static void*  _int_memalign(mstate, size_t, size_t);
 static void*  _int_valloc(mstate, size_t);
 static void*  _int_pvalloc(mstate, size_t);
-static void malloc_printerr(int action, const char *str, void *ptr);
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
 
 static void* internal_function mem2mem_check(void *p, size_t sz);
 static int internal_function top_check(void);
@@ -1430,7 +1430,8 @@ typedef struct malloc_chunk* mbinptr;
   BK = P->bk;                                                          \
   if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) {	       \
     mutex_unlock(&(AV)->mutex);					       \
-    malloc_printerr (check_action, "corrupted double-linked list", P); \
+    malloc_printerr (check_action, "corrupted double-linked list", P,  \
+		     AV);					       \
    mutex_lock(&(AV)->mutex);					       \
   } else {							       \
     FD->bk = BK;                                                       \
@@ -1670,6 +1671,15 @@ typedef struct malloc_chunk* mfastbinptr
 #define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
 
+/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
+   arena.  Such an arena is no longer used to allocate chunks.  Chunks
+   allocated in that arena before detecting corruption are not freed.  */
+
+#define ARENA_CORRUPTION_BIT (4U)
+
+#define arena_is_corrupt(A)    (((A)->flags & ARENA_CORRUPTION_BIT))
+#define set_arena_corrupt(A)   ((A)->flags |= ARENA_CORRUPTION_BIT)
+
 /*
    Set value of max_fast.
   Use impossibly small value if 0.
@@ -2281,8 +2291,9 @@ static void* sysmalloc(INTERNAL_SIZE_T n
     rather than expanding top.
   */
 
-  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
-      (mp_.n_mmaps < mp_.n_mmaps_max)) {
+  if (av == NULL
+      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
+	  && (mp_.n_mmaps < mp_.n_mmaps_max))) {
 
     char* mm;             /* return value from mmap call*/
 
@@ -2354,6 +2365,10 @@ static void* sysmalloc(INTERNAL_SIZE_T n
     }
   }
 
+  /* There are no usable arenas and mmap also failed.  */
+  if (av == NULL)
+    return 0;
+
   /* Record incoming configuration of top */
 
   old_top  = av->top;
@@ -2519,7 +2534,7 @@ static void* sysmalloc(INTERNAL_SIZE_T n
     else if (contiguous(av) && old_size && brk < old_end) {
       /* Oops!  Someone else killed our space..  Can't touch anything.  */
       mutex_unlock(&av->mutex);
-      malloc_printerr (3, "break adjusted to free malloc space", brk);
+      malloc_printerr (3, "break adjusted to free malloc space", brk, av);
       mutex_lock(&av->mutex);
     }
 
@@ -2793,7 +2808,7 @@ munmap_chunk(mchunkptr p)
   if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
     {
       malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
-		       chunk2mem (p));
+		       chunk2mem (p), NULL);
       return;
     }
 
@@ -2861,21 +2876,20 @@ __libc_malloc(size_t bytes)
   if (__builtin_expect (hook != NULL, 0))
     return (*hook)(bytes, RETURN_ADDRESS (0));
 
-  arena_lookup(ar_ptr);
+  arena_get(ar_ptr, bytes);
 
-  arena_lock(ar_ptr, bytes);
-  if(!ar_ptr)
-    return 0;
   victim = _int_malloc(ar_ptr, bytes);
-  if(!victim) {
+  /* Retry with another arena only if we were able to find a usable arena
+     before.  */
+  if (!victim && ar_ptr != NULL) {
     LIBC_PROBE (memory_malloc_retry, 1, bytes);
     ar_ptr = arena_get_retry(ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      victim = _int_malloc(ar_ptr, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    victim = _int_malloc (ar_ptr, bytes);
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
 	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
   return victim;
@@ -2946,6 +2960,11 @@ __libc_realloc(void* oldmem, size_t byte
   /* its size */
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);
 
+  if (chunk_is_mmapped (oldp))
+    ar_ptr = NULL;
+  else
+    ar_ptr = arena_for_chunk (oldp);
+
   /* Little security check which won't hurt performance: the
      allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
@@ -2953,7 +2972,8 @@ __libc_realloc(void* oldmem, size_t byte
   if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
       || __builtin_expect (misaligned_chunk (oldp), 0))
     {
-      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
+		       ar_ptr);
       return NULL;
     }
 
@@ -2977,7 +2997,6 @@ __libc_realloc(void* oldmem, size_t byte
     return newmem;
   }
 
-  ar_ptr = arena_for_chunk(oldp);
 #if THREAD_STATS
   if(!mutex_trylock(&ar_ptr->mutex))
     ++(ar_ptr->stat_lock_direct);
@@ -3043,18 +3062,17 @@ __libc_memalign(size_t alignment, size_t
     }
 
   arena_get(ar_ptr, bytes + alignment + MINSIZE);
-  if(!ar_ptr)
-    return 0;
+
   p = _int_memalign(ar_ptr, alignment, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
     LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
     ar_ptr = arena_get_retry (ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, alignment, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    p = _int_memalign (ar_ptr, alignment, bytes);
+  }
+
+  if (ar_ptr != NULL)
    (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
   return p;
@@ -3088,18 +3106,16 @@ __libc_valloc(size_t bytes)
     return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
 
   arena_get(ar_ptr, bytes + pagesz + MINSIZE);
-  if(!ar_ptr)
-    return 0;
   p = _int_valloc(ar_ptr, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
     LIBC_PROBE (memory_valloc_retry, 1, bytes);
     ar_ptr = arena_get_retry (ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, pagesz, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    p = _int_memalign(ar_ptr, pagesz, bytes);
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock (&ar_ptr->mutex);
+
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
 
@@ -3134,15 +3150,15 @@ __libc_pvalloc(size_t bytes)
 
   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
   p = _int_pvalloc(ar_ptr, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
     LIBC_PROBE (memory_pvalloc_retry, 1, bytes);
     ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
+    p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
 	 ar_ptr == arena_for_chunk(mem2chunk(p)));
 
@@ -3184,43 +3200,48 @@ __libc_calloc(size_t n, size_t elem_size
   sz = bytes;
 
   arena_get(av, sz);
-  if(!av)
-    return 0;
+  if(av)
+    {
 
-  /* Check if we hand out the top chunk, in which case there may be no
-     need to clear. */
+      /* Check if we hand out the top chunk, in which case there may be no
+	 need to clear. */
 #if MORECORE_CLEARS
-  oldtop = top(av);
-  oldtopsize = chunksize(top(av));
-#if MORECORE_CLEARS < 2
-  /* Only newly allocated memory is guaranteed to be cleared.  */
-  if (av == &main_arena &&
-      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
-    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+      oldtop = top(av);
+      oldtopsize = chunksize(top(av));
+# if MORECORE_CLEARS < 2
+      /* Only newly allocated memory is guaranteed to be cleared.  */
+      if (av == &main_arena &&
+	  oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
+	oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+# endif
+      if (av != &main_arena)
+	{
+	  heap_info *heap = heap_for_ptr (oldtop);
+	  if (oldtopsize < ((char *) heap + heap->mprotect_size -
+			    (char *) oldtop))
+	    oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+	}
 #endif
-  if (av != &main_arena)
-    {
-      heap_info *heap = heap_for_ptr (oldtop);
-      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
-	oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
     }
-#endif
   mem = _int_malloc(av, sz);
 
 
   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
 	 av == arena_for_chunk(mem2chunk(mem)));
 
-  if (mem == 0) {
+  if (mem == 0 && av != NULL) {
     LIBC_PROBE (memory_calloc_retry, 1, sz);
     av = arena_get_retry (av, sz);
-    if (__builtin_expect(av != NULL, 1)) {
-      mem = _int_malloc(av, sz);
-      (void)mutex_unlock(&av->mutex);
-    }
-    if (mem == 0) return 0;
-  } else
+    mem = _int_malloc(av, sz);
+  }
+
+  if (av != NULL)
    (void)mutex_unlock(&av->mutex);
+
+  /* Allocation failed even after a retry.  */
+  if (mem == 0)
+    return 0;
+
  p = mem2chunk(mem);
 
   /* Two optional cases in which clearing not necessary */
@@ -3310,6 +3331,16 @@ _int_malloc(mstate av, size_t bytes)
 
   checked_request2size(bytes, nb);
 
+  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
+     mmap.  */
+  if (__glibc_unlikely (av == NULL))
+    {
+      void *p = sysmalloc (nb, av);
+      if (p != NULL)
+       alloc_perturb (p, bytes);
+      return p;
+    }
+
   /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
@@ -3334,7 +3365,7 @@ _int_malloc(mstate av, size_t bytes)
 	  errstr = "malloc(): memory corruption (fast)";
 	errout:
 	  mutex_unlock(&av->mutex);
-	  malloc_printerr (check_action, errstr, chunk2mem (victim));
+	  malloc_printerr (check_action, errstr, chunk2mem (victim), av);
 	  mutex_lock(&av->mutex);
 	  return NULL;
 	}
@@ -3421,9 +3452,9 @@ _int_malloc(mstate av, size_t bytes)
       if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
 	  || __builtin_expect (victim->size > av->system_mem, 0))
 	{
-	  void *p = chunk2mem(victim);
 	  mutex_unlock(&av->mutex);
-	  malloc_printerr (check_action, "malloc(): memory corruption", p);
+	  malloc_printerr (check_action, "malloc(): memory corruption",
+			   chunk2mem (victim), av);
 	  mutex_lock(&av->mutex);
 	}
       size = chunksize(victim);
@@ -3801,7 +3832,7 @@ _int_free(mstate av, mchunkptr p, int ha
     errout:
       if (have_lock || locked)
 	(void)mutex_unlock(&av->mutex);
-      malloc_printerr (check_action, errstr, chunk2mem(p));
+      malloc_printerr (check_action, errstr, chunk2mem(p), av);
       if (have_lock)
 	mutex_lock(&av->mutex);
       return;
@@ -4196,7 +4227,7 @@ _int_realloc(mstate av, mchunkptr oldp, 
      errstr = "realloc(): invalid old size";
    errout:
      mutex_unlock(&av->mutex);
-      malloc_printerr (check_action, errstr, chunk2mem(oldp));
+      malloc_printerr (check_action, errstr, chunk2mem(oldp), av);
       mutex_lock(&av->mutex);
       return NULL;
     }
@@ -4467,7 +4467,7 @@ static void*
 _int_valloc(mstate av, size_t bytes)
 {
   /* Ensure initialization/consolidation */
-  if (have_fastchunks(av)) malloc_consolidate(av);
+  if (av && have_fastchunks(av)) malloc_consolidate(av);
   return _int_memalign(av, GLRO(dl_pagesize), bytes);
 }
 
@@ -4482,7 +4482,7 @@ _int_pvalloc(mstate av, size_t bytes)
   size_t pagesz;
 
   /* Ensure initialization/consolidation */
-  if (have_fastchunks(av)) malloc_consolidate(av);
+  if (av && have_fastchunks(av)) malloc_consolidate(av);
   pagesz = GLRO(dl_pagesize);
   return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
 }
@@ -4463,6 +4494,10 @@ _int_pvalloc(mstate av, size_t bytes)
 
 static int mtrim(mstate av, size_t pad)
 {
+  /* Don't touch corrupt arenas.  */
+  if (arena_is_corrupt (av))
+    return 0;
+
   /* Ensure initialization/consolidation */
   malloc_consolidate (av);
 
@@ -4956,8 +4991,14 @@ libc_hidden_def (__libc_mallopt)
 extern char **__libc_argv attribute_hidden;
 
 static void
-malloc_printerr(int action, const char *str, void *ptr)
+malloc_printerr(int action, const char *str, void *ptr, mstate ar_ptr)
 {
+  /* Avoid using this arena in future.  We do not attempt to synchronize this
+     with anything else because we minimally want to ensure that __libc_message
+     gets its resources safely without stumbling on the current corruption.  */
+  if (ar_ptr)
+    set_arena_corrupt (ar_ptr);
+
   if ((action & 5) == 5)
     __libc_message (action & 2, "%s\n", str);
   else if (action & 1)
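
With these changes every allocation entry point tolerates a NULL arena
pointer: _int_malloc forwards a NULL av to sysmalloc, which then serves the
request directly from mmap regardless of the usual threshold.  A rough model
of that degraded path (hypothetical helper names, not the glibc internals):

    #include <stddef.h>
    #include <sys/mman.h>

    struct arena;		/* opaque stand-in for mstate */

    /* What sysmalloc effectively does for av == NULL: skip the threshold
       checks and satisfy the padded request straight from mmap.  */
    static void *
    mmap_fallback (size_t nb)
    {
      void *mm = mmap (NULL, nb, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return mm == MAP_FAILED ? NULL : mm;
    }

    static void *
    alloc_chunk (struct arena *av, size_t nb)
    {
      if (av == NULL)		/* no usable arenas are left */
        return mmap_fallback (nb);
      /* ... the normal arena-backed allocation path goes here ... */
      return NULL;
    }
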
diff -pruN glibc-2.17-c758a686/malloc/tst-malloc-backtrace.c glibc-2.17-c758a686/malloc/tst-malloc-backtrace.c
--- glibc-2.17-c758a686/malloc/tst-malloc-backtrace.c	1970-01-01 05:30:00.000000000 +0530
+++ glibc-2.17-c758a686/malloc/tst-malloc-backtrace.c	2015-05-28 15:54:10.135577633 +0530
@@ -0,0 +1,50 @@
+/* Verify that backtrace does not deadlock on itself on memory corruption.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#include <stdlib.h>
+
+#define SIZE 4096
+
+/* Wrap free with a function to prevent gcc from optimizing it out.  */
+static void
+__attribute__((noinline))
+call_free (void *ptr)
+{
+  free (ptr);
+  *(size_t *)(ptr - sizeof (size_t)) = 1;
+}
+
+int
+do_test (void)
+{
+  void *ptr1 = malloc (SIZE);
+  void *ptr2 = malloc (SIZE);
+
+  call_free (ptr1);
+  ptr1 = malloc (SIZE);
+
+  /* Not reached.  The return statement is to put ptr2 into use so that gcc
+     doesn't optimize out that malloc call.  */
+  return (ptr1 == ptr2);
+}
+
+#define TEST_FUNCTION do_test ()
+#define EXPECTED_SIGNAL SIGABRT
+
+#include "../test-skeleton.c"