| commit fff94fa2245612191123a8015eac94eb04f001e2
| Author: Siddhesh Poyarekar <siddhesh@redhat.com>
| Date: Tue May 19 06:40:37 2015 +0530
| |
| Avoid deadlock in malloc on backtrace (BZ #16159) |
| |
| When the malloc subsystem detects memory corruption, it prints the
| error, a backtrace and a memory map (depending on the configuration)
| and then aborts the process. While doing this, the backtrace() call
| may itself result in a call to malloc, leading to various kinds of
| problematic behavior.
| |
| In one case, the malloc it calls may detect corruption and call
| backtrace again, and the resulting infinite recursion may overflow
| the stack. In another case, the malloc it calls may deadlock on an
| arena lock with the malloc (or free, realloc, etc.) that detected
| the corruption in the first place. In yet another case, if the
| program is linked with pthreads, backtrace may perform a
| pthread_once initialization, which deadlocks on itself.
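| |
| Schematically, the first failure mode is a cycle (the other two are
| deadlock variants of the same re-entry):
| |
|     free() detects corruption
|       -> malloc_printerr() -> backtrace()
|            -> malloc() detects corruption again
|                 -> malloc_printerr() -> backtrace() -> ...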
| |
| In all of these cases, the program does not exit as intended. This
| can be avoided by marking the arena on which malloc detected the
| corruption as unusable. The following patch does that; its features
| are listed below (see the sketch after the list for the overall
| pattern):
| |
| - A flag is added to the mstate struct of the arena to indicate
|   whether the arena is corrupt.
| |
| - The flag is checked whenever malloc functions try to get a lock on
|   an arena. If the arena is unusable, NULL is returned, causing
|   malloc to fall back to mmap or to try the next arena.
| |
| - malloc_printerr sets the corrupt flag on the arena when it detects
|   corruption.
| |
| - free does not concern itself with the flag at all. This is not
|   important, since the backtrace workflow does not need free. A free
|   in a parallel thread may cause another corruption, but that is
|   nothing new.
| |
| - The flag check and set are not atomic and may race. This is fine,
|   since we do not care about contention during the flag check. We
|   only want to make sure that the malloc call in the backtrace does
|   not trip on itself, and all of that action happens in the same
|   thread, not across threads.
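| |
| The pattern these points describe can be sketched in a few lines of
| C. All names below are invented for illustration; the real flag,
| macros and locking are in the malloc.c and arena.c hunks further
| down:
| |
|   #include <pthread.h>
|   #include <stddef.h>
| |
|   #define TOY_ARENA_CORRUPT 4U            /* invented flag bit */
| |
|   struct toy_arena
|   {
|     unsigned int flags;
|     pthread_mutex_t mutex;
|   };
| |
|   /* Lock an arena for allocation, or return NULL if it has been
|      marked corrupt, so that the caller falls back to mmap or to the
|      next arena.  The flag read is deliberately non-atomic; it only
|      has to be seen by the thread that set it.  */
|   static struct toy_arena *
|   toy_arena_lock (struct toy_arena *a)
|   {
|     if (a == NULL || (a->flags & TOY_ARENA_CORRUPT) != 0)
|       return NULL;
|     pthread_mutex_lock (&a->mutex);
|     return a;
|   }
| |
|   /* On detected corruption, poison the arena before reporting, so
|      that any malloc done while printing the error and the backtrace
|      avoids this arena.  */
|   static void
|   toy_report_corruption (struct toy_arena *a)
|   {
|     if (a != NULL)
|       a->flags |= TOY_ARENA_CORRUPT;
|     /* ... print message, backtrace and memory map, then abort ... */
|   }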
| |
| I verified that the test suite does not show any regressions due to
| this patch. I also ran the malloc benchmarks and found an
| insignificant difference in timings (< 2%).
| |
| |
| The follow-on test-suite fix has been folded into the patch below; to
| keep the change minimal, ignore_stderr is placed directly in
| tst-malloc-backtrace.c.
| |
| commit 02242448bf431a69fd0b8c929ca4408a05479baa
| Author: Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
| Date: Tue Jun 2 10:32:25 2015 -0300
| |
| Avoid outputting to TTY after an expected memory corruption in testcase |
| |
| Protect the TTY from the output of the testcase tst-malloc-backtrace,
| which forces a memory corruption and is therefore expected to die
| with SIGABRT. Since glibc writes fatal errors directly to /dev/tty by
| default, the testcase redirects stderr to /dev/null and sets
| LIBC_FATAL_STDERR_ so that the error message follows the redirected
| stderr instead.
| |
| --- a/malloc/arena.c
| +++ b/malloc/arena.c
| @@ -119,7 +119,7 @@ int __malloc_initialized = -1; |
| |
| #ifdef PER_THREAD |
| # define arena_lock(ptr, size) do { \ |
| - if(ptr) \ |
| + if(ptr && !arena_is_corrupt (ptr)) \ |
| (void)mutex_lock(&ptr->mutex); \ |
| else \ |
| ptr = arena_get2(ptr, (size), NULL); \ |
| @@ -808,7 +808,7 @@ reused_arena (mstate avoid_arena) |
| result = next_to_use; |
| do |
| { |
| - if (!mutex_trylock(&result->mutex)) |
| + if (!arena_is_corrupt (result) && !mutex_trylock(&result->mutex)) |
| goto out; |
| |
| result = result->next; |
| @@ -820,7 +820,21 @@ reused_arena (mstate avoid_arena) |
| if (result == avoid_arena) |
| result = result->next; |
| |
| - /* No arena available. Wait for the next in line. */ |
| + /* Make sure that the arena we get is not corrupted. */ |
| + mstate begin = result; |
| + while (arena_is_corrupt (result) || result == avoid_arena) |
| + { |
| + result = result->next; |
| + if (result == begin) |
| + break; |
| + } |
| + |
| + /* We could not find any arena that was either not corrupted or not the one |
| + we wanted to avoid. */ |
| + if (result == begin || result == avoid_arena) |
| + return NULL; |
| + |
| + /* No arena available without contention. Wait for the next in line. */ |
| LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena); |
| (void)mutex_lock(&result->mutex); |
| |
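| The wrap-around scan added to reused_arena above can be read in
| isolation as the following toy routine (invented types; the real code
| additionally tries each arena's lock without blocking first):
| |
|   #include <stddef.h>
| |
|   struct node { struct node *next; int corrupt; };
| |
|   /* Walk the circular arena list at most once, skipping nodes that
|      are corrupt or explicitly avoided; NULL means nothing usable
|      remains.  */
|   static struct node *
|   pick_usable (struct node *start, struct node *avoid)
|   {
|     struct node *n = start;
|     while (n->corrupt || n == avoid)
|       {
|         n = n->next;
|         if (n == start)
|           return NULL;        /* went full circle without a match */
|       }
|     return n;
|   }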
| |
| --- a/malloc/hooks.c
| +++ b/malloc/hooks.c
| @@ -109,7 +109,8 @@ malloc_check_get_size(mchunkptr p) |
| size -= c) { |
| if(c<=0 || size<(c+2*SIZE_SZ)) { |
| malloc_printerr(check_action, "malloc_check_get_size: memory corruption", |
| - chunk2mem(p)); |
| + chunk2mem(p), |
| + chunk_is_mmapped (p) ? NULL : arena_for_chunk (p)); |
| return 0; |
| } |
| } |
| @@ -221,7 +222,8 @@ top_check(void) |
| return 0; |
| |
| mutex_unlock(&main_arena); |
| - malloc_printerr (check_action, "malloc: top chunk is corrupt", t); |
| + malloc_printerr (check_action, "malloc: top chunk is corrupt", t, |
| + &main_arena); |
| mutex_lock(&main_arena); |
| |
| /* Try to set up a new top chunk. */ |
| @@ -276,7 +278,8 @@ free_check(void* mem, const void *caller |
| if(!p) { |
| (void)mutex_unlock(&main_arena.mutex); |
| |
| - malloc_printerr(check_action, "free(): invalid pointer", mem); |
| + malloc_printerr(check_action, "free(): invalid pointer", mem, |
| + &main_arena); |
| return; |
| } |
| if (chunk_is_mmapped(p)) { |
| @@ -308,7 +311,8 @@ realloc_check(void* oldmem, size_t bytes |
| const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p); |
| (void)mutex_unlock(&main_arena.mutex); |
| if(!oldp) { |
| - malloc_printerr(check_action, "realloc(): invalid pointer", oldmem); |
| + malloc_printerr(check_action, "realloc(): invalid pointer", oldmem, |
| + &main_arena); |
| return malloc_check(bytes, NULL); |
| } |
| const INTERNAL_SIZE_T oldsize = chunksize(oldp); |
| |
| --- a/malloc/Makefile
| +++ b/malloc/Makefile
| @@ -25,7 +25,8 @@ all: |
| dist-headers := malloc.h |
| headers := $(dist-headers) obstack.h mcheck.h |
| tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \ |
| - tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 tst-malloc-usable |
| + tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \ |
| + tst-malloc-usable tst-malloc-backtrace |
| test-srcs = tst-mtrace |
| |
| routines = malloc morecore mcheck mtrace obstack |
| @@ -40,6 +41,9 @@ extra-libs-others = $(extra-libs) |
| libmemusage-routines = memusage |
| libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes)) |
| |
| +$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \ |
| + $(common-objpfx)nptl/libpthread_nonshared.a |
| + |
| # These should be removed by `make clean'. |
| extra-objs = mcheck-init.o libmcheck.a |
| |
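| (The explicit libpthread dependency above links the new test against
| pthreads, which is presumably what lets it also exercise the
| pthread_once-in-backtrace case described in the first commit
| message.)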
| |
| --- a/malloc/malloc.c
| +++ b/malloc/malloc.c
| @@ -1060,7 +1060,7 @@ static void* _int_realloc(mstate, mchun |
| static void* _int_memalign(mstate, size_t, size_t); |
| static void* _int_valloc(mstate, size_t); |
| static void* _int_pvalloc(mstate, size_t); |
| -static void malloc_printerr(int action, const char *str, void *ptr); |
| +static void malloc_printerr(int action, const char *str, void *ptr, mstate av); |
| |
| static void* internal_function mem2mem_check(void *p, size_t sz); |
| static int internal_function top_check(void); |
| @@ -1430,7 +1430,8 @@ typedef struct malloc_chunk* mbinptr; |
| BK = P->bk; \ |
| if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) { \ |
| mutex_unlock(&(AV)->mutex); \ |
| - malloc_printerr (check_action, "corrupted double-linked list", P); \ |
| + malloc_printerr (check_action, "corrupted double-linked list", P, \ |
| + AV); \
| mutex_lock(&(AV)->mutex); \ |
| } else { \ |
| FD->bk = BK; \ |
| @@ -1670,6 +1671,15 @@ typedef struct malloc_chunk* mfastbinptr |
| #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT) |
| #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT) |
| |
| +/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the |
| + arena. Such an arena is no longer used to allocate chunks. Chunks |
| + allocated in that arena before detecting corruption are not freed. */ |
| + |
| +#define ARENA_CORRUPTION_BIT (4U) |
| + |
| +#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT)) |
| +#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT) |
| + |
| /* |
| Set value of max_fast. |
| Use impossibly small value if 0. |
| @@ -2281,8 +2291,9 @@ static void* sysmalloc(INTERNAL_SIZE_T n |
| rather than expanding top. |
| */ |
| |
| - if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) && |
| - (mp_.n_mmaps < mp_.n_mmaps_max)) { |
| + if (av == NULL |
| + || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) |
| + && (mp_.n_mmaps < mp_.n_mmaps_max))) { |
| |
| char* mm; /* return value from mmap call*/ |
| |
| @@ -2354,6 +2365,10 @@ static void* sysmalloc(INTERNAL_SIZE_T n |
| } |
| } |
| |
| + /* There are no usable arenas and mmap also failed. */ |
| + if (av == NULL) |
| + return 0; |
| + |
| /* Record incoming configuration of top */ |
| |
| old_top = av->top; |
| @@ -2519,7 +2534,7 @@ static void* sysmalloc(INTERNAL_SIZE_T n |
| else if (contiguous(av) && old_size && brk < old_end) { |
| /* Oops! Someone else killed our space.. Can't touch anything. */ |
| mutex_unlock(&av->mutex); |
| - malloc_printerr (3, "break adjusted to free malloc space", brk); |
| + malloc_printerr (3, "break adjusted to free malloc space", brk, av); |
| mutex_lock(&av->mutex); |
| } |
| |
| @@ -2793,7 +2808,7 @@ munmap_chunk(mchunkptr p) |
| if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0)) |
| { |
| malloc_printerr (check_action, "munmap_chunk(): invalid pointer", |
| - chunk2mem (p)); |
| + chunk2mem (p), NULL); |
| return; |
| } |
| |
| @@ -2861,21 +2876,20 @@ __libc_malloc(size_t bytes) |
| if (__builtin_expect (hook != NULL, 0)) |
| return (*hook)(bytes, RETURN_ADDRESS (0)); |
| |
| - arena_lookup(ar_ptr); |
| + arena_get(ar_ptr, bytes); |
| |
| - arena_lock(ar_ptr, bytes); |
| - if(!ar_ptr) |
| - return 0; |
| victim = _int_malloc(ar_ptr, bytes); |
| - if(!victim) { |
| + /* Retry with another arena only if we were able to find a usable arena |
| + before. */ |
| + if (!victim && ar_ptr != NULL) { |
| LIBC_PROBE (memory_malloc_retry, 1, bytes); |
| ar_ptr = arena_get_retry(ar_ptr, bytes); |
| - if (__builtin_expect(ar_ptr != NULL, 1)) { |
| - victim = _int_malloc(ar_ptr, bytes); |
| - (void)mutex_unlock(&ar_ptr->mutex); |
| - } |
| - } else |
| + victim = _int_malloc (ar_ptr, bytes); |
| + } |
| + |
| + if (ar_ptr != NULL) |
| (void)mutex_unlock(&ar_ptr->mutex); |
| + |
| assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || |
| ar_ptr == arena_for_chunk(mem2chunk(victim))); |
| return victim; |
| @@ -2946,6 +2960,11 @@ __libc_realloc(void* oldmem, size_t byte |
| /* its size */ |
| const INTERNAL_SIZE_T oldsize = chunksize(oldp); |
| |
| + if (chunk_is_mmapped (oldp)) |
| + ar_ptr = NULL; |
| + else |
| + ar_ptr = arena_for_chunk (oldp); |
| + |
| /* Little security check which won't hurt performance: the |
| allocator never wrapps around at the end of the address space. |
| Therefore we can exclude some size values which might appear |
| @@ -2953,7 +2972,8 @@ __libc_realloc(void* oldmem, size_t byte |
| if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0) |
| || __builtin_expect (misaligned_chunk (oldp), 0)) |
| { |
| - malloc_printerr (check_action, "realloc(): invalid pointer", oldmem); |
| + malloc_printerr (check_action, "realloc(): invalid pointer", oldmem, |
| + ar_ptr); |
| return NULL; |
| } |
| |
| @@ -2977,7 +2997,6 @@ __libc_realloc(void* oldmem, size_t byte |
| return newmem; |
| } |
| |
| - ar_ptr = arena_for_chunk(oldp); |
| #if THREAD_STATS |
| if(!mutex_trylock(&ar_ptr->mutex)) |
| ++(ar_ptr->stat_lock_direct); |
| @@ -3043,18 +3062,17 @@ __libc_memalign(size_t alignment, size_t |
| } |
| |
| arena_get(ar_ptr, bytes + alignment + MINSIZE); |
| - if(!ar_ptr) |
| - return 0; |
| + |
| p = _int_memalign(ar_ptr, alignment, bytes); |
| - if(!p) { |
| + if(!p && ar_ptr != NULL) { |
| LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment); |
| ar_ptr = arena_get_retry (ar_ptr, bytes); |
| - if (__builtin_expect(ar_ptr != NULL, 1)) { |
| - p = _int_memalign(ar_ptr, alignment, bytes); |
| - (void)mutex_unlock(&ar_ptr->mutex); |
| - } |
| - } else |
| + p = _int_memalign (ar_ptr, alignment, bytes); |
| + } |
| + |
| + if (ar_ptr != NULL) |
| (void)mutex_unlock(&ar_ptr->mutex); |
| + |
| assert(!p || chunk_is_mmapped(mem2chunk(p)) || |
| ar_ptr == arena_for_chunk(mem2chunk(p))); |
| return p; |
| @@ -3088,18 +3106,16 @@ __libc_valloc(size_t bytes) |
| return (*hook)(pagesz, bytes, RETURN_ADDRESS (0)); |
| |
| arena_get(ar_ptr, bytes + pagesz + MINSIZE); |
| - if(!ar_ptr) |
| - return 0; |
| p = _int_valloc(ar_ptr, bytes); |
| - if(!p) { |
| + if(!p && ar_ptr != NULL) { |
| LIBC_PROBE (memory_valloc_retry, 1, bytes); |
| ar_ptr = arena_get_retry (ar_ptr, bytes); |
| - if (__builtin_expect(ar_ptr != NULL, 1)) { |
| - p = _int_memalign(ar_ptr, pagesz, bytes); |
| - (void)mutex_unlock(&ar_ptr->mutex); |
| - } |
| - } else |
| + p = _int_memalign(ar_ptr, pagesz, bytes); |
| + } |
| + |
| + if (ar_ptr != NULL) |
| (void)mutex_unlock (&ar_ptr->mutex); |
| + |
| assert(!p || chunk_is_mmapped(mem2chunk(p)) || |
| ar_ptr == arena_for_chunk(mem2chunk(p))); |
| |
| @@ -3134,15 +3150,15 @@ __libc_pvalloc(size_t bytes) |
| |
| arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE); |
| p = _int_pvalloc(ar_ptr, bytes); |
| - if(!p) { |
| + if(!p && ar_ptr != NULL) { |
| LIBC_PROBE (memory_pvalloc_retry, 1, bytes); |
| ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE); |
| - if (__builtin_expect(ar_ptr != NULL, 1)) { |
| - p = _int_memalign(ar_ptr, pagesz, rounded_bytes); |
| - (void)mutex_unlock(&ar_ptr->mutex); |
| - } |
| - } else |
| + p = _int_memalign(ar_ptr, pagesz, rounded_bytes); |
| + } |
| + |
| + if (ar_ptr != NULL) |
| (void)mutex_unlock(&ar_ptr->mutex); |
| + |
| assert(!p || chunk_is_mmapped(mem2chunk(p)) || |
| ar_ptr == arena_for_chunk(mem2chunk(p))); |
| |
| @@ -3184,43 +3200,54 @@ __libc_calloc(size_t n, size_t elem_size |
| sz = bytes; |
| |
| arena_get(av, sz); |
| - if(!av) |
| - return 0; |
| + if(av) |
| + { |
| |
| - /* Check if we hand out the top chunk, in which case there may be no |
| - need to clear. */ |
| + /* Check if we hand out the top chunk, in which case there may be no |
| + need to clear. */ |
| #if MORECORE_CLEARS |
| - oldtop = top(av); |
| - oldtopsize = chunksize(top(av)); |
| -#if MORECORE_CLEARS < 2 |
| - /* Only newly allocated memory is guaranteed to be cleared. */ |
| - if (av == &main_arena && |
| - oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop) |
| - oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop); |
| + oldtop = top(av); |
| + oldtopsize = chunksize(top(av)); |
| +# if MORECORE_CLEARS < 2 |
| + /* Only newly allocated memory is guaranteed to be cleared. */ |
| + if (av == &main_arena && |
| + oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop) |
| + oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop); |
| +# endif |
| + if (av != &main_arena) |
| + { |
| + heap_info *heap = heap_for_ptr (oldtop); |
| + if (oldtopsize < ((char *) heap + heap->mprotect_size - |
| + (char *) oldtop)) |
| + oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop; |
| + } |
| #endif |
| - if (av != &main_arena) |
| - { |
| - heap_info *heap = heap_for_ptr (oldtop); |
| - if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop) |
| - oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop; |
| } |
| -#endif |
| + else |
| + { |
| + /* No usable arenas. */ |
| + oldtop = 0; |
| + oldtopsize = 0; |
| + } |
| mem = _int_malloc(av, sz); |
| |
| |
| assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || |
| av == arena_for_chunk(mem2chunk(mem))); |
| |
| - if (mem == 0) { |
| + if (mem == 0 && av != NULL) { |
| LIBC_PROBE (memory_calloc_retry, 1, sz); |
| av = arena_get_retry (av, sz); |
| - if (__builtin_expect(av != NULL, 1)) { |
| - mem = _int_malloc(av, sz); |
| - (void)mutex_unlock(&av->mutex); |
| - } |
| - if (mem == 0) return 0; |
| - } else |
| + mem = _int_malloc(av, sz); |
| + } |
| + |
| + if (av != NULL) |
| (void)mutex_unlock(&av->mutex); |
| + |
| + /* Allocation failed even after a retry. */ |
| + if (mem == 0) |
| + return 0; |
| + |
| p = mem2chunk(mem); |
| |
| /* Two optional cases in which clearing not necessary */ |
| @@ -3310,6 +3337,16 @@ _int_malloc(mstate av, size_t bytes) |
| |
| checked_request2size(bytes, nb); |
| |
| + /* There are no usable arenas. Fall back to sysmalloc to get a chunk from |
| + mmap. */ |
| + if (__glibc_unlikely (av == NULL)) |
| + { |
| + void *p = sysmalloc (nb, av); |
| + if (p != NULL) |
| + alloc_perturb (p, bytes); |
| + return p; |
| + } |
| + |
| /* |
| If the size qualifies as a fastbin, first check corresponding bin. |
| This code is safe to execute even if av is not yet initialized, so we |
| @@ -3334,7 +3371,7 @@ _int_malloc(mstate av, size_t bytes) |
| errstr = "malloc(): memory corruption (fast)"; |
| errout: |
| mutex_unlock(&av->mutex); |
| - malloc_printerr (check_action, errstr, chunk2mem (victim)); |
| + malloc_printerr (check_action, errstr, chunk2mem (victim), av); |
| mutex_lock(&av->mutex); |
| return NULL; |
| } |
| @@ -3421,9 +3458,9 @@ _int_malloc(mstate av, size_t bytes) |
| if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0) |
| || __builtin_expect (victim->size > av->system_mem, 0)) |
| { |
| - void *p = chunk2mem(victim); |
| mutex_unlock(&av->mutex); |
| - malloc_printerr (check_action, "malloc(): memory corruption", p); |
| + malloc_printerr (check_action, "malloc(): memory corruption", |
| + chunk2mem (victim), av);
| mutex_lock(&av->mutex); |
| } |
| size = chunksize(victim); |
| @@ -3801,7 +3838,7 @@ _int_free(mstate av, mchunkptr p, int ha |
| errout: |
| if (have_lock || locked) |
| (void)mutex_unlock(&av->mutex); |
| - malloc_printerr (check_action, errstr, chunk2mem(p)); |
| + malloc_printerr (check_action, errstr, chunk2mem(p), av); |
| if (have_lock) |
| mutex_lock(&av->mutex); |
| return; |
| @@ -4196,7 +4233,7 @@ _int_realloc(mstate av, mchunkptr oldp, |
| errstr = "realloc(): invalid old size"; |
| errout: |
| mutex_unlock(&av->mutex); |
| - malloc_printerr (check_action, errstr, chunk2mem(oldp)); |
| + malloc_printerr (check_action, errstr, chunk2mem(oldp), av); |
| mutex_lock(&av->mutex); |
| return NULL; |
| } |
| @@ -4436,7 +4473,7 @@ static void* |
| _int_valloc(mstate av, size_t bytes) |
| { |
| /* Ensure initialization/consolidation */ |
| - if (have_fastchunks(av)) malloc_consolidate(av); |
| + if (av && have_fastchunks(av)) malloc_consolidate(av); |
| return _int_memalign(av, GLRO(dl_pagesize), bytes); |
| } |
| |
| @@ -4451,7 +4488,7 @@ _int_pvalloc(mstate av, size_t bytes) |
| size_t pagesz; |
| |
| /* Ensure initialization/consolidation */ |
| - if (have_fastchunks(av)) malloc_consolidate(av); |
| + if (av && have_fastchunks(av)) malloc_consolidate(av); |
| pagesz = GLRO(dl_pagesize); |
| return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1)); |
| } |
| @@ -4463,6 +4500,10 @@ _int_pvalloc(mstate av, size_t bytes) |
| |
| static int mtrim(mstate av, size_t pad) |
| { |
| + /* Don't touch corrupt arenas. */ |
| + if (arena_is_corrupt (av)) |
| + return 0; |
| + |
| /* Ensure initialization/consolidation */ |
| malloc_consolidate (av); |
| |
| @@ -4956,8 +4997,14 @@ libc_hidden_def (__libc_mallopt) |
| extern char **__libc_argv attribute_hidden; |
| |
| static void |
| -malloc_printerr(int action, const char *str, void *ptr) |
| +malloc_printerr(int action, const char *str, void *ptr, mstate ar_ptr) |
| { |
| + /* Avoid using this arena in future. We do not attempt to synchronize this |
| + with anything else because we minimally want to ensure that __libc_message |
| + gets its resources safely without stumbling on the current corruption. */ |
| + if (ar_ptr) |
| + set_arena_corrupt (ar_ptr); |
| + |
| if ((action & 5) == 5) |
| __libc_message (action & 2, "%s\n", str); |
| else if (action & 1) |
| |
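| Taken together, the av == NULL paths added above mean that once every
| arena has been marked corrupt, allocation quietly degrades to plain
| mmap. A condensed toy sketch of that fallback (not the actual
| sysmalloc logic):
| |
|   #include <stddef.h>
|   #include <sys/mman.h>
| |
|   struct toy_arena;                     /* opaque for this sketch */
| |
|   /* Stand-in for the normal arena-based allocation path.  */
|   static void *
|   toy_arena_alloc (struct toy_arena *av, size_t nb)
|   {
|     (void) av;
|     (void) nb;
|     return NULL;
|   }
| |
|   /* With no usable arena (av == NULL), take memory straight from the
|      kernel and never touch arena state.  */
|   static void *
|   toy_malloc (struct toy_arena *av, size_t nb)
|   {
|     if (av == NULL)
|       {
|         void *p = mmap (NULL, nb, PROT_READ | PROT_WRITE,
|                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|         return p == MAP_FAILED ? NULL : p;
|       }
|     return toy_arena_alloc (av, nb);
|   }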
| |
| --- /dev/null
| +++ b/malloc/tst-malloc-backtrace.c
| @@ -0,0 +1,71 @@ |
| +/* Verify that backtrace does not deadlock on itself on memory corruption. |
| + Copyright (C) 2015 Free Software Foundation, Inc. |
| + This file is part of the GNU C Library. |
| + |
| + The GNU C Library is free software; you can redistribute it and/or |
| + modify it under the terms of the GNU Lesser General Public |
| + License as published by the Free Software Foundation; either |
| + version 2.1 of the License, or (at your option) any later version. |
| + |
| + The GNU C Library is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| + Lesser General Public License for more details. |
| + |
| + You should have received a copy of the GNU Lesser General Public |
| + License along with the GNU C Library; if not, see |
| + <http://www.gnu.org/licenses/>. */ |
| + |
| + |
| +#include <fcntl.h> |
| +#include <paths.h> |
| +#include <stdlib.h> |
| +#include <unistd.h> |
| + |
| +#define SIZE 4096 |
| + |
| +/* Avoid all the buffer overflow messages on stderr. */ |
| +static void |
| +ignore_stderr (void) |
| +{ |
| + int fd = open (_PATH_DEVNULL, O_WRONLY); |
| + if (fd == -1) |
| + close (STDERR_FILENO); |
| + else |
| + { |
| + dup2 (fd, STDERR_FILENO); |
| + close (fd); |
| + } |
| + setenv ("LIBC_FATAL_STDERR_", "1", 1); |
| +} |
| + |
| +/* Wrap free with a function to prevent gcc from optimizing it out. */ |
| +static void |
| +__attribute__((noinline)) |
| +call_free (void *ptr) |
| +{ |
| + free (ptr); |
| + *(size_t *)(ptr - sizeof (size_t)) = 1; |
| +} |
| + |
| +int |
| +do_test (void) |
| +{ |
| + void *ptr1 = malloc (SIZE); |
| + void *ptr2 = malloc (SIZE); |
| + |
| + /* Avoid unwanted output to TTY after an expected memory corruption. */ |
| + ignore_stderr (); |
| + |
| + call_free ((void *) ptr1); |
| + ptr1 = malloc (SIZE); |
| + |
| + /* Not reached. The return statement is to put ptr2 into use so that gcc |
| + doesn't optimize out that malloc call. */ |
| + return (ptr1 == ptr2); |
| +} |
| + |
| +#define TEST_FUNCTION do_test () |
| +#define EXPECTED_SIGNAL SIGABRT |
| + |
| +#include "../test-skeleton.c" |
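| |
| |
| A note on how the test works: the stray write in call_free lands on
| the size field of the freed chunk's header, which glibc keeps one
| size_t before the user pointer, so the next malloc of the same size
| trips the size sanity check visible in the _int_malloc hunk above and
| aborts. The EXPECTED_SIGNAL machinery in test-skeleton.c then counts
| the SIGABRT as a pass. Assuming a standard glibc build tree, the
| malloc tests (including this one) can be run with:
| |
|   make subdir=malloc check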