diff -rup a/malloc/malloc.c b/malloc/malloc.c
--- a/malloc/malloc.c	2012-02-13 21:46:11.678847531 -0700
+++ b/malloc/malloc.c	2012-02-13 22:43:14.788431976 -0700
@@ -3669,8 +3669,9 @@ public_mALLOc(size_t bytes)
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
-      (void)mutex_unlock(&main_arena.mutex);
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void)mutex_unlock(&ar_ptr->mutex);
+      ar_ptr = arena_get2(prev, bytes);
       if(ar_ptr) {
         victim = _int_malloc(ar_ptr, bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
@@ -3929,10 +3930,10 @@ public_vALLOc(size_t bytes)
   if(!ar_ptr)
     return 0;
   p = _int_valloc(ar_ptr, bytes);
-  (void)mutex_unlock(&ar_ptr->mutex);
   if(!p) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
+      (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, pagesz, bytes);
@@ -3940,14 +3941,17 @@ public_vALLOc(size_t bytes)
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void)mutex_unlock(&ar_ptr->mutex);
+      ar_ptr = arena_get2(prev, bytes);
       if(ar_ptr) {
         p = _int_memalign(ar_ptr, pagesz, bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
       }
 #endif
     }
-  }
+  } else
+    (void)mutex_unlock(&ar_ptr->mutex);
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));
 
@@ -3975,10 +3979,10 @@ public_pVALLOc(size_t bytes)
 
   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
   p = _int_pvalloc(ar_ptr, bytes);
-  (void)mutex_unlock(&ar_ptr->mutex);
   if(!p) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
+      (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
@@ -3986,15 +3990,17 @@ public_pVALLOc(size_t bytes)
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
-                          bytes + 2*pagesz + MINSIZE);
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void)mutex_unlock(&ar_ptr->mutex);
+      ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE);
       if(ar_ptr) {
         p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
       }
 #endif
     }
-  }
+  } else
+    (void)mutex_unlock(&ar_ptr->mutex);
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));
 
@@ -4064,8 +4070,6 @@ public_cALLOc(size_t n, size_t elem_size
 #endif
 
   mem = _int_malloc(av, sz);
-  /* Only clearing follows, so we can unlock early. */
-  (void)mutex_unlock(&av->mutex);
 
   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
          av == arena_for_chunk(mem2chunk(mem)));
@@ -4073,15 +4077,16 @@ public_cALLOc(size_t n, size_t elem_size
   if (mem == 0) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(av != &main_arena) {
+      (void)mutex_unlock(&av->mutex);
       (void)mutex_lock(&main_arena.mutex);
       mem = _int_malloc(&main_arena, sz);
       (void)mutex_unlock(&main_arena.mutex);
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
-      (void)mutex_lock(&main_arena.mutex);
-      av = arena_get2(av->next ? av : 0, sz);
-      (void)mutex_unlock(&main_arena.mutex);
+      mstate prev = av->next ? av : 0;
+      (void)mutex_unlock(&av->mutex);
+      av = arena_get2(prev, sz);
       if(av) {
         mem = _int_malloc(av, sz);
         (void)mutex_unlock(&av->mutex);
@@ -4089,7 +4094,8 @@ public_cALLOc(size_t n, size_t elem_size
 #endif
     }
     if (mem == 0) return 0;
-  }
+  } else
+    (void)mutex_unlock(&av->mutex);
   p = mem2chunk(mem);
 
   /* Two optional cases in which clearing not necessary */
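Every hunk applies the same discipline: capture `ar_ptr->next` into `prev`
while the arena lock is still held, release the mutex the thread actually
holds before calling arena_get2() to pick another arena, and on the success
path unlock in the new `} else` branch instead of unconditionally right after
the allocation attempt.  Below is a minimal standalone sketch of that
pattern, not glibc code: the `arena` struct, `try_alloc`, and `arena_retry`
are hypothetical stand-ins for glibc's mstate, _int_malloc, and arena_get2.

/* Sketch of the locking discipline the patch converges on.  Assumes the
   caller holds ar_ptr->mutex on entry, mirroring the public_* functions
   at the point the patch touches. */
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct arena {
  pthread_mutex_t mutex;
  struct arena *next;
} arena;

static arena main_arena = { PTHREAD_MUTEX_INITIALIZER, NULL };

/* Stand-in for _int_malloc: caller must hold a->mutex; may return NULL. */
static void *try_alloc(arena *a, size_t bytes) {
  (void)a;
  return malloc(bytes);
}

/* Stand-in for arena_get2: called with NO arena mutex held; returns a
   *locked* arena on success, NULL on failure. */
static arena *arena_retry(arena *prev, size_t bytes) {
  (void)prev; (void)bytes;
  pthread_mutex_lock(&main_arena.mutex);
  return &main_arena;
}

void *alloc_with_fallback(arena *ar_ptr, size_t bytes) {
  void *victim = try_alloc(ar_ptr, bytes);
  if (victim == NULL) {
    /* Read ->next while the lock is still held, then drop the mutex we
       actually hold *before* asking for another arena.  The pre-patch
       code called arena_get2 with an arena lock still held, and in the
       calloc path unlocked main_arena.mutex regardless of which mutex
       this thread had actually locked. */
    arena *prev = ar_ptr->next ? ar_ptr : NULL;
    pthread_mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_retry(prev, bytes);
    if (ar_ptr) {
      victim = try_alloc(ar_ptr, bytes);
      pthread_mutex_unlock(&ar_ptr->mutex);  /* unlock what we locked */
    }
  } else
    pthread_mutex_unlock(&ar_ptr->mutex);    /* success path */
  return victim;
}

The trailing `else` unlock mirrors the `} else (void)mutex_unlock(...)` lines
the patch adds: the arena lock is now held across the failure check, so each
branch releases exactly the mutex it holds, and no path unlocks a mutex it
never acquired.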