commit 362b47fe09ca9a928d444c7e2f7992f7f61bfc3e
Author: Maxim Kuvyrkov <maxim@kugelworks.com>
Date:   Tue Dec 24 09:44:50 2013 +1300

    Fix race in free() of fastbin chunk: BZ #15073

    Perform the sanity check only if we have_lock.  Due to the lockless nature
    of fastbins, we need to be careful when dereferencing pointers to fastbin
    entries (chunksize(old) in this case) in multithreaded environments.

    The fix is to add have_lock to the if-condition checks.  The rest of the
    patch only makes the code more readable.

    	* malloc/malloc.c (_int_free): Perform sanity check only if we
    	have_lock.
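
For context, here is a minimal sketch (illustrative only, not glibc code) of the
lockless fastbin push that the hunk below patches.  C11 atomics stand in for
glibc's catomic_compare_and_exchange_val_rel, and the chunk struct and the raw
size comparison are simplified stand-ins for malloc_chunk and the fastbin_index
check.  It shows why the pointer-equality double-free test is always safe,
while reading chunksize(old) is only safe under the arena lock: without it,
another thread may pop and reuse OLD between the load and the dereference.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for glibc's malloc_chunk.  */
struct chunk
{
  size_t size;                  /* chunk size (glibc: chunksize (p)) */
  struct chunk *fd;             /* next chunk in the fastbin list */
};

/* Push P onto the fastbin whose head is *FB, mirroring the structure of the
   patched _int_free loop.  Returns NULL on success or an error string, like
   ERRSTR in glibc.  */
static const char *
fastbin_push (struct chunk *_Atomic *fb, struct chunk *p, int have_lock)
{
  struct chunk *old = atomic_load (fb);
  size_t old_size = 0;

  do
    {
      /* Double-free check: a plain pointer comparison, safe with or
         without the arena lock because OLD is never dereferenced.  */
      if (old == p)
        return "double free or corruption (fasttop)";

      /* Record the size of the current head for the sanity check after the
         CAS.  Dereferencing OLD is only safe while the lock is held; without
         it, another thread may already have popped and reused the chunk.
         This is the have_lock guard the patch adds.  */
      if (have_lock && old != NULL)
        old_size = old->size;

      p->fd = old;              /* tentatively link P in front of OLD */
    }
  /* Retry until *FB still equals OLD and is replaced by P; on failure,
     atomic_compare_exchange_weak reloads OLD with the current head.  */
  while (!atomic_compare_exchange_weak (fb, &old, p));

  /* Sanity check on the size recorded under the lock (glibc compares the
     fastbin_index of the two chunks rather than raw sizes).  */
  if (have_lock && old != NULL && old_size != p->size)
    return "invalid fastbin entry (free)";

  return NULL;
}

int
main (void)
{
  struct chunk *_Atomic bin = NULL;
  struct chunk a = { 32, NULL };

  const char *err = fastbin_push (&bin, &a, 1);
  printf ("first free:  %s\n", err ? err : "ok");

  err = fastbin_push (&bin, &a, 1);             /* double free of A */
  printf ("second free: %s\n", err ? err : "ok");
  return 0;
}
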
Index: b/malloc/malloc.c
===================================================================
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3909,25 +3909,29 @@ _int_free(mstate av, mchunkptr p, int ha
     unsigned int idx = fastbin_index(size);
     fb = &fastbin (av, idx);

-    mchunkptr fd;
-    mchunkptr old = *fb;
+    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
+    mchunkptr old = *fb, old2;
     unsigned int old_idx = ~0u;
     do
       {
-        /* Another simple check: make sure the top of the bin is not the
-           record we are going to add (i.e., double free).  */
+        /* Check that the top of the bin is not the record we are going to add
+           (i.e., double free).  */
         if (__builtin_expect (old == p, 0))
           {
             errstr = "double free or corruption (fasttop)";
             goto errout;
           }
-        if (old != NULL)
+        /* Check that size of fastbin chunk at the top is the same as
+           size of the chunk that we are adding.  We can dereference OLD
+           only if we have the lock, otherwise it might have already been
+           deallocated.  See use of OLD_IDX below for the actual check.  */
+        if (have_lock && old != NULL)
           old_idx = fastbin_index(chunksize(old));
-        p->fd = fd = old;
+        p->fd = old2 = old;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
+    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);

-    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
       {
         errstr = "invalid fastbin entry (free)";
         goto errout;