From a1a486d70ebcc47a686ff5846875eacad0940e41 Mon Sep 17 00:00:00 2001
From: Eyal Itkin <eyalit@checkpoint.com>
Date: Fri, 20 Mar 2020 21:19:17 +0200
Subject: Add Safe-Linking to fastbins and tcache

Safe-Linking is a security mechanism that protects single-linked
lists (such as the fastbin and tcache) from being tampered with by
attackers. The mechanism makes use of randomness from ASLR (mmap_base),
and when combined with chunk alignment integrity checks, it protects
the "next" pointers from being hijacked by an attacker.

While Safe-Unlinking protects double-linked lists (such as the small
bins), there was no similar protection against attacks on
single-linked lists. This solution protects against 3 common attacks:
* Partial pointer override: modifies the lower bytes (Little Endian)
* Full pointer override: hijacks the pointer to an attacker's location
* Unaligned chunks: points the list to an unaligned address

The design assumes an attacker doesn't know where the heap is located,
and uses the ASLR randomness to "sign" the single-linked pointers. We
mark the pointer as P and the location in which it is stored as L, and
the calculation is:
* PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
* *L = PROTECT(P)

This way, the random bits of the address L (those at and above the
PAGE_SHIFT bit position) are folded into the least significant bits of
the stored protected pointer. This protection layer prevents an
attacker from modifying the pointer into a controlled value.
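
To make the transformation concrete, here is a minimal standalone
sketch (not part of the patch; it hard-codes PAGE_SHIFT = 12, i.e. the
4096-byte page assumption used by the patch, and a made-up "entry"
type) showing the round trip of a protected "next" pointer:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative copies of the macros the patch adds to malloc.c.  */
    #define PROTECT_PTR(pos, ptr) \
      ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
    #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)

    struct entry { struct entry *next; };

    int
    main (void)
    {
      struct entry a, b;
      /* Store: L is the address of a.next, P is &b.  */
      a.next = PROTECT_PTR (&a.next, &b);
      /* Load: XOR-ing again with the same L >> 12 recovers P.  */
      printf ("round trip ok: %d\n", REVEAL_PTR (a.next) == &b);
      return 0;
    }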

An additional check that the chunks are MALLOC_ALIGNed adds an
important layer:
* Attackers can't point to illegal (unaligned) memory addresses
* Attackers must correctly guess the alignment bits

On standard 32-bit Linux machines, an attack will fail outright 7
out of 8 times, and on 64-bit machines it will fail 15 out of 16
times.
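
Those odds follow directly from the alignment mask; the sketch below
(an illustration only, the constants are assumptions for a typical
64-bit build with 16-byte malloc alignment and are not text copied
from malloc.c) counts how many of the 16 possible low nibbles of a
forged pointer would survive an aligned_OK-style check:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the alignment check the patch relies on, assuming
       MALLOC_ALIGNMENT == 16, i.e. MALLOC_ALIGN_MASK == 0xf.  With
       8-byte alignment the mask would be 0x7 (1 in 8 instead).  */
    #define MALLOC_ALIGN_MASK ((uintptr_t) 0xf)
    #define aligned_OK(p) ((((uintptr_t) (p)) & MALLOC_ALIGN_MASK) == 0)

    int
    main (void)
    {
      int passes = 0;
      /* Sweep all 16 possible values of the 4 alignment bits.  */
      for (uintptr_t addr = 0x1000; addr < 0x1010; addr++)
        passes += aligned_OK (addr);
      printf ("%d of 16 guesses survive the alignment check\n", passes);
      return 0;
    }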

The proposed patch was benchmarked, and its effect on the overall
performance of the heap was negligible: it could not be distinguished
from the normal variance between runs of the vanilla version. A
similar protection was added to Chromium's version of TCMalloc
in 2012, and according to their documentation it had an overhead of
less than 2%.

Reviewed-by: DJ Delorie <dj@redhat.com>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/malloc/malloc.c b/malloc/malloc.c
index f7cd29bc2f..1282863681 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -327,6 +327,18 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
# define MAX_TCACHE_COUNT UINT16_MAX
#endif

+/* Safe-Linking:
+ Use randomness from ASLR (mmap_base) to protect single-linked lists
+ of Fast-Bins and TCache. That is, mask the "next" pointers of the
+ lists' chunks, and also perform allocation alignment checks on them.
+ This mechanism reduces the risk of pointer hijacking, as was done with
+ Safe-Unlinking in the double-linked lists of Small-Bins.
+ It assumes a minimum page size of 4096 bytes (12 bits). Systems with
+ larger pages provide less entropy, although the pointer mangling
+ still works. */
+#define PROTECT_PTR(pos, ptr) \
+ ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
+#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)

/*
REALLOC_ZERO_BYTES_FREES should be set if a call to
@@ -2157,12 +2169,15 @@ do_check_malloc_state (mstate av)

while (p != 0)
{
+ if (__glibc_unlikely (!aligned_OK (p)))
+ malloc_printerr ("do_check_malloc_state(): " \
+ "unaligned fastbin chunk detected");
/* each chunk claims to be inuse */
do_check_inuse_chunk (av, p);
total += chunksize (p);
/* chunk belongs in this bin */
assert (fastbin_index (chunksize (p)) == i);
- p = p->fd;
+ p = REVEAL_PTR (p->fd);
}
}

@@ -2923,7 +2938,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
detect a double free. */
e->key = tcache;

- e->next = tcache->entries[tc_idx];
+ e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
tcache->entries[tc_idx] = e;
++(tcache->counts[tc_idx]);
}
@@ -2934,9 +2949,11 @@ static __always_inline void *
tcache_get (size_t tc_idx)
{
tcache_entry *e = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e->next;
+ if (__glibc_unlikely (!aligned_OK (e)))
+ malloc_printerr ("malloc(): unaligned tcache chunk detected");
+ tcache->entries[tc_idx] = REVEAL_PTR (e->next);
--(tcache->counts[tc_idx]);
e->key = NULL;
return (void *) e;
}

@@ -2960,7 +2977,10 @@ tcache_thread_shutdown (void)
while (tcache_tmp->entries[i])
{
tcache_entry *e = tcache_tmp->entries[i];
- tcache_tmp->entries[i] = e->next;
+ if (__glibc_unlikely (!aligned_OK (e)))
+ malloc_printerr ("tcache_thread_shutdown(): " \
+ "unaligned tcache chunk detected");
+ tcache_tmp->entries[i] = REVEAL_PTR (e->next);
__libc_free (e);
}
}
@@ -3570,8 +3590,11 @@ _int_malloc (mstate av, size_t bytes)
victim = pp; \
if (victim == NULL) \
break; \
+ pp = REVEAL_PTR (victim->fd); \
+ if (__glibc_unlikely (!aligned_OK (pp))) \
+ malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
} \
- while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
+ while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
!= victim); \

if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
@@ -3583,8 +3606,11 @@ _int_malloc (mstate av, size_t bytes)

if (victim != NULL)
{
+ if (__glibc_unlikely (!aligned_OK (victim)))
+ malloc_printerr ("malloc(): unaligned fastbin chunk detected");
+
if (SINGLE_THREAD_P)
- *fb = victim->fd;
+ *fb = REVEAL_PTR (victim->fd);
else
REMOVE_FB (fb, pp, victim);
if (__glibc_likely (victim != NULL))
@@ -3605,8 +3631,10 @@ _int_malloc (mstate av, size_t bytes)
while (tcache->counts[tc_idx] < mp_.tcache_count
&& (tc_victim = *fb) != NULL)
{
+ if (__glibc_unlikely (!aligned_OK (tc_victim)))
+ malloc_printerr ("malloc(): unaligned fastbin chunk detected");
if (SINGLE_THREAD_P)
- *fb = tc_victim->fd;
+ *fb = REVEAL_PTR (tc_victim->fd);
else
{
REMOVE_FB (fb, pp, tc_victim);
@@ -4196,11 +4224,15 @@ _int_free (mstate av, mchunkptr p, int have_lock)
LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
for (tmp = tcache->entries[tc_idx];
tmp;
- tmp = tmp->next)
+ tmp = REVEAL_PTR (tmp->next))
+ {
+ if (__glibc_unlikely (!aligned_OK (tmp)))
+ malloc_printerr ("free(): unaligned chunk detected in tcache 2");
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
/* If we get here, it was a coincidence. We've wasted a
few cycles, but don't abort. */
+ }
}

if (tcache->counts[tc_idx] < mp_.tcache_count)
@@ -4264,7 +4296,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
add (i.e., double free). */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old;
+ p->fd = PROTECT_PTR (&p->fd, old);
*fb = p;
}
else
@@ -4274,7 +4306,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
add (i.e., double free). */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old2 = old;
+ old2 = old;
+ p->fd = PROTECT_PTR (&p->fd, old);
}
while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
!= old2);
@@ -4472,13 +4505,17 @@ static void malloc_consolidate(mstate av)
if (p != 0) {
do {
{
+ if (__glibc_unlikely (!aligned_OK (p)))
+ malloc_printerr ("malloc_consolidate(): " \
+ "unaligned fastbin chunk detected");
+
unsigned int idx = fastbin_index (chunksize (p));
if ((&fastbin (av, idx)) != fb)
malloc_printerr ("malloc_consolidate(): invalid chunk size");
}

check_inuse_chunk(av, p);
- nextp = p->fd;
+ nextp = REVEAL_PTR (p->fd);

/* Slightly streamlined version of consolidation code in free() */
size = chunksize (p);
@@ -4896,8 +4933,13 @@ int_mallinfo (mstate av, struct mallinfo *m)

for (i = 0; i < NFASTBINS; ++i)
{
- for (p = fastbin (av, i); p != 0; p = p->fd)
+ for (p = fastbin (av, i);
+ p != 0;
+ p = REVEAL_PTR (p->fd))
{
+ if (__glibc_unlikely (!aligned_OK (p)))
+ malloc_printerr ("int_mallinfo(): " \
+ "unaligned fastbin chunk detected");
++nfastblocks;
fastavail += chunksize (p);
}
@@ -5437,8 +5479,11 @@ __malloc_info (int options, FILE *fp)

while (p != NULL)
{
+ if (__glibc_unlikely (!aligned_OK (p)))
+ malloc_printerr ("__malloc_info(): " \
+ "unaligned fastbin chunk detected");
++nthissize;
- p = p->fd;
+ p = REVEAL_PTR (p->fd);
}

fastavail += nthissize * thissize;