From a1a486d70ebcc47a686ff5846875eacad0940e41 Mon Sep 17 00:00:00 2001
From: Eyal Itkin <eyalit@checkpoint.com>
Date: Fri, 20 Mar 2020 21:19:17 +0200
Subject: Add Safe-Linking to fastbins and tcache

Safe-Linking is a security mechanism that protects single-linked
lists (such as the fastbin and tcache) from being tampered by attackers.
The mechanism makes use of randomness from ASLR (mmap_base), and when
combined with chunk alignment integrity checks, it protects the "next"
pointers from being hijacked by an attacker.

While Safe-Unlinking protects double-linked lists (such as the small
bins), there wasn't any similar protection for attacks against
single-linked lists. This solution protects against 3 common attacks:
  * Partial pointer override: modifies the lower bytes (Little Endian)
  * Full pointer override: hijacks the pointer to an attacker's location
  * Unaligned chunks: pointing the list to an unaligned address

The design assumes an attacker doesn't know where the heap is located,
and uses the ASLR randomness to "sign" the single-linked pointers. We
mark the pointer as P and the location in which it is stored as L, and
the calculation will be:
  * PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
  * *L = PROTECT(P)

This way, the random bits from the address L (which start at the bit
in the PAGE_SHIFT position), will be merged with LSB of the stored
protected pointer. This protection layer prevents an attacker from
modifying the pointer into a controlled value.

An additional check that the chunks are MALLOC_ALIGNed adds an
important layer:
  * Attackers can't point to illegal (unaligned) memory addresses
  * Attackers must guess correctly the alignment bits

On standard 32 bit Linux machines, an attack will directly fail 7
out of 8 times, and on 64 bit machines it will fail 15 out of 16
times.

This proposed patch was benchmarked and its effect on the overall
performance of the heap was negligible and couldn't be distinguished
from the default variance between tests on the vanilla version. A
similar protection was added to Chromium's version of TCMalloc
in 2012, and according to their documentation it had an overhead of
less than 2%.

Reviewed-by: DJ Delorie <dj@redhat.com>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/malloc/malloc.c b/malloc/malloc.c
76b6d9
index f7cd29bc2f..1282863681 100644
76b6d9
--- a/malloc/malloc.c
76b6d9
+++ b/malloc/malloc.c
76b6d9
@@ -327,6 +327,18 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
76b6d9
 # define MAX_TCACHE_COUNT UINT16_MAX
76b6d9
 #endif
76b6d9
 
76b6d9
+/* Safe-Linking:
76b6d9
+   Use randomness from ASLR (mmap_base) to protect single-linked lists
76b6d9
+   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
76b6d9
+   lists' chunks, and also perform allocation alignment checks on them.
76b6d9
+   This mechanism reduces the risk of pointer hijacking, as was done with
76b6d9
+   Safe-Unlinking in the double-linked lists of Small-Bins.
76b6d9
+   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
76b6d9
+   larger pages provide less entropy, although the pointer mangling
76b6d9
+   still works.  */
76b6d9
+#define PROTECT_PTR(pos, ptr) \
76b6d9
+  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
76b6d9
+#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)
76b6d9
 
76b6d9
 /*
76b6d9
   REALLOC_ZERO_BYTES_FREES should be set if a call to
76b6d9
@@ -2157,12 +2169,15 @@ do_check_malloc_state (mstate av)
76b6d9
 
76b6d9
       while (p != 0)
76b6d9
         {
76b6d9
+	  if (__glibc_unlikely (!aligned_OK (p)))
76b6d9
+	    malloc_printerr ("do_check_malloc_state(): " \
76b6d9
+			     "unaligned fastbin chunk detected");
76b6d9
           /* each chunk claims to be inuse */
76b6d9
           do_check_inuse_chunk (av, p);
76b6d9
           total += chunksize (p);
76b6d9
           /* chunk belongs in this bin */
76b6d9
           assert (fastbin_index (chunksize (p)) == i);
76b6d9
-          p = p->fd;
76b6d9
+	  p = REVEAL_PTR (p->fd);
76b6d9
         }
76b6d9
     }
76b6d9
 
76b6d9
@@ -2923,7 +2938,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
76b6d9
      detect a double free.  */
76b6d9
   e->key = tcache;
76b6d9
 
76b6d9
-  e->next = tcache->entries[tc_idx];
76b6d9
+  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
76b6d9
   tcache->entries[tc_idx] = e;
76b6d9
   ++(tcache->counts[tc_idx]);
76b6d9
 }
76b6d9
@@ -2934,9 +2949,11 @@ static __always_inline void *
76b6d9
 tcache_get (size_t tc_idx)
76b6d9
 {
76b6d9
   tcache_entry *e = tcache->entries[tc_idx];
76b6d9
-  tcache->entries[tc_idx] = e->next;
76b6d9
+  tcache->entries[tc_idx] = REVEAL_PTR (e->next);
76b6d9
   --(tcache->counts[tc_idx]);
76b6d9
   e->key = NULL;
76b6d9
+  if (__glibc_unlikely (!aligned_OK (e)))
76b6d9
+    malloc_printerr ("malloc(): unaligned tcache chunk detected");
76b6d9
   return (void *) e;
76b6d9
 }
76b6d9
 
76b6d9
@@ -2960,7 +2977,10 @@ tcache_thread_shutdown (void)
76b6d9
       while (tcache_tmp->entries[i])
76b6d9
 	{
76b6d9
 	  tcache_entry *e = tcache_tmp->entries[i];
76b6d9
-	  tcache_tmp->entries[i] = e->next;
76b6d9
+      if (__glibc_unlikely (!aligned_OK (e)))
76b6d9
+	malloc_printerr ("tcache_thread_shutdown(): " \
76b6d9
+			 "unaligned tcache chunk detected");
76b6d9
+	  tcache_tmp->entries[i] = REVEAL_PTR (e->next);
76b6d9
 	  __libc_free (e);
76b6d9
 	}
76b6d9
     }
76b6d9
@@ -3570,8 +3590,11 @@ _int_malloc (mstate av, size_t bytes)
76b6d9
       victim = pp;					\
76b6d9
       if (victim == NULL)				\
76b6d9
 	break;						\
76b6d9
+      pp = REVEAL_PTR (victim->fd);                                     \
76b6d9
+      if (__glibc_unlikely (!aligned_OK (pp)))                          \
76b6d9
+	malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
76b6d9
     }							\
76b6d9
-  while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
76b6d9
+  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
76b6d9
 	 != victim);					\
76b6d9
 
76b6d9
   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
76b6d9
@@ -3583,8 +3606,11 @@ _int_malloc (mstate av, size_t bytes)
76b6d9
 
76b6d9
       if (victim != NULL)
76b6d9
 	{
76b6d9
+	  if (__glibc_unlikely (!aligned_OK (victim)))
76b6d9
+	    malloc_printerr ("malloc(): unaligned fastbin chunk detected");
76b6d9
+
76b6d9
 	  if (SINGLE_THREAD_P)
76b6d9
-	    *fb = victim->fd;
76b6d9
+	    *fb = REVEAL_PTR (victim->fd);
76b6d9
 	  else
76b6d9
 	    REMOVE_FB (fb, pp, victim);
76b6d9
 	  if (__glibc_likely (victim != NULL))
76b6d9
@@ -3605,8 +3631,10 @@ _int_malloc (mstate av, size_t bytes)
76b6d9
 		  while (tcache->counts[tc_idx] < mp_.tcache_count
76b6d9
 			 && (tc_victim = *fb) != NULL)
76b6d9
 		    {
76b6d9
+		      if (__glibc_unlikely (!aligned_OK (tc_victim)))
76b6d9
+			malloc_printerr ("malloc(): unaligned fastbin chunk detected");
76b6d9
 		      if (SINGLE_THREAD_P)
76b6d9
-			*fb = tc_victim->fd;
76b6d9
+			*fb = REVEAL_PTR (tc_victim->fd);
76b6d9
 		      else
76b6d9
 			{
76b6d9
 			  REMOVE_FB (fb, pp, tc_victim);
76b6d9
@@ -4196,11 +4224,15 @@ _int_free (mstate av, mchunkptr p, int have_lock)
76b6d9
 	    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
76b6d9
 	    for (tmp = tcache->entries[tc_idx];
76b6d9
 		 tmp;
76b6d9
-		 tmp = tmp->next)
76b6d9
+		 tmp = REVEAL_PTR (tmp->next))
76b6d9
+        {
76b6d9
+	      if (__glibc_unlikely (!aligned_OK (tmp)))
76b6d9
+		malloc_printerr ("free(): unaligned chunk detected in tcache 2");
76b6d9
 	      if (tmp == e)
76b6d9
 		malloc_printerr ("free(): double free detected in tcache 2");
76b6d9
 	    /* If we get here, it was a coincidence.  We've wasted a
76b6d9
 	       few cycles, but don't abort.  */
76b6d9
+        }
76b6d9
 	  }
76b6d9
 
76b6d9
 	if (tcache->counts[tc_idx] < mp_.tcache_count)
76b6d9
@@ -4264,7 +4296,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
76b6d9
 	   add (i.e., double free).  */
76b6d9
 	if (__builtin_expect (old == p, 0))
76b6d9
 	  malloc_printerr ("double free or corruption (fasttop)");
76b6d9
-	p->fd = old;
76b6d9
+	p->fd = PROTECT_PTR (&p->fd, old);
76b6d9
 	*fb = p;
76b6d9
       }
76b6d9
     else
76b6d9
@@ -4274,7 +4306,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
76b6d9
 	     add (i.e., double free).  */
76b6d9
 	  if (__builtin_expect (old == p, 0))
76b6d9
 	    malloc_printerr ("double free or corruption (fasttop)");
76b6d9
-	  p->fd = old2 = old;
76b6d9
+	  old2 = old;
76b6d9
+	  p->fd = PROTECT_PTR (&p->fd, old);
76b6d9
 	}
76b6d9
       while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
76b6d9
 	     != old2);
76b6d9
@@ -4472,13 +4505,17 @@ static void malloc_consolidate(mstate av)
76b6d9
     if (p != 0) {
76b6d9
       do {
76b6d9
 	{
76b6d9
+	  if (__glibc_unlikely (!aligned_OK (p)))
76b6d9
+	    malloc_printerr ("malloc_consolidate(): " \
76b6d9
+			     "unaligned fastbin chunk detected");
76b6d9
+
76b6d9
 	  unsigned int idx = fastbin_index (chunksize (p));
76b6d9
 	  if ((&fastbin (av, idx)) != fb)
76b6d9
 	    malloc_printerr ("malloc_consolidate(): invalid chunk size");
76b6d9
 	}
76b6d9
 
76b6d9
 	check_inuse_chunk(av, p);
76b6d9
-	nextp = p->fd;
76b6d9
+	nextp = REVEAL_PTR (p->fd);
76b6d9
 
76b6d9
 	/* Slightly streamlined version of consolidation code in free() */
76b6d9
 	size = chunksize (p);
76b6d9
@@ -4896,8 +4933,13 @@ int_mallinfo (mstate av, struct mallinfo *m)
76b6d9
 
76b6d9
   for (i = 0; i < NFASTBINS; ++i)
76b6d9
     {
76b6d9
-      for (p = fastbin (av, i); p != 0; p = p->fd)
76b6d9
+      for (p = fastbin (av, i);
76b6d9
+	   p != 0;
76b6d9
+	   p = REVEAL_PTR (p->fd))
76b6d9
         {
76b6d9
+	  if (__glibc_unlikely (!aligned_OK (p)))
76b6d9
+	    malloc_printerr ("int_mallinfo(): " \
76b6d9
+			     "unaligned fastbin chunk detected");
76b6d9
           ++nfastblocks;
76b6d9
           fastavail += chunksize (p);
76b6d9
         }
76b6d9
@@ -5437,8 +5479,11 @@ __malloc_info (int options, FILE *fp)
76b6d9
 
76b6d9
 	      while (p != NULL)
76b6d9
 		{
76b6d9
+		  if (__glibc_unlikely (!aligned_OK (p)))
76b6d9
+		    malloc_printerr ("__malloc_info(): " \
76b6d9
+				     "unaligned fastbin chunk detected");
76b6d9
 		  ++nthissize;
76b6d9
-		  p = p->fd;
76b6d9
+		  p = REVEAL_PTR (p->fd);
76b6d9
 		}
76b6d9
 
76b6d9
 	      fastavail += nthissize * thissize;