From a1a486d70ebcc47a686ff5846875eacad0940e41 Mon Sep 17 00:00:00 2001
From: Eyal Itkin <eyalit@checkpoint.com>
Date: Fri, 20 Mar 2020 21:19:17 +0200
Subject: Add Safe-Linking to fastbins and tcache

Safe-Linking is a security mechanism that protects singly-linked
lists (such as the fastbins and tcache) from being tampered with by
attackers.  The mechanism makes use of randomness from ASLR
(mmap_base), and when combined with chunk alignment integrity checks,
it protects the "next" pointers from being hijacked by an attacker.

While Safe-Unlinking protects doubly-linked lists (such as the small
bins), there was no similar protection against attacks on
singly-linked lists.  This solution protects against three common
attacks:
  * Partial pointer override: modifies the lower bytes (little endian)
  * Full pointer override: hijacks the pointer to an attacker's location
  * Unaligned chunks: points the list to an unaligned address

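To make the first of these concrete, here is a minimal sketch
(illustrative only, not part of the patch; the entry struct and the
chosen byte are hypothetical) of how overwriting a single low byte of
an unprotected "next" pointer retargets a singly-linked list:

  #include <stdio.h>
  #include <string.h>

  struct entry { struct entry *next; };

  int main (void)
  {
    struct entry a, b;
    a.next = &b;                  /* intact list: a -> b */

    /* An overflow that reaches only the lowest byte of a.next
       redirects the link without knowing any full address.  */
    unsigned char evil = 0x00;    /* attacker-chosen low byte */
    memcpy (&a.next, &evil, 1);   /* little endian: LSB comes first */

    printf ("a.next: %p -> %p\n", (void *) &b, (void *) a.next);
    return 0;
  }
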
The design assumes an attacker doesn't know where the heap is
located, and uses the ASLR randomness to "sign" the singly-linked
pointers.  Denoting the pointer by P and the location in which it is
stored by L, the calculation is:
  * PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
  * *L = PROTECT(P)

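The following self-contained sketch shows the round trip of this
calculation outside of malloc (it assumes the 4096-byte page size and
shift of 12 used by the patch below; PROTECT and the variable names
are demo inventions).  The same XOR both masks and reveals, and an
attacker who doesn't know the high bits of L cannot choose what a
forged value decodes to:

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SHIFT 12
  /* Same shape as the patch's PROTECT_PTR; REVEAL is the same XOR.  */
  #define PROTECT(L, P) \
    ((void *) ((((uintptr_t) (L)) >> PAGE_SHIFT) ^ ((uintptr_t) (P))))

  int main (void)
  {
    void *P = (void *) &stdin;          /* any pointer value    */
    void *slot;                         /* L: the storage place */

    slot = PROTECT (&slot, P);          /* *L = PROTECT(P)      */
    void *revealed = PROTECT (&slot, slot);

    printf ("P=%p masked=%p revealed=%p\n", P, slot, revealed);
    return 0;
  }
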
This way, the random bits from the address L (which start at bit
position PAGE_SHIFT) are merged into the LSBs of the stored protected
pointer.  This protection layer prevents an attacker from modifying
the pointer into a controlled value.

An additional check that the chunks are MALLOC_ALIGNed adds an
important layer:
  * Attackers can't point to illegal (unaligned) memory addresses
  * Attackers must correctly guess the alignment bits

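A standalone sketch of this alignment filter (the patch itself reuses
glibc's aligned_OK macro; the 16-byte MALLOC_ALIGNMENT here is an
assumption matching typical 64-bit targets):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdlib.h>

  #define MALLOC_ALIGNMENT 16               /* assumed 64-bit value */
  #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

  /* A revealed "next" pointer must be MALLOC_ALIGNed; a forged
     pointer with random low bits passes only 1 time in 16 here.  */
  static bool chunk_aligned_ok (const void *p)
  {
    return ((uintptr_t) p & MALLOC_ALIGN_MASK) == 0;
  }

  int main (void)
  {
    void *p = malloc (32);              /* real allocations pass */
    int ok = chunk_aligned_ok (p);
    free (p);
    return ok ? 0 : 1;
  }
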
On standard 32-bit Linux machines, an attack will directly fail 7 out
of 8 times: only 1 in 2^3 forged pointers satisfies the 8-byte chunk
alignment.  On 64-bit machines, where chunks are 16-byte aligned, it
will fail 15 out of 16 times.

This proposed patch was benchmarked, and its effect on the overall
performance of the heap was negligible; it could not be distinguished
from the normal variance between test runs of the vanilla version.  A
similar protection was added to Chromium's version of TCMalloc in
2012, and according to their documentation it had an overhead of less
than 2%.

Reviewed-by: DJ Delorie <dj@redhat.com>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/malloc/malloc.c b/malloc/malloc.c
index f7cd29bc2f..1282863681 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -327,6 +327,18 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
 # define MAX_TCACHE_COUNT UINT16_MAX
 #endif
 
+/* Safe-Linking:
+   Use randomness from ASLR (mmap_base) to protect single-linked lists
+   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
+   lists' chunks, and also perform allocation alignment checks on them.
+   This mechanism reduces the risk of pointer hijacking, as was done with
+   Safe-Unlinking in the double-linked lists of Small-Bins.
+   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
+   larger pages provide less entropy, although the pointer mangling
+   still works.  */
+#define PROTECT_PTR(pos, ptr) \
+  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
+#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)
 
 /*
   REALLOC_ZERO_BYTES_FREES should be set if a call to
@@ -2157,12 +2169,15 @@ do_check_malloc_state (mstate av)
 
       while (p != 0)
         {
+	  if (__glibc_unlikely (!aligned_OK (p)))
+	    malloc_printerr ("do_check_malloc_state(): " \
+			     "unaligned fastbin chunk detected");
           /* each chunk claims to be inuse */
           do_check_inuse_chunk (av, p);
           total += chunksize (p);
           /* chunk belongs in this bin */
           assert (fastbin_index (chunksize (p)) == i);
-          p = p->fd;
+	  p = REVEAL_PTR (p->fd);
         }
     }
 
@@ -2923,7 +2938,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
      detect a double free.  */
   e->key = tcache;
 
-  e->next = tcache->entries[tc_idx];
+  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
   tcache->entries[tc_idx] = e;
   ++(tcache->counts[tc_idx]);
 }
@@ -2934,9 +2949,11 @@ static __always_inline void *
 tcache_get (size_t tc_idx)
 {
   tcache_entry *e = tcache->entries[tc_idx];
-  tcache->entries[tc_idx] = e->next;
+  tcache->entries[tc_idx] = REVEAL_PTR (e->next);
   --(tcache->counts[tc_idx]);
   e->key = NULL;
+  if (__glibc_unlikely (!aligned_OK (e)))
+    malloc_printerr ("malloc(): unaligned tcache chunk detected");
   return (void *) e;
 }
 
@@ -2960,7 +2977,10 @@ tcache_thread_shutdown (void)
       while (tcache_tmp->entries[i])
 	{
 	  tcache_entry *e = tcache_tmp->entries[i];
+      if (__glibc_unlikely (!aligned_OK (e)))
+	malloc_printerr ("tcache_thread_shutdown(): " \
+			 "unaligned tcache chunk detected");
+	  tcache_tmp->entries[i] = REVEAL_PTR (e->next);
 	  __libc_free (e);
 	}
     }
@@ -3570,8 +3590,11 @@ _int_malloc (mstate av, size_t bytes)
       victim = pp;					\
       if (victim == NULL)				\
 	break;						\
+      pp = REVEAL_PTR (victim->fd);                                     \
+      if (__glibc_unlikely (!aligned_OK (pp)))                          \
+	malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
     }							\
-  while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
+  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
 	 != victim);					\
 
   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
@@ -3583,8 +3606,11 @@ _int_malloc (mstate av, size_t bytes)
 
       if (victim != NULL)
 	{
+	  if (__glibc_unlikely (!aligned_OK (victim)))
+	    malloc_printerr ("malloc(): unaligned fastbin chunk detected");
+
 	  if (SINGLE_THREAD_P)
-	    *fb = victim->fd;
+	    *fb = REVEAL_PTR (victim->fd);
 	  else
 	    REMOVE_FB (fb, pp, victim);
 	  if (__glibc_likely (victim != NULL))
@@ -3605,8 +3631,10 @@ _int_malloc (mstate av, size_t bytes)
 		  while (tcache->counts[tc_idx] < mp_.tcache_count
 			 && (tc_victim = *fb) != NULL)
 		    {
+		      if (__glibc_unlikely (!aligned_OK (tc_victim)))
+			malloc_printerr ("malloc(): unaligned fastbin chunk detected");
 		      if (SINGLE_THREAD_P)
-			*fb = tc_victim->fd;
+			*fb = REVEAL_PTR (tc_victim->fd);
 		      else
 			{
 			  REMOVE_FB (fb, pp, tc_victim);
@@ -4196,11 +4224,15 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
 	    for (tmp = tcache->entries[tc_idx];
 		 tmp;
-		 tmp = tmp->next)
+		 tmp = REVEAL_PTR (tmp->next))
+        {
+	      if (__glibc_unlikely (!aligned_OK (tmp)))
+		malloc_printerr ("free(): unaligned chunk detected in tcache 2");
 	      if (tmp == e)
 		malloc_printerr ("free(): double free detected in tcache 2");
 	    /* If we get here, it was a coincidence.  We've wasted a
 	       few cycles, but don't abort.  */
+        }
 	  }
 
 	if (tcache->counts[tc_idx] < mp_.tcache_count)
@@ -4264,7 +4296,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	   add (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
 	  malloc_printerr ("double free or corruption (fasttop)");
-	p->fd = old;
+	p->fd = PROTECT_PTR (&p->fd, old);
 	*fb = p;
       }
     else
@@ -4274,7 +4306,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	     add (i.e., double free).  */
 	  if (__builtin_expect (old == p, 0))
 	    malloc_printerr ("double free or corruption (fasttop)");
-	  p->fd = old2 = old;
+	  old2 = old;
+	  p->fd = PROTECT_PTR (&p->fd, old);
 	}
       while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
 	     != old2);
@@ -4472,13 +4505,17 @@ static void malloc_consolidate(mstate av)
     if (p != 0) {
       do {
 	{
+	  if (__glibc_unlikely (!aligned_OK (p)))
+	    malloc_printerr ("malloc_consolidate(): " \
+			     "unaligned fastbin chunk detected");
+
 	  unsigned int idx = fastbin_index (chunksize (p));
 	  if ((&fastbin (av, idx)) != fb)
 	    malloc_printerr ("malloc_consolidate(): invalid chunk size");
 	}
 
 	check_inuse_chunk(av, p);
-	nextp = p->fd;
+	nextp = REVEAL_PTR (p->fd);
 
 	/* Slightly streamlined version of consolidation code in free() */
 	size = chunksize (p);
@@ -4896,8 +4933,13 @@ int_mallinfo (mstate av, struct mallinfo *m)
 
   for (i = 0; i < NFASTBINS; ++i)
     {
-      for (p = fastbin (av, i); p != 0; p = p->fd)
+      for (p = fastbin (av, i);
+	   p != 0;
+	   p = REVEAL_PTR (p->fd))
         {
+	  if (__glibc_unlikely (!aligned_OK (p)))
+	    malloc_printerr ("int_mallinfo(): " \
+			     "unaligned fastbin chunk detected");
           ++nfastblocks;
           fastavail += chunksize (p);
         }
@@ -5437,8 +5479,11 @@ __malloc_info (int options, FILE *fp)
 
 	      while (p != NULL)
 		{
+		  if (__glibc_unlikely (!aligned_OK (p)))
+		    malloc_printerr ("__malloc_info(): " \
+				     "unaligned fastbin chunk detected");
 		  ++nthissize;
-		  p = p->fd;
+		  p = REVEAL_PTR (p->fd);
 		}
 
 	      fastavail += nthissize * thissize;