From 5e60a9bc135f77e98058841a5ad166961b81b9d6 Mon Sep 17 00:00:00 2001
From: CentOS Sources
Date: Oct 15 2022 18:16:28 +0000
Subject: import glibc-2.28-216.el8

---

diff --git a/SOURCES/glibc-rh1871383-1.patch b/SOURCES/glibc-rh1871383-1.patch
new file mode 100644
index 0000000..67c88fd
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-1.patch
@@ -0,0 +1,245 @@
+From a1a486d70ebcc47a686ff5846875eacad0940e41 Mon Sep 17 00:00:00 2001
+From: Eyal Itkin
+Date: Fri, 20 Mar 2020 21:19:17 +0200
+Subject: Add Safe-Linking to fastbins and tcache
+
+Safe-Linking is a security mechanism that protects single-linked
+lists (such as the fastbin and tcache) from being tampered with by
+attackers. The mechanism makes use of randomness from ASLR
+(mmap_base), and when combined with chunk alignment integrity checks,
+it protects the "next" pointers from being hijacked by an attacker.
+
+While Safe-Unlinking protects double-linked lists (such as the small
+bins), there wasn't any similar protection for attacks against
+single-linked lists. This solution protects against 3 common attacks:
+ * Partial pointer override: modifies the lower bytes (Little Endian)
+ * Full pointer override: hijacks the pointer to an attacker's location
+ * Unaligned chunks: pointing the list to an unaligned address
+
+The design assumes an attacker doesn't know where the heap is located,
+and uses the ASLR randomness to "sign" the single-linked pointers. We
+mark the pointer as P and the location in which it is stored as L, and
+the calculation will be:
+ * PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
+ * *L = PROTECT(P)
+
+This way, the random bits from the address L (which start at the bit
+in the PAGE_SHIFT position) will be merged with the LSBs of the stored
+protected pointer. This protection layer prevents an attacker from
+modifying the pointer into a controlled value.
+
+An additional check that the chunks are MALLOC_ALIGNed adds an
+important layer:
+ * Attackers can't point to illegal (unaligned) memory addresses
+ * Attackers must correctly guess the alignment bits
+
+On standard 32 bit Linux machines, an attack will directly fail 7
+out of 8 times, and on 64 bit machines it will fail 15 out of 16
+times.
+
+This proposed patch was benchmarked and its effect on the overall
+performance of the heap was negligible and couldn't be distinguished
+from the default variance between tests on the vanilla version. A
+similar protection was added to Chromium's version of TCMalloc
+in 2012, and according to their documentation it had an overhead of
+less than 2%.
+
+Reviewed-by: DJ Delorie
+Reviewed-by: Carlos O'Donell
+Reviewed-by: Adhemerval Zanella
+
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index f7cd29bc2f..1282863681 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -327,6 +327,18 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
+ # define MAX_TCACHE_COUNT UINT16_MAX
+ #endif
+ 
++/* Safe-Linking:
++   Use randomness from ASLR (mmap_base) to protect single-linked lists
++   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
++   lists' chunks, and also perform allocation alignment checks on them.
++   This mechanism reduces the risk of pointer hijacking, as was done with
++   Safe-Unlinking in the double-linked lists of Small-Bins.
++   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
++   larger pages provide less entropy, although the pointer mangling
++   still works.
*/ ++#define PROTECT_PTR(pos, ptr) \ ++ ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr))) ++#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr) + + /* + REALLOC_ZERO_BYTES_FREES should be set if a call to +@@ -2157,12 +2169,15 @@ do_check_malloc_state (mstate av) + + while (p != 0) + { ++ if (__glibc_unlikely (!aligned_OK (p))) ++ malloc_printerr ("do_check_malloc_state(): " \ ++ "unaligned fastbin chunk detected"); + /* each chunk claims to be inuse */ + do_check_inuse_chunk (av, p); + total += chunksize (p); + /* chunk belongs in this bin */ + assert (fastbin_index (chunksize (p)) == i); +- p = p->fd; ++ p = REVEAL_PTR (p->fd); + } + } + +@@ -2923,7 +2938,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx) + detect a double free. */ + e->key = tcache; + +- e->next = tcache->entries[tc_idx]; ++ e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]); + tcache->entries[tc_idx] = e; + ++(tcache->counts[tc_idx]); + } +@@ -2934,9 +2949,11 @@ static __always_inline void * + tcache_get (size_t tc_idx) + { + tcache_entry *e = tcache->entries[tc_idx]; +- tcache->entries[tc_idx] = e->next; ++ tcache->entries[tc_idx] = REVEAL_PTR (e->next); + --(tcache->counts[tc_idx]); + e->key = NULL; ++ if (__glibc_unlikely (!aligned_OK (e))) ++ malloc_printerr ("malloc(): unaligned tcache chunk detected"); + return (void *) e; + } + +@@ -2960,7 +2977,10 @@ tcache_thread_shutdown (void) + while (tcache_tmp->entries[i]) + { + tcache_entry *e = tcache_tmp->entries[i]; +- tcache_tmp->entries[i] = e->next; ++ if (__glibc_unlikely (!aligned_OK (e))) ++ malloc_printerr ("tcache_thread_shutdown(): " \ ++ "unaligned tcache chunk detected"); ++ tcache_tmp->entries[i] = REVEAL_PTR (e->next); + __libc_free (e); + } + } +@@ -3570,8 +3590,11 @@ _int_malloc (mstate av, size_t bytes) + victim = pp; \ + if (victim == NULL) \ + break; \ ++ pp = REVEAL_PTR (victim->fd); \ ++ if (__glibc_unlikely (!aligned_OK (pp))) \ ++ malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \ + } \ +- while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \ ++ while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \ + != victim); \ + + if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ())) +@@ -3583,8 +3606,11 @@ _int_malloc (mstate av, size_t bytes) + + if (victim != NULL) + { ++ if (__glibc_unlikely (!aligned_OK (victim))) ++ malloc_printerr ("malloc(): unaligned fastbin chunk detected"); ++ + if (SINGLE_THREAD_P) +- *fb = victim->fd; ++ *fb = REVEAL_PTR (victim->fd); + else + REMOVE_FB (fb, pp, victim); + if (__glibc_likely (victim != NULL)) +@@ -3605,8 +3631,10 @@ _int_malloc (mstate av, size_t bytes) + while (tcache->counts[tc_idx] < mp_.tcache_count + && (tc_victim = *fb) != NULL) + { ++ if (__glibc_unlikely (!aligned_OK (tc_victim))) ++ malloc_printerr ("malloc(): unaligned fastbin chunk detected"); + if (SINGLE_THREAD_P) +- *fb = tc_victim->fd; ++ *fb = REVEAL_PTR (tc_victim->fd); + else + { + REMOVE_FB (fb, pp, tc_victim); +@@ -4196,11 +4224,15 @@ _int_free (mstate av, mchunkptr p, int have_lock) + LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx); + for (tmp = tcache->entries[tc_idx]; + tmp; +- tmp = tmp->next) ++ tmp = REVEAL_PTR (tmp->next)) ++ { ++ if (__glibc_unlikely (!aligned_OK (tmp))) ++ malloc_printerr ("free(): unaligned chunk detected in tcache 2"); + if (tmp == e) + malloc_printerr ("free(): double free detected in tcache 2"); + /* If we get here, it was a coincidence. We've wasted a + few cycles, but don't abort. 
*/ ++ } + } + + if (tcache->counts[tc_idx] < mp_.tcache_count) +@@ -4264,7 +4296,7 @@ _int_free (mstate av, mchunkptr p, int have_lock) + add (i.e., double free). */ + if (__builtin_expect (old == p, 0)) + malloc_printerr ("double free or corruption (fasttop)"); +- p->fd = old; ++ p->fd = PROTECT_PTR (&p->fd, old); + *fb = p; + } + else +@@ -4274,7 +4306,8 @@ _int_free (mstate av, mchunkptr p, int have_lock) + add (i.e., double free). */ + if (__builtin_expect (old == p, 0)) + malloc_printerr ("double free or corruption (fasttop)"); +- p->fd = old2 = old; ++ old2 = old; ++ p->fd = PROTECT_PTR (&p->fd, old); + } + while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) + != old2); +@@ -4472,13 +4505,17 @@ static void malloc_consolidate(mstate av) + if (p != 0) { + do { + { ++ if (__glibc_unlikely (!aligned_OK (p))) ++ malloc_printerr ("malloc_consolidate(): " \ ++ "unaligned fastbin chunk detected"); ++ + unsigned int idx = fastbin_index (chunksize (p)); + if ((&fastbin (av, idx)) != fb) + malloc_printerr ("malloc_consolidate(): invalid chunk size"); + } + + check_inuse_chunk(av, p); +- nextp = p->fd; ++ nextp = REVEAL_PTR (p->fd); + + /* Slightly streamlined version of consolidation code in free() */ + size = chunksize (p); +@@ -4896,8 +4933,13 @@ int_mallinfo (mstate av, struct mallinfo *m) + + for (i = 0; i < NFASTBINS; ++i) + { +- for (p = fastbin (av, i); p != 0; p = p->fd) ++ for (p = fastbin (av, i); ++ p != 0; ++ p = REVEAL_PTR (p->fd)) + { ++ if (__glibc_unlikely (!aligned_OK (p))) ++ malloc_printerr ("int_mallinfo(): " \ ++ "unaligned fastbin chunk detected"); + ++nfastblocks; + fastavail += chunksize (p); + } +@@ -5437,8 +5479,11 @@ __malloc_info (int options, FILE *fp) + + while (p != NULL) + { ++ if (__glibc_unlikely (!aligned_OK (p))) ++ malloc_printerr ("__malloc_info(): " \ ++ "unaligned fastbin chunk detected"); + ++nthissize; +- p = p->fd; ++ p = REVEAL_PTR (p->fd); + } + + fastavail += nthissize * thissize; diff --git a/SOURCES/glibc-rh1871383-2.patch b/SOURCES/glibc-rh1871383-2.patch new file mode 100644 index 0000000..0313dbb --- /dev/null +++ b/SOURCES/glibc-rh1871383-2.patch @@ -0,0 +1,87 @@ +From 768358b6a80742f6be68ecd9f952f4b60614df96 Mon Sep 17 00:00:00 2001 +From: Eyal Itkin +Date: Tue, 31 Mar 2020 01:55:13 -0400 +Subject: Typo fixes and CR cleanup in Safe-Linking + +Removed unneeded '\' chars from end of lines and fixed some +indentation issues that were introduced in the original +Safe-Linking patch. 
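Taken together, these two patches reduce to a handful of shift-and-XOR operations that can be exercised outside of glibc. The following is a minimal standalone sketch: the PROTECT_PTR/REVEAL_PTR macros are copied from the patch above, while the harness around them (the list slot and the partial-overwrite step) is purely illustrative and assumes a 64-bit target with 16-byte malloc alignment.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Copied from the patch: mask a pointer PTR with the ASLR-derived high
   bits of the address POS where it is stored.  */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)

int
main (void)
{
  void *next = malloc (32);	/* The "next" pointer to protect.  */
  static void *slot;		/* The list location L that stores it.  */

  /* Store the protected pointer, as tcache_put and fastbin free do.  */
  slot = PROTECT_PTR (&slot, next);

  /* A legitimate traversal recovers the original pointer exactly.  */
  printf ("next=%p revealed=%p\n", next, REVEAL_PTR (slot));

  /* A partial overwrite of the low bytes, made without knowing the heap
     address, decodes to a scrambled pointer that is almost surely
     unaligned (a 15/16 chance given 16-byte MALLOC_ALIGNMENT).  */
  slot = (void *) (((uintptr_t) slot & ~(uintptr_t) 0xffff) | 0x4141);
  void *forged = REVEAL_PTR (slot);
  printf ("forged=%p aligned=%d\n", forged,
          (int) (((uintptr_t) forged & 0xf) == 0));

  free (next);
  return 0;
}

Because the mask is derived from the address of the storage slot itself, no extra state is needed to reverse the transformation; REVEAL_PTR is just PROTECT_PTR applied at the same location, which is why the patch can thread it through fastbin and tcache traversals without any layout changes.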
+ +Reviewed-by: Carlos O'Donell + +diff --git a/malloc/malloc.c b/malloc/malloc.c +index 1282863681..0e4acb22f6 100644 +--- a/malloc/malloc.c ++++ b/malloc/malloc.c +@@ -2170,7 +2170,7 @@ do_check_malloc_state (mstate av) + while (p != 0) + { + if (__glibc_unlikely (!aligned_OK (p))) +- malloc_printerr ("do_check_malloc_state(): " \ ++ malloc_printerr ("do_check_malloc_state(): " + "unaligned fastbin chunk detected"); + /* each chunk claims to be inuse */ + do_check_inuse_chunk (av, p); +@@ -2977,9 +2977,9 @@ tcache_thread_shutdown (void) + while (tcache_tmp->entries[i]) + { + tcache_entry *e = tcache_tmp->entries[i]; +- if (__glibc_unlikely (!aligned_OK (e))) +- malloc_printerr ("tcache_thread_shutdown(): " \ +- "unaligned tcache chunk detected"); ++ if (__glibc_unlikely (!aligned_OK (e))) ++ malloc_printerr ("tcache_thread_shutdown(): " ++ "unaligned tcache chunk detected"); + tcache_tmp->entries[i] = REVEAL_PTR (e->next); + __libc_free (e); + } +@@ -4225,14 +4225,14 @@ _int_free (mstate av, mchunkptr p, int have_lock) + for (tmp = tcache->entries[tc_idx]; + tmp; + tmp = REVEAL_PTR (tmp->next)) +- { +- if (__glibc_unlikely (!aligned_OK (tmp))) +- malloc_printerr ("free(): unaligned chunk detected in tcache 2"); +- if (tmp == e) +- malloc_printerr ("free(): double free detected in tcache 2"); +- /* If we get here, it was a coincidence. We've wasted a +- few cycles, but don't abort. */ +- } ++ { ++ if (__glibc_unlikely (!aligned_OK (tmp))) ++ malloc_printerr ("free(): unaligned chunk detected in tcache 2"); ++ if (tmp == e) ++ malloc_printerr ("free(): double free detected in tcache 2"); ++ /* If we get here, it was a coincidence. We've wasted a ++ few cycles, but don't abort. */ ++ } + } + + if (tcache->counts[tc_idx] < mp_.tcache_count) +@@ -4506,7 +4506,7 @@ static void malloc_consolidate(mstate av) + do { + { + if (__glibc_unlikely (!aligned_OK (p))) +- malloc_printerr ("malloc_consolidate(): " \ ++ malloc_printerr ("malloc_consolidate(): " + "unaligned fastbin chunk detected"); + + unsigned int idx = fastbin_index (chunksize (p)); +@@ -4938,7 +4938,7 @@ int_mallinfo (mstate av, struct mallinfo *m) + p = REVEAL_PTR (p->fd)) + { + if (__glibc_unlikely (!aligned_OK (p))) +- malloc_printerr ("int_mallinfo(): " \ ++ malloc_printerr ("int_mallinfo(): " + "unaligned fastbin chunk detected"); + ++nfastblocks; + fastavail += chunksize (p); +@@ -5480,7 +5480,7 @@ __malloc_info (int options, FILE *fp) + while (p != NULL) + { + if (__glibc_unlikely (!aligned_OK (p))) +- malloc_printerr ("__malloc_info(): " \ ++ malloc_printerr ("__malloc_info(): " + "unaligned fastbin chunk detected"); + ++nthissize; + p = REVEAL_PTR (p->fd); diff --git a/SOURCES/glibc-rh1871383-3.patch b/SOURCES/glibc-rh1871383-3.patch new file mode 100644 index 0000000..e5a18e8 --- /dev/null +++ b/SOURCES/glibc-rh1871383-3.patch @@ -0,0 +1,100 @@ +From 49c3c37651e2d2ec4ff8ce21252bbbc08a9d6639 Mon Sep 17 00:00:00 2001 +From: Eyal Itkin +Date: Tue, 31 Mar 2020 02:00:14 -0400 +Subject: Fix alignment bug in Safe-Linking + +Alignment checks should be performed on the user's buffer and NOT +on the mchunkptr as was done before. This caused bugs in 32 bit +versions, because: 2*sizeof(t) != MALLOC_ALIGNMENT. + +As the tcache works on users' buffers it uses the aligned_OK() +check, and the rest work on mchunkptr and therefore check using +misaligned_chunk(). 
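The distinction matters on 32-bit targets such as i386, where MALLOC_ALIGNMENT is 16 but the chunk header occupies only 2 * SIZE_SZ = 8 bytes (the "2*sizeof(t) != MALLOC_ALIGNMENT" remark above): user buffers are 16-byte aligned while the chunk headers preceding them deliberately are not. Below is a simplified, self-contained model of the two predicates; the constants and macro bodies are illustrative stand-ins for the real malloc.c definitions, not copies of them.

#include <stdint.h>
#include <stdio.h>

/* Illustrative i386 values, assumed for this sketch.  */
#define SIZE_SZ            4
#define MALLOC_ALIGNMENT   16
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)

/* User buffers must sit on a MALLOC_ALIGNMENT boundary ...  */
#define aligned_OK(m)  (((uintptr_t) (m) & MALLOC_ALIGN_MASK) == 0)

/* ... while a chunk header starts 2 * SIZE_SZ bytes below the buffer,
   so the check must add that offset back first (simplified form).  */
#define misaligned_chunk(p) \
  ((((MALLOC_ALIGNMENT == 2 * SIZE_SZ) \
     ? (uintptr_t) (p) : (uintptr_t) (p) + 2 * SIZE_SZ) \
    & MALLOC_ALIGN_MASK) != 0)

int
main (void)
{
  uintptr_t mem = 0x5000;               /* A valid 16-byte aligned buffer.  */
  uintptr_t chunk = mem - 2 * SIZE_SZ;  /* Its chunk header, at 0x4ff8.  */

  printf ("aligned_OK (mem)         = %d\n", aligned_OK (mem));          /* 1 */
  printf ("misaligned_chunk (chunk) = %d\n", misaligned_chunk (chunk));  /* 0 */
  /* The pre-fix code effectively did this on fastbin chunk pointers,
     wrongly flagging a perfectly valid chunk on i386:  */
  printf ("aligned_OK (chunk)       = %d\n", aligned_OK (chunk));        /* 0 */
  return 0;
}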
+ +Reviewed-by: Carlos O'Donell + +diff --git a/malloc/malloc.c b/malloc/malloc.c +index 0e4acb22f6..6acb5ad43a 100644 +--- a/malloc/malloc.c ++++ b/malloc/malloc.c +@@ -2169,7 +2169,7 @@ do_check_malloc_state (mstate av) + + while (p != 0) + { +- if (__glibc_unlikely (!aligned_OK (p))) ++ if (__glibc_unlikely (misaligned_chunk (p))) + malloc_printerr ("do_check_malloc_state(): " + "unaligned fastbin chunk detected"); + /* each chunk claims to be inuse */ +@@ -2949,11 +2949,11 @@ static __always_inline void * + tcache_get (size_t tc_idx) + { + tcache_entry *e = tcache->entries[tc_idx]; ++ if (__glibc_unlikely (!aligned_OK (e))) ++ malloc_printerr ("malloc(): unaligned tcache chunk detected"); + tcache->entries[tc_idx] = REVEAL_PTR (e->next); + --(tcache->counts[tc_idx]); + e->key = NULL; +- if (__glibc_unlikely (!aligned_OK (e))) +- malloc_printerr ("malloc(): unaligned tcache chunk detected"); + return (void *) e; + } + +@@ -3591,7 +3591,7 @@ _int_malloc (mstate av, size_t bytes) + if (victim == NULL) \ + break; \ + pp = REVEAL_PTR (victim->fd); \ +- if (__glibc_unlikely (!aligned_OK (pp))) \ ++ if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \ + malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \ + } \ + while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \ +@@ -3606,8 +3606,8 @@ _int_malloc (mstate av, size_t bytes) + + if (victim != NULL) + { +- if (__glibc_unlikely (!aligned_OK (victim))) +- malloc_printerr ("malloc(): unaligned fastbin chunk detected"); ++ if (__glibc_unlikely (misaligned_chunk (victim))) ++ malloc_printerr ("malloc(): unaligned fastbin chunk detected 2"); + + if (SINGLE_THREAD_P) + *fb = REVEAL_PTR (victim->fd); +@@ -3631,8 +3631,8 @@ _int_malloc (mstate av, size_t bytes) + while (tcache->counts[tc_idx] < mp_.tcache_count + && (tc_victim = *fb) != NULL) + { +- if (__glibc_unlikely (!aligned_OK (tc_victim))) +- malloc_printerr ("malloc(): unaligned fastbin chunk detected"); ++ if (__glibc_unlikely (misaligned_chunk (tc_victim))) ++ malloc_printerr ("malloc(): unaligned fastbin chunk detected 3"); + if (SINGLE_THREAD_P) + *fb = REVEAL_PTR (tc_victim->fd); + else +@@ -4505,7 +4505,7 @@ static void malloc_consolidate(mstate av) + if (p != 0) { + do { + { +- if (__glibc_unlikely (!aligned_OK (p))) ++ if (__glibc_unlikely (misaligned_chunk (p))) + malloc_printerr ("malloc_consolidate(): " + "unaligned fastbin chunk detected"); + +@@ -4937,7 +4937,7 @@ int_mallinfo (mstate av, struct mallinfo *m) + p != 0; + p = REVEAL_PTR (p->fd)) + { +- if (__glibc_unlikely (!aligned_OK (p))) ++ if (__glibc_unlikely (misaligned_chunk (p))) + malloc_printerr ("int_mallinfo(): " + "unaligned fastbin chunk detected"); + ++nfastblocks; +@@ -5479,7 +5479,7 @@ __malloc_info (int options, FILE *fp) + + while (p != NULL) + { +- if (__glibc_unlikely (!aligned_OK (p))) ++ if (__glibc_unlikely (misaligned_chunk (p))) + malloc_printerr ("__malloc_info(): " + "unaligned fastbin chunk detected"); + ++nthissize; diff --git a/SOURCES/glibc-rh1871383-4.patch b/SOURCES/glibc-rh1871383-4.patch new file mode 100644 index 0000000..cac8349 --- /dev/null +++ b/SOURCES/glibc-rh1871383-4.patch @@ -0,0 +1,215 @@ +From 6310d570bf20348135d09e1f9de84a9ae7d06f83 Mon Sep 17 00:00:00 2001 +From: Eyal Itkin +Date: Thu, 2 Apr 2020 07:26:35 -0400 +Subject: Add tests for Safe-Linking + +Adding the test "tst-safe-linking" for testing that Safe-Linking works +as expected. 
The test checks these 3 main flows:
+ * tcache protection
+ * fastbin protection
+ * malloc_consolidate() correctness
+
+As there is a random chance of 1/16 that the alignment will remain
+correct, the test checks each flow up to 10 times, using different
+random values for the pointer corruption.  As a result, the chance for
+a false failure of a given tested flow is 2**(-40), thus highly
+unlikely.
+
+Reviewed-by: Carlos O'Donell
+
+diff --git a/malloc/Makefile b/malloc/Makefile
+index 984045b5b9..e22cbde22d 100644
+--- a/malloc/Makefile
++++ b/malloc/Makefile
+@@ -39,6 +39,7 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
+	 tst-malloc-too-large \
+	 tst-malloc-stats-cancellation \
+	 tst-tcfree1 tst-tcfree2 tst-tcfree3 \
++	 tst-safe-linking \
+ 
+ tests-static := \
+	 tst-interpose-static-nothread \
+diff --git a/malloc/tst-safe-linking.c b/malloc/tst-safe-linking.c
+new file mode 100644
+index 0000000000..067b6c09cf
+--- /dev/null
++++ b/malloc/tst-safe-linking.c
+@@ -0,0 +1,179 @@
++/* Test reporting of Safe-Linking caught errors.
++   Copyright (C) 2020 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <http://www.gnu.org/licenses/>.  */
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <stdbool.h>
++#include <signal.h>
++#include <time.h>
++#include <sys/wait.h>
++#include <support/capture_subprocess.h>
++#include <support/check.h>
++
++/* Run CALLBACK and check that the data on standard error equals
++   EXPECTED.  */
++static void
++check (const char *test, void (*callback) (void *),
++       const char *expected)
++{
++  int i, rand_mask;
++  bool success = false;
++  /* There is a chance of 1/16 that a corrupted pointer will be aligned.
++     Try multiple times so that statistical failure will be improbable.  */
++  for (i = 0; i < 10 && !success; ++i)
++    {
++      rand_mask = rand () & 0xFF;
++      struct support_capture_subprocess result
++	= support_capture_subprocess (callback, &rand_mask);
++      /* Did not crash, could happen.  Try again.  */
++      if (strlen (result.err.buffer) == 0)
++	continue;
++      /* Crashed, must be the expected result.  */
++      if (strcmp (result.err.buffer, expected) != 0)
++	{
++	  support_record_failure ();
++	  printf ("error: test %s unexpected standard error data\n"
++		  "  expected: %s\n"
++		  "  actual:   %s\n",
++		  test, expected, result.err.buffer);
++	}
++      TEST_VERIFY (WIFSIGNALED (result.status));
++      if (WIFSIGNALED (result.status))
++	TEST_VERIFY (WTERMSIG (result.status) == SIGABRT);
++      support_capture_subprocess_free (&result);
++      success = true;
++    }
++  TEST_VERIFY (success);
++}
++
++/* Implementation details must be kept in sync with malloc.  */
++#define TCACHE_FILL_COUNT		7
++#define TCACHE_ALLOC_SIZE		0x20
++#define MALLOC_CONSOLIDATE_SIZE		256*1024
++
++/* Try corrupting the tcache list.  */
++static void
++test_tcache (void *closure)
++{
++  int mask = ((int *)closure)[0];
++  size_t size = TCACHE_ALLOC_SIZE;
++
++  /* Populate the tcache list.  */
++  void * volatile a = malloc (size);
++  void * volatile b = malloc (size);
++  void * volatile c = malloc (size);
++  free (a);
++  free (b);
++  free (c);
++
++  /* Corrupt the pointer with a random value, and avoid optimizations.  */
++  printf ("Before: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++  memset (c, mask & 0xFF, size);
++  printf ("After: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++
++  c = malloc (size);
++  /* This line will trigger the Safe-Linking check.  */
++  b = malloc (size);
++  printf ("b=%p\n", b);
++}
++
++/* Try corrupting the fastbin list.  */
++static void
++test_fastbin (void *closure)
++{
++  int i;
++  int mask = ((int *)closure)[0];
++  size_t size = TCACHE_ALLOC_SIZE;
++
++  /* Take the tcache out of the game.  */
++  for (i = 0; i < TCACHE_FILL_COUNT; ++i)
++    {
++      void * volatile p = calloc (1, size);
++      free (p);
++    }
++
++  /* Populate the fastbin list.  */
++  void * volatile a = calloc (1, size);
++  void * volatile b = calloc (1, size);
++  void * volatile c = calloc (1, size);
++  free (a);
++  free (b);
++  free (c);
++
++  /* Corrupt the pointer with a random value, and avoid optimizations.  */
++  printf ("Before: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++  memset (c, mask & 0xFF, size);
++  printf ("After: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++
++  c = calloc (1, size);
++  /* This line will trigger the Safe-Linking check.  */
++  b = calloc (1, size);
++  printf ("b=%p\n", b);
++}
++
++/* Try corrupting the fastbin list and trigger a consolidate.  */
++static void
++test_fastbin_consolidate (void *closure)
++{
++  int i;
++  int mask = ((int *)closure)[0];
++  size_t size = TCACHE_ALLOC_SIZE;
++
++  /* Take the tcache out of the game.  */
++  for (i = 0; i < TCACHE_FILL_COUNT; ++i)
++    {
++      void * volatile p = calloc (1, size);
++      free (p);
++    }
++
++  /* Populate the fastbin list.  */
++  void * volatile a = calloc (1, size);
++  void * volatile b = calloc (1, size);
++  void * volatile c = calloc (1, size);
++  free (a);
++  free (b);
++  free (c);
++
++  /* Corrupt the pointer with a random value, and avoid optimizations.  */
++  printf ("Before: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++  memset (c, mask & 0xFF, size);
++  printf ("After: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++
++  /* This line will trigger the Safe-Linking check.  */
++  b = malloc (MALLOC_CONSOLIDATE_SIZE);
++  printf ("b=%p\n", b);
++}
++
++static int
++do_test (void)
++{
++  /* Seed the random for the test.  */
++  srand (time (NULL));
++
++  check ("test_tcache", test_tcache,
++	 "malloc(): unaligned tcache chunk detected\n");
++  check ("test_fastbin", test_fastbin,
++	 "malloc(): unaligned fastbin chunk detected 2\n");
++  check ("test_fastbin_consolidate", test_fastbin_consolidate,
++	 "malloc_consolidate(): unaligned fastbin chunk detected\n");
++
++  return 0;
++}
++
++#include <support/test-driver.c>
diff --git a/SOURCES/glibc-rh1871383-5.patch b/SOURCES/glibc-rh1871383-5.patch
new file mode 100644
index 0000000..bc51a1e
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-5.patch
@@ -0,0 +1,35 @@
+From b9cde4e3aa1ff338da7064daf1386b2f4a7351ba Mon Sep 17 00:00:00 2001
+From: DJ Delorie
+Date: Sat, 4 Apr 2020 01:44:56 -0400
+Subject: malloc: ensure set_max_fast never stores zero [BZ #25733]
+
+The code for set_max_fast() stores an "impossibly small value"
+instead of zero when the parameter is zero.  However, for
+small values of the parameter (e.g. 1 or 2) the computation
+results in a zero being stored anyway.
+
+This patch checks for the parameter being small enough for the
+computation to result in zero instead, so that a zero is never
+stored.
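The arithmetic is easy to verify by hand. A standalone snippet, assuming the usual x86-64 constants (SIZE_SZ = 8, MALLOC_ALIGN_MASK = 15) rather than anything taken from glibc headers:

#include <stdio.h>

/* Usual x86-64 values, assumed here for illustration.  */
#define SIZE_SZ            8UL
#define MALLOC_ALIGN_MASK  15UL

int
main (void)
{
  /* The old macro only special-cased s == 0; for s = 1..7 the rounding
     below still collapses to zero, which then gets stored.  */
  for (unsigned long s = 0; s <= 9; ++s)
    printf ("s = %lu  ->  (s + SIZE_SZ) & ~MALLOC_ALIGN_MASK = %lu\n",
            s, (s + SIZE_SZ) & ~MALLOC_ALIGN_MASK);
  return 0;
}

With these constants, the new guard (size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ special-cases exactly s = 0..7; on i686, where SIZE_SZ is 4, it covers s = 0..11, matching the table below.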
+
+key values which result in zero being stored:
+
+x86-64:  1..7   (or other 64-bit)
+i686:    1..11
+armhfp:  1..3   (or other 32-bit)
+
+Reviewed-by: Carlos O'Donell
+
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 6acb5ad43a..ee87ddbbf9 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -1632,7 +1632,7 @@ static INTERNAL_SIZE_T global_max_fast;
+  */
+ 
+ #define set_max_fast(s) \
+-  global_max_fast = (((s) == 0) \
++  global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
+     ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
+ 
+ static inline INTERNAL_SIZE_T
diff --git a/SOURCES/glibc-rh1871383-6.patch b/SOURCES/glibc-rh1871383-6.patch
new file mode 100644
index 0000000..b21971a
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-6.patch
@@ -0,0 +1,35 @@
+From 0e00b35704e67c499c3abfbd5b6224a13d38b012 Mon Sep 17 00:00:00 2001
+From: "W. Hashimoto"
+Date: Fri, 11 Dec 2020 16:59:10 -0500
+Subject: malloc: Detect infinite-loop in _int_free when freeing tcache
+ [BZ#27052]
+
+If the tcache linked list contains a loop, _int_free enters an
+infinite loop when freeing a tcache entry.  A PoC that triggers the
+infinite loop is attached to the Bugzilla report (#27052).  The loop
+should terminate once it exceeds mp_.tcache_count entries, and the
+program should abort.  The affected glibc version is 2.29 or later.
+
+Reviewed-by: DJ Delorie
+
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 5b87bdb081..ec2d934595 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -4224,11 +4224,14 @@ _int_free (mstate av, mchunkptr p, int have_lock)
+       if (__glibc_unlikely (e->key == tcache))
+ 	{
+ 	  tcache_entry *tmp;
++	  size_t cnt = 0;
+ 	  LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
+ 	  for (tmp = tcache->entries[tc_idx];
+ 	       tmp;
+-	       tmp = REVEAL_PTR (tmp->next))
++	       tmp = REVEAL_PTR (tmp->next), ++cnt)
+ 	    {
++	      if (cnt >= mp_.tcache_count)
++		malloc_printerr ("free(): too many chunks detected in tcache");
+ 	      if (__glibc_unlikely (!aligned_OK (tmp)))
+ 		malloc_printerr ("free(): unaligned chunk detected in tcache 2");
+ 	      if (tmp == e)
diff --git a/SOURCES/glibc-rh1871383-7.patch b/SOURCES/glibc-rh1871383-7.patch
new file mode 100644
index 0000000..61d11c4
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-7.patch
@@ -0,0 +1,133 @@
+From fc859c304898a5ec72e0ba5269ed136ed0ea10e1 Mon Sep 17 00:00:00 2001
+From: Siddhesh Poyarekar
+Date: Wed, 7 Jul 2021 23:02:46 +0530
+Subject: Harden tcache double-free check
+
+The tcache allocator layer uses the tcache pointer as a key to
+identify a block that may be freed twice.  Since this is in the
+application data area, an attacker exploiting a use-after-free could
+potentially get access to the entire tcache structure through this
+key.  A detailed write-up was provided by Awarau here:
+
+https://awaraucom.wordpress.com/2020/07/19/house-of-io-remastered/
+
+Replace this static pointer use for key checking with one that is
+generated at malloc initialization.  The first attempt is through
+getrandom with a fallback to random_bits(), which is a simple
+pseudo-random number generator based on the clock.  The fallback ought
+to be sufficient since the goal of the randomness is only to make the
+key arbitrary enough that it is very unlikely to collide with user
+data.
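As a standalone illustration of that initialization strategy, the sketch below mirrors the tcache_key_initialize logic from the diff further down. glibc's internal random_bits() is not exported, so a clock-based stand-in (clock_bits) is substituted here; that helper is an assumption made for illustration, not the actual implementation. getrandom itself is the public API from <sys/random.h>.

#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>
#include <time.h>

static uintptr_t tcache_key;

/* Stand-in for glibc's internal random_bits (): a low-grade
   clock-derived value, good enough for an arbitrary (not secret) key.  */
static uint32_t
clock_bits (void)
{
  struct timespec ts;
  clock_gettime (CLOCK_MONOTONIC, &ts);
  return (uint32_t) (ts.tv_nsec ^ (ts.tv_sec << 20));
}

static void
key_initialize (void)
{
  /* Prefer real randomness; GRND_NONBLOCK avoids stalling early in
     process startup if the entropy pool is not yet initialized.  */
  if (getrandom (&tcache_key, sizeof (tcache_key), GRND_NONBLOCK)
      != sizeof (tcache_key))
    {
#if UINTPTR_MAX == UINT64_MAX
      tcache_key = ((uintptr_t) clock_bits () << 32) | clock_bits ();
#else
      tcache_key = clock_bits ();
#endif
    }
}

int
main (void)
{
  key_initialize ();
  printf ("tcache_key = %#lx\n", (unsigned long) tcache_key);
  return 0;
}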
+
+Co-authored-by: Eyal Itkin
+
+[note: context for arena.c chunk #2 changed to accommodate missing
+tagging support code - DJ]
+
+diff -rup a/malloc/arena.c b/malloc/arena.c
+--- a/malloc/arena.c	2022-09-16 01:09:02.003843024 -0400
++++ b/malloc/arena.c	2022-09-16 01:25:51.879994057 -0400
+@@ -286,6 +286,10 @@ extern struct dl_open_hook *_dl_open_hoo
+ libc_hidden_proto (_dl_open_hook);
+ #endif
+ 
++#if USE_TCACHE
++static void tcache_key_initialize (void);
++#endif
++
+ static void
+ ptmalloc_init (void)
+ {
+@@ -294,6 +298,10 @@ ptmalloc_init (void)
+ 
+   __malloc_initialized = 0;
+ 
++#if USE_TCACHE
++  tcache_key_initialize ();
++#endif
++
+ #ifdef SHARED
+   /* In case this libc copy is in a non-default namespace, never use brk.
+      Likewise if dlopened from statically linked program.  */
+diff -rup a/malloc/malloc.c b/malloc/malloc.c
+--- a/malloc/malloc.c	2022-09-16 01:09:05.491977387 -0400
++++ b/malloc/malloc.c	2022-09-16 01:25:51.883994213 -0400
+@@ -247,6 +247,10 @@
+ /* For SINGLE_THREAD_P.  */
+ #include <sysdep-cancel.h>
+ 
++/* For tcache double-free check.  */
++#include <random-bits.h>
++#include <sys/random.h>
++
+ /*
+   Debugging:
+ 
+@@ -2924,7 +2928,7 @@ typedef struct tcache_entry
+ {
+   struct tcache_entry *next;
+   /* This field exists to detect double frees.  */
+-  struct tcache_perthread_struct *key;
++  uintptr_t key;
+ } tcache_entry;
+ 
+ /* There is one of these for each thread, which contains the
+@@ -2941,6 +2945,31 @@ typedef struct tcache_perthread_struct
+ static __thread bool tcache_shutting_down = false;
+ static __thread tcache_perthread_struct *tcache = NULL;
+ 
++/* Process-wide key to try and catch a double-free in the same thread.  */
++static uintptr_t tcache_key;
++
++/* The value of tcache_key does not really have to be a cryptographically
++   secure random number.  It only needs to be arbitrary enough so that it does
++   not collide with values present in applications.  If a collision does happen
++   consistently enough, it could cause a degradation in performance since the
++   entire list is checked to check if the block indeed has been freed the
++   second time.  The odds of this happening are exceedingly low though, about 1
++   in 2^wordsize.  There is probably a higher chance of the performance
++   degradation being due to a double free where the first free happened in a
++   different thread; that's a case this check does not cover.  */
++static void
++tcache_key_initialize (void)
++{
++  if (__getrandom (&tcache_key, sizeof (tcache_key), GRND_NONBLOCK)
++      != sizeof (tcache_key))
++    {
++      tcache_key = random_bits ();
++#if __WORDSIZE == 64
++      tcache_key = (tcache_key << 32) | random_bits ();
++#endif
++    }
++}
++
+ /* Caller must ensure that we know tc_idx is valid and there's room
+    for more chunks.  */
+ static __always_inline void
+@@ -2950,7 +2979,7 @@ tcache_put (mchunkptr chunk, size_t tc_i
+ 
+   /* Mark this chunk as "in the tcache" so the test in _int_free will
+      detect a double free.  */
+-  e->key = tcache;
++  e->key = tcache_key;
+ 
+   e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
+   tcache->entries[tc_idx] = e;
+@@ -2967,7 +2996,7 @@ tcache_get (size_t tc_idx)
+     malloc_printerr ("malloc(): unaligned tcache chunk detected");
+   tcache->entries[tc_idx] = REVEAL_PTR (e->next);
+   --(tcache->counts[tc_idx]);
+-  e->key = NULL;
++  e->key = 0;
+   return (void *) e;
+ }
+ 
+@@ -4231,7 +4260,7 @@ _int_free (mstate av, mchunkptr p, int h
+ 	 trust it (it also matches random payload data at a 1 in
+ 	 2^<size_t> chance), so verify it's not an unlikely
+ 	 coincidence before aborting.
*/ +- if (__glibc_unlikely (e->key == tcache)) ++ if (__glibc_unlikely (e->key == tcache_key)) + { + tcache_entry *tmp; + size_t cnt = 0; diff --git a/SOURCES/wrap-find-debuginfo.sh b/SOURCES/wrap-find-debuginfo.sh index 6eeb802..8479217 100755 --- a/SOURCES/wrap-find-debuginfo.sh +++ b/SOURCES/wrap-find-debuginfo.sh @@ -17,6 +17,8 @@ set -evx tar_tmp="$(mktemp)" +declare -A libc_dlink_tmp_list +ldso_annobin_sym_tmp_list="" # Prefer a separately installed debugedit over the RPM-integrated one. if command -v debugedit >/dev/null ; then @@ -26,7 +28,7 @@ else fi cleanup () { - rm -f "$tar_tmp" + rm -f "$tar_tmp" ${libc_dlink_tmp_list[@]} $ldso_annobin_sym_tmp_list } trap cleanup 0 @@ -51,6 +53,15 @@ full_list="$ldso_list $libc_list $libdl_list $libpthread_list $librt_list" # Run the debuginfo extraction. "$script_path" "$@" +# libc.so.6: Extract the .gnu_debuglink section +for f in $libc_list +do + dlink_tmp="$(mktemp)" + libc_dlink_tmp_list["$f"]="$dlink_tmp" + objcopy -j.gnu_debuglink --set-section-flags .gnu_debuglink=alloc \ + -O binary "$sysroot_path/$f" "$dlink_tmp" +done + # Restore the original files. (cd "$sysroot_path"; tar xf "$tar_tmp") (cd "$sysroot_path"; ls -l $full_list) @@ -61,6 +72,20 @@ do objcopy --merge-notes "$sysroot_path/$p" done +# libc.so.6: Restore the .gnu_debuglink section +for f in ${!libc_dlink_tmp_list[@]} +do + dlink_tmp="${libc_dlink_tmp_list[$f]}" + objcopy --add-section .gnu_debuglink="$dlink_tmp" "$sysroot_path/$f" +done + +# ld.so does not have separated debuginfo and so the debuginfo file +# generated by find-debuginfo is redundant. Therefore, remove it. +for ldso_debug in `find "$sysroot_path" -name 'ld-*.so*.debug' -type f` +do + rm -f "$ldso_debug" +done + # libc.so.6 and other shared objects: Reduce to valuable symbols. # Eliminate file symbols, annobin symbols, and symbols used by the # glibc build to implement hidden aliases (__EI_*). We would also @@ -103,6 +128,14 @@ debug_base_name=${last_arg:-$RPM_BUILD_ROOT} for p in $ldso_list do $debugedit -b "$debug_base_name" -d "$debug_dest_name" -n "$sysroot_path/$p" + + # Remove the .annobin* symbols (and only them). + ldso_annobin_sym_tmp="$(mktemp)" + ldso_annobin_sym_tmp_list+=" $ldso_annobin_sym_tmp" + if nm --format=posix "$sysroot_path/$p" | cut -d' ' -f1 \ + | grep '^\.annobin' > "$ldso_annobin_sym_tmp"; then + objcopy --strip-symbols="$ldso_annobin_sym_tmp" "$sysroot_path/$p" + fi done # Apply single-file DWARF optimization. 
diff --git a/SPECS/glibc.spec b/SPECS/glibc.spec index 4e17000..2565954 100644 --- a/SPECS/glibc.spec +++ b/SPECS/glibc.spec @@ -1,6 +1,6 @@ %define glibcsrcdir glibc-2.28 %define glibcversion 2.28 -%define glibcrelease 214%{?dist} +%define glibcrelease 216%{?dist} # Pre-release tarballs are pulled in from git using a command that is # effectively: # @@ -971,6 +971,13 @@ Patch778: glibc-rh2119304-3.patch Patch779: glibc-rh2118667.patch Patch780: glibc-rh2122498.patch Patch781: glibc-rh2125222.patch +Patch782: glibc-rh1871383-1.patch +Patch783: glibc-rh1871383-2.patch +Patch784: glibc-rh1871383-3.patch +Patch785: glibc-rh1871383-4.patch +Patch786: glibc-rh1871383-5.patch +Patch787: glibc-rh1871383-6.patch +Patch788: glibc-rh1871383-7.patch ############################################################################## # Continued list of core "glibc" package information: @@ -2801,6 +2808,14 @@ fi %files -f compat-libpthread-nonshared.filelist -n compat-libpthread-nonshared %changelog +* Wed Oct 05 2022 Arjun Shankar - 2.28-216 +- Retain .gnu_debuglink section for libc.so.6 (#2115830) +- Remove .annobin* symbols from ld.so +- Remove redundant ld.so debuginfo file + +* Wed Sep 28 2022 DJ Delorie - 2.28-215 +- Improve malloc implementation (#1871383) + * Tue Sep 20 2022 Florian Weimer - 2.28-214 - Fix hwcaps search path size computation (#2125222)