From e776593904a7cbf4eb1088ff424fcbfd01817662 Mon Sep 17 00:00:00 2001 From: Ali Erdinc Koroglu Date: Sep 14 2023 06:48:19 +0000 Subject: RHBZ #2233338 --- diff --git a/SOURCES/glibc-rh2233338-1.patch b/SOURCES/glibc-rh2233338-1.patch new file mode 100644 index 0000000..b865e85 --- /dev/null +++ b/SOURCES/glibc-rh2233338-1.patch @@ -0,0 +1,37 @@ +commit 7b5bfe77836442b9aeb75cc520f0d1eb7f82be67 +Author: Florian Weimer +Date: Mon May 18 15:21:04 2020 +0200 + + elf: Assert that objects are relocated before their constructors run + + If we try to run constructors before relocation, this is always + a dynamic linker bug. An assert is easier to notice than a call + via an invalid function pointer (which may not even produce a valid + call stack). + + Reviewed-by: Carlos O'Donell + +diff --git a/elf/dl-init.c b/elf/dl-init.c +index 45405cd0563845b4..99ce531d7b326f5f 100644 +--- a/elf/dl-init.c ++++ b/elf/dl-init.c +@@ -16,6 +16,7 @@ + License along with the GNU C Library; if not, see + . */ + ++#include + #include + #include + +@@ -27,6 +28,11 @@ typedef void (*init_t) (int, char **, char **); + static void + call_init (struct link_map *l, int argc, char **argv, char **env) + { ++ /* If the object has not been relocated, this is a bug. The ++ function pointers are invalid in this case. (Executables do not ++ need relocation, and neither do proxy objects.) */ ++ assert (l->l_real->l_relocated || l->l_real->l_type == lt_executable); ++ + if (l->l_init_called) + /* This object is all done. */ + return; diff --git a/SOURCES/glibc-rh2233338-2.patch b/SOURCES/glibc-rh2233338-2.patch new file mode 100644 index 0000000..84e10da --- /dev/null +++ b/SOURCES/glibc-rh2233338-2.patch @@ -0,0 +1,237 @@ +commit 6f360366f7f76b158a0f4bf20d42f2854ad56264 +Author: Florian Weimer +Date: Thu Oct 27 11:36:44 2022 +0200 + + elf: Introduce to _dl_call_fini + + This consolidates the destructor invocations from _dl_fini and + dlclose. Remove the micro-optimization that avoids + calling _dl_call_fini if they are no destructors (as dlclose is quite + expensive anyway). The debug log message is now printed + unconditionally. + + Reviewed-by: Adhemerval Zanella + +Conflicts: + elf/dl-fini.c + (Missing ELF_INITFINI support downstream.) + sysdeps/generic/ldsodefs.h + (Missing dl_init_t declaration downstream.) + +diff --git a/elf/Makefile b/elf/Makefile +index 634c3113227d64a6..040d82e243a80c0f 100644 +--- a/elf/Makefile ++++ b/elf/Makefile +@@ -50,6 +50,7 @@ routines = \ + # profiled libraries. + dl-routines = \ + dl-call-libc-early-init \ ++ dl-call_fini \ + dl-close \ + dl-debug \ + dl-deps \ +diff --git a/elf/dl-call_fini.c b/elf/dl-call_fini.c +new file mode 100644 +index 0000000000000000..9e7ba10fa2a4df77 +--- /dev/null ++++ b/elf/dl-call_fini.c +@@ -0,0 +1,50 @@ ++/* Invoke DT_FINI and DT_FINI_ARRAY callbacks. ++ Copyright (C) 1996-2022 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. 
++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ . */ ++ ++#include ++#include ++ ++void ++_dl_call_fini (void *closure_map) ++{ ++ struct link_map *map = closure_map; ++ ++ /* When debugging print a message first. */ ++ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS)) ++ _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", map->l_name, map->l_ns); ++ ++ /* Make sure nothing happens if we are called twice. */ ++ map->l_init_called = 0; ++ ++ ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY]; ++ if (fini_array != NULL) ++ { ++ ElfW(Addr) *array = (ElfW(Addr) *) (map->l_addr ++ + fini_array->d_un.d_ptr); ++ size_t sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val ++ / sizeof (ElfW(Addr))); ++ ++ while (sz-- > 0) ++ ((fini_t) array[sz]) (); ++ } ++ ++ /* Next try the old-style destructor. */ ++ ElfW(Dyn) *fini = map->l_info[DT_FINI]; ++ if (fini != NULL) ++ DL_CALL_DT_FINI (map, ((void *) map->l_addr + fini->d_un.d_ptr)); ++} +diff --git a/elf/dl-close.c b/elf/dl-close.c +index 22225efb3226c3e1..26ea51dfbadc5b85 100644 +--- a/elf/dl-close.c ++++ b/elf/dl-close.c +@@ -35,11 +35,6 @@ + + #include + +- +-/* Type of the constructor functions. */ +-typedef void (*fini_t) (void); +- +- + /* Special l_idx value used to indicate which objects remain loaded. */ + #define IDX_STILL_USED -1 + +@@ -109,31 +104,6 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp, + return false; + } + +-/* Invoke dstructors for CLOSURE (a struct link_map *). Called with +- exception handling temporarily disabled, to make errors fatal. */ +-static void +-call_destructors (void *closure) +-{ +- struct link_map *map = closure; +- +- if (map->l_info[DT_FINI_ARRAY] != NULL) +- { +- ElfW(Addr) *array = +- (ElfW(Addr) *) (map->l_addr +- + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr); +- unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val +- / sizeof (ElfW(Addr))); +- +- while (sz-- > 0) +- ((fini_t) array[sz]) (); +- } +- +- /* Next try the old-style destructor. */ +- if (map->l_info[DT_FINI] != NULL) +- DL_CALL_DT_FINI (map, ((void *) map->l_addr +- + map->l_info[DT_FINI]->d_un.d_ptr)); +-} +- + void + _dl_close_worker (struct link_map *map, bool force) + { +@@ -279,17 +249,7 @@ _dl_close_worker (struct link_map *map, bool force) + half-cooked objects. Temporarily disable exception + handling, so that errors are fatal. */ + if (imap->l_init_called) +- { +- /* When debugging print a message first. */ +- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, +- 0)) +- _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", +- imap->l_name, nsid); +- +- if (imap->l_info[DT_FINI_ARRAY] != NULL +- || imap->l_info[DT_FINI] != NULL) +- _dl_catch_exception (NULL, call_destructors, imap); +- } ++ _dl_catch_exception (NULL, _dl_call_fini, imap); + + #ifdef SHARED + /* Auditing checkpoint: we remove an object. */ +diff --git a/elf/dl-fini.c b/elf/dl-fini.c +index e14259a3c8806e0d..2d34658d4c3a470c 100644 +--- a/elf/dl-fini.c ++++ b/elf/dl-fini.c +@@ -20,11 +20,6 @@ + #include + #include + +- +-/* Type of the constructor functions. */ +-typedef void (*fini_t) (void); +- +- + void + _dl_fini (void) + { +@@ -115,38 +110,7 @@ _dl_fini (void) + + if (l->l_init_called) + { +- /* Make sure nothing happens if we are called twice. */ +- l->l_init_called = 0; +- +- /* Is there a destructor function? */ +- if (l->l_info[DT_FINI_ARRAY] != NULL +- || l->l_info[DT_FINI] != NULL) +- { +- /* When debugging print a message first. 
*/ +- if (__builtin_expect (GLRO(dl_debug_mask) +- & DL_DEBUG_IMPCALLS, 0)) +- _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", +- DSO_FILENAME (l->l_name), +- ns); +- +- /* First see whether an array is given. */ +- if (l->l_info[DT_FINI_ARRAY] != NULL) +- { +- ElfW(Addr) *array = +- (ElfW(Addr) *) (l->l_addr +- + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr); +- unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val +- / sizeof (ElfW(Addr))); +- while (i-- > 0) +- ((fini_t) array[i]) (); +- } +- +- /* Next try the old-style destructor. */ +- if (l->l_info[DT_FINI] != NULL) +- DL_CALL_DT_FINI +- (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr); +- } +- ++ _dl_call_fini (l); + #ifdef SHARED + /* Auditing checkpoint: another object closed. */ + _dl_audit_objclose (l); +diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h +index 29bbde3e83e37d7e..0bad34d44a5685d9 100644 +--- a/sysdeps/generic/ldsodefs.h ++++ b/sysdeps/generic/ldsodefs.h +@@ -93,6 +93,9 @@ typedef struct link_map *lookup_t; + : (__glibc_unlikely ((ref)->st_shndx == SHN_ABS) ? 0 \ + : LOOKUP_VALUE_ADDRESS (map, map_set)) + (ref)->st_value) + ++/* Type of a constructor function, in DT_FINI, DT_FINI_ARRAY. */ ++typedef void (*fini_t) (void); ++ + /* On some architectures a pointer to a function is not just a pointer + to the actual code of the function but rather an architecture + specific descriptor. */ +@@ -1047,6 +1050,11 @@ extern void _dl_init (struct link_map *main_map, int argc, char **argv, + initializer functions have completed. */ + extern void _dl_fini (void) attribute_hidden; + ++/* Invoke the DT_FINI_ARRAY and DT_FINI destructors for MAP, which ++ must be a struct link_map *. Can be used as an argument to ++ _dl_catch_exception. */ ++void _dl_call_fini (void *map) attribute_hidden; ++ + /* Sort array MAPS according to dependencies of the contained objects. + If FORCE_FIRST, MAPS[0] keeps its place even if the dependencies + say otherwise. */ diff --git a/SOURCES/glibc-rh2233338-3.patch b/SOURCES/glibc-rh2233338-3.patch new file mode 100644 index 0000000..1627d06 --- /dev/null +++ b/SOURCES/glibc-rh2233338-3.patch @@ -0,0 +1,30 @@ +commit f6c8204fd7fabf0cf4162eaf10ccf23258e4d10e +Author: Florian Weimer +Date: Tue Aug 22 13:56:25 2023 +0200 + + elf: Do not run constructors for proxy objects + + Otherwise, the ld.so constructor runs for each audit namespace + and each dlmopen namespace. + +diff --git a/elf/dl-init.c b/elf/dl-init.c +index 99ce531d7b326f5f..73c0259fbe6d19af 100644 +--- a/elf/dl-init.c ++++ b/elf/dl-init.c +@@ -28,10 +28,14 @@ typedef void (*init_t) (int, char **, char **); + static void + call_init (struct link_map *l, int argc, char **argv, char **env) + { ++ /* Do not run constructors for proxy objects. */ ++ if (l != l->l_real) ++ return; ++ + /* If the object has not been relocated, this is a bug. The + function pointers are invalid in this case. (Executables do not +- need relocation, and neither do proxy objects.) */ +- assert (l->l_real->l_relocated || l->l_real->l_type == lt_executable); ++ need relocation.) */ ++ assert (l->l_relocated || l->l_type == lt_executable); + + if (l->l_init_called) + /* This object is all done. 
*/ diff --git a/SOURCES/glibc-rh2233338-4.patch b/SOURCES/glibc-rh2233338-4.patch new file mode 100644 index 0000000..8db76e4 --- /dev/null +++ b/SOURCES/glibc-rh2233338-4.patch @@ -0,0 +1,637 @@ +commit 6985865bc3ad5b23147ee73466583dd7fdf65892 +Author: Florian Weimer +Date: Fri Sep 8 12:32:14 2023 +0200 + + elf: Always call destructors in reverse constructor order (bug 30785) + + The current implementation of dlclose (and process exit) re-sorts the + link maps before calling ELF destructors. Destructor order is not the + reverse of the constructor order as a result: The second sort takes + relocation dependencies into account, and other differences can result + from ambiguous inputs, such as cycles. (The force_first handling in + _dl_sort_maps is not effective for dlclose.) After the changes in + this commit, there is still a required difference due to + dlopen/dlclose ordering by the application, but the previous + discrepancies went beyond that. + + A new global (namespace-spanning) list of link maps, + _dl_init_called_list, is updated right before ELF constructors are + called from _dl_init. + + In dl_close_worker, the maps variable, an on-stack variable length + array, is eliminated. (VLAs are problematic, and dlclose should not + call malloc because it cannot readily deal with malloc failure.) + Marking still-used objects uses the namespace list directly, with + next and next_idx replacing the done_index variable. + + After marking, _dl_init_called_list is used to call the destructors + of now-unused maps in reverse destructor order. These destructors + can call dlopen. Previously, new objects do not have l_map_used set. + This had to change: There is no copy of the link map list anymore, + so processing would cover newly opened (and unmarked) mappings, + unloading them. Now, _dl_init (indirectly) sets l_map_used, too. + (dlclose is handled by the existing reentrancy guard.) + + After _dl_init_called_list traversal, two more loops follow. The + processing order changes to the original link map order in the + namespace. Previously, dependency order was used. The difference + should not matter because relocation dependencies could already + reorder link maps in the old code. + + The changes to _dl_fini remove the sorting step and replace it with + a traversal of _dl_init_called_list. The l_direct_opencount + decrement outside the loader lock is removed because it appears + incorrect: the counter manipulation could race with other dynamic + loader operations. + + tst-audit23 needs adjustments to the changes in LA_ACT_DELETE + notifications. The new approach for checking la_activity should + make it clearer that la_activty calls come in pairs around namespace + updates. + + The dependency sorting test cases need updates because the destructor + order is always the opposite order of constructor order, even with + relocation dependencies or cycles present. + + There is a future cleanup opportunity to remove the now-constant + force_first and for_fini arguments from the _dl_sort_maps function. + + Fixes commit 1df71d32fe5f5905ffd5d100e5e9ca8ad62 ("elf: Implement + force_first handling in _dl_sort_maps_dfs (bug 28937)"). 
+ + Reviewed-by: DJ Delorie + +diff --git a/elf/dl-close.c b/elf/dl-close.c +index 26ea51dfbadc5b85..6b134f66628cfc03 100644 +--- a/elf/dl-close.c ++++ b/elf/dl-close.c +@@ -137,30 +137,31 @@ _dl_close_worker (struct link_map *map, bool force) + + bool any_tls = false; + const unsigned int nloaded = ns->_ns_nloaded; +- struct link_map *maps[nloaded]; + +- /* Run over the list and assign indexes to the link maps and enter +- them into the MAPS array. */ ++ /* Run over the list and assign indexes to the link maps. */ + int idx = 0; + for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next) + { + l->l_map_used = 0; + l->l_map_done = 0; + l->l_idx = idx; +- maps[idx] = l; + ++idx; + } + assert (idx == nloaded); + +- /* Keep track of the lowest index link map we have covered already. */ +- int done_index = -1; +- while (++done_index < nloaded) ++ /* Keep marking link maps until no new link maps are found. */ ++ for (struct link_map *l = ns->_ns_loaded; l != NULL; ) + { +- struct link_map *l = maps[done_index]; ++ /* next is reset to earlier link maps for remarking. */ ++ struct link_map *next = l->l_next; ++ int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL. */ + + if (l->l_map_done) +- /* Already handled. */ +- continue; ++ { ++ /* Already handled. */ ++ l = next; ++ continue; ++ } + + /* Check whether this object is still used. */ + if (l->l_type == lt_loaded +@@ -170,7 +171,10 @@ _dl_close_worker (struct link_map *map, bool force) + acquire is sufficient and correct. */ + && atomic_load_acquire (&l->l_tls_dtor_count) == 0 + && !l->l_map_used) +- continue; ++ { ++ l = next; ++ continue; ++ } + + /* We need this object and we handle it now. */ + l->l_map_used = 1; +@@ -197,8 +201,11 @@ _dl_close_worker (struct link_map *map, bool force) + already processed it, then we need to go back + and process again from that point forward to + ensure we keep all of its dependencies also. */ +- if ((*lp)->l_idx - 1 < done_index) +- done_index = (*lp)->l_idx - 1; ++ if ((*lp)->l_idx < next_idx) ++ { ++ next = *lp; ++ next_idx = next->l_idx; ++ } + } + } + +@@ -218,44 +225,65 @@ _dl_close_worker (struct link_map *map, bool force) + if (!jmap->l_map_used) + { + jmap->l_map_used = 1; +- if (jmap->l_idx - 1 < done_index) +- done_index = jmap->l_idx - 1; ++ if (jmap->l_idx < next_idx) ++ { ++ next = jmap; ++ next_idx = next->l_idx; ++ } + } + } + } +- } + +- /* Sort the entries. We can skip looking for the binary itself which is +- at the front of the search list for the main namespace. */ +- _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true); ++ l = next; ++ } + +- /* Call all termination functions at once. */ +- bool unload_any = false; +- bool scope_mem_left = false; +- unsigned int unload_global = 0; +- unsigned int first_loaded = ~0; +- for (unsigned int i = 0; i < nloaded; ++i) ++ /* Call the destructors in reverse constructor order, and remove the ++ closed link maps from the list. */ ++ for (struct link_map **init_called_head = &_dl_init_called_list; ++ *init_called_head != NULL; ) + { +- struct link_map *imap = maps[i]; ++ struct link_map *imap = *init_called_head; + +- /* All elements must be in the same namespace. */ +- assert (imap->l_ns == nsid); +- +- if (!imap->l_map_used) ++ /* _dl_init_called_list is global, to produce a global odering. ++ Ignore the other namespaces (and link maps that are still used). 
*/ ++ if (imap->l_ns != nsid || imap->l_map_used) ++ init_called_head = &imap->l_init_called_next; ++ else + { + assert (imap->l_type == lt_loaded && !imap->l_nodelete_active); + +- /* Call its termination function. Do not do it for +- half-cooked objects. Temporarily disable exception +- handling, so that errors are fatal. */ +- if (imap->l_init_called) ++ /* _dl_init_called_list is updated at the same time as ++ l_init_called. */ ++ assert (imap->l_init_called); ++ ++ if (imap->l_info[DT_FINI_ARRAY] != NULL ++ || imap->l_info[DT_FINI] != NULL) + _dl_catch_exception (NULL, _dl_call_fini, imap); + + #ifdef SHARED + /* Auditing checkpoint: we remove an object. */ + _dl_audit_objclose (imap); + #endif ++ /* Unlink this link map. */ ++ *init_called_head = imap->l_init_called_next; ++ } ++ } ++ ++ ++ bool unload_any = false; ++ bool scope_mem_left = false; ++ unsigned int unload_global = 0; ++ ++ /* For skipping un-unloadable link maps in the second loop. */ ++ struct link_map *first_loaded = ns->_ns_loaded; + ++ /* Iterate over the namespace to find objects to unload. Some ++ unloadable objects may not be on _dl_init_called_list due to ++ dlopen failure. */ ++ for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next) ++ { ++ if (!imap->l_map_used) ++ { + /* This object must not be used anymore. */ + imap->l_removed = 1; + +@@ -266,8 +294,8 @@ _dl_close_worker (struct link_map *map, bool force) + ++unload_global; + + /* Remember where the first dynamically loaded object is. */ +- if (i < first_loaded) +- first_loaded = i; ++ if (first_loaded == NULL) ++ first_loaded = imap; + } + /* Else imap->l_map_used. */ + else if (imap->l_type == lt_loaded) +@@ -403,8 +431,8 @@ _dl_close_worker (struct link_map *map, bool force) + imap->l_loader = NULL; + + /* Remember where the first dynamically loaded object is. */ +- if (i < first_loaded) +- first_loaded = i; ++ if (first_loaded == NULL) ++ first_loaded = imap; + } + } + +@@ -475,10 +503,11 @@ _dl_close_worker (struct link_map *map, bool force) + + /* Check each element of the search list to see if all references to + it are gone. */ +- for (unsigned int i = first_loaded; i < nloaded; ++i) ++ for (struct link_map *imap = first_loaded; imap != NULL; ) + { +- struct link_map *imap = maps[i]; +- if (!imap->l_map_used) ++ if (imap->l_map_used) ++ imap = imap->l_next; ++ else + { + assert (imap->l_type == lt_loaded); + +@@ -686,7 +715,9 @@ _dl_close_worker (struct link_map *map, bool force) + if (imap == GL(dl_initfirst)) + GL(dl_initfirst) = NULL; + ++ struct link_map *next = imap->l_next; + free (imap); ++ imap = next; + } + } + +diff --git a/elf/dl-fini.c b/elf/dl-fini.c +index 2d34658d4c3a470c..25a7767d707721d5 100644 +--- a/elf/dl-fini.c ++++ b/elf/dl-fini.c +@@ -23,116 +23,68 @@ + void + _dl_fini (void) + { +- /* Lots of fun ahead. We have to call the destructors for all still +- loaded objects, in all namespaces. The problem is that the ELF +- specification now demands that dependencies between the modules +- are taken into account. I.e., the destructor for a module is +- called before the ones for any of its dependencies. +- +- To make things more complicated, we cannot simply use the reverse +- order of the constructors. Since the user might have loaded objects +- using `dlopen' there are possibly several other modules with its +- dependencies to be taken into account. Therefore we have to start +- determining the order of the modules once again from the beginning. */ +- +- /* We run the destructors of the main namespaces last. 
As for the +- other namespaces, we pick run the destructors in them in reverse +- order of the namespace ID. */ +-#ifdef SHARED +- int do_audit = 0; +- again: +-#endif +- for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns) +- { +- /* Protect against concurrent loads and unloads. */ +- __rtld_lock_lock_recursive (GL(dl_load_lock)); +- +- unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded; +- /* No need to do anything for empty namespaces or those used for +- auditing DSOs. */ +- if (nloaded == 0 +-#ifdef SHARED +- || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit +-#endif +- ) +- __rtld_lock_unlock_recursive (GL(dl_load_lock)); +- else +- { ++ /* Call destructors strictly in the reverse order of constructors. ++ This causes fewer surprises than some arbitrary reordering based ++ on new (relocation) dependencies. None of the objects are ++ unmapped, so applications can deal with this if their DSOs remain ++ in a consistent state after destructors have run. */ ++ ++ /* Protect against concurrent loads and unloads. */ ++ __rtld_lock_lock_recursive (GL(dl_load_lock)); ++ ++ /* Ignore objects which are opened during shutdown. */ ++ struct link_map *local_init_called_list = _dl_init_called_list; ++ ++ for (struct link_map *l = local_init_called_list; l != NULL; ++ l = l->l_init_called_next) ++ /* Bump l_direct_opencount of all objects so that they ++ are not dlclose()ed from underneath us. */ ++ ++l->l_direct_opencount; ++ ++ /* After this point, everything linked from local_init_called_list ++ cannot be unloaded because of the reference counter update. */ ++ __rtld_lock_unlock_recursive (GL(dl_load_lock)); ++ ++ /* Perform two passes: One for non-audit modules, one for audit ++ modules. This way, audit modules receive unload notifications ++ for non-audit objects, and the destructors for audit modules ++ still run. */ + #ifdef SHARED +- _dl_audit_activity_nsid (ns, LA_ACT_DELETE); ++ int last_pass = GLRO(dl_naudit) > 0; ++ Lmid_t last_ns = -1; ++ for (int do_audit = 0; do_audit <= last_pass; ++do_audit) + #endif +- +- /* Now we can allocate an array to hold all the pointers and +- copy the pointers in. */ +- struct link_map *maps[nloaded]; +- +- unsigned int i; +- struct link_map *l; +- assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL); +- for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next) +- /* Do not handle ld.so in secondary namespaces. */ +- if (l == l->l_real) +- { +- assert (i < nloaded); +- +- maps[i] = l; +- l->l_idx = i; +- ++i; +- +- /* Bump l_direct_opencount of all objects so that they +- are not dlclose()ed from underneath us. */ +- ++l->l_direct_opencount; +- } +- assert (ns != LM_ID_BASE || i == nloaded); +- assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1); +- unsigned int nmaps = i; +- +- /* Now we have to do the sorting. We can skip looking for the +- binary itself which is at the front of the search list for +- the main namespace. */ +- _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true); +- +- /* We do not rely on the linked list of loaded object anymore +- from this point on. We have our own list here (maps). The +- various members of this list cannot vanish since the open +- count is too high and will be decremented in this loop. So +- we release the lock so that some code which might be called +- from a destructor can directly or indirectly access the +- lock. */ +- __rtld_lock_unlock_recursive (GL(dl_load_lock)); +- +- /* 'maps' now contains the objects in the right order. Now +- call the destructors. 
We have to process this array from +- the front. */ +- for (i = 0; i < nmaps; ++i) +- { +- struct link_map *l = maps[i]; +- +- if (l->l_init_called) +- { +- _dl_call_fini (l); ++ for (struct link_map *l = local_init_called_list; l != NULL; ++ l = l->l_init_called_next) ++ { + #ifdef SHARED +- /* Auditing checkpoint: another object closed. */ +- _dl_audit_objclose (l); ++ if (GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing != do_audit) ++ continue; ++ ++ /* Avoid back-to-back calls of _dl_audit_activity_nsid for the ++ same namespace. */ ++ if (last_ns != l->l_ns) ++ { ++ if (last_ns >= 0) ++ _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT); ++ _dl_audit_activity_nsid (l->l_ns, LA_ACT_DELETE); ++ last_ns = l->l_ns; ++ } + #endif +- } + +- /* Correct the previous increment. */ +- --l->l_direct_opencount; +- } ++ /* There is no need to re-enable exceptions because _dl_fini ++ is not called from a context where exceptions are caught. */ ++ _dl_call_fini (l); + + #ifdef SHARED +- _dl_audit_activity_nsid (ns, LA_ACT_CONSISTENT); ++ /* Auditing checkpoint: another object closed. */ ++ _dl_audit_objclose (l); + #endif +- } +- } ++ } + + #ifdef SHARED +- if (! do_audit && GLRO(dl_naudit) > 0) +- { +- do_audit = 1; +- goto again; +- } ++ if (last_ns >= 0) ++ _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT); + + if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS)) + _dl_debug_printf ("\nruntime linker statistics:\n" +diff --git a/elf/dl-init.c b/elf/dl-init.c +index 73c0259fbe6d19af..eb6c83d180c3f1a1 100644 +--- a/elf/dl-init.c ++++ b/elf/dl-init.c +@@ -24,6 +24,7 @@ + /* Type of the initializer. */ + typedef void (*init_t) (int, char **, char **); + ++struct link_map *_dl_init_called_list; + + static void + call_init (struct link_map *l, int argc, char **argv, char **env) +@@ -45,6 +46,21 @@ call_init (struct link_map *l, int argc, char **argv, char **env) + dependency. */ + l->l_init_called = 1; + ++ /* Help an already-running dlclose: The just-loaded object must not ++ be removed during the current pass. (No effect if no dlclose in ++ progress.) */ ++ l->l_map_used = 1; ++ ++ /* Record execution before starting any initializers. This way, if ++ the initializers themselves call dlopen, their ELF destructors ++ will eventually be run before this object is destructed, matching ++ that their ELF constructors have run before this object was ++ constructed. _dl_fini uses this list for audit callbacks, so ++ register objects on the list even if they do not have a ++ constructor. */ ++ l->l_init_called_next = _dl_init_called_list; ++ _dl_init_called_list = l; ++ + /* Check for object which constructors we do not run here. */ + if (__builtin_expect (l->l_name[0], 'a') == '\0' + && l->l_type == lt_executable) +diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def +index 4bf9052db16fb352..61dc54f8ae06d465 100644 +--- a/elf/dso-sort-tests-1.def ++++ b/elf/dso-sort-tests-1.def +@@ -53,21 +53,14 @@ tst-dso-ordering10: {}->a->b->c;soname({})=c + output: b>a>{}b->c->d order). +-# The older dynamic_sort=1 algorithm does not achieve this, while the DFS-based +-# dynamic_sort=2 algorithm does, although it is still arguable whether going +-# beyond spec to do this is the right thing to do. +-# The below expected outputs are what the two algorithms currently produce +-# respectively, for regression testing purposes. ++# relocation(dynamic) dependencies. 
For both sorting algorithms, the ++# destruction order is the reverse of the construction order, and ++# relocation dependencies are not taken into account. + tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c +-output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[a1;a->a2;a2->a;b->b1;c->a1;c=>a1 +-output(glibc.rtld.dynamic_sort=1): {+a[a2>a1>a>];+b[b1>b>];-b[];%c(a1());}a1>a>];+b[b1>b>];-b[];%c(a1());}a1>a>];+b[b1>b>];-b[];%c(a1());} +Date: Fri Sep 8 13:02:06 2023 +0200 + + elf: Remove unused l_text_end field from struct link_map + + It is a left-over from commit 52a01100ad011293197637e42b5be1a479a2 + ("elf: Remove ad-hoc restrictions on dlopen callers [BZ #22787]"). + + When backporting commmit 6985865bc3ad5b23147ee73466583dd7fdf65892 + ("elf: Always call destructors in reverse constructor order + (bug 30785)"), we can move the l_init_called_next field to this + place, so that the internal GLIBC_PRIVATE ABI does not change. + + Reviewed-by: Carlos O'Donell + Tested-by: Carlos O'Donell + +Conflicts: + elf/dl-load.h + (Missing commit "Avoid "inline" after return type in function + definitions.") + elf/rtld.c + (Missing rtld_setup_main_map function. Re-did the l_text_end + removal from scratch.) + +diff --git a/elf/dl-load.c b/elf/dl-load.c +index 0b45e6e3db31c70d..52dc564af9e95878 100644 +--- a/elf/dl-load.c ++++ b/elf/dl-load.c +@@ -1176,7 +1176,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd, + + /* Now process the load commands and map segments into memory. + This is responsible for filling in: +- l_map_start, l_map_end, l_addr, l_contiguous, l_text_end, l_phdr ++ l_map_start, l_map_end, l_addr, l_contiguous, l_phdr + */ + errstring = _dl_map_segments (l, fd, header, type, loadcmds, nloadcmds, + maplength, has_holes, loader); +diff --git a/elf/dl-load.h b/elf/dl-load.h +index 66ea2e9237ab6321..ebf2604e044c3bde 100644 +--- a/elf/dl-load.h ++++ b/elf/dl-load.h +@@ -82,14 +82,11 @@ struct loadcmd + + /* This is a subroutine of _dl_map_segments. It should be called for each + load command, some time after L->l_addr has been set correctly. It is +- responsible for setting up the l_text_end and l_phdr fields. */ +-static void __always_inline ++ responsible for setting the l_phdr fields */ ++static __always_inline void + _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header, + const struct loadcmd *c) + { +- if (c->prot & PROT_EXEC) +- l->l_text_end = l->l_addr + c->mapend; +- + if (l->l_phdr == 0 + && c->mapoff <= header->e_phoff + && ((size_t) (c->mapend - c->mapstart + c->mapoff) +@@ -102,7 +99,7 @@ _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header, + + /* This is a subroutine of _dl_map_object_from_fd. It is responsible + for filling in several fields in *L: l_map_start, l_map_end, l_addr, +- l_contiguous, l_text_end, l_phdr. On successful return, all the ++ l_contiguous, l_phdr. On successful return, all the + segments are mapped (or copied, or whatever) from the file into their + final places in the address space, with the correct page permissions, + and any bss-like regions already zeroed. 
It returns a null pointer +diff --git a/elf/rtld.c b/elf/rtld.c +index cd2cc4024a3581c2..c2edd0bfdc27f207 100644 +--- a/elf/rtld.c ++++ b/elf/rtld.c +@@ -472,7 +472,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info) + GL(dl_rtld_map).l_real = &GL(dl_rtld_map); + GL(dl_rtld_map).l_map_start = (ElfW(Addr)) _begin; + GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end; +- GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext; + /* Copy the TLS related data if necessary. */ + #ifndef DONT_USE_BOOTSTRAP_MAP + # if NO_TLS_OFFSET != 0 +@@ -1520,7 +1519,6 @@ dl_main (const ElfW(Phdr) *phdr, + } + + main_map->l_map_end = 0; +- main_map->l_text_end = 0; + /* Perhaps the executable has no PT_LOAD header entries at all. */ + main_map->l_map_start = ~0; + /* And it was opened directly. */ +@@ -1591,8 +1589,6 @@ dl_main (const ElfW(Phdr) *phdr, + allocend = main_map->l_addr + ph->p_vaddr + ph->p_memsz; + if (main_map->l_map_end < allocend) + main_map->l_map_end = allocend; +- if ((ph->p_flags & PF_X) && allocend > main_map->l_text_end) +- main_map->l_text_end = allocend; + } + break; + +@@ -1641,8 +1637,6 @@ ERROR: '%s': cannot process note segment.\n", _dl_argv[0]); + = (char *) main_map->l_tls_initimage + main_map->l_addr; + if (! main_map->l_map_end) + main_map->l_map_end = ~0; +- if (! main_map->l_text_end) +- main_map->l_text_end = ~0; + if (! GL(dl_rtld_map).l_libname && GL(dl_rtld_map).l_name) + { + /* We were invoked directly, so the program might not have a +diff --git a/elf/setup-vdso.h b/elf/setup-vdso.h +index d2b35a080b57c183..352e992c578e416f 100644 +--- a/elf/setup-vdso.h ++++ b/elf/setup-vdso.h +@@ -52,9 +52,6 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)), + l->l_addr = ph->p_vaddr; + if (ph->p_vaddr + ph->p_memsz >= l->l_map_end) + l->l_map_end = ph->p_vaddr + ph->p_memsz; +- if ((ph->p_flags & PF_X) +- && ph->p_vaddr + ph->p_memsz >= l->l_text_end) +- l->l_text_end = ph->p_vaddr + ph->p_memsz; + } + else + /* There must be no TLS segment. */ +@@ -63,7 +60,6 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)), + l->l_map_start = (ElfW(Addr)) GLRO(dl_sysinfo_dso); + l->l_addr = l->l_map_start - l->l_addr; + l->l_map_end += l->l_addr; +- l->l_text_end += l->l_addr; + l->l_ld = (void *) ((ElfW(Addr)) l->l_ld + l->l_addr); + elf_get_dynamic_info (l, dyn_temp); + _dl_setup_hash (l); +diff --git a/include/link.h b/include/link.h +index a37b2cf3133eefd5..88683e7a747b86e0 100644 +--- a/include/link.h ++++ b/include/link.h +@@ -251,8 +251,6 @@ struct link_map + /* Start and finish of memory map for this object. l_map_start + need not be the same as l_addr. */ + ElfW(Addr) l_map_start, l_map_end; +- /* End of the executable part of the mapping. */ +- ElfW(Addr) l_text_end; + + /* Default array for 'l_scope'. */ + struct r_scope_elem *l_scope_mem[4]; diff --git a/SOURCES/glibc-rh2233338-6.patch b/SOURCES/glibc-rh2233338-6.patch new file mode 100644 index 0000000..74aca35 --- /dev/null +++ b/SOURCES/glibc-rh2233338-6.patch @@ -0,0 +1,35 @@ +commit d3ba6c1333b10680ce5900a628108507d9d4b844 +Author: Florian Weimer +Date: Mon Sep 11 09:17:52 2023 +0200 + + elf: Move l_init_called_next to old place of l_text_end in link map + + This preserves all member offsets and the GLIBC_PRIVATE ABI + for backporting. + +diff --git a/include/link.h b/include/link.h +index 88683e7a747b86e0..a464dd8e86cf89d0 100644 +--- a/include/link.h ++++ b/include/link.h +@@ -252,6 +252,10 @@ struct link_map + need not be the same as l_addr. 
*/ + ElfW(Addr) l_map_start, l_map_end; + ++ /* Linked list of objects in reverse ELF constructor execution ++ order. Head of list is stored in _dl_init_called_list. */ ++ struct link_map *l_init_called_next; ++ + /* Default array for 'l_scope'. */ + struct r_scope_elem *l_scope_mem[4]; + /* Size of array allocated for 'l_scope'. */ +@@ -274,10 +278,6 @@ struct link_map + /* List of object in order of the init and fini calls. */ + struct link_map **l_initfini; + +- /* Linked list of objects in reverse ELF constructor execution +- order. Head of list is stored in _dl_init_called_list. */ +- struct link_map *l_init_called_next; +- + /* List of the dependencies introduced through symbol binding. */ + struct link_map_reldeps + { diff --git a/SPECS/glibc.spec b/SPECS/glibc.spec index 4a10148..a829875 100644 --- a/SPECS/glibc.spec +++ b/SPECS/glibc.spec @@ -132,7 +132,7 @@ end \ Summary: The GNU libc libraries Name: glibc Version: %{glibcversion} -Release: %{glibcrelease} +Release: %{glibcrelease}.1 # In general, GPLv2+ is used by programs, LGPLv2+ is used for # libraries. @@ -1047,6 +1047,12 @@ Patch854: glibc-rh2180462-1.patch Patch855: glibc-rh2180462-2.patch Patch856: glibc-rh2180462-3.patch Patch857: glibc-rh2180462-4.patch +Patch858: glibc-rh2233338-1.patch +Patch859: glibc-rh2233338-2.patch +Patch860: glibc-rh2233338-3.patch +Patch861: glibc-rh2233338-4.patch +Patch862: glibc-rh2233338-5.patch +Patch863: glibc-rh2233338-6.patch # Intel Optimizations Patch10001: glibc-sw24097-1.patch @@ -2992,6 +2998,9 @@ fi %files -f compat-libpthread-nonshared.filelist -n compat-libpthread-nonshared %changelog +* Mon Sep 11 2023 Florian Weimer - 2.28-238.1 +- Always call destructors in reverse constructor order (#2233338) + * Tue Aug 15 2023 Carlos O'Donell - 2.28-238 - Fix string and memory function tuning on small systems (#2180462)
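
Illustrative note on the backported mechanism (not part of any patch above, and not glibc code): the central idea of glibc-rh2233338-4.patch is that every object is pushed onto a global list (_dl_init_called_list, linked through l_init_called_next) right before its ELF constructors run, and destructors are later invoked by walking that list from the head, which is by construction the exact reverse of constructor order; no re-sorting of link maps at dlclose or process exit is needed. The standalone C sketch below mimics that bookkeeping with hypothetical names (struct module, call_init, init_called_list) that merely echo the glibc identifiers; it is a minimal model under those assumptions, not an excerpt from the patches.

/* Illustrative sketch only, not glibc code.  Each "module" is pushed onto
   a list when its constructor runs; destructors are invoked by walking
   that list, which is naturally the reverse of constructor order.  */
#include <stdio.h>

struct module
{
  const char *name;
  void (*fini) (struct module *);
  struct module *init_called_next;   /* echoes l_init_called_next */
};

static struct module *init_called_list;  /* echoes _dl_init_called_list */

static void
call_init (struct module *m)
{
  printf ("ctor: %s\n", m->name);
  /* Push onto the list; the head is always the most recently
     constructed object.  */
  m->init_called_next = init_called_list;
  init_called_list = m;
}

static void
generic_fini (struct module *m)
{
  printf ("dtor: %s\n", m->name);
}

int
main (void)
{
  struct module a = { "a", generic_fini, NULL };
  struct module b = { "b", generic_fini, NULL };
  struct module c = { "c", generic_fini, NULL };

  /* Constructors run in dependency order: a, then b, then c.  */
  call_init (&a);
  call_init (&b);
  call_init (&c);

  /* "Process exit": walk the list head-first, yielding c, b, a,
     strictly the reverse of constructor order, with no re-sorting.  */
  for (struct module *m = init_called_list; m != NULL; m = m->init_called_next)
    m->fini (m);
  return 0;
}

Running the sketch prints the constructors as a, b, c and the destructors as c, b, a. The real implementation in the patches additionally has to cope with multiple namespaces, audit modules, partial dlclose, and constructors that themselves call dlopen, which is what the extra l_map_used marking and the namespace bookkeeping in dl-close.c and dl-fini.c are for.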