|
|
76b6d9 |
commit 15a0c5730d1d5aeb95f50c9ec7470640084feae8
|
|
|
76b6d9 |
Author: Chung-Lin Tang <cltang@codesourcery.com>
|
|
|
76b6d9 |
Date: Thu Oct 21 21:41:22 2021 +0800
|
|
|
76b6d9 |
|
|
|
76b6d9 |
elf: Fix slow DSO sorting behavior in dynamic loader (BZ #17645)
|
|
|
76b6d9 |
|
|
|
76b6d9 |
This second patch contains the actual implementation of a new sorting algorithm
|
|
|
76b6d9 |
for shared objects in the dynamic loader, which solves the slow behavior that
|
|
|
76b6d9 |
the current "old" algorithm falls into when the DSO set contains circular
|
|
|
76b6d9 |
dependencies.
|
|
|
76b6d9 |
|
|
|
76b6d9 |
The new algorithm implemented here is simply depth-first search (DFS) to obtain
|
|
|
76b6d9 |
the Reverse-Post Order (RPO) sequence, a topological sort. A new l_visited:1
|
|
|
76b6d9 |
bitfield is added to struct link_map to more elegantly facilitate such a search.
|
|
|
76b6d9 |
|
|
|
76b6d9 |
The DFS algorithm is applied to the input maps[nmap-1] backwards towards
|
|
|
76b6d9 |
maps[0]. This has the effect of a more "shallow" recursion depth in general
|
|
|
76b6d9 |
since the input is in BFS. Also, when combined with the natural order of
|
|
|
76b6d9 |
processing l_initfini[] at each node, this creates a resulting output sorting
|
|
|
76b6d9 |
closer to the intuitive "left-to-right" order in most cases.
|
|
|
76b6d9 |
|
|
|
76b6d9 |
Another notable implementation adjustment related to this _dl_sort_maps change
|
|
|
76b6d9 |
is the removing of two char arrays 'used' and 'done' in _dl_close_worker to
|
|
|
76b6d9 |
represent two per-map attributes. This has been changed to simply use two new
|
|
|
76b6d9 |
bit-fields l_map_used:1, l_map_done:1 added to struct link_map. This also allows
|
|
|
76b6d9 |
discarding the clunky 'used' array sorting that _dl_sort_maps had to sometimes
|
|
|
76b6d9 |
do along the way.
|
|
|
76b6d9 |
|
|
|
76b6d9 |
Tunable support for switching between different sorting algorithms at runtime is
|
|
|
76b6d9 |
also added. A new tunable 'glibc.rtld.dynamic_sort' with current valid values 1
|
|
|
76b6d9 |
(old algorithm) and 2 (new DFS algorithm) has been added. At time of commit
|
|
|
76b6d9 |
of this patch, the default setting is 1 (old algorithm).
|
|
|
76b6d9 |
|
|
|
76b6d9 |
Signed-off-by: Chung-Lin Tang <cltang@codesourcery.com>
|
|
|
76b6d9 |
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
|
|
|
76b6d9 |
|
|
|
76b6d9 |
Conflicts:
|
|
|
76b6d9 |
elf/dl-tunables.list
|
|
|
76b6d9 |
(No mem.tagging tunable downstream.)
|
|
|
76b6d9 |
|
|
|
76b6d9 |
diff --git a/elf/dl-close.c b/elf/dl-close.c
|
|
|
76b6d9 |
index 74ca9a85dd309780..22225efb3226c3e1 100644
|
|
|
76b6d9 |
--- a/elf/dl-close.c
|
|
|
76b6d9 |
+++ b/elf/dl-close.c
|
|
|
76b6d9 |
@@ -167,8 +167,6 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
|
|
|
76b6d9 |
bool any_tls = false;
|
|
|
76b6d9 |
const unsigned int nloaded = ns->_ns_nloaded;
|
|
|
76b6d9 |
- char used[nloaded];
|
|
|
76b6d9 |
- char done[nloaded];
|
|
|
76b6d9 |
struct link_map *maps[nloaded];
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* Run over the list and assign indexes to the link maps and enter
|
|
|
76b6d9 |
@@ -176,24 +174,21 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
int idx = 0;
|
|
|
76b6d9 |
for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
+ l->l_map_used = 0;
|
|
|
76b6d9 |
+ l->l_map_done = 0;
|
|
|
76b6d9 |
l->l_idx = idx;
|
|
|
76b6d9 |
maps[idx] = l;
|
|
|
76b6d9 |
++idx;
|
|
|
76b6d9 |
-
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
assert (idx == nloaded);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
- /* Prepare the bitmaps. */
|
|
|
76b6d9 |
- memset (used, '\0', sizeof (used));
|
|
|
76b6d9 |
- memset (done, '\0', sizeof (done));
|
|
|
76b6d9 |
-
|
|
|
76b6d9 |
/* Keep track of the lowest index link map we have covered already. */
|
|
|
76b6d9 |
int done_index = -1;
|
|
|
76b6d9 |
while (++done_index < nloaded)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
struct link_map *l = maps[done_index];
|
|
|
76b6d9 |
|
|
|
76b6d9 |
- if (done[done_index])
|
|
|
76b6d9 |
+ if (l->l_map_done)
|
|
|
76b6d9 |
/* Already handled. */
|
|
|
76b6d9 |
continue;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
@@ -204,12 +199,12 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
/* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
|
|
|
76b6d9 |
acquire is sufficient and correct. */
|
|
|
76b6d9 |
&& atomic_load_acquire (&l->l_tls_dtor_count) == 0
|
|
|
76b6d9 |
- && !used[done_index])
|
|
|
76b6d9 |
+ && !l->l_map_used)
|
|
|
76b6d9 |
continue;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* We need this object and we handle it now. */
|
|
|
76b6d9 |
- done[done_index] = 1;
|
|
|
76b6d9 |
- used[done_index] = 1;
|
|
|
76b6d9 |
+ l->l_map_used = 1;
|
|
|
76b6d9 |
+ l->l_map_done = 1;
|
|
|
76b6d9 |
/* Signal the object is still needed. */
|
|
|
76b6d9 |
l->l_idx = IDX_STILL_USED;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
@@ -225,9 +220,9 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
- if (!used[(*lp)->l_idx])
|
|
|
76b6d9 |
+ if (!(*lp)->l_map_used)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
- used[(*lp)->l_idx] = 1;
|
|
|
76b6d9 |
+ (*lp)->l_map_used = 1;
|
|
|
76b6d9 |
/* If we marked a new object as used, and we've
|
|
|
76b6d9 |
already processed it, then we need to go back
|
|
|
76b6d9 |
and process again from that point forward to
|
|
|
76b6d9 |
@@ -250,9 +245,9 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
- if (!used[jmap->l_idx])
|
|
|
76b6d9 |
+ if (!jmap->l_map_used)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
- used[jmap->l_idx] = 1;
|
|
|
76b6d9 |
+ jmap->l_map_used = 1;
|
|
|
76b6d9 |
if (jmap->l_idx - 1 < done_index)
|
|
|
76b6d9 |
done_index = jmap->l_idx - 1;
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
@@ -262,8 +257,7 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* Sort the entries. We can skip looking for the binary itself which is
|
|
|
76b6d9 |
at the front of the search list for the main namespace. */
|
|
|
76b6d9 |
- _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
|
|
|
76b6d9 |
- used + (nsid == LM_ID_BASE), true);
|
|
|
76b6d9 |
+ _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* Call all termination functions at once. */
|
|
|
76b6d9 |
bool unload_any = false;
|
|
|
76b6d9 |
@@ -277,7 +271,7 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
/* All elements must be in the same namespace. */
|
|
|
76b6d9 |
assert (imap->l_ns == nsid);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
- if (!used[i])
|
|
|
76b6d9 |
+ if (!imap->l_map_used)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
@@ -315,7 +309,7 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
if (i < first_loaded)
|
|
|
76b6d9 |
first_loaded = i;
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
- /* Else used[i]. */
|
|
|
76b6d9 |
+ /* Else imap->l_map_used. */
|
|
|
76b6d9 |
else if (imap->l_type == lt_loaded)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
struct r_scope_elem *new_list = NULL;
|
|
|
76b6d9 |
@@ -524,7 +518,7 @@ _dl_close_worker (struct link_map *map, bool force)
|
|
|
76b6d9 |
for (unsigned int i = first_loaded; i < nloaded; ++i)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
struct link_map *imap = maps[i];
|
|
|
76b6d9 |
- if (!used[i])
|
|
|
76b6d9 |
+ if (!imap->l_map_used)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
assert (imap->l_type == lt_loaded);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
|
|
|
76b6d9 |
index 007069f670eced95..9365d54c8e03e5f4 100644
|
|
|
76b6d9 |
--- a/elf/dl-deps.c
|
|
|
76b6d9 |
+++ b/elf/dl-deps.c
|
|
|
76b6d9 |
@@ -612,10 +612,9 @@ Filters not supported with LD_TRACE_PRELINKING"));
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* If libc.so.6 is the main map, it participates in the sort, so
|
|
|
76b6d9 |
that the relocation order is correct regarding libc.so.6. */
|
|
|
76b6d9 |
- if (l_initfini[0] == GL (dl_ns)[l_initfini[0]->l_ns].libc_map)
|
|
|
76b6d9 |
- _dl_sort_maps (l_initfini, nlist, NULL, false);
|
|
|
76b6d9 |
- else
|
|
|
76b6d9 |
- _dl_sort_maps (&l_initfini[1], nlist - 1, NULL, false);
|
|
|
76b6d9 |
+ _dl_sort_maps (l_initfini, nlist,
|
|
|
76b6d9 |
+ (l_initfini[0] != GL (dl_ns)[l_initfini[0]->l_ns].libc_map),
|
|
|
76b6d9 |
+ false);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* Terminate the list of dependencies. */
|
|
|
76b6d9 |
l_initfini[nlist] = NULL;
|
|
|
76b6d9 |
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
|
|
|
76b6d9 |
index eea9d8aad736a99e..e14259a3c8806e0d 100644
|
|
|
76b6d9 |
--- a/elf/dl-fini.c
|
|
|
76b6d9 |
+++ b/elf/dl-fini.c
|
|
|
76b6d9 |
@@ -95,8 +95,7 @@ _dl_fini (void)
|
|
|
76b6d9 |
/* Now we have to do the sorting. We can skip looking for the
|
|
|
76b6d9 |
binary itself which is at the front of the search list for
|
|
|
76b6d9 |
the main namespace. */
|
|
|
76b6d9 |
- _dl_sort_maps (maps + (ns == LM_ID_BASE), nmaps - (ns == LM_ID_BASE),
|
|
|
76b6d9 |
- NULL, true);
|
|
|
76b6d9 |
+ _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* We do not rely on the linked list of loaded object anymore
|
|
|
76b6d9 |
from this point on. We have our own list here (maps). The
|
|
|
76b6d9 |
diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
|
|
|
76b6d9 |
index b2a01ede627be1e9..398a08f28c4d9ff1 100644
|
|
|
76b6d9 |
--- a/elf/dl-sort-maps.c
|
|
|
76b6d9 |
+++ b/elf/dl-sort-maps.c
|
|
|
76b6d9 |
@@ -16,16 +16,24 @@
|
|
|
76b6d9 |
License along with the GNU C Library; if not, see
|
|
|
76b6d9 |
<http://www.gnu.org/licenses/>. */
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+#include <assert.h>
|
|
|
76b6d9 |
#include <ldsodefs.h>
|
|
|
76b6d9 |
+#include <elf/dl-tunables.h>
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+/* Note: this is the older, "original" sorting algorithm, being used as
|
|
|
76b6d9 |
+ default up to 2.35.
|
|
|
76b6d9 |
|
|
|
76b6d9 |
-/* Sort array MAPS according to dependencies of the contained objects.
|
|
|
76b6d9 |
- Array USED, if non-NULL, is permutated along MAPS. If FOR_FINI this is
|
|
|
76b6d9 |
- called for finishing an object. */
|
|
|
76b6d9 |
-void
|
|
|
76b6d9 |
-_dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
|
|
|
76b6d9 |
- bool for_fini)
|
|
|
76b6d9 |
+ Sort array MAPS according to dependencies of the contained objects.
|
|
|
76b6d9 |
+ If FOR_FINI is true, this is called for finishing an object. */
|
|
|
76b6d9 |
+static void
|
|
|
76b6d9 |
+_dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
|
|
|
76b6d9 |
+ unsigned int skip, bool for_fini)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
+ /* Allows caller to do the common optimization of skipping the first map,
|
|
|
76b6d9 |
+ usually the main binary. */
|
|
|
76b6d9 |
+ maps += skip;
|
|
|
76b6d9 |
+ nmaps -= skip;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
/* A list of one element need not be sorted. */
|
|
|
76b6d9 |
if (nmaps <= 1)
|
|
|
76b6d9 |
return;
|
|
|
76b6d9 |
@@ -66,14 +74,6 @@ _dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
|
|
|
76b6d9 |
(k - i) * sizeof (maps[0]));
|
|
|
76b6d9 |
maps[k] = thisp;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
- if (used != NULL)
|
|
|
76b6d9 |
- {
|
|
|
76b6d9 |
- char here_used = used[i];
|
|
|
76b6d9 |
- memmove (&used[i], &used[i + 1],
|
|
|
76b6d9 |
- (k - i) * sizeof (used[0]));
|
|
|
76b6d9 |
- used[k] = here_used;
|
|
|
76b6d9 |
- }
|
|
|
76b6d9 |
-
|
|
|
76b6d9 |
if (seen[i + 1] > nmaps - i)
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
++i;
|
|
|
76b6d9 |
@@ -120,3 +120,183 @@ _dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
|
|
|
76b6d9 |
next:;
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+#if !HAVE_TUNABLES
|
|
|
76b6d9 |
+/* In this case, just default to the original algorithm. */
|
|
|
76b6d9 |
+strong_alias (_dl_sort_maps_original, _dl_sort_maps);
|
|
|
76b6d9 |
+#else
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+/* We use a recursive function due to its better clarity and ease of
|
|
|
76b6d9 |
+ implementation, as well as faster execution speed. We already use
|
|
|
76b6d9 |
+ alloca() for list allocation during the breadth-first search of
|
|
|
76b6d9 |
+ dependencies in _dl_map_object_deps(), and this should be on the
|
|
|
76b6d9 |
+ same order of worst-case stack usage.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ Note: the '*rpo' parameter is supposed to point to one past the
|
|
|
76b6d9 |
+ last element of the array where we save the sort results, and is
|
|
|
76b6d9 |
+ decremented before storing the current map at each level. */
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+static void
|
|
|
76b6d9 |
+dfs_traversal (struct link_map ***rpo, struct link_map *map,
|
|
|
76b6d9 |
+ bool *do_reldeps)
|
|
|
76b6d9 |
+{
|
|
|
76b6d9 |
+ if (map->l_visited)
|
|
|
76b6d9 |
+ return;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ map->l_visited = 1;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ if (map->l_initfini)
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ for (int i = 0; map->l_initfini[i] != NULL; i++)
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ struct link_map *dep = map->l_initfini[i];
|
|
|
76b6d9 |
+ if (dep->l_visited == 0
|
|
|
76b6d9 |
+ && dep->l_main_map == 0)
|
|
|
76b6d9 |
+ dfs_traversal (rpo, dep, do_reldeps);
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ /* Indicate that we encountered relocation dependencies during
|
|
|
76b6d9 |
+ traversal. */
|
|
|
76b6d9 |
+ *do_reldeps = true;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ for (int m = map->l_reldeps->act - 1; m >= 0; m--)
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ struct link_map *dep = map->l_reldeps->list[m];
|
|
|
76b6d9 |
+ if (dep->l_visited == 0
|
|
|
76b6d9 |
+ && dep->l_main_map == 0)
|
|
|
76b6d9 |
+ dfs_traversal (rpo, dep, do_reldeps);
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ *rpo -= 1;
|
|
|
76b6d9 |
+ **rpo = map;
|
|
|
76b6d9 |
+}
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+/* Topologically sort array MAPS according to dependencies of the contained
|
|
|
76b6d9 |
+ objects. */
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+static void
|
|
|
76b6d9 |
+_dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
|
|
|
76b6d9 |
+ unsigned int skip __attribute__ ((unused)), bool for_fini)
|
|
|
76b6d9 |
+{
|
|
|
76b6d9 |
+ for (int i = nmaps - 1; i >= 0; i--)
|
|
|
76b6d9 |
+ maps[i]->l_visited = 0;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ /* We apply DFS traversal for each of maps[i] until the whole total order
|
|
|
76b6d9 |
+ is found and we're at the start of the Reverse-Postorder (RPO) sequence,
|
|
|
76b6d9 |
+ which is a topological sort.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ We go from maps[nmaps - 1] backwards towards maps[0] at this level.
|
|
|
76b6d9 |
+ Due to the breadth-first search (BFS) ordering we receive, going
|
|
|
76b6d9 |
+ backwards usually gives a more shallow depth-first recursion depth,
|
|
|
76b6d9 |
+ adding more stack usage safety. Also, combined with the natural
|
|
|
76b6d9 |
+ processing order of l_initfini[] at each node during DFS, this maintains
|
|
|
76b6d9 |
+ an ordering closer to the original link ordering in the sorting results
|
|
|
76b6d9 |
+ under most simpler cases.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ Another reason we order the top level backwards, is that maps[0]
|
|
|
76b6d9 |
+ usually exactly the main object of which we're in the midst of
|
|
|
76b6d9 |
+ _dl_map_object_deps() processing, and maps[0]->l_initfini[] is still
|
|
|
76b6d9 |
+ blank. If we start the traversal from maps[0], since having no
|
|
|
76b6d9 |
+ dependencies yet filled in, maps[0] will always be immediately
|
|
|
76b6d9 |
+ incorrectly placed at the last place in the order (first in reverse).
|
|
|
76b6d9 |
+ Adjusting the order so that maps[0] is last traversed naturally avoids
|
|
|
76b6d9 |
+ this problem.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ Further, the old "optimization" of skipping the main object at maps[0]
|
|
|
76b6d9 |
+ from the call-site (i.e. _dl_sort_maps(maps+1,nmaps-1)) is in general
|
|
|
76b6d9 |
+ no longer valid, since traversing along object dependency-links
|
|
|
76b6d9 |
+ may "find" the main object even when it is not included in the initial
|
|
|
76b6d9 |
+ order (e.g. a dlopen()'ed shared object can have circular dependencies
|
|
|
76b6d9 |
+ linked back to itself). In such a case, traversing N-1 objects will
|
|
|
76b6d9 |
+ create an N-object result, and raise problems.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ To summarize, just passing in the full list, and iterating from back
|
|
|
76b6d9 |
+ to front makes things much more straightforward. */
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ /* Array to hold RPO sorting results, before we copy back to maps[]. */
|
|
|
76b6d9 |
+ struct link_map *rpo[nmaps];
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ /* The 'head' position during each DFS iteration. Note that we start at
|
|
|
76b6d9 |
+ one past the last element due to first-decrement-then-store (see the
|
|
|
76b6d9 |
+ bottom of above dfs_traversal() routine). */
|
|
|
76b6d9 |
+ struct link_map **rpo_head = &rpo[nmaps];
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ bool do_reldeps = false;
|
|
|
76b6d9 |
+ bool *do_reldeps_ref = (for_fini ? &do_reldeps : NULL);
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ for (int i = nmaps - 1; i >= 0; i--)
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ dfs_traversal (&rpo_head, maps[i], do_reldeps_ref);
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ /* We can break early if all objects are already placed. */
|
|
|
76b6d9 |
+ if (rpo_head == rpo)
|
|
|
76b6d9 |
+ goto end;
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+ assert (rpo_head == rpo);
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ end:
|
|
|
76b6d9 |
+ /* Here we may do a second pass of sorting, using only l_initfini[]
|
|
|
76b6d9 |
+ static dependency links. This is avoided if !FOR_FINI or if we didn't
|
|
|
76b6d9 |
+ find any reldeps in the first DFS traversal.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ The reason we do this is: while it is unspecified how circular
|
|
|
76b6d9 |
+ dependencies should be handled, the presumed reasonable behavior is to
|
|
|
76b6d9 |
+ have destructors to respect static dependency links as much as possible,
|
|
|
76b6d9 |
+ overriding reldeps if needed. And the first sorting pass, which takes
|
|
|
76b6d9 |
+ l_initfini/l_reldeps links equally, may not preserve this priority.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ Hence we do a 2nd sorting pass, taking only DT_NEEDED links into account
|
|
|
76b6d9 |
+ (see how the do_reldeps argument to dfs_traversal() is NULL below). */
|
|
|
76b6d9 |
+ if (do_reldeps)
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ for (int i = nmaps - 1; i >= 0; i--)
|
|
|
76b6d9 |
+ rpo[i]->l_visited = 0;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ struct link_map **maps_head = &maps[nmaps];
|
|
|
76b6d9 |
+ for (int i = nmaps - 1; i >= 0; i--)
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ dfs_traversal (&maps_head, rpo[i], NULL);
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ /* We can break early if all objects are already placed.
|
|
|
76b6d9 |
+ The below memcpy is not needed in the do_reldeps case here,
|
|
|
76b6d9 |
+ since we wrote back to maps[] during DFS traversal. */
|
|
|
76b6d9 |
+ if (maps_head == maps)
|
|
|
76b6d9 |
+ return;
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+ assert (maps_head == maps);
|
|
|
76b6d9 |
+ return;
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ memcpy (maps, rpo, sizeof (struct link_map *) * nmaps);
|
|
|
76b6d9 |
+}
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+void
|
|
|
76b6d9 |
+_dl_sort_maps_init (void)
|
|
|
76b6d9 |
+{
|
|
|
76b6d9 |
+ int32_t algorithm = TUNABLE_GET (glibc, rtld, dynamic_sort, int32_t, NULL);
|
|
|
76b6d9 |
+ GLRO(dl_dso_sort_algo) = algorithm == 1 ? dso_sort_algorithm_original
|
|
|
76b6d9 |
+ : dso_sort_algorithm_dfs;
|
|
|
76b6d9 |
+}
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+void
|
|
|
76b6d9 |
+_dl_sort_maps (struct link_map **maps, unsigned int nmaps,
|
|
|
76b6d9 |
+ unsigned int skip, bool for_fini)
|
|
|
76b6d9 |
+{
|
|
|
76b6d9 |
+ /* It can be tempting to use a static function pointer to store and call
|
|
|
76b6d9 |
+ the current selected sorting algorithm routine, but experimentation
|
|
|
76b6d9 |
+ shows that current processors still do not handle indirect branches
|
|
|
76b6d9 |
+ that efficiently, plus a static function pointer will involve
|
|
|
76b6d9 |
+ PTR_MANGLE/DEMANGLE, further impairing performance of small, common
|
|
|
76b6d9 |
+ input cases. A simple if-case with direct function calls appears to
|
|
|
76b6d9 |
+ be the fastest. */
|
|
|
76b6d9 |
+ if (__glibc_likely (GLRO(dl_dso_sort_algo) == dso_sort_algorithm_original))
|
|
|
76b6d9 |
+ _dl_sort_maps_original (maps, nmaps, skip, for_fini);
|
|
|
76b6d9 |
+ else
|
|
|
76b6d9 |
+ _dl_sort_maps_dfs (maps, nmaps, skip, for_fini);
|
|
|
76b6d9 |
+}
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+#endif /* HAVE_TUNABLES. */
|
|
|
76b6d9 |
diff --git a/elf/dl-support.c b/elf/dl-support.c
|
|
|
76b6d9 |
index e9943e889ef447ad..ae03aec9764e29d3 100644
|
|
|
76b6d9 |
--- a/elf/dl-support.c
|
|
|
76b6d9 |
+++ b/elf/dl-support.c
|
|
|
76b6d9 |
@@ -155,6 +155,8 @@ size_t _dl_phnum;
|
|
|
76b6d9 |
uint64_t _dl_hwcap __attribute__ ((nocommon));
|
|
|
76b6d9 |
uint64_t _dl_hwcap2 __attribute__ ((nocommon));
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+enum dso_sort_algorithm _dl_dso_sort_algo;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
/* The value of the FPU control word the kernel will preset in hardware. */
|
|
|
76b6d9 |
fpu_control_t _dl_fpu_control = _FPU_DEFAULT;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
diff --git a/elf/dl-sysdep.c b/elf/dl-sysdep.c
|
|
|
76b6d9 |
index 998c5d52bcab8193..4e8a986541fc4c09 100644
|
|
|
76b6d9 |
--- a/elf/dl-sysdep.c
|
|
|
76b6d9 |
+++ b/elf/dl-sysdep.c
|
|
|
76b6d9 |
@@ -223,6 +223,9 @@ _dl_sysdep_start (void **start_argptr,
|
|
|
76b6d9 |
|
|
|
76b6d9 |
__tunables_init (_environ);
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+ /* Initialize DSO sorting algorithm after tunables. */
|
|
|
76b6d9 |
+ _dl_sort_maps_init ();
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
#ifdef DL_SYSDEP_INIT
|
|
|
76b6d9 |
DL_SYSDEP_INIT;
|
|
|
76b6d9 |
#endif
|
|
|
76b6d9 |
diff --git a/elf/dl-tunables.list b/elf/dl-tunables.list
|
|
|
76b6d9 |
index 6408a8e5ae92d2c6..54ef2a921310b229 100644
|
|
|
76b6d9 |
--- a/elf/dl-tunables.list
|
|
|
76b6d9 |
+++ b/elf/dl-tunables.list
|
|
|
76b6d9 |
@@ -140,4 +140,13 @@ glibc {
|
|
|
76b6d9 |
default: 512
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+ rtld {
|
|
|
76b6d9 |
+ dynamic_sort {
|
|
|
76b6d9 |
+ type: INT_32
|
|
|
76b6d9 |
+ minval: 1
|
|
|
76b6d9 |
+ maxval: 2
|
|
|
76b6d9 |
+ default: 1
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
+ }
|
|
|
76b6d9 |
}
|
|
|
76b6d9 |
diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
|
|
|
76b6d9 |
index 873ddf55d91155c6..5f7f18ef270bc12d 100644
|
|
|
76b6d9 |
--- a/elf/dso-sort-tests-1.def
|
|
|
76b6d9 |
+++ b/elf/dso-sort-tests-1.def
|
|
|
76b6d9 |
@@ -62,5 +62,5 @@ output: b>a>{}
|
|
|
76b6d9 |
# The below expected outputs are what the two algorithms currently produce
|
|
|
76b6d9 |
# respectively, for regression testing purposes.
|
|
|
76b6d9 |
tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
|
|
|
76b6d9 |
-xfail_output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[
|
|
|
76b6d9 |
+output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[
|
|
|
76b6d9 |
output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[
|
|
|
76b6d9 |
diff --git a/elf/rtld.c b/elf/rtld.c
|
|
|
76b6d9 |
index b47e84ca2fb6f03c..cd2cc4024a3581c2 100644
|
|
|
76b6d9 |
--- a/elf/rtld.c
|
|
|
76b6d9 |
+++ b/elf/rtld.c
|
|
|
76b6d9 |
@@ -1453,6 +1453,9 @@ dl_main (const ElfW(Phdr) *phdr,
|
|
|
76b6d9 |
main_map->l_name = (char *) "";
|
|
|
76b6d9 |
*user_entry = main_map->l_entry;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+ /* Set bit indicating this is the main program map. */
|
|
|
76b6d9 |
+ main_map->l_main_map = 1;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
#ifdef HAVE_AUX_VECTOR
|
|
|
76b6d9 |
/* Adjust the on-stack auxiliary vector so that it looks like the
|
|
|
76b6d9 |
binary was executed directly. */
|
|
|
76b6d9 |
diff --git a/elf/tst-rtld-list-tunables.exp b/elf/tst-rtld-list-tunables.exp
|
|
|
76b6d9 |
index 4f3f7ee4e30a2b42..118afc271057afd4 100644
|
|
|
76b6d9 |
--- a/elf/tst-rtld-list-tunables.exp
|
|
|
76b6d9 |
+++ b/elf/tst-rtld-list-tunables.exp
|
|
|
76b6d9 |
@@ -10,5 +10,6 @@ glibc.malloc.tcache_max: 0x0 (min: 0x0, max: 0x[f]+)
|
|
|
76b6d9 |
glibc.malloc.tcache_unsorted_limit: 0x0 (min: 0x0, max: 0x[f]+)
|
|
|
76b6d9 |
glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0x[f]+)
|
|
|
76b6d9 |
glibc.malloc.trim_threshold: 0x0 (min: 0x0, max: 0x[f]+)
|
|
|
76b6d9 |
+glibc.rtld.dynamic_sort: 1 (min: 1, max: 2)
|
|
|
76b6d9 |
glibc.rtld.nns: 0x4 (min: 0x1, max: 0x10)
|
|
|
76b6d9 |
glibc.rtld.optional_static_tls: 0x200 (min: 0x0, max: 0x[f]+)
|
|
|
76b6d9 |
diff --git a/include/link.h b/include/link.h
|
|
|
76b6d9 |
index dd491989beb41353..041ff5f753a9ee11 100644
|
|
|
76b6d9 |
--- a/include/link.h
|
|
|
76b6d9 |
+++ b/include/link.h
|
|
|
76b6d9 |
@@ -181,6 +181,11 @@ struct link_map
|
|
|
76b6d9 |
unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
|
|
|
76b6d9 |
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
|
|
|
76b6d9 |
unsigned int l_reserved:2; /* Reserved for internal use. */
|
|
|
76b6d9 |
+ unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
|
|
|
76b6d9 |
+ unsigned int l_visited:1; /* Used internally for map dependency
|
|
|
76b6d9 |
+ graph traversal. */
|
|
|
76b6d9 |
+ unsigned int l_map_used:1; /* These two bits are used during traversal */
|
|
|
76b6d9 |
+ unsigned int l_map_done:1; /* of maps in _dl_close_worker. */
|
|
|
76b6d9 |
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
|
|
|
76b6d9 |
to by `l_phdr' is allocated. */
|
|
|
76b6d9 |
unsigned int l_soname_added:1; /* Nonzero if the SONAME is for sure in
|
|
|
76b6d9 |
diff --git a/manual/tunables.texi b/manual/tunables.texi
|
|
|
76b6d9 |
index 43272cf885d1e3e6..c3f96cdc85208926 100644
|
|
|
76b6d9 |
--- a/manual/tunables.texi
|
|
|
76b6d9 |
+++ b/manual/tunables.texi
|
|
|
76b6d9 |
@@ -303,6 +303,17 @@ changed once allocated at process startup. The default allocation of
|
|
|
76b6d9 |
optional static TLS is 512 bytes and is allocated in every thread.
|
|
|
76b6d9 |
@end deftp
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+@deftp Tunable glibc.rtld.dynamic_sort
|
|
|
76b6d9 |
+Sets the algorithm to use for DSO sorting, valid values are @samp{1} and
|
|
|
76b6d9 |
+@samp{2}. For value of @samp{1}, an older O(n^3) algorithm is used, which is
|
|
|
76b6d9 |
+well tested over a long time, but may have performance issues when dependencies between
|
|
|
76b6d9 |
+shared objects contain cycles due to circular dependencies. When set to the
|
|
|
76b6d9 |
+value of @samp{2}, a different algorithm is used, which implements a
|
|
|
76b6d9 |
+topological sort through depth-first search, and does not exhibit the
|
|
|
76b6d9 |
+performance issues of @samp{1}.
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
+The default value of this tunable is @samp{1}.
|
|
|
76b6d9 |
+@end deftp
|
|
|
76b6d9 |
|
|
|
76b6d9 |
@node Elision Tunables
|
|
|
76b6d9 |
@section Elision Tunables
|
|
|
76b6d9 |
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
|
|
|
76b6d9 |
index 5e56550a4d556fa7..9f09a4a280396659 100644
|
|
|
76b6d9 |
--- a/sysdeps/generic/ldsodefs.h
|
|
|
76b6d9 |
+++ b/sysdeps/generic/ldsodefs.h
|
|
|
76b6d9 |
@@ -240,6 +240,13 @@ enum allowmask
|
|
|
76b6d9 |
};
|
|
|
76b6d9 |
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+/* DSO sort algorithm to use (check dl-sort-maps.c). */
|
|
|
76b6d9 |
+enum dso_sort_algorithm
|
|
|
76b6d9 |
+ {
|
|
|
76b6d9 |
+ dso_sort_algorithm_original,
|
|
|
76b6d9 |
+ dso_sort_algorithm_dfs
|
|
|
76b6d9 |
+ };
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
struct audit_ifaces
|
|
|
76b6d9 |
{
|
|
|
76b6d9 |
void (*activity) (uintptr_t *, unsigned int);
|
|
|
76b6d9 |
@@ -633,6 +640,8 @@ struct rtld_global_ro
|
|
|
76b6d9 |
platforms. */
|
|
|
76b6d9 |
EXTERN uint64_t _dl_hwcap2;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+ EXTERN enum dso_sort_algorithm _dl_dso_sort_algo;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
#ifdef SHARED
|
|
|
76b6d9 |
/* We add a function table to _rtld_global which is then used to
|
|
|
76b6d9 |
call the function instead of going through the PLT. The result
|
|
|
76b6d9 |
@@ -1049,7 +1058,7 @@ extern void _dl_fini (void) attribute_hidden;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* Sort array MAPS according to dependencies of the contained objects. */
|
|
|
76b6d9 |
extern void _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
|
|
|
76b6d9 |
- char *used, bool for_fini) attribute_hidden;
|
|
|
76b6d9 |
+ unsigned int skip, bool for_fini) attribute_hidden;
|
|
|
76b6d9 |
|
|
|
76b6d9 |
/* The dynamic linker calls this function before and having changing
|
|
|
76b6d9 |
any shared object mappings. The `r_state' member of `struct r_debug'
|
|
|
76b6d9 |
@@ -1167,6 +1176,9 @@ extern struct link_map * _dl_get_dl_main_map (void)
|
|
|
76b6d9 |
# endif
|
|
|
76b6d9 |
#endif
|
|
|
76b6d9 |
|
|
|
76b6d9 |
+/* Initialize the DSO sort algorithm to use. */
|
|
|
76b6d9 |
+extern void _dl_sort_maps_init (void) attribute_hidden;
|
|
|
76b6d9 |
+
|
|
|
76b6d9 |
/* Initialization of libpthread for statically linked applications.
|
|
|
76b6d9 |
If libpthread is not linked in, this is an empty function. */
|
|
|
76b6d9 |
void __pthread_initialize_minimal (void) weak_function;
|