commit 2208066603a136f95cfb815ca9281262e6465784
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Thu Feb 11 13:24:47 2021 +0000

    elf: Remove lazy tlsdesc relocation related code

    Remove generic tlsdesc code related to lazy tlsdesc processing since
    lazy tlsdesc relocation is no longer supported.  This includes removing
    GL(dl_load_lock) from _dl_make_tlsdesc_dynamic which is only called at
    load time when that lock is already held.

    Added a documentation comment too.

    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>

diff --git a/elf/tlsdeschtab.h b/elf/tlsdeschtab.h
index fea9eefe72edcd6b..c20857e5b4264f00 100644
--- a/elf/tlsdeschtab.h
+++ b/elf/tlsdeschtab.h
@@ -78,6 +78,10 @@ map_generation (struct link_map *map)
   return GL(dl_tls_generation) + 1;
 }
 
+/* Returns the data pointer for a given map and tls offset that is used
+   to fill in one of the GOT entries referenced by a TLSDESC relocation
+   when using dynamic TLS.  This requires allocation, returns NULL on
+   allocation failure.  */
 void *
 _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
 {
@@ -85,18 +89,12 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
   void **entry;
   struct tlsdesc_dynamic_arg *td, test;
 
-  /* FIXME: We could use a per-map lock here, but is it worth it?  */
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-
   ht = map->l_mach.tlsdesc_table;
   if (! ht)
     {
       ht = htab_create ();
       if (! ht)
-	{
-	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-	  return 0;
-	}
+	return 0;
       map->l_mach.tlsdesc_table = ht;
     }
 
@@ -104,15 +102,11 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
   test.tlsinfo.ti_offset = ti_offset;
   entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
   if (! entry)
-    {
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-      return 0;
-    }
+    return 0;
 
   if (*entry)
     {
       td = *entry;
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
       return td;
     }
 
@@ -122,44 +116,9 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
      thread.  */
   td->gen_count = map_generation (map);
   td->tlsinfo = test.tlsinfo;
-
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
   return td;
 }
 
 # endif /* SHARED */
 
-/* The idea of the following two functions is to stop multiple threads
-   from attempting to resolve the same TLS descriptor without busy
-   waiting.  Ideally, we should be able to release the lock right
-   after changing td->entry, and then using say a condition variable
-   or a futex wake to wake up any waiting threads, but let's try to
-   avoid introducing such dependencies.  */
-
-static int
-__attribute__ ((unused))
-_dl_tlsdesc_resolve_early_return_p (struct tlsdesc volatile *td, void *caller)
-{
-  if (caller != atomic_load_relaxed (&td->entry))
-    return 1;
-
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-  if (caller != atomic_load_relaxed (&td->entry))
-    {
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-      return 1;
-    }
-
-  atomic_store_relaxed (&td->entry, _dl_tlsdesc_resolve_hold);
-
-  return 0;
-}
-
-static void
-__attribute__ ((unused))
-_dl_tlsdesc_wake_up_held_fixups (void)
-{
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-}
-
 #endif
diff --git a/sysdeps/aarch64/tlsdesc.c b/sysdeps/aarch64/tlsdesc.c
index 357465f23d76e2bd..1ead73ab8250e29c 100644
--- a/sysdeps/aarch64/tlsdesc.c
+++ b/sysdeps/aarch64/tlsdesc.c
@@ -22,7 +22,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
diff --git a/sysdeps/arm/tlsdesc.c b/sysdeps/arm/tlsdesc.c
index d142d7a2c91e9adb..b78e3f65785bf587 100644
--- a/sysdeps/arm/tlsdesc.c
+++ b/sysdeps/arm/tlsdesc.c
@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
diff --git a/sysdeps/i386/tlsdesc.c b/sysdeps/i386/tlsdesc.c
index 1b4227c8381e1b3d..c242ffce726d50e4 100644
--- a/sysdeps/i386/tlsdesc.c
+++ b/sysdeps/i386/tlsdesc.c
@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
diff --git a/sysdeps/x86_64/tlsdesc.c b/sysdeps/x86_64/tlsdesc.c
index 61a19ae26944c84f..a9325827d0e5e31b 100644
--- a/sysdeps/x86_64/tlsdesc.c
+++ b/sysdeps/x86_64/tlsdesc.c
@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table