|
|
548bcb |
commit ddcacd91cc10ff92d6201eda87047d029c14158d
|
|
|
548bcb |
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
|
|
|
548bcb |
Date: Thu Feb 11 11:40:11 2021 +0000
|
|
|
548bcb |
|
|
|
548bcb |
i386: Avoid lazy relocation of tlsdesc [BZ #27137]
|
|
|
548bcb |
|
|
|
548bcb |
Lazy tlsdesc relocation is racy because the static tls optimization and
|
|
|
548bcb |
tlsdesc management operations are done without holding the dlopen lock.
|
|
|
548bcb |
|
|
|
548bcb |
This is similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
|
|
|
548bcb |
for aarch64, but it fixes a different race: bug 27137.
|
|
|
548bcb |
|
|
|
548bcb |
On i386 the code is a bit more complicated than on x86_64 because both
|
|
|
548bcb |
rel and rela relocs are supported.
|
|
|
548bcb |
|
|
|
548bcb |
diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
|
|
|
548bcb |
index e5776ef7bc8ad749..3a30671591284d79 100644
|
|
|
548bcb |
--- a/sysdeps/i386/dl-machine.h
|
|
|
548bcb |
+++ b/sysdeps/i386/dl-machine.h
|
|
|
548bcb |
@@ -679,50 +679,32 @@ elf_machine_lazy_rel (struct link_map *map,
|
|
|
548bcb |
}
|
|
|
548bcb |
else if (__glibc_likely (r_type == R_386_TLS_DESC))
|
|
|
548bcb |
{
|
|
|
548bcb |
- struct tlsdesc volatile * __attribute__((__unused__)) td =
|
|
|
548bcb |
- (struct tlsdesc volatile *)reloc_addr;
|
|
|
548bcb |
-
|
|
|
548bcb |
- /* Handle relocations that reference the local *ABS* in a simple
|
|
|
548bcb |
- way, so as to preserve a potential addend. */
|
|
|
548bcb |
- if (ELF32_R_SYM (reloc->r_info) == 0)
|
|
|
548bcb |
- td->entry = _dl_tlsdesc_resolve_abs_plus_addend;
|
|
|
548bcb |
- /* Given a known-zero addend, we can store a pointer to the
|
|
|
548bcb |
- reloc in the arg position. */
|
|
|
548bcb |
- else if (td->arg == 0)
|
|
|
548bcb |
- {
|
|
|
548bcb |
- td->arg = (void*)reloc;
|
|
|
548bcb |
- td->entry = _dl_tlsdesc_resolve_rel;
|
|
|
548bcb |
- }
|
|
|
548bcb |
- else
|
|
|
548bcb |
- {
|
|
|
548bcb |
- /* We could handle non-*ABS* relocations with non-zero addends
|
|
|
548bcb |
- by allocating dynamically an arg to hold a pointer to the
|
|
|
548bcb |
- reloc, but that sounds pointless. */
|
|
|
548bcb |
- const Elf32_Rel *const r = reloc;
|
|
|
548bcb |
- /* The code below was borrowed from elf_dynamic_do_rel(). */
|
|
|
548bcb |
- const ElfW(Sym) *const symtab =
|
|
|
548bcb |
- (const void *) D_PTR (map, l_info[DT_SYMTAB]);
|
|
|
548bcb |
+ const Elf32_Rel *const r = reloc;
|
|
|
548bcb |
+ /* The code below was borrowed from elf_dynamic_do_rel(). */
|
|
|
548bcb |
+ const ElfW(Sym) *const symtab =
|
|
|
548bcb |
+ (const void *) D_PTR (map, l_info[DT_SYMTAB]);
|
|
|
548bcb |
|
|
|
548bcb |
+ /* Always initialize TLS descriptors completely at load time, in
|
|
|
548bcb |
+ case static TLS is allocated for it that requires locking. */
|
|
|
548bcb |
# ifdef RTLD_BOOTSTRAP
|
|
|
548bcb |
- /* The dynamic linker always uses versioning. */
|
|
|
548bcb |
- assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
|
|
|
548bcb |
+ /* The dynamic linker always uses versioning. */
|
|
|
548bcb |
+ assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
|
|
|
548bcb |
# else
|
|
|
548bcb |
- if (map->l_info[VERSYMIDX (DT_VERSYM)])
|
|
|
548bcb |
+ if (map->l_info[VERSYMIDX (DT_VERSYM)])
|
|
|
548bcb |
# endif
|
|
|
548bcb |
- {
|
|
|
548bcb |
- const ElfW(Half) *const version =
|
|
|
548bcb |
- (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
|
|
|
548bcb |
- ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
|
|
|
548bcb |
- elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
|
|
|
548bcb |
- &map->l_versions[ndx],
|
|
|
548bcb |
- (void *) (l_addr + r->r_offset), skip_ifunc);
|
|
|
548bcb |
- }
|
|
|
548bcb |
+ {
|
|
|
548bcb |
+ const ElfW(Half) *const version =
|
|
|
548bcb |
+ (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
|
|
|
548bcb |
+ ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
|
|
|
548bcb |
+ elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
|
|
|
548bcb |
+ &map->l_versions[ndx],
|
|
|
548bcb |
+ (void *) (l_addr + r->r_offset), skip_ifunc);
|
|
|
548bcb |
+ }
|
|
|
548bcb |
# ifndef RTLD_BOOTSTRAP
|
|
|
548bcb |
- else
|
|
|
548bcb |
- elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
|
|
|
548bcb |
- (void *) (l_addr + r->r_offset), skip_ifunc);
|
|
|
548bcb |
+ else
|
|
|
548bcb |
+ elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
|
|
|
548bcb |
+ (void *) (l_addr + r->r_offset), skip_ifunc);
|
|
|
548bcb |
# endif
|
|
|
548bcb |
- }
|
|
|
548bcb |
}
|
|
|
548bcb |
else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
|
|
|
548bcb |
{
|
|
|
548bcb |
@@ -749,11 +731,21 @@ elf_machine_lazy_rela (struct link_map *map,
|
|
|
548bcb |
;
|
|
|
548bcb |
else if (__glibc_likely (r_type == R_386_TLS_DESC))
|
|
|
548bcb |
{
|
|
|
548bcb |
- struct tlsdesc volatile * __attribute__((__unused__)) td =
|
|
|
548bcb |
- (struct tlsdesc volatile *)reloc_addr;
|
|
|
548bcb |
+ const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
|
|
|
548bcb |
+ const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
|
|
|
548bcb |
+ const ElfW (Sym) *sym = &symtab[symndx];
|
|
|
548bcb |
+ const struct r_found_version *version = NULL;
|
|
|
548bcb |
+
|
|
|
548bcb |
+ if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
|
|
|
548bcb |
+ {
|
|
|
548bcb |
+ const ElfW (Half) *vernum =
|
|
|
548bcb |
+ (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
|
|
|
548bcb |
+ version = &map->l_versions[vernum[symndx] & 0x7fff];
|
|
|
548bcb |
+ }
|
|
|
548bcb |
|
|
|
548bcb |
- td->arg = (void*)reloc;
|
|
|
548bcb |
- td->entry = _dl_tlsdesc_resolve_rela;
|
|
|
548bcb |
+ /* Always initialize TLS descriptors completely at load time, in
|
|
|
548bcb |
+ case static TLS is allocated for it that requires locking. */
|
|
|
548bcb |
+ elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
|
|
|
548bcb |
}
|
|
|
548bcb |
else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
|
|
|
548bcb |
{
|