diff --git a/SOURCES/glibc-rh2109510-1.patch b/SOURCES/glibc-rh2109510-1.patch
new file mode 100644
index 0000000..52b069e
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-1.patch
@@ -0,0 +1,27 @@
+commit 97f8225d22ef727ae9935cc231643efdc430d530
+Author: Zack Weinberg <zackw@panix.com>
+Date:   Thu Mar 14 09:44:22 2019 -0400
+
+    scripts/check-obsolete-constructs.py: Process all headers as UTF-8.
+    
+    A few of our installed headers contain UTF-8 in comments.
+    check-obsolete-constructs opened files without explicitly specifying
+    their encoding, so it would barf on these headers if “make check” was
+    run in a non-UTF-8 locale.
+    
+            * scripts/check-obsolete-constructs.py (HeaderChecker.check):
+            Specify encoding="utf-8" when opening headers to check.
+
+diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
+index ce5c72251f4d7cc0..89d21dea6e788783 100755
+--- a/scripts/check-obsolete-constructs.py
++++ b/scripts/check-obsolete-constructs.py
+@@ -437,7 +437,7 @@ class HeaderChecker:
+     def check(self, fname):
+         self.fname = fname
+         try:
+-            with open(fname, "rt") as fp:
++            with open(fname, "rt", encoding="utf-8") as fp:
+                 contents = fp.read()
+         except OSError as e:
+             sys.stderr.write("{}: {}\n".format(fname, e.strerror))
diff --git a/SOURCES/glibc-rh2109510-10.patch b/SOURCES/glibc-rh2109510-10.patch
new file mode 100644
index 0000000..31291df
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-10.patch
@@ -0,0 +1,1449 @@
+commit 30035d67728a846fa39749cd162afd278ac654c4
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon Apr 11 11:28:08 2022 +0200
+
+    scripts: Add glibcelf.py module
+    
+    Hopefully, this will lead to tests that are easier to maintain.  The
+    current approach of parsing readelf -W output using regular expressions
+    is not necessarily easier than parsing the ELF data directly.
+    
+    This module is still somewhat incomplete (e.g., coverage of relocation
+    types and versioning information is missing), but it is sufficient to
+    perform basic symbol analysis or program header analysis.
+    
+    The EM_* mapping for architecture-specific constant classes (e.g.,
+    SttX86_64) is not yet implemented.  The classes are defined for the
+    benefit of elf/tst-glibcelf.py.
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Conflicts:
+	elf/Makefile
+	  (prelink removal upstream)
+
+diff --git a/elf/Makefile b/elf/Makefile
+index 44966b9dfef15463..89ce4f5196e5eb39 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -967,6 +967,13 @@ tests-special += $(objpfx)tst-prelink-cmp.out
+ endif
+ endif
+ 
++tests-special += $(objpfx)tst-glibcelf.out
++$(objpfx)tst-glibcelf.out: tst-glibcelf.py elf.h $(..)/scripts/glibcelf.py \
++  $(..)/scripts/glibcextract.py
++	PYTHONPATH=$(..)scripts $(PYTHON) tst-glibcelf.py \
++          --cc="$(CC) $(patsubst -DMODULE_NAME=%,-DMODULE_NAME=testsuite,$(CPPFLAGS))" \
++	  < /dev/null > $@ 2>&1; $(evaluate-test)
++
+ # The test requires shared _and_ PIE because the executable
+ # unit test driver must be able to link with the shared object
+ # that is going to eventually go into an installed DSO.
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+new file mode 100644
+index 0000000000000000..bf15a3bad4479e08
+--- /dev/null
++++ b/elf/tst-glibcelf.py
+@@ -0,0 +1,260 @@
++#!/usr/bin/python3
++# Verify scripts/glibcelf.py contents against elf/elf.h.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++import argparse
++import enum
++import sys
++
++import glibcelf
++import glibcextract
++
++errors_encountered = 0
++
++def error(message):
++    global errors_encountered
++    sys.stdout.write('error: {}\n'.format(message))
++    errors_encountered += 1
++
++# The enum constants in glibcelf are expected to have exactly these
++# prefixes.
++expected_constant_prefixes = tuple(
++    'ELFCLASS ELFDATA EM_ ET_ DT_ PF_ PT_ SHF_ SHN_ SHT_ STB_ STT_'.split())
++
++def find_constant_prefix(name):
++    """Returns a matching prefix from expected_constant_prefixes or None."""
++    for prefix in expected_constant_prefixes:
++        if name.startswith(prefix):
++            return prefix
++    return None
++
++def find_enum_types():
++    """A generator for OpenIntEnum and IntFlag classes in glibcelf."""
++    for obj in vars(glibcelf).values():
++        if isinstance(obj, type) and obj.__bases__[0] in (
++                glibcelf._OpenIntEnum, enum.Enum, enum.IntFlag):
++            yield obj
++
++def check_duplicates():
++    """Verifies that enum types do not have duplicate values.
++
++    Different types must have different member names, too.
++
++    """
++    global_seen = {}
++    for typ in find_enum_types():
++        seen = {}
++        last = None
++        for (name, e) in typ.__members__.items():
++            if e.value in seen:
++                error('{} has {}={} and {}={}'.format(
++                    typ, seen[e.value], e.value, name, e.value))
++                last = e
++            else:
++                seen[e.value] = name
++                if last is not None and last.value > e.value:
++                    error('{} has {}={} after {}={}'.format(
++                        typ, name, e.value, last.name, last.value))
++                if name in global_seen:
++                    error('{} used in {} and {}'.format(
++                        name, global_seen[name], typ))
++                else:
++                    global_seen[name] = typ
++
++def check_constant_prefixes():
++    """Check that the constant prefixes match expected_constant_prefixes."""
++    seen = set()
++    for typ in find_enum_types():
++        typ_prefix = None
++        for val in typ:
++            prefix = find_constant_prefix(val.name)
++            if prefix is None:
++                error('constant {!r} for {} has unknown prefix'.format(
++                    val, typ))
++                break
++            elif typ_prefix is None:
++                typ_prefix = prefix
++                seen.add(typ_prefix)
++            elif prefix != typ_prefix:
++                error('prefix {!r} for constant {!r}, expected {!r}'.format(
++                    prefix, val, typ_prefix))
++        if typ_prefix is None:
++            error('empty enum type {}'.format(typ))
++
++    for prefix in sorted(set(expected_constant_prefixes) - seen):
++        error('missing constant prefix {!r}'.format(prefix))
++    # Reverse difference is already covered inside the loop.
++
++def find_elf_h_constants(cc):
++    """Returns a dictionary of relevant constants from <elf.h>."""
++    return glibcextract.compute_macro_consts(
++        source_text='#include <elf.h>',
++        cc=cc,
++        macro_re='|'.join(
++            prefix + '.*' for prefix in expected_constant_prefixes))
++
++# The first part of the pair is a name of an <elf.h> constant that is
++# dropped from glibcelf.  The second part is the constant as it is
++# used in <elf.h>.
++glibcelf_skipped_aliases = (
++    ('EM_ARC_A5', 'EM_ARC_COMPACT'),
++    ('PF_PARISC_SBP', 'PF_HP_SBP')
++)
++
++# Constants that provide little value and are not included in
++# glibcelf: *LO*/*HI* range constants, *NUM constants counting the
++# number of constants.  Also includes the alias names from
++# glibcelf_skipped_aliases.
++glibcelf_skipped_constants = frozenset(
++    [e[0] for e in glibcelf_skipped_aliases]) | frozenset("""
++DT_AARCH64_NUM
++DT_ADDRNUM
++DT_ADDRRNGHI
++DT_ADDRRNGLO
++DT_ALPHA_NUM
++DT_ENCODING
++DT_EXTRANUM
++DT_HIOS
++DT_HIPROC
++DT_IA_64_NUM
++DT_LOOS
++DT_LOPROC
++DT_MIPS_NUM
++DT_NUM
++DT_PPC64_NUM
++DT_PPC_NUM
++DT_PROCNUM
++DT_SPARC_NUM
++DT_VALNUM
++DT_VALRNGHI
++DT_VALRNGLO
++DT_VERSIONTAGNUM
++ELFCLASSNUM
++ELFDATANUM
++ET_HIOS
++ET_HIPROC
++ET_LOOS
++ET_LOPROC
++ET_NUM
++PF_MASKOS
++PF_MASKPROC
++PT_HIOS
++PT_HIPROC
++PT_HISUNW
++PT_LOOS
++PT_LOPROC
++PT_LOSUNW
++SHF_MASKOS
++SHF_MASKPROC
++SHN_HIOS
++SHN_HIPROC
++SHN_HIRESERVE
++SHN_LOOS
++SHN_LOPROC
++SHN_LORESERVE
++SHT_HIOS
++SHT_HIPROC
++SHT_HIPROC
++SHT_HISUNW
++SHT_HIUSER
++SHT_LOOS
++SHT_LOPROC
++SHT_LOSUNW
++SHT_LOUSER
++SHT_NUM
++STB_HIOS
++STB_HIPROC
++STB_LOOS
++STB_LOPROC
++STB_NUM
++STT_HIOS
++STT_HIPROC
++STT_LOOS
++STT_LOPROC
++STT_NUM
++""".strip().split())
++
++def check_constant_values(cc):
++    """Checks the values of <elf.h> constants against glibcelf."""
++
++    glibcelf_constants = {
++        e.name: e for typ in find_enum_types() for e in typ}
++    elf_h_constants = find_elf_h_constants(cc=cc)
++
++    missing_in_glibcelf = (set(elf_h_constants) - set(glibcelf_constants)
++                           - glibcelf_skipped_constants)
++    for name in sorted(missing_in_glibcelf):
++        error('constant {} is missing from glibcelf'.format(name))
++
++    unexpected_in_glibcelf = \
++        set(glibcelf_constants) & glibcelf_skipped_constants
++    for name in sorted(unexpected_in_glibcelf):
++        error('constant {} is supposed to be filtered from glibcelf'.format(
++            name))
++
++    missing_in_elf_h = set(glibcelf_constants) - set(elf_h_constants)
++    for name in sorted(missing_in_elf_h):
++        error('constant {} is missing from <elf.h>'.format(name))
++
++    expected_in_elf_h = glibcelf_skipped_constants - set(elf_h_constants)
++    for name in expected_in_elf_h:
++        error('filtered constant {} is missing from <elf.h>'.format(name))
++
++    for alias_name, name_in_glibcelf in glibcelf_skipped_aliases:
++        if name_in_glibcelf not in glibcelf_constants:
++            error('alias value {} for {} not in glibcelf'.format(
++                name_in_glibcelf, alias_name))
++        elif (int(elf_h_constants[alias_name])
++              != glibcelf_constants[name_in_glibcelf].value):
++            error('<elf.h> has {}={}, glibcelf has {}={}'.format(
++                alias_name, elf_h_constants[alias_name],
++                name_in_glibcelf, glibcelf_constants[name_in_glibcelf]))
++
++    # Check for value mismatches:
++    for name in sorted(set(glibcelf_constants) & set(elf_h_constants)):
++        glibcelf_value = glibcelf_constants[name].value
++        elf_h_value = int(elf_h_constants[name])
++        # On 32-bit architectures <elf.h> as some constants that are
++        # parsed as signed, while they are unsigned in glibcelf.  So
++        # far, this only affects some flag constants, so special-case
++        # them here.
++        if (glibcelf_value != elf_h_value
++            and not (isinstance(glibcelf_constants[name], enum.IntFlag)
++                     and glibcelf_value == 1 << 31
++                     and elf_h_value == -(1 << 31))):
++            error('{}: glibcelf has {!r}, <elf.h> has {!r}'.format(
++                name, glibcelf_value, elf_h_value))
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description="Check glibcelf.py and elf.h against each other.")
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    args = parser.parse_args()
++
++    check_duplicates()
++    check_constant_prefixes()
++    check_constant_values(cc=args.cc)
++
++    if errors_encountered > 0:
++        print("note: errors encountered:", errors_encountered)
++        sys.exit(1)
++
++if __name__ == '__main__':
++    main()
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+new file mode 100644
+index 0000000000000000..8f7d0ca184845714
+--- /dev/null
++++ b/scripts/glibcelf.py
+@@ -0,0 +1,1135 @@
++#!/usr/bin/python3
++# ELF support functionality for Python.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++"""Basic ELF parser.
++
++Use Image.readfile(path) to read an ELF file into memory and begin
++parsing it.
++
++"""
++
++import collections
++import enum
++import struct
++
++class _OpenIntEnum(enum.IntEnum):
++    """Integer enumeration that supports arbitrary int values."""
++    @classmethod
++    def _missing_(cls, value):
++        # See enum.IntFlag._create_pseudo_member_.  This allows
++        # creating of enum constants with arbitrary integer values.
++        pseudo_member = int.__new__(cls, value)
++        pseudo_member._name_ = None
++        pseudo_member._value_ = value
++        return pseudo_member
++
++    def __repr__(self):
++        name = self._name_
++        if name is not None:
++            # The names have prefixes like SHT_, implying their type.
++            return name
++        return '{}({})'.format(self.__class__.__name__, self._value_)
++
++    def __str__(self):
++        name = self._name_
++        if name is not None:
++            return name
++        return str(self._value_)
++
++class ElfClass(_OpenIntEnum):
++    """ELF word size.  Type of EI_CLASS values."""
++    ELFCLASSNONE = 0
++    ELFCLASS32 = 1
++    ELFCLASS64 = 2
++
++class ElfData(_OpenIntEnum):
++    """ELF endianess.  Type of EI_DATA values."""
++    ELFDATANONE = 0
++    ELFDATA2LSB = 1
++    ELFDATA2MSB = 2
++
++class Machine(_OpenIntEnum):
++    """ELF machine type.  Type of values in Ehdr.e_machine field."""
++    EM_NONE = 0
++    EM_M32 = 1
++    EM_SPARC = 2
++    EM_386 = 3
++    EM_68K = 4
++    EM_88K = 5
++    EM_IAMCU = 6
++    EM_860 = 7
++    EM_MIPS = 8
++    EM_S370 = 9
++    EM_MIPS_RS3_LE = 10
++    EM_PARISC = 15
++    EM_VPP500 = 17
++    EM_SPARC32PLUS = 18
++    EM_960 = 19
++    EM_PPC = 20
++    EM_PPC64 = 21
++    EM_S390 = 22
++    EM_SPU = 23
++    EM_V800 = 36
++    EM_FR20 = 37
++    EM_RH32 = 38
++    EM_RCE = 39
++    EM_ARM = 40
++    EM_FAKE_ALPHA = 41
++    EM_SH = 42
++    EM_SPARCV9 = 43
++    EM_TRICORE = 44
++    EM_ARC = 45
++    EM_H8_300 = 46
++    EM_H8_300H = 47
++    EM_H8S = 48
++    EM_H8_500 = 49
++    EM_IA_64 = 50
++    EM_MIPS_X = 51
++    EM_COLDFIRE = 52
++    EM_68HC12 = 53
++    EM_MMA = 54
++    EM_PCP = 55
++    EM_NCPU = 56
++    EM_NDR1 = 57
++    EM_STARCORE = 58
++    EM_ME16 = 59
++    EM_ST100 = 60
++    EM_TINYJ = 61
++    EM_X86_64 = 62
++    EM_PDSP = 63
++    EM_PDP10 = 64
++    EM_PDP11 = 65
++    EM_FX66 = 66
++    EM_ST9PLUS = 67
++    EM_ST7 = 68
++    EM_68HC16 = 69
++    EM_68HC11 = 70
++    EM_68HC08 = 71
++    EM_68HC05 = 72
++    EM_SVX = 73
++    EM_ST19 = 74
++    EM_VAX = 75
++    EM_CRIS = 76
++    EM_JAVELIN = 77
++    EM_FIREPATH = 78
++    EM_ZSP = 79
++    EM_MMIX = 80
++    EM_HUANY = 81
++    EM_PRISM = 82
++    EM_AVR = 83
++    EM_FR30 = 84
++    EM_D10V = 85
++    EM_D30V = 86
++    EM_V850 = 87
++    EM_M32R = 88
++    EM_MN10300 = 89
++    EM_MN10200 = 90
++    EM_PJ = 91
++    EM_OPENRISC = 92
++    EM_ARC_COMPACT = 93
++    EM_XTENSA = 94
++    EM_VIDEOCORE = 95
++    EM_TMM_GPP = 96
++    EM_NS32K = 97
++    EM_TPC = 98
++    EM_SNP1K = 99
++    EM_ST200 = 100
++    EM_IP2K = 101
++    EM_MAX = 102
++    EM_CR = 103
++    EM_F2MC16 = 104
++    EM_MSP430 = 105
++    EM_BLACKFIN = 106
++    EM_SE_C33 = 107
++    EM_SEP = 108
++    EM_ARCA = 109
++    EM_UNICORE = 110
++    EM_EXCESS = 111
++    EM_DXP = 112
++    EM_ALTERA_NIOS2 = 113
++    EM_CRX = 114
++    EM_XGATE = 115
++    EM_C166 = 116
++    EM_M16C = 117
++    EM_DSPIC30F = 118
++    EM_CE = 119
++    EM_M32C = 120
++    EM_TSK3000 = 131
++    EM_RS08 = 132
++    EM_SHARC = 133
++    EM_ECOG2 = 134
++    EM_SCORE7 = 135
++    EM_DSP24 = 136
++    EM_VIDEOCORE3 = 137
++    EM_LATTICEMICO32 = 138
++    EM_SE_C17 = 139
++    EM_TI_C6000 = 140
++    EM_TI_C2000 = 141
++    EM_TI_C5500 = 142
++    EM_TI_ARP32 = 143
++    EM_TI_PRU = 144
++    EM_MMDSP_PLUS = 160
++    EM_CYPRESS_M8C = 161
++    EM_R32C = 162
++    EM_TRIMEDIA = 163
++    EM_QDSP6 = 164
++    EM_8051 = 165
++    EM_STXP7X = 166
++    EM_NDS32 = 167
++    EM_ECOG1X = 168
++    EM_MAXQ30 = 169
++    EM_XIMO16 = 170
++    EM_MANIK = 171
++    EM_CRAYNV2 = 172
++    EM_RX = 173
++    EM_METAG = 174
++    EM_MCST_ELBRUS = 175
++    EM_ECOG16 = 176
++    EM_CR16 = 177
++    EM_ETPU = 178
++    EM_SLE9X = 179
++    EM_L10M = 180
++    EM_K10M = 181
++    EM_AARCH64 = 183
++    EM_AVR32 = 185
++    EM_STM8 = 186
++    EM_TILE64 = 187
++    EM_TILEPRO = 188
++    EM_MICROBLAZE = 189
++    EM_CUDA = 190
++    EM_TILEGX = 191
++    EM_CLOUDSHIELD = 192
++    EM_COREA_1ST = 193
++    EM_COREA_2ND = 194
++    EM_ARCV2 = 195
++    EM_OPEN8 = 196
++    EM_RL78 = 197
++    EM_VIDEOCORE5 = 198
++    EM_78KOR = 199
++    EM_56800EX = 200
++    EM_BA1 = 201
++    EM_BA2 = 202
++    EM_XCORE = 203
++    EM_MCHP_PIC = 204
++    EM_INTELGT = 205
++    EM_KM32 = 210
++    EM_KMX32 = 211
++    EM_EMX16 = 212
++    EM_EMX8 = 213
++    EM_KVARC = 214
++    EM_CDP = 215
++    EM_COGE = 216
++    EM_COOL = 217
++    EM_NORC = 218
++    EM_CSR_KALIMBA = 219
++    EM_Z80 = 220
++    EM_VISIUM = 221
++    EM_FT32 = 222
++    EM_MOXIE = 223
++    EM_AMDGPU = 224
++    EM_RISCV = 243
++    EM_BPF = 247
++    EM_CSKY = 252
++    EM_NUM = 253
++    EM_ALPHA = 0x9026
++
++class Et(_OpenIntEnum):
++    """ELF file type.  Type of ET_* values and the Ehdr.e_type field."""
++    ET_NONE = 0
++    ET_REL = 1
++    ET_EXEC = 2
++    ET_DYN = 3
++    ET_CORE = 4
++
++class Shn(_OpenIntEnum):
++    """ELF reserved section indices."""
++    SHN_UNDEF = 0
++    SHN_BEFORE = 0xff00
++    SHN_AFTER = 0xff01
++    SHN_ABS = 0xfff1
++    SHN_COMMON = 0xfff2
++    SHN_XINDEX = 0xffff
++
++class ShnMIPS(enum.Enum):
++    """Supplemental SHN_* constants for EM_MIPS."""
++    SHN_MIPS_ACOMMON = 0xff00
++    SHN_MIPS_TEXT = 0xff01
++    SHN_MIPS_DATA = 0xff02
++    SHN_MIPS_SCOMMON = 0xff03
++    SHN_MIPS_SUNDEFINED = 0xff04
++
++class ShnPARISC(enum.Enum):
++    """Supplemental SHN_* constants for EM_PARISC."""
++    SHN_PARISC_ANSI_COMMON = 0xff00
++    SHN_PARISC_HUGE_COMMON = 0xff01
++
++class Sht(_OpenIntEnum):
++    """ELF section types.  Type of SHT_* values."""
++    SHT_NULL = 0
++    SHT_PROGBITS = 1
++    SHT_SYMTAB = 2
++    SHT_STRTAB = 3
++    SHT_RELA = 4
++    SHT_HASH = 5
++    SHT_DYNAMIC = 6
++    SHT_NOTE = 7
++    SHT_NOBITS = 8
++    SHT_REL = 9
++    SHT_SHLIB = 10
++    SHT_DYNSYM = 11
++    SHT_INIT_ARRAY = 14
++    SHT_FINI_ARRAY = 15
++    SHT_PREINIT_ARRAY = 16
++    SHT_GROUP = 17
++    SHT_SYMTAB_SHNDX = 18
++    SHT_GNU_ATTRIBUTES = 0x6ffffff5
++    SHT_GNU_HASH = 0x6ffffff6
++    SHT_GNU_LIBLIST = 0x6ffffff7
++    SHT_CHECKSUM = 0x6ffffff8
++    SHT_SUNW_move = 0x6ffffffa
++    SHT_SUNW_COMDAT = 0x6ffffffb
++    SHT_SUNW_syminfo = 0x6ffffffc
++    SHT_GNU_verdef = 0x6ffffffd
++    SHT_GNU_verneed = 0x6ffffffe
++    SHT_GNU_versym = 0x6fffffff
++
++class ShtALPHA(enum.Enum):
++    """Supplemental SHT_* constants for EM_ALPHA."""
++    SHT_ALPHA_DEBUG = 0x70000001
++    SHT_ALPHA_REGINFO = 0x70000002
++
++class ShtARM(enum.Enum):
++    """Supplemental SHT_* constants for EM_ARM."""
++    SHT_ARM_EXIDX = 0x70000001
++    SHT_ARM_PREEMPTMAP = 0x70000002
++    SHT_ARM_ATTRIBUTES = 0x70000003
++
++class ShtCSKY(enum.Enum):
++    """Supplemental SHT_* constants for EM_CSKY."""
++    SHT_CSKY_ATTRIBUTES = 0x70000001
++
++class ShtIA_64(enum.Enum):
++    """Supplemental SHT_* constants for EM_IA_64."""
++    SHT_IA_64_EXT = 0x70000000
++    SHT_IA_64_UNWIND = 0x70000001
++
++class ShtMIPS(enum.Enum):
++    """Supplemental SHT_* constants for EM_MIPS."""
++    SHT_MIPS_LIBLIST = 0x70000000
++    SHT_MIPS_MSYM = 0x70000001
++    SHT_MIPS_CONFLICT = 0x70000002
++    SHT_MIPS_GPTAB = 0x70000003
++    SHT_MIPS_UCODE = 0x70000004
++    SHT_MIPS_DEBUG = 0x70000005
++    SHT_MIPS_REGINFO = 0x70000006
++    SHT_MIPS_PACKAGE = 0x70000007
++    SHT_MIPS_PACKSYM = 0x70000008
++    SHT_MIPS_RELD = 0x70000009
++    SHT_MIPS_IFACE = 0x7000000b
++    SHT_MIPS_CONTENT = 0x7000000c
++    SHT_MIPS_OPTIONS = 0x7000000d
++    SHT_MIPS_SHDR = 0x70000010
++    SHT_MIPS_FDESC = 0x70000011
++    SHT_MIPS_EXTSYM = 0x70000012
++    SHT_MIPS_DENSE = 0x70000013
++    SHT_MIPS_PDESC = 0x70000014
++    SHT_MIPS_LOCSYM = 0x70000015
++    SHT_MIPS_AUXSYM = 0x70000016
++    SHT_MIPS_OPTSYM = 0x70000017
++    SHT_MIPS_LOCSTR = 0x70000018
++    SHT_MIPS_LINE = 0x70000019
++    SHT_MIPS_RFDESC = 0x7000001a
++    SHT_MIPS_DELTASYM = 0x7000001b
++    SHT_MIPS_DELTAINST = 0x7000001c
++    SHT_MIPS_DELTACLASS = 0x7000001d
++    SHT_MIPS_DWARF = 0x7000001e
++    SHT_MIPS_DELTADECL = 0x7000001f
++    SHT_MIPS_SYMBOL_LIB = 0x70000020
++    SHT_MIPS_EVENTS = 0x70000021
++    SHT_MIPS_TRANSLATE = 0x70000022
++    SHT_MIPS_PIXIE = 0x70000023
++    SHT_MIPS_XLATE = 0x70000024
++    SHT_MIPS_XLATE_DEBUG = 0x70000025
++    SHT_MIPS_WHIRL = 0x70000026
++    SHT_MIPS_EH_REGION = 0x70000027
++    SHT_MIPS_XLATE_OLD = 0x70000028
++    SHT_MIPS_PDR_EXCEPTION = 0x70000029
++    SHT_MIPS_XHASH = 0x7000002b
++
++class ShtPARISC(enum.Enum):
++    """Supplemental SHT_* constants for EM_PARISC."""
++    SHT_PARISC_EXT = 0x70000000
++    SHT_PARISC_UNWIND = 0x70000001
++    SHT_PARISC_DOC = 0x70000002
++
++class Pf(enum.IntFlag):
++    """Program header flags.  Type of Phdr.p_flags values."""
++    PF_X = 1
++    PF_W = 2
++    PF_R = 4
++
++class PfARM(enum.IntFlag):
++    """Supplemental PF_* flags for EM_ARM."""
++    PF_ARM_SB = 0x10000000
++    PF_ARM_PI = 0x20000000
++    PF_ARM_ABS = 0x40000000
++
++class PfPARISC(enum.IntFlag):
++    """Supplemental PF_* flags for EM_PARISC."""
++    PF_HP_PAGE_SIZE = 0x00100000
++    PF_HP_FAR_SHARED = 0x00200000
++    PF_HP_NEAR_SHARED = 0x00400000
++    PF_HP_CODE = 0x01000000
++    PF_HP_MODIFY = 0x02000000
++    PF_HP_LAZYSWAP = 0x04000000
++    PF_HP_SBP = 0x08000000
++
++class PfIA_64(enum.IntFlag):
++    """Supplemental PF_* flags for EM_IA_64."""
++    PF_IA_64_NORECOV = 0x80000000
++
++class PfMIPS(enum.IntFlag):
++    """Supplemental PF_* flags for EM_MIPS."""
++    PF_MIPS_LOCAL = 0x10000000
++
++class Shf(enum.IntFlag):
++    """Section flags.  Type of Shdr.sh_type values."""
++    SHF_WRITE = 1 << 0
++    SHF_ALLOC = 1 << 1
++    SHF_EXECINSTR = 1 << 2
++    SHF_MERGE = 1 << 4
++    SHF_STRINGS = 1 << 5
++    SHF_INFO_LINK = 1 << 6
++    SHF_LINK_ORDER = 1 << 7
++    SHF_OS_NONCONFORMING = 256
++    SHF_GROUP = 1 << 9
++    SHF_TLS = 1 << 10
++    SHF_COMPRESSED = 1 << 11
++    SHF_GNU_RETAIN = 1 << 21
++    SHF_ORDERED = 1 << 30
++    SHF_EXCLUDE = 1 << 31
++
++class ShfALPHA(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_ALPHA."""
++    SHF_ALPHA_GPREL = 0x10000000
++
++class ShfARM(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_ARM."""
++    SHF_ARM_ENTRYSECT = 0x10000000
++    SHF_ARM_COMDEF = 0x80000000
++
++class ShfIA_64(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_IA_64."""
++    SHF_IA_64_SHORT  = 0x10000000
++    SHF_IA_64_NORECOV = 0x20000000
++
++class ShfMIPS(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_MIPS."""
++    SHF_MIPS_GPREL = 0x10000000
++    SHF_MIPS_MERGE = 0x20000000
++    SHF_MIPS_ADDR = 0x40000000
++    SHF_MIPS_STRINGS = 0x80000000
++    SHF_MIPS_NOSTRIP = 0x08000000
++    SHF_MIPS_LOCAL = 0x04000000
++    SHF_MIPS_NAMES = 0x02000000
++    SHF_MIPS_NODUPE = 0x01000000
++
++class ShfPARISC(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_PARISC."""
++    SHF_PARISC_SHORT = 0x20000000
++    SHF_PARISC_HUGE = 0x40000000
++    SHF_PARISC_SBP = 0x80000000
++
++class Stb(_OpenIntEnum):
++    """ELF symbol binding type."""
++    STB_LOCAL = 0
++    STB_GLOBAL = 1
++    STB_WEAK = 2
++    STB_GNU_UNIQUE = 10
++    STB_MIPS_SPLIT_COMMON = 13
++
++class Stt(_OpenIntEnum):
++    """ELF symbol type."""
++    STT_NOTYPE = 0
++    STT_OBJECT = 1
++    STT_FUNC = 2
++    STT_SECTION = 3
++    STT_FILE = 4
++    STT_COMMON = 5
++    STT_TLS = 6
++    STT_GNU_IFUNC = 10
++
++class SttARM(enum.Enum):
++    """Supplemental STT_* constants for EM_ARM."""
++    STT_ARM_TFUNC = 13
++    STT_ARM_16BIT = 15
++
++class SttPARISC(enum.Enum):
++    """Supplemental STT_* constants for EM_PARISC."""
++    STT_HP_OPAQUE = 11
++    STT_HP_STUB = 12
++    STT_PARISC_MILLICODE = 13
++
++class SttSPARC(enum.Enum):
++    """Supplemental STT_* constants for EM_SPARC."""
++    STT_SPARC_REGISTER = 13
++
++class SttX86_64(enum.Enum):
++    """Supplemental STT_* constants for EM_X86_64."""
++    SHT_X86_64_UNWIND = 0x70000001
++
++class Pt(_OpenIntEnum):
++    """ELF program header types.  Type of Phdr.p_type."""
++    PT_NULL = 0
++    PT_LOAD = 1
++    PT_DYNAMIC = 2
++    PT_INTERP = 3
++    PT_NOTE = 4
++    PT_SHLIB = 5
++    PT_PHDR = 6
++    PT_TLS = 7
++    PT_NUM = 8
++    PT_GNU_EH_FRAME = 0x6474e550
++    PT_GNU_STACK = 0x6474e551
++    PT_GNU_RELRO = 0x6474e552
++    PT_GNU_PROPERTY = 0x6474e553
++    PT_SUNWBSS = 0x6ffffffa
++    PT_SUNWSTACK = 0x6ffffffb
++
++class PtARM(enum.Enum):
++    """Supplemental PT_* constants for EM_ARM."""
++    PT_ARM_EXIDX = 0x70000001
++
++class PtIA_64(enum.Enum):
++    """Supplemental PT_* constants for EM_IA_64."""
++    PT_IA_64_HP_OPT_ANOT = 0x60000012
++    PT_IA_64_HP_HSL_ANOT = 0x60000013
++    PT_IA_64_HP_STACK = 0x60000014
++    PT_IA_64_ARCHEXT = 0x70000000
++    PT_IA_64_UNWIND = 0x70000001
++
++class PtMIPS(enum.Enum):
++    """Supplemental PT_* constants for EM_MIPS."""
++    PT_MIPS_REGINFO = 0x70000000
++    PT_MIPS_RTPROC = 0x70000001
++    PT_MIPS_OPTIONS = 0x70000002
++    PT_MIPS_ABIFLAGS = 0x70000003
++
++class PtPARISC(enum.Enum):
++    """Supplemental PT_* constants for EM_PARISC."""
++    PT_HP_TLS = 0x60000000
++    PT_HP_CORE_NONE = 0x60000001
++    PT_HP_CORE_VERSION = 0x60000002
++    PT_HP_CORE_KERNEL = 0x60000003
++    PT_HP_CORE_COMM = 0x60000004
++    PT_HP_CORE_PROC = 0x60000005
++    PT_HP_CORE_LOADABLE = 0x60000006
++    PT_HP_CORE_STACK = 0x60000007
++    PT_HP_CORE_SHM = 0x60000008
++    PT_HP_CORE_MMF = 0x60000009
++    PT_HP_PARALLEL = 0x60000010
++    PT_HP_FASTBIND = 0x60000011
++    PT_HP_OPT_ANNOT = 0x60000012
++    PT_HP_HSL_ANNOT = 0x60000013
++    PT_HP_STACK = 0x60000014
++    PT_PARISC_ARCHEXT = 0x70000000
++    PT_PARISC_UNWIND = 0x70000001
++
++class Dt(_OpenIntEnum):
++    """ELF dynamic segment tags.  Type of Dyn.d_val."""
++    DT_NULL = 0
++    DT_NEEDED = 1
++    DT_PLTRELSZ = 2
++    DT_PLTGOT = 3
++    DT_HASH = 4
++    DT_STRTAB = 5
++    DT_SYMTAB = 6
++    DT_RELA = 7
++    DT_RELASZ = 8
++    DT_RELAENT = 9
++    DT_STRSZ = 10
++    DT_SYMENT = 11
++    DT_INIT = 12
++    DT_FINI = 13
++    DT_SONAME = 14
++    DT_RPATH = 15
++    DT_SYMBOLIC = 16
++    DT_REL = 17
++    DT_RELSZ = 18
++    DT_RELENT = 19
++    DT_PLTREL = 20
++    DT_DEBUG = 21
++    DT_TEXTREL = 22
++    DT_JMPREL = 23
++    DT_BIND_NOW = 24
++    DT_INIT_ARRAY = 25
++    DT_FINI_ARRAY = 26
++    DT_INIT_ARRAYSZ = 27
++    DT_FINI_ARRAYSZ = 28
++    DT_RUNPATH = 29
++    DT_FLAGS = 30
++    DT_PREINIT_ARRAY = 32
++    DT_PREINIT_ARRAYSZ = 33
++    DT_SYMTAB_SHNDX = 34
++    DT_GNU_PRELINKED = 0x6ffffdf5
++    DT_GNU_CONFLICTSZ = 0x6ffffdf6
++    DT_GNU_LIBLISTSZ = 0x6ffffdf7
++    DT_CHECKSUM = 0x6ffffdf8
++    DT_PLTPADSZ = 0x6ffffdf9
++    DT_MOVEENT = 0x6ffffdfa
++    DT_MOVESZ = 0x6ffffdfb
++    DT_FEATURE_1 = 0x6ffffdfc
++    DT_POSFLAG_1 = 0x6ffffdfd
++    DT_SYMINSZ = 0x6ffffdfe
++    DT_SYMINENT = 0x6ffffdff
++    DT_GNU_HASH = 0x6ffffef5
++    DT_TLSDESC_PLT = 0x6ffffef6
++    DT_TLSDESC_GOT = 0x6ffffef7
++    DT_GNU_CONFLICT = 0x6ffffef8
++    DT_GNU_LIBLIST = 0x6ffffef9
++    DT_CONFIG = 0x6ffffefa
++    DT_DEPAUDIT = 0x6ffffefb
++    DT_AUDIT = 0x6ffffefc
++    DT_PLTPAD = 0x6ffffefd
++    DT_MOVETAB = 0x6ffffefe
++    DT_SYMINFO = 0x6ffffeff
++    DT_VERSYM = 0x6ffffff0
++    DT_RELACOUNT = 0x6ffffff9
++    DT_RELCOUNT = 0x6ffffffa
++    DT_FLAGS_1 = 0x6ffffffb
++    DT_VERDEF = 0x6ffffffc
++    DT_VERDEFNUM = 0x6ffffffd
++    DT_VERNEED = 0x6ffffffe
++    DT_VERNEEDNUM = 0x6fffffff
++    DT_AUXILIARY = 0x7ffffffd
++    DT_FILTER = 0x7fffffff
++
++class DtAARCH64(enum.Enum):
++    """Supplemental DT_* constants for EM_AARCH64."""
++    DT_AARCH64_BTI_PLT = 0x70000001
++    DT_AARCH64_PAC_PLT = 0x70000003
++    DT_AARCH64_VARIANT_PCS = 0x70000005
++
++class DtALPHA(enum.Enum):
++    """Supplemental DT_* constants for EM_ALPHA."""
++    DT_ALPHA_PLTRO = 0x70000000
++
++class DtALTERA_NIOS2(enum.Enum):
++    """Supplemental DT_* constants for EM_ALTERA_NIOS2."""
++    DT_NIOS2_GP = 0x70000002
++
++class DtIA_64(enum.Enum):
++    """Supplemental DT_* constants for EM_IA_64."""
++    DT_IA_64_PLT_RESERVE = 0x70000000
++
++class DtMIPS(enum.Enum):
++    """Supplemental DT_* constants for EM_MIPS."""
++    DT_MIPS_RLD_VERSION = 0x70000001
++    DT_MIPS_TIME_STAMP = 0x70000002
++    DT_MIPS_ICHECKSUM = 0x70000003
++    DT_MIPS_IVERSION = 0x70000004
++    DT_MIPS_FLAGS = 0x70000005
++    DT_MIPS_BASE_ADDRESS = 0x70000006
++    DT_MIPS_MSYM = 0x70000007
++    DT_MIPS_CONFLICT = 0x70000008
++    DT_MIPS_LIBLIST = 0x70000009
++    DT_MIPS_LOCAL_GOTNO = 0x7000000a
++    DT_MIPS_CONFLICTNO = 0x7000000b
++    DT_MIPS_LIBLISTNO = 0x70000010
++    DT_MIPS_SYMTABNO = 0x70000011
++    DT_MIPS_UNREFEXTNO = 0x70000012
++    DT_MIPS_GOTSYM = 0x70000013
++    DT_MIPS_HIPAGENO = 0x70000014
++    DT_MIPS_RLD_MAP = 0x70000016
++    DT_MIPS_DELTA_CLASS = 0x70000017
++    DT_MIPS_DELTA_CLASS_NO = 0x70000018
++    DT_MIPS_DELTA_INSTANCE = 0x70000019
++    DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a
++    DT_MIPS_DELTA_RELOC = 0x7000001b
++    DT_MIPS_DELTA_RELOC_NO = 0x7000001c
++    DT_MIPS_DELTA_SYM = 0x7000001d
++    DT_MIPS_DELTA_SYM_NO = 0x7000001e
++    DT_MIPS_DELTA_CLASSSYM = 0x70000020
++    DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021
++    DT_MIPS_CXX_FLAGS = 0x70000022
++    DT_MIPS_PIXIE_INIT = 0x70000023
++    DT_MIPS_SYMBOL_LIB = 0x70000024
++    DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025
++    DT_MIPS_LOCAL_GOTIDX = 0x70000026
++    DT_MIPS_HIDDEN_GOTIDX = 0x70000027
++    DT_MIPS_PROTECTED_GOTIDX = 0x70000028
++    DT_MIPS_OPTIONS = 0x70000029
++    DT_MIPS_INTERFACE = 0x7000002a
++    DT_MIPS_DYNSTR_ALIGN = 0x7000002b
++    DT_MIPS_INTERFACE_SIZE = 0x7000002c
++    DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d
++    DT_MIPS_PERF_SUFFIX = 0x7000002e
++    DT_MIPS_COMPACT_SIZE = 0x7000002f
++    DT_MIPS_GP_VALUE = 0x70000030
++    DT_MIPS_AUX_DYNAMIC = 0x70000031
++    DT_MIPS_PLTGOT = 0x70000032
++    DT_MIPS_RWPLT = 0x70000034
++    DT_MIPS_RLD_MAP_REL = 0x70000035
++    DT_MIPS_XHASH = 0x70000036
++
++class DtPPC(enum.Enum):
++    """Supplemental DT_* constants for EM_PPC."""
++    DT_PPC_GOT = 0x70000000
++    DT_PPC_OPT = 0x70000001
++
++class DtPPC64(enum.Enum):
++    """Supplemental DT_* constants for EM_PPC64."""
++    DT_PPC64_GLINK = 0x70000000
++    DT_PPC64_OPD = 0x70000001
++    DT_PPC64_OPDSZ = 0x70000002
++    DT_PPC64_OPT = 0x70000003
++
++class DtSPARC(enum.Enum):
++    """Supplemental DT_* constants for EM_SPARC."""
++    DT_SPARC_REGISTER = 0x70000001
++
++class StInfo:
++    """ELF symbol binding and type.  Type of the Sym.st_info field."""
++    def __init__(self, arg0, arg1=None):
++        if isinstance(arg0, int) and arg1 is None:
++            self.bind = Stb(arg0 >> 4)
++            self.type = Stt(arg0 & 15)
++        else:
++            self.bind = Stb(arg0)
++            self.type = Stt(arg1)
++
++    def value(self):
++        """Returns the raw value for the bind/type combination."""
++        return (self.bind.value() << 4) | (self.type.value())
++
++# Type in an ELF file.  Used for deserialization.
++_Layout = collections.namedtuple('_Layout', 'unpack size')
++
++def _define_layouts(baseclass: type, layout32: str, layout64: str,
++                    types=None, fields32=None):
++    """Assign variants dict to baseclass.
++
++    The variants dict is indexed by (ElfClass, ElfData) pairs, and its
++    values are _Layout instances.
++
++    """
++    struct32 = struct.Struct(layout32)
++    struct64 = struct.Struct(layout64)
++
++    # Check that the struct formats yield the right number of components.
++    for s in (struct32, struct64):
++        example = s.unpack(b' ' * s.size)
++        if len(example) != len(baseclass._fields):
++            raise ValueError('{!r} yields wrong field count: {} != {}'.format(
++                s.format, len(example),  len(baseclass._fields)))
++
++    # Check that field names in types are correct.
++    if types is None:
++        types = ()
++    for n in types:
++        if n not in baseclass._fields:
++            raise ValueError('{} does not have field {!r}'.format(
++                baseclass.__name__, n))
++
++    if fields32 is not None \
++       and set(fields32) != set(baseclass._fields):
++        raise ValueError('{!r} is not a permutation of the fields {!r}'.format(
++            fields32, baseclass._fields))
++
++    def unique_name(name, used_names = (set((baseclass.__name__,))
++                                        | set(baseclass._fields)
++                                        | {n.__name__
++                                           for n in (types or {}).values()})):
++        """Find a name that is not used for a class or field name."""
++        candidate = name
++        n = 0
++        while candidate in used_names:
++            n += 1
++            candidate = '{}{}'.format(name, n)
++        used_names.add(candidate)
++        return candidate
++
++    blob_name = unique_name('blob')
++    struct_unpack_name = unique_name('struct_unpack')
++    comps_name = unique_name('comps')
++
++    layouts = {}
++    for (bits, elfclass, layout, fields) in (
++            (32, ElfClass.ELFCLASS32, layout32, fields32),
++            (64, ElfClass.ELFCLASS64, layout64, None),
++    ):
++        for (elfdata, structprefix, funcsuffix) in (
++                (ElfData.ELFDATA2LSB, '<', 'LE'),
++                (ElfData.ELFDATA2MSB, '>', 'BE'),
++        ):
++            env = {
++                baseclass.__name__: baseclass,
++                struct_unpack_name: struct.unpack,
++            }
++
++            # Add the type converters.
++            if types:
++                for cls in types.values():
++                    env[cls.__name__] = cls
++
++            funcname = ''.join(
++                ('unpack_', baseclass.__name__, str(bits), funcsuffix))
++
++            code = '''
++def {funcname}({blob_name}):
++'''.format(funcname=funcname, blob_name=blob_name)
++
++            indent = ' ' * 4
++            unpack_call = '{}({!r}, {})'.format(
++                struct_unpack_name, structprefix + layout, blob_name)
++            field_names = ', '.join(baseclass._fields)
++            if types is None and fields is None:
++                code += '{}return {}({})\n'.format(
++                    indent, baseclass.__name__, unpack_call)
++            else:
++                # Destructuring tuple assignment.
++                if fields is None:
++                    code += '{}{} = {}\n'.format(
++                        indent, field_names, unpack_call)
++                else:
++                    # Use custom field order.
++                    code += '{}{} = {}\n'.format(
++                        indent, ', '.join(fields), unpack_call)
++
++                # Perform the type conversions.
++                for n in baseclass._fields:
++                    if n in types:
++                        code += '{}{} = {}({})\n'.format(
++                            indent, n, types[n].__name__, n)
++                # Create the named tuple.
++                code += '{}return {}({})\n'.format(
++                    indent, baseclass.__name__, field_names)
++
++            exec(code, env)
++            layouts[(elfclass, elfdata)] = _Layout(
++                env[funcname], struct.calcsize(layout))
++    baseclass.layouts = layouts
++
++
++# Corresponds to EI_* indices into Elf*_Ehdr.e_ident.
++class Ident(collections.namedtuple('Ident',
++    'ei_mag ei_class ei_data ei_version ei_osabi ei_abiversion ei_pad')):
++
++    def __new__(cls, *args):
++        """Construct an object from a blob or its constituent fields."""
++        if len(args) == 1:
++            return cls.unpack(args[0])
++        return cls.__base__.__new__(cls, *args)
++
++    @staticmethod
++    def unpack(blob: memoryview) -> 'Ident':
++        """Parse raw data into a tuple."""
++        ei_mag, ei_class, ei_data, ei_version, ei_osabi, ei_abiversion, \
++            ei_pad = struct.unpack('4s5B7s', blob)
++        return Ident(ei_mag, ElfClass(ei_class), ElfData(ei_data),
++                     ei_version, ei_osabi, ei_abiversion, ei_pad)
++    size = 16
++
++# Corresponds to Elf32_Ehdr and Elf64_Ehdr.
++Ehdr = collections.namedtuple('Ehdr',
++   'e_ident e_type e_machine e_version e_entry e_phoff e_shoff e_flags'
++    + ' e_ehsize e_phentsize e_phnum e_shentsize e_shnum e_shstrndx')
++_define_layouts(Ehdr,
++                layout32='16s2H5I6H',
++                layout64='16s2HI3QI6H',
++                types=dict(e_ident=Ident,
++                           e_machine=Machine,
++                           e_type=Et,
++                           e_shstrndx=Shn))
++
++# Corresponds to Elf32_Phdr and Elf64_Phdr.  Order follows the latter.
++Phdr = collections.namedtuple('Phdr',
++    'p_type p_flags p_offset p_vaddr p_paddr p_filesz p_memsz p_align')
++_define_layouts(Phdr,
++                layout32='8I',
++                fields32=('p_type', 'p_offset', 'p_vaddr', 'p_paddr',
++                          'p_filesz', 'p_memsz', 'p_flags', 'p_align'),
++                layout64='2I6Q',
++            types=dict(p_type=Pt, p_flags=Pf))
++
++
++# Corresponds to Elf32_Shdr and Elf64_Shdr.
++class Shdr(collections.namedtuple('Shdr',
++    'sh_name sh_type sh_flags sh_addr sh_offset sh_size sh_link sh_info'
++    + ' sh_addralign sh_entsize')):
++    def resolve(self, strtab: 'StringTable') -> 'Shdr':
++        """Resolve sh_name using a string table."""
++        return self.__class__(strtab.get(self[0]), *self[1:])
++_define_layouts(Shdr,
++                layout32='10I',
++                layout64='2I4Q2I2Q',
++                types=dict(sh_type=Sht,
++                           sh_flags=Shf,
++                           sh_link=Shn))
++
++# Corresponds to Elf32_Dyn and Elf64_Dyn.  The nesting through the
++# d_un union is skipped, and d_ptr is missing (its representation in
++# Python would be identical to d_val).
++Dyn = collections.namedtuple('Dyn', 'd_tag d_val')
++_define_layouts(Dyn,
++                layout32='2i',
++                layout64='2q',
++                types=dict(d_tag=Dt))
++
++# Corresponds to Elf32_Sym and Elf64_Sym.
++class Sym(collections.namedtuple('Sym',
++    'st_name st_info st_other st_shndx st_value st_size')):
++    def resolve(self, strtab: 'StringTable') -> 'Sym':
++        """Resolve st_name using a string table."""
++        return self.__class__(strtab.get(self[0]), *self[1:])
++_define_layouts(Sym,
++                layout32='3I2BH',
++                layout64='I2BH2Q',
++                fields32=('st_name', 'st_value', 'st_size', 'st_info',
++                          'st_other', 'st_shndx'),
++                types=dict(st_shndx=Shn,
++                           st_info=StInfo))
++
++# Corresponds to Elf32_Rel and Elf64_Rel.
++Rel = collections.namedtuple('Rel', 'r_offset r_info')
++_define_layouts(Rel,
++                layout32='2I',
++                layout64='2Q')
++
++# Corresponds to Elf32_Rela and Elf64_Rela.
++Rela = collections.namedtuple('Rela', 'r_offset r_info r_addend')
++_define_layouts(Rela,
++                layout32='3I',
++                layout64='3Q')
++
++class StringTable:
++    """ELF string table."""
++    def __init__(self, blob):
++        """Create a new string table backed by the data in the blob.
++
++        blob: a memoryview-like object
++
++        """
++        self.blob = blob
++
++    def get(self, index) -> bytes:
++        """Returns the null-terminated byte string at the index."""
++        blob = self.blob
++        endindex = index
++        while True:
++            if blob[endindex] == 0:
++                return bytes(blob[index:endindex])
++            endindex += 1
++
++class Image:
++    """ELF image parser."""
++    def __init__(self, image):
++        """Create an ELF image from binary image data.
++
++        image: a memoryview-like object that supports efficient range
++        subscripting.
++
++        """
++        self.image = image
++        ident = self.read(Ident, 0)
++        classdata = (ident.ei_class, ident.ei_data)
++        # Set self.Ehdr etc. to the subtypes with the right parsers.
++        for typ in (Ehdr, Phdr, Shdr, Dyn, Sym, Rel, Rela):
++            setattr(self, typ.__name__, typ.layouts.get(classdata, None))
++
++        if self.Ehdr is not None:
++            self.ehdr = self.read(self.Ehdr, 0)
++            self._shdr_num = self._compute_shdr_num()
++        else:
++            self.ehdr = None
++            self._shdr_num = 0
++
++        self._section = {}
++        self._stringtab = {}
++
++        if self._shdr_num > 0:
++            self._shdr_strtab = self._find_shdr_strtab()
++        else:
++            self._shdr_strtab = None
++
++    @staticmethod
++    def readfile(path: str) -> 'Image':
++        """Reads the ELF file at the specified path."""
++        with open(path, 'rb') as inp:
++            return Image(memoryview(inp.read()))
++
++    def _compute_shdr_num(self) -> int:
++        """Computes the actual number of section headers."""
++        shnum = self.ehdr.e_shnum
++        if shnum == 0:
++            if self.ehdr.e_shoff == 0 or self.ehdr.e_shentsize == 0:
++                # No section headers.
++                return 0
++            # Otherwise the extension mechanism is used (which may be
++            # needed because e_shnum is just 16 bits).
++            return self.read(self.Shdr, self.ehdr.e_shoff).sh_size
++        return shnum
++
++    def _find_shdr_strtab(self) -> StringTable:
++        """Finds the section header string table (maybe via extensions)."""
++        shstrndx = self.ehdr.e_shstrndx
++        if shstrndx == Shn.SHN_XINDEX:
++            shstrndx = self.read(self.Shdr, self.ehdr.e_shoff).sh_link
++        return self._find_stringtab(shstrndx)
++
++    def read(self, typ: type, offset:int ):
++        """Reads an object at a specific offset.
++
++        The type must have been enhanced using _define_layouts.
++
++        """
++        return typ.unpack(self.image[offset: offset + typ.size])
++
++    def phdrs(self) -> Phdr:
++        """Generator iterating over the program headers."""
++        if self.ehdr is None:
++            return
++        size = self.ehdr.e_phentsize
++        if size != self.Phdr.size:
++            raise ValueError('Unexpected Phdr size in ELF header: {} != {}'
++                             .format(size, self.Phdr.size))
++
++        offset = self.ehdr.e_phoff
++        for _ in range(self.ehdr.e_phnum):
++            yield self.read(self.Phdr, offset)
++            offset += size
++
++    def shdrs(self, resolve: bool=True) -> Shdr:
++        """Generator iterating over the section headers.
++
++        If resolve, section names are automatically translated
++        using the section header string table.
++
++        """
++        if self._shdr_num == 0:
++            return
++
++        size = self.ehdr.e_shentsize
++        if size != self.Shdr.size:
++            raise ValueError('Unexpected Shdr size in ELF header: {} != {}'
++                             .format(size, self.Shdr.size))
++
++        offset = self.ehdr.e_shoff
++        for _ in range(self._shdr_num):
++            shdr = self.read(self.Shdr, offset)
++            if resolve:
++                shdr = shdr.resolve(self._shdr_strtab)
++            yield shdr
++            offset += size
++
++    def dynamic(self) -> Dyn:
++        """Generator iterating over the dynamic segment."""
++        for phdr in self.phdrs():
++            if phdr.p_type == Pt.PT_DYNAMIC:
++                # Pick the first dynamic segment, like the loader.
++                if phdr.p_filesz == 0:
++                    # Probably separated debuginfo.
++                    return
++                offset = phdr.p_offset
++                end = offset + phdr.p_memsz
++                size = self.Dyn.size
++                while True:
++                    next_offset = offset + size
++                    if next_offset > end:
++                        raise ValueError(
++                            'Dynamic segment size {} is not a multiple of Dyn size {}'.format(
++                                phdr.p_memsz, size))
++                    yield self.read(self.Dyn, offset)
++                    if next_offset == end:
++                        return
++                    offset = next_offset
++
++    def syms(self, shdr: Shdr, resolve: bool=True) -> Sym:
++        """A generator iterating over a symbol table.
++
++        If resolve, symbol names are automatically translated using
++        the string table for the symbol table.
++
++        """
++        assert shdr.sh_type == Sht.SHT_SYMTAB
++        size = shdr.sh_entsize
++        if size != self.Sym.size:
++            raise ValueError('Invalid symbol table entry size {}'.format(size))
++        offset = shdr.sh_offset
++        end = shdr.sh_offset + shdr.sh_size
++        if resolve:
++            strtab = self._find_stringtab(shdr.sh_link)
++        while offset < end:
++            sym = self.read(self.Sym, offset)
++            if resolve:
++                sym = sym.resolve(strtab)
++            yield sym
++            offset += size
++        if offset != end:
++            raise ValueError('Symbol table is not a multiple of entry size')
++
++    def lookup_string(self, strtab_index: int, strtab_offset: int) -> bytes:
++        """Looks up a string in a string table identified by its link index."""
++        try:
++            strtab = self._stringtab[strtab_index]
++        except KeyError:
++            strtab = self._find_stringtab(strtab_index)
++        return strtab.get(strtab_offset)
++
++    def find_section(self, shndx: Shn) -> Shdr:
++        """Returns the section header for the indexed section.
++
++        The section name is not resolved.
++        """
++        try:
++            return self._section[shndx]
++        except KeyError:
++            pass
++        if shndx in Shn:
++            raise ValueError('Reserved section index {}'.format(shndx))
++        idx = shndx.value
++        if idx < 0 or idx > self._shdr_num:
++            raise ValueError('Section index {} out of range [0, {})'.format(
++                idx, self._shdr_num))
++        shdr = self.read(
++            self.Shdr, self.ehdr.e_shoff + idx * self.Shdr.size)
++        self._section[shndx] = shdr
++        return shdr
++
++    def _find_stringtab(self, sh_link: int) -> StringTable:
++        if sh_link in self._stringtab:
++            return self._stringtab
++        if sh_link < 0 or sh_link >= self._shdr_num:
++            raise ValueError('Section index {} out of range [0, {})'.format(
++                sh_link, self._shdr_num))
++        shdr = self.read(
++            self.Shdr, self.ehdr.e_shoff + sh_link * self.Shdr.size)
++        if shdr.sh_type != Sht.SHT_STRTAB:
++            raise ValueError(
++                'Section {} is not a string table: {}'.format(
++                    sh_link, shdr.sh_type))
++        strtab = StringTable(
++            self.image[shdr.sh_offset:shdr.sh_offset + shdr.sh_size])
++        # This could retain essentially arbitrary amounts of data,
++        # but caching string tables seems important for performance.
++        self._stringtab[sh_link] = strtab
++        return strtab
++
++
++__all__ = [name for name in dir() if name[0].isupper()]
diff --git a/SOURCES/glibc-rh2109510-11.patch b/SOURCES/glibc-rh2109510-11.patch
new file mode 100644
index 0000000..c7e08fc
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-11.patch
@@ -0,0 +1,409 @@
+commit 198abcbb94618730dae1b3f4393efaa49e0ec8c7
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon Apr 11 11:30:31 2022 +0200
+
+    Default to --with-default-link=no (bug 25812)
+
+    This is necessary to place the libio vtables into the RELRO segment.
+    New tests elf/tst-relro-ldso and elf/tst-relro-libc are added to
+    verify that this is what actually happens.
+
+    The new tests fail on ia64 due to lack of (default) RELRO support
+    in binutils, so they are XFAILed there.
+
+Conflicts:
+	elf/Makefile
+	  (missing valgrind smoke test)
+
+diff --git a/INSTALL b/INSTALL
+index b3a4370f592c5047..b69672b283c0b774 100644
+--- a/INSTALL
++++ b/INSTALL
+@@ -90,6 +90,12 @@ if 'CFLAGS' is specified it must enable optimization.  For example:
+      library will still be usable, but functionality may be lost--for
+      example, you can't build a shared libc with old binutils.
+ 
++'--with-default-link=FLAG'
++     With '--with-default-link=yes', the build system does not use a
++     custom linker script for linking shared objects.  The default for
++     FLAG is the opposite, 'no', because the custom linker script is
++     needed for full RELRO protection.
++
+ '--with-nonshared-cflags=CFLAGS'
+      Use additional compiler flags CFLAGS to build the parts of the
+      library which are always statically linked into applications and
+diff --git a/configure b/configure
+index 8b3681d2e28310c8..c794cea4359b3da3 100755
+--- a/configure
++++ b/configure
+@@ -3339,7 +3339,7 @@ fi
+ if test "${with_default_link+set}" = set; then :
+   withval=$with_default_link; use_default_link=$withval
+ else
+-  use_default_link=default
++  use_default_link=no
+ fi
+ 
+ 
+@@ -5965,69 +5965,6 @@ fi
+ $as_echo "$libc_cv_hashstyle" >&6; }
+ 
+ 
+-# The linker's default -shared behavior is good enough if it
+-# does these things that our custom linker scripts ensure that
+-# all allocated NOTE sections come first.
+-if test "$use_default_link" = default; then
+-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sufficient default -shared layout" >&5
+-$as_echo_n "checking for sufficient default -shared layout... " >&6; }
+-if ${libc_cv_use_default_link+:} false; then :
+-  $as_echo_n "(cached) " >&6
+-else
+-    libc_cv_use_default_link=no
+-  cat > conftest.s <<\EOF
+-	  .section .note.a,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "foo"
+-	  .section .note.b,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "bar"
+-EOF
+-  if { ac_try='  ${CC-cc} $ASFLAGS -shared -o conftest.so conftest.s 1>&5'
+-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+-  (eval $ac_try) 2>&5
+-  ac_status=$?
+-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }; } &&
+-       ac_try=`$READELF -S conftest.so | sed -n \
+-	 '${x;p;}
+-	  s/^ *\[ *[1-9][0-9]*\]  *\([^ ][^ ]*\)  *\([^ ][^ ]*\) .*$/\2 \1/
+-	  t a
+-	  b
+-	  : a
+-	  H'`
+-  then
+-    libc_seen_a=no libc_seen_b=no
+-    set -- $ac_try
+-    while test $# -ge 2 -a "$1" = NOTE; do
+-      case "$2" in
+-      .note.a) libc_seen_a=yes ;;
+-      .note.b) libc_seen_b=yes ;;
+-      esac
+-      shift 2
+-    done
+-    case "$libc_seen_a$libc_seen_b" in
+-    yesyes)
+-      libc_cv_use_default_link=yes
+-      ;;
+-    *)
+-      echo >&5 "\
+-$libc_seen_a$libc_seen_b from:
+-$ac_try"
+-      ;;
+-    esac
+-  fi
+-  rm -f conftest*
+-fi
+-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_use_default_link" >&5
+-$as_echo "$libc_cv_use_default_link" >&6; }
+-  use_default_link=$libc_cv_use_default_link
+-fi
+-
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOB_DAT reloc" >&5
+ $as_echo_n "checking for GLOB_DAT reloc... " >&6; }
+ if ${libc_cv_has_glob_dat+:} false; then :
+diff --git a/configure.ac b/configure.ac
+index 82d9ab2fb67145bb..52429d82344954b3 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -152,7 +152,7 @@ AC_ARG_WITH([default-link],
+ 	    AC_HELP_STRING([--with-default-link],
+ 			   [do not use explicit linker scripts]),
+ 	    [use_default_link=$withval],
+-	    [use_default_link=default])
++	    [use_default_link=no])
+ 
+ dnl Additional build flags injection.
+ AC_ARG_WITH([nonshared-cflags],
+@@ -1352,59 +1352,6 @@ fi
+ rm -f conftest*])
+ AC_SUBST(libc_cv_hashstyle)
+ 
+-# The linker's default -shared behavior is good enough if it
+-# does these things that our custom linker scripts ensure that
+-# all allocated NOTE sections come first.
+-if test "$use_default_link" = default; then
+-  AC_CACHE_CHECK([for sufficient default -shared layout],
+-		  libc_cv_use_default_link, [dnl
+-  libc_cv_use_default_link=no
+-  cat > conftest.s <<\EOF
+-	  .section .note.a,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "foo"
+-	  .section .note.b,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "bar"
+-EOF
+-  if AC_TRY_COMMAND([dnl
+-  ${CC-cc} $ASFLAGS -shared -o conftest.so conftest.s 1>&AS_MESSAGE_LOG_FD]) &&
+-       ac_try=`$READELF -S conftest.so | sed -n \
+-	 ['${x;p;}
+-	  s/^ *\[ *[1-9][0-9]*\]  *\([^ ][^ ]*\)  *\([^ ][^ ]*\) .*$/\2 \1/
+-	  t a
+-	  b
+-	  : a
+-	  H']`
+-  then
+-    libc_seen_a=no libc_seen_b=no
+-    set -- $ac_try
+-    while test $# -ge 2 -a "$1" = NOTE; do
+-      case "$2" in
+-      .note.a) libc_seen_a=yes ;;
+-      .note.b) libc_seen_b=yes ;;
+-      esac
+-      shift 2
+-    done
+-    case "$libc_seen_a$libc_seen_b" in
+-    yesyes)
+-      libc_cv_use_default_link=yes
+-      ;;
+-    *)
+-      echo >&AS_MESSAGE_LOG_FD "\
+-$libc_seen_a$libc_seen_b from:
+-$ac_try"
+-      ;;
+-    esac
+-  fi
+-  rm -f conftest*])
+-  use_default_link=$libc_cv_use_default_link
+-fi
+-
+ AC_CACHE_CHECK(for GLOB_DAT reloc,
+ 	       libc_cv_has_glob_dat, [dnl
+ cat > conftest.c <<EOF
+diff --git a/elf/Makefile b/elf/Makefile
+index 89ce4f5196e5eb39..1fdf40cbd49e233e 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -477,6 +477,40 @@ tests-execstack-yes = \
+   # tests-execstack-yes
+ endif
+ endif
++
++tests-special += $(objpfx)tst-relro-ldso.out $(objpfx)tst-relro-libc.out
++$(objpfx)tst-relro-ldso.out: tst-relro-symbols.py $(..)/scripts/glibcelf.py \
++  $(objpfx)ld.so
++	$(PYTHON) tst-relro-symbols.py $(objpfx)ld.so \
++	  --required=_rtld_global_ro \
++	  > $@ 2>&1; $(evaluate-test)
++# The optional symbols are present in libc only if the architecture has
++# the GLIBC_2.0 symbol set in libc.
++$(objpfx)tst-relro-libc.out: tst-relro-symbols.py $(..)/scripts/glibcelf.py \
++  $(common-objpfx)libc.so
++	$(PYTHON) tst-relro-symbols.py $(common-objpfx)libc.so \
++	    --required=_IO_cookie_jumps \
++	    --required=_IO_file_jumps \
++	    --required=_IO_file_jumps_maybe_mmap \
++	    --required=_IO_file_jumps_mmap \
++	    --required=_IO_helper_jumps \
++	    --required=_IO_mem_jumps \
++	    --required=_IO_obstack_jumps \
++	    --required=_IO_proc_jumps \
++	    --required=_IO_str_chk_jumps \
++	    --required=_IO_str_jumps \
++	    --required=_IO_strn_jumps \
++	    --required=_IO_wfile_jumps \
++	    --required=_IO_wfile_jumps_maybe_mmap \
++	    --required=_IO_wfile_jumps_mmap \
++	    --required=_IO_wmem_jumps \
++	    --required=_IO_wstr_jumps \
++	    --required=_IO_wstrn_jumps \
++	    --optional=_IO_old_cookie_jumps \
++	    --optional=_IO_old_file_jumps \
++	    --optional=_IO_old_proc_jumps \
++	  > $@ 2>&1; $(evaluate-test)
++
+ tests += $(tests-execstack-$(have-z-execstack))
+ ifeq ($(run-built-tests),yes)
+ tests-special += \
+diff --git a/elf/tst-relro-symbols.py b/elf/tst-relro-symbols.py
+new file mode 100644
+index 0000000000000000..368ea3349f86bd81
+--- /dev/null
++++ b/elf/tst-relro-symbols.py
+@@ -0,0 +1,137 @@
++#!/usr/bin/python3
++# Verify that certain symbols are covered by RELRO.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++"""Analyze a (shared) object to verify that certain symbols are
++present and covered by the PT_GNU_RELRO segment.
++
++"""
++
++import argparse
++import os.path
++import sys
++
++# Make available glibc Python modules.
++sys.path.append(os.path.join(
++    os.path.dirname(os.path.realpath(__file__)), os.path.pardir, 'scripts'))
++
++import glibcelf
++
++def find_relro(path: str, img: glibcelf.Image) -> (int, int):
++    """Discover the address range of the PT_GNU_RELRO segment."""
++    for phdr in img.phdrs():
++        if phdr.p_type == glibcelf.Pt.PT_GNU_RELRO:
++            # The computation is not entirely accurate because
++            # _dl_protect_relro in elf/dl-reloc.c rounds both the
++            # start and the end downwards using the run-time page size.
++            return phdr.p_vaddr, phdr.p_vaddr + phdr.p_memsz
++    sys.stdout.write('{}: error: no PT_GNU_RELRO segment\n'.format(path))
++    sys.exit(1)
++
++def check_in_relro(kind, relro_begin, relro_end, name, start, size, error):
++    """Check if a section or symbol falls within in the RELRO segment."""
++    end = start + size - 1
++    if not (relro_begin <= start < end < relro_end):
++        error(
++            '{} {!r} of size {} at 0x{:x} is not in RELRO range [0x{:x}, 0x{:x})'.format(
++                kind, name.decode('UTF-8'), start, size,
++                relro_begin, relro_end))
++
++def get_parser():
++    """Return an argument parser for this script."""
++    parser = argparse.ArgumentParser(description=__doc__)
++    parser.add_argument('object', help='path to object file to check')
++    parser.add_argument('--required', metavar='NAME', default=(),
++                        help='required symbol names', nargs='*')
++    parser.add_argument('--optional', metavar='NAME', default=(),
++                        help='required symbol names', nargs='*')
++    return parser
++
++def main(argv):
++    """The main entry point."""
++    parser = get_parser()
++    opts = parser.parse_args(argv)
++    img = glibcelf.Image.readfile(opts.object)
++
++    required_symbols = frozenset([sym.encode('UTF-8')
++                                  for sym in opts.required])
++    optional_symbols = frozenset([sym.encode('UTF-8')
++                                  for sym in opts.optional])
++    check_symbols = required_symbols | optional_symbols
++
++    # Tracks the symbols in check_symbols that have been found.
++    symbols_found = set()
++
++    # Discover the extent of the RELRO segment.
++    relro_begin, relro_end = find_relro(opts.object, img)
++    symbol_table_found = False
++
++    errors = False
++    def error(msg: str) -> None:
++        """Record an error condition and write a message to standard output."""
++        nonlocal errors
++        errors = True
++        sys.stdout.write('{}: error: {}\n'.format(opts.object, msg))
++
++    # Iterate over section headers to find the symbol table.
++    for shdr in img.shdrs():
++        if shdr.sh_type == glibcelf.Sht.SHT_SYMTAB:
++            symbol_table_found = True
++            for sym in img.syms(shdr):
++                if sym.st_name in check_symbols:
++                    symbols_found.add(sym.st_name)
++
++                    # Validate symbol type, section, and size.
++                    if sym.st_info.type != glibcelf.Stt.STT_OBJECT:
++                        error('symbol {!r} has wrong type {}'.format(
++                            sym.st_name.decode('UTF-8'), sym.st_info.type))
++                    if sym.st_shndx in glibcelf.Shn:
++                        error('symbol {!r} has reserved section {}'.format(
++                            sym.st_name.decode('UTF-8'), sym.st_shndx))
++                        continue
++                    if sym.st_size == 0:
++                        error('symbol {!r} has size zero'.format(
++                            sym.st_name.decode('UTF-8')))
++                        continue
++
++                    check_in_relro('symbol', relro_begin, relro_end,
++                                   sym.st_name, sym.st_value, sym.st_size,
++                                   error)
++            continue # SHT_SYMTAB
++        if shdr.sh_name == b'.data.rel.ro' \
++           or shdr.sh_name.startswith(b'.data.rel.ro.'):
++            check_in_relro('section', relro_begin, relro_end,
++                           shdr.sh_name, shdr.sh_addr, shdr.sh_size,
++                           error)
++            continue
++
++    if required_symbols - symbols_found:
++        for sym in sorted(required_symbols - symbols_found):
++            error('symbol {!r} not found'.format(sym.decode('UTF-8')))
++
++    if errors:
++        sys.exit(1)
++
++    if not symbol_table_found:
++        sys.stdout.write(
++            '{}: warning: no symbol table found (stripped object)\n'.format(
++                opts.object))
++        sys.exit(77)
++
++if __name__ == '__main__':
++    main(sys.argv[1:])
+diff --git a/manual/install.texi b/manual/install.texi
+index c262fd56d0cef67b..a2c43bd692de7825 100644
+--- a/manual/install.texi
++++ b/manual/install.texi
+@@ -117,6 +117,12 @@ problem and suppress these constructs, so that the library will still be
+ usable, but functionality may be lost---for example, you can't build a
+ shared libc with old binutils.
+ 
++@item --with-default-link=@var{FLAG}
++With @code{--with-default-link=yes}, the build system does not use a
++custom linker script for linking shared objects.  The default for
++@var{FLAG} is the opposite, @samp{no}, because the custom linker script
++is needed for full RELRO protection.
++
+ @item --with-nonshared-cflags=@var{cflags}
+ Use additional compiler flags @var{cflags} to build the parts of the
+ library which are always statically linked into applications and
+diff --git a/sysdeps/unix/sysv/linux/ia64/Makefile b/sysdeps/unix/sysv/linux/ia64/Makefile
+index 97fc7df0b122d6a0..b1ad1ab7b1efa34c 100644
+--- a/sysdeps/unix/sysv/linux/ia64/Makefile
++++ b/sysdeps/unix/sysv/linux/ia64/Makefile
+@@ -1,3 +1,9 @@
++ifeq ($(subdir),elf)
++# ia64 does not support PT_GNU_RELRO.
++test-xfail-tst-relro-ldso = yes
++test-xfail-tst-relro-libc = yes
++endif
++
+ ifeq ($(subdir),misc)
+ sysdep_headers += sys/rse.h
+ endif
diff --git a/SOURCES/glibc-rh2109510-12.patch b/SOURCES/glibc-rh2109510-12.patch
new file mode 100644
index 0000000..a580b1b
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-12.patch
@@ -0,0 +1,26 @@
+commit b571f3adffdcbed23f35ea39b0ca43809dbb4f5b
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Fri Apr 22 19:34:52 2022 +0200
+
+    scripts/glibcelf.py: Mark as UNSUPPORTED on Python 3.5 and earlier
+    
+    enum.IntFlag and enum.EnumMeta._missing_ support are not part of
+    earlier Python versions.
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 8f7d0ca184845714..da0d5380f33a195e 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -28,6 +28,12 @@ import collections
+ import enum
+ import struct
+ 
++if not hasattr(enum, 'IntFlag'):
++    import sys
++    sys.stdout.write(
++        'warning: glibcelf.py needs Python 3.6 for enum support\n')
++    sys.exit(77)
++
+ class _OpenIntEnum(enum.IntEnum):
+     """Integer enumeration that supports arbitrary int values."""
+     @classmethod
diff --git a/SOURCES/glibc-rh2109510-13.patch b/SOURCES/glibc-rh2109510-13.patch
new file mode 100644
index 0000000..8589a81
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-13.patch
@@ -0,0 +1,30 @@
+Partial backport of the scripts/glibcelf.py part of:
+
+commit 4610b24f5e4e6d2c4b769594efa6d460943163bb
+Author: H.J. Lu <hjl.tools@gmail.com>
+Date:   Tue Mar 29 14:08:54 2022 -0700
+
+    elf: Define DT_RELR related macros and types
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index da0d5380f33a195e..f847b36c55c15b8a 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -304,6 +304,7 @@ class Sht(_OpenIntEnum):
+     SHT_PREINIT_ARRAY = 16
+     SHT_GROUP = 17
+     SHT_SYMTAB_SHNDX = 18
++    SHT_RELR = 19
+     SHT_GNU_ATTRIBUTES = 0x6ffffff5
+     SHT_GNU_HASH = 0x6ffffff6
+     SHT_GNU_LIBLIST = 0x6ffffff7
+@@ -593,6 +594,9 @@ class Dt(_OpenIntEnum):
+     DT_PREINIT_ARRAY = 32
+     DT_PREINIT_ARRAYSZ = 33
+     DT_SYMTAB_SHNDX = 34
++    DT_RELRSZ = 35
++    DT_RELR = 36
++    DT_RELRENT = 37
+     DT_GNU_PRELINKED = 0x6ffffdf5
+     DT_GNU_CONFLICTSZ = 0x6ffffdf6
+     DT_GNU_LIBLISTSZ = 0x6ffffdf7
diff --git a/SOURCES/glibc-rh2109510-14.patch b/SOURCES/glibc-rh2109510-14.patch
new file mode 100644
index 0000000..9448450
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-14.patch
@@ -0,0 +1,50 @@
+commit d055481ce39d03652ac60de5078889e15b6917ff
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon May 16 21:59:24 2022 +0200
+
+    scripts/glibcelf.py: Add *T_RISCV_* constants
+    
+    SHT_RISCV_ATTRIBUTES, PT_RISCV_ATTRIBUTES, DT_RISCV_VARIANT_CC were
+    added in commit 0b6c6750732483b4d59c2fcb45484079cd84157d
+    ("Update RISC-V specific ELF definitions").  This caused the
+    elf/tst-glibcelf consistency check to fail.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index f847b36c55c15b8a..07bef940433b4c99 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -385,6 +385,10 @@ class ShtPARISC(enum.Enum):
+     SHT_PARISC_UNWIND = 0x70000001
+     SHT_PARISC_DOC = 0x70000002
+ 
++class ShtRISCV(enum.Enum):
++    """Supplemental SHT_* constants for EM_RISCV."""
++    SHT_RISCV_ATTRIBUTES = 0x70000003
++
+ class Pf(enum.IntFlag):
+     """Program header flags.  Type of Phdr.p_flags values."""
+     PF_X = 1
+@@ -558,6 +562,10 @@ class PtPARISC(enum.Enum):
+     PT_PARISC_ARCHEXT = 0x70000000
+     PT_PARISC_UNWIND = 0x70000001
+ 
++class PtRISCV(enum.Enum):
++    """Supplemental PT_* constants for EM_RISCV."""
++    PT_RISCV_ATTRIBUTES = 0x70000003
++
+ class Dt(_OpenIntEnum):
+     """ELF dynamic segment tags.  Type of Dyn.d_val."""
+     DT_NULL = 0
+@@ -710,6 +718,10 @@ class DtPPC64(enum.Enum):
+     DT_PPC64_OPDSZ = 0x70000002
+     DT_PPC64_OPT = 0x70000003
+ 
++class DtRISCV(enum.Enum):
++    """Supplemental DT_* constants for EM_RISCV."""
++    DT_RISCV_VARIANT_CC = 0x70000001
++
+ class DtSPARC(enum.Enum):
+     """Supplemental DT_* constants for EM_SPARC."""
+     DT_SPARC_REGISTER = 0x70000001
diff --git a/SOURCES/glibc-rh2109510-15.patch b/SOURCES/glibc-rh2109510-15.patch
new file mode 100644
index 0000000..7979be8
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-15.patch
@@ -0,0 +1,26 @@
+commit 8521001731d6539382fa875f1cac9864c466ef27
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Mon Jun 6 14:41:24 2022 -0300
+
+    scripts/glibcelf.py: Add PT_AARCH64_MEMTAG_MTE constant
+    
+    It was added in commit 603e5c8ba7257483c162cabb06eb6f79096429b6.
+    This caused the elf/tst-glibcelf consistency check to fail.
+    
+    Reviewed-by: Florian Weimer <fweimer@redhat.com>
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 07bef940433b4c99..47f95d07baefb4ae 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -523,6 +523,10 @@ class Pt(_OpenIntEnum):
+     PT_SUNWBSS = 0x6ffffffa
+     PT_SUNWSTACK = 0x6ffffffb
+ 
++class PtAARCH64(enum.Enum):
++    """Supplemental PT_* constants for EM_AARCH64."""
++    PT_AARCH64_MEMTAG_MTE = 0x70000002
++
+ class PtARM(enum.Enum):
+     """Supplemental PT_* constants for EM_ARM."""
+     PT_ARM_EXIDX = 0x70000001
diff --git a/SOURCES/glibc-rh2109510-16.patch b/SOURCES/glibc-rh2109510-16.patch
new file mode 100644
index 0000000..38416a0
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-16.patch
@@ -0,0 +1,22 @@
+Partial backport of the scripts/glibcelf.py part of:
+
+commit 2d83247d90c9f0bfee7f3f2505bc1b13b6f36c04
+Author: caiyinyu <caiyinyu@loongson.cn>
+Date:   Tue Jul 19 09:20:45 2022 +0800
+
+    LoongArch: Add relocations and ELF flags to elf.h and scripts/glibcelf.py
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 47f95d07baefb4ae..de0509130ed9ad47 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -252,7 +252,8 @@ class Machine(_OpenIntEnum):
+     EM_RISCV = 243
+     EM_BPF = 247
+     EM_CSKY = 252
+-    EM_NUM = 253
++    EM_LOONGARCH = 258
++    EM_NUM = 259
+     EM_ALPHA = 0x9026
+ 
+ class Et(_OpenIntEnum):
diff --git a/SOURCES/glibc-rh2109510-17.patch b/SOURCES/glibc-rh2109510-17.patch
new file mode 100644
index 0000000..a7e5a3a
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-17.patch
@@ -0,0 +1,78 @@
+commit bd13cb19f5e15e9e9a92a536e755fd93a97a67f6
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Fri Aug 19 11:16:32 2022 +0200
+
+    scripts/glibcelf.py: Add hashing support
+    
+    ELF and GNU hashes can now be computed using the elf_hash and
+    gnu_hash functions.
+    
+    Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+    Tested-by: Carlos O'Donell <carlos@redhat.com>
+
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+index bf15a3bad4479e08..e5026e2289df206b 100644
+--- a/elf/tst-glibcelf.py
++++ b/elf/tst-glibcelf.py
+@@ -240,6 +240,24 @@ def check_constant_values(cc):
+             error('{}: glibcelf has {!r}, <elf.h> has {!r}'.format(
+                 name, glibcelf_value, elf_h_value))
+ 
++def check_hashes():
++    for name, expected_elf, expected_gnu in (
++            ('', 0, 0x1505),
++            ('PPPPPPPPPPPP', 0, 0x9f105c45),
++            ('GLIBC_2.0', 0xd696910, 0xf66c3dd5),
++            ('GLIBC_2.34', 0x69691b4, 0xc3f3f90c),
++            ('GLIBC_PRIVATE', 0x963cf85, 0x692a260)):
++        for convert in (lambda x: x, lambda x: x.encode('UTF-8')):
++            name = convert(name)
++            actual_elf = glibcelf.elf_hash(name)
++            if actual_elf != expected_elf:
++                error('elf_hash({!r}): {:x} != 0x{:x}'.format(
++                    name, actual_elf, expected_elf))
++            actual_gnu = glibcelf.gnu_hash(name)
++            if actual_gnu != expected_gnu:
++                error('gnu_hash({!r}): {:x} != 0x{:x}'.format(
++                    name, actual_gnu, expected_gnu))
++
+ def main():
+     """The main entry point."""
+     parser = argparse.ArgumentParser(
+@@ -251,6 +269,7 @@ def main():
+     check_duplicates()
+     check_constant_prefixes()
+     check_constant_values(cc=args.cc)
++    check_hashes()
+ 
+     if errors_encountered > 0:
+         print("note: errors encountered:", errors_encountered)
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index de0509130ed9ad47..5c8f46f590722384 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -1158,5 +1158,24 @@ class Image:
+         self._stringtab[sh_link] = strtab
+         return strtab
+ 
++def elf_hash(s):
++    """Computes the ELF hash of the string."""
++    acc = 0
++    for ch in s:
++        if type(ch) is not int:
++            ch = ord(ch)
++        acc = ((acc << 4) + ch) & 0xffffffff
++        top = acc & 0xf0000000
++        acc = (acc ^ (top >> 24)) & ~top
++    return acc
++
++def gnu_hash(s):
++    """Computes the GNU hash of the string."""
++    h = 5381
++    for ch in s:
++        if type(ch) is not int:
++            ch = ord(ch)
++        h = (h * 33 + ch) & 0xffffffff
++    return h
+ 
+ __all__ = [name for name in dir() if name[0].isupper()]
diff --git a/SOURCES/glibc-rh2109510-18.patch b/SOURCES/glibc-rh2109510-18.patch
new file mode 100644
index 0000000..83172fa
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-18.patch
@@ -0,0 +1,439 @@
+commit f40c7887d3cc9bb0b56576ed9edbe505ff8058c0
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Sep 22 12:10:41 2022 +0200
+
+    scripts: Extract glibcpp.py from check-obsolete-constructs.py
+    
+    The C tokenizer is useful separately.
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
+index 89d21dea6e788783..7c7a092e440a3258 100755
+--- a/scripts/check-obsolete-constructs.py
++++ b/scripts/check-obsolete-constructs.py
+@@ -24,193 +24,14 @@
+ """
+ 
+ import argparse
+-import collections
++import os
+ import re
+ import sys
+ 
+-# Simplified lexical analyzer for C preprocessing tokens.
+-# Does not implement trigraphs.
+-# Does not implement backslash-newline in the middle of any lexical
+-#   item other than a string literal.
+-# Does not implement universal-character-names in identifiers.
+-# Treats prefixed strings (e.g. L"...") as two tokens (L and "...")
+-# Accepts non-ASCII characters only within comments and strings.
+-
+-# Caution: The order of the outermost alternation matters.
+-# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+-# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
+-# be last.
+-# Caution: There should be no capturing groups other than the named
+-# captures in the outermost alternation.
+-
+-# For reference, these are all of the C punctuators as of C11:
+-#   [ ] ( ) { } , ; ? ~
+-#   ! != * *= / /= ^ ^= = ==
+-#   # ##
+-#   % %= %> %: %:%:
+-#   & &= &&
+-#   | |= ||
+-#   + += ++
+-#   - -= -- ->
+-#   . ...
+-#   : :>
+-#   < <% <: << <<= <=
+-#   > >= >> >>=
+-
+-# The BAD_* tokens are not part of the official definition of pp-tokens;
+-# they match unclosed strings, character constants, and block comments,
+-# so that the regex engine doesn't have to backtrack all the way to the
+-# beginning of a broken construct and then emit dozens of junk tokens.
+-
+-PP_TOKEN_RE_ = re.compile(r"""
+-    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
+-   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
+-   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
+-   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
+-   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
+-   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
+-   |(?P<LINE_COMMENT>  //[^\r\n]*)
+-   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
+-   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
+-   |(?P<PUNCTUATOR>
+-       [,;?~(){}\[\]]
+-     | [!*/^=]=?
+-     | \#\#?
+-     | %(?:[=>]|:(?:%:)?)?
+-     | &[=&]?
+-     |\|[=|]?
+-     |\+[=+]?
+-     | -[=->]?
+-     |\.(?:\.\.)?
+-     | :>?
+-     | <(?:[%:]|<(?:=|<=?)?)?
+-     | >(?:=|>=?)?)
+-   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
+-   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
+-   |(?P<OTHER>         .)
+-""", re.DOTALL | re.VERBOSE)
+-
+-HEADER_NAME_RE_ = re.compile(r"""
+-    < [^>\r\n]+ >
+-  | " [^"\r\n]+ "
+-""", re.DOTALL | re.VERBOSE)
+-
+-ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
+-
+-# based on the sample code in the Python re documentation
+-Token_ = collections.namedtuple("Token", (
+-    "kind", "text", "line", "column", "context"))
+-Token_.__doc__ = """
+-   One C preprocessing token, comment, or chunk of whitespace.
+-   'kind' identifies the token type, which will be one of:
+-       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
+-       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
+-       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
+-       handled within tokenize_c, below.
+-
+-   'text' is the sequence of source characters making up the token;
+-       no decoding whatsoever is performed.
+-
+-   'line' and 'column' give the position of the first character of the
+-      token within the source file.  They are both 1-based.
+-
+-   'context' indicates whether or not this token occurred within a
+-      preprocessing directive; it will be None for running text,
+-      '<null>' for the leading '#' of a directive line (because '#'
+-      all by itself on a line is a "null directive"), or the name of
+-      the directive for tokens within a directive line, starting with
+-      the IDENT for the name itself.
+-"""
+-
+-def tokenize_c(file_contents, reporter):
+-    """Yield a series of Token objects, one for each preprocessing
+-       token, comment, or chunk of whitespace within FILE_CONTENTS.
+-       The REPORTER object is expected to have one method,
+-       reporter.error(token, message), which will be called to
+-       indicate a lexical error at the position of TOKEN.
+-       If MESSAGE contains the four-character sequence '{!r}', that
+-       is expected to be replaced by repr(token.text).
+-    """
++# Make available glibc Python modules.
++sys.path.append(os.path.dirname(os.path.realpath(__file__)))
+ 
+-    Token = Token_
+-    PP_TOKEN_RE = PP_TOKEN_RE_
+-    ENDLINE_RE = ENDLINE_RE_
+-    HEADER_NAME_RE = HEADER_NAME_RE_
+-
+-    line_num = 1
+-    line_start = 0
+-    pos = 0
+-    limit = len(file_contents)
+-    directive = None
+-    at_bol = True
+-    while pos < limit:
+-        if directive == "include":
+-            mo = HEADER_NAME_RE.match(file_contents, pos)
+-            if mo:
+-                kind = "HEADER_NAME"
+-                directive = "after_include"
+-            else:
+-                mo = PP_TOKEN_RE.match(file_contents, pos)
+-                kind = mo.lastgroup
+-                if kind != "WHITESPACE":
+-                    directive = "after_include"
+-        else:
+-            mo = PP_TOKEN_RE.match(file_contents, pos)
+-            kind = mo.lastgroup
+-
+-        text = mo.group()
+-        line = line_num
+-        column = mo.start() - line_start
+-        adj_line_start = 0
+-        # only these kinds can contain a newline
+-        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
+-                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
+-            for tmo in ENDLINE_RE.finditer(text):
+-                line_num += 1
+-                adj_line_start = tmo.end()
+-            if adj_line_start:
+-                line_start = mo.start() + adj_line_start
+-
+-        # Track whether or not we are scanning a preprocessing directive.
+-        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
+-            at_bol = True
+-            directive = None
+-        else:
+-            if kind == "PUNCTUATOR" and text == "#" and at_bol:
+-                directive = "<null>"
+-            elif kind == "IDENT" and directive == "<null>":
+-                directive = text
+-            at_bol = False
+-
+-        # Report ill-formed tokens and rewrite them as their well-formed
+-        # equivalents, so downstream processing doesn't have to know about them.
+-        # (Rewriting instead of discarding provides better error recovery.)
+-        if kind == "BAD_BLOCK_COM":
+-            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
+-                           "unclosed block comment")
+-            text += "*/"
+-            kind = "BLOCK_COMMENT"
+-        elif kind == "BAD_STRING":
+-            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
+-                           "unclosed string")
+-            text += "\""
+-            kind = "STRING"
+-        elif kind == "BAD_CHARCONST":
+-            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
+-                           "unclosed char constant")
+-            text += "'"
+-            kind = "CHARCONST"
+-
+-        tok = Token(kind, text, line, column+1,
+-                    "include" if directive == "after_include" else directive)
+-        # Do not complain about OTHER tokens inside macro definitions.
+-        # $ and @ appear in macros defined by headers intended to be
+-        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
+-        if kind == "OTHER" and directive != "define":
+-            self.error(tok, "stray {!r} in program")
+-
+-        yield tok
+-        pos = mo.end()
++import glibcpp
+ 
+ #
+ # Base and generic classes for individual checks.
+@@ -446,7 +267,7 @@ class HeaderChecker:
+ 
+         typedef_checker = ObsoleteTypedefChecker(self, self.fname)
+ 
+-        for tok in tokenize_c(contents, self):
++        for tok in glibcpp.tokenize_c(contents, self):
+             typedef_checker.examine(tok)
+ 
+ def main():
+diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
+new file mode 100644
+index 0000000000000000..b44c6a4392dde8ce
+--- /dev/null
++++ b/scripts/glibcpp.py
+@@ -0,0 +1,212 @@
++#! /usr/bin/python3
++# Approximation to C preprocessing.
++# Copyright (C) 2019-2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++"""
++Simplified lexical analyzer for C preprocessing tokens.
++
++Does not implement trigraphs.
++
++Does not implement backslash-newline in the middle of any lexical
++item other than a string literal.
++
++Does not implement universal-character-names in identifiers.
++
++Treats prefixed strings (e.g. L"...") as two tokens (L and "...").
++
++Accepts non-ASCII characters only within comments and strings.
++"""
++
++import collections
++import re
++
++# Caution: The order of the outermost alternation matters.
++# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
++# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
++# be last.
++# Caution: There should be no capturing groups other than the named
++# captures in the outermost alternation.
++
++# For reference, these are all of the C punctuators as of C11:
++#   [ ] ( ) { } , ; ? ~
++#   ! != * *= / /= ^ ^= = ==
++#   # ##
++#   % %= %> %: %:%:
++#   & &= &&
++#   | |= ||
++#   + += ++
++#   - -= -- ->
++#   . ...
++#   : :>
++#   < <% <: << <<= <=
++#   > >= >> >>=
++
++# The BAD_* tokens are not part of the official definition of pp-tokens;
++# they match unclosed strings, character constants, and block comments,
++# so that the regex engine doesn't have to backtrack all the way to the
++# beginning of a broken construct and then emit dozens of junk tokens.
++
++PP_TOKEN_RE_ = re.compile(r"""
++    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
++   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
++   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
++   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
++   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
++   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
++   |(?P<LINE_COMMENT>  //[^\r\n]*)
++   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
++   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
++   |(?P<PUNCTUATOR>
++       [,;?~(){}\[\]]
++     | [!*/^=]=?
++     | \#\#?
++     | %(?:[=>]|:(?:%:)?)?
++     | &[=&]?
++     |\|[=|]?
++     |\+[=+]?
++     | -[=->]?
++     |\.(?:\.\.)?
++     | :>?
++     | <(?:[%:]|<(?:=|<=?)?)?
++     | >(?:=|>=?)?)
++   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
++   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
++   |(?P<OTHER>         .)
++""", re.DOTALL | re.VERBOSE)
++
++HEADER_NAME_RE_ = re.compile(r"""
++    < [^>\r\n]+ >
++  | " [^"\r\n]+ "
++""", re.DOTALL | re.VERBOSE)
++
++ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
++
++# based on the sample code in the Python re documentation
++Token_ = collections.namedtuple("Token", (
++    "kind", "text", "line", "column", "context"))
++Token_.__doc__ = """
++   One C preprocessing token, comment, or chunk of whitespace.
++   'kind' identifies the token type, which will be one of:
++       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
++       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
++       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
++       handled within tokenize_c, below.
++
++   'text' is the sequence of source characters making up the token;
++       no decoding whatsoever is performed.
++
++   'line' and 'column' give the position of the first character of the
++      token within the source file.  They are both 1-based.
++
++   'context' indicates whether or not this token occurred within a
++      preprocessing directive; it will be None for running text,
++      '<null>' for the leading '#' of a directive line (because '#'
++      all by itself on a line is a "null directive"), or the name of
++      the directive for tokens within a directive line, starting with
++      the IDENT for the name itself.
++"""
++
++def tokenize_c(file_contents, reporter):
++    """Yield a series of Token objects, one for each preprocessing
++       token, comment, or chunk of whitespace within FILE_CONTENTS.
++       The REPORTER object is expected to have one method,
++       reporter.error(token, message), which will be called to
++       indicate a lexical error at the position of TOKEN.
++       If MESSAGE contains the four-character sequence '{!r}', that
++       is expected to be replaced by repr(token.text).
++    """
++
++    Token = Token_
++    PP_TOKEN_RE = PP_TOKEN_RE_
++    ENDLINE_RE = ENDLINE_RE_
++    HEADER_NAME_RE = HEADER_NAME_RE_
++
++    line_num = 1
++    line_start = 0
++    pos = 0
++    limit = len(file_contents)
++    directive = None
++    at_bol = True
++    while pos < limit:
++        if directive == "include":
++            mo = HEADER_NAME_RE.match(file_contents, pos)
++            if mo:
++                kind = "HEADER_NAME"
++                directive = "after_include"
++            else:
++                mo = PP_TOKEN_RE.match(file_contents, pos)
++                kind = mo.lastgroup
++                if kind != "WHITESPACE":
++                    directive = "after_include"
++        else:
++            mo = PP_TOKEN_RE.match(file_contents, pos)
++            kind = mo.lastgroup
++
++        text = mo.group()
++        line = line_num
++        column = mo.start() - line_start
++        adj_line_start = 0
++        # only these kinds can contain a newline
++        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
++                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
++            for tmo in ENDLINE_RE.finditer(text):
++                line_num += 1
++                adj_line_start = tmo.end()
++            if adj_line_start:
++                line_start = mo.start() + adj_line_start
++
++        # Track whether or not we are scanning a preprocessing directive.
++        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
++            at_bol = True
++            directive = None
++        else:
++            if kind == "PUNCTUATOR" and text == "#" and at_bol:
++                directive = "<null>"
++            elif kind == "IDENT" and directive == "<null>":
++                directive = text
++            at_bol = False
++
++        # Report ill-formed tokens and rewrite them as their well-formed
++        # equivalents, so downstream processing doesn't have to know about them.
++        # (Rewriting instead of discarding provides better error recovery.)
++        if kind == "BAD_BLOCK_COM":
++            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
++                           "unclosed block comment")
++            text += "*/"
++            kind = "BLOCK_COMMENT"
++        elif kind == "BAD_STRING":
++            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
++                           "unclosed string")
++            text += "\""
++            kind = "STRING"
++        elif kind == "BAD_CHARCONST":
++            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
++                           "unclosed char constant")
++            text += "'"
++            kind = "CHARCONST"
++
++        tok = Token(kind, text, line, column+1,
++                    "include" if directive == "after_include" else directive)
++        # Do not complain about OTHER tokens inside macro definitions.
++        # $ and @ appear in macros defined by headers intended to be
++        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
++        if kind == "OTHER" and directive != "define":
++            self.error(tok, "stray {!r} in program")
++
++        yield tok
++        pos = mo.end()
diff --git a/SOURCES/glibc-rh2109510-19.patch b/SOURCES/glibc-rh2109510-19.patch
new file mode 100644
index 0000000..f77b415
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-19.patch
@@ -0,0 +1,598 @@
+commit e6e6184bed490403811771fa527eb95b4ae53c7c
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Sep 22 12:10:41 2022 +0200
+
+    scripts: Enhance glibcpp to do basic macro processing
+
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Conflicts:
+	support/Makefile
+	  (spurious tests sorting change upstream)
+
+diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
+index b44c6a4392dde8ce..455459a609eab120 100644
+--- a/scripts/glibcpp.py
++++ b/scripts/glibcpp.py
+@@ -33,7 +33,9 @@ Accepts non-ASCII characters only within comments and strings.
+ """
+ 
+ import collections
++import operator
+ import re
++import sys
+ 
+ # Caution: The order of the outermost alternation matters.
+ # STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+@@ -210,3 +212,318 @@ def tokenize_c(file_contents, reporter):
+ 
+         yield tok
+         pos = mo.end()
++
++class MacroDefinition(collections.namedtuple('MacroDefinition',
++                                             'name_token args body error')):
++    """A preprocessor macro definition.
++
++    name_token is the Token_ for the name.
++
++    args is None for a macro that is not function-like.  Otherwise, it
++    is a tuple that contains the macro argument name tokens.
++
++    body is a tuple that contains the tokens that constitue the body
++    of the macro definition (excluding whitespace).
++
++    error is None if no error was detected, or otherwise a problem
++    description associated with this macro definition.
++
++    """
++
++    @property
++    def function(self):
++        """Return true if the macro is function-like."""
++        return self.args is not None
++
++    @property
++    def name(self):
++        """Return the name of the macro being defined."""
++        return self.name_token.text
++
++    @property
++    def line(self):
++        """Return the line number of the macro defintion."""
++        return self.name_token.line
++
++    @property
++    def args_lowered(self):
++        """Return the macro argument list as a list of strings"""
++        if self.function:
++            return [token.text for token in self.args]
++        else:
++            return None
++
++    @property
++    def body_lowered(self):
++        """Return the macro body as a list of strings."""
++        return [token.text for token in self.body]
++
++def macro_definitions(tokens):
++    """A generator for C macro definitions among tokens.
++
++    The generator yields MacroDefinition objects.
++
++    tokens must be iterable, yielding Token_ objects.
++
++    """
++
++    macro_name = None
++    macro_start = False # Set to false after macro name and one otken.
++    macro_args = None # Set to a list during the macro argument sequence.
++    in_macro_args = False # True while processing macro identifier-list.
++    error = None
++    body = []
++
++    for token in tokens:
++        if token.context == 'define' and macro_name is None \
++           and token.kind == 'IDENT':
++            # Starting up macro processing.
++            if macro_start:
++                # First identifier is the macro name.
++                macro_name = token
++            else:
++                # Next token is the name.
++                macro_start = True
++            continue
++
++        if macro_name is None:
++            # Drop tokens not in macro definitions.
++            continue
++
++        if token.context != 'define':
++            # End of the macro definition.
++            if in_macro_args and error is None:
++                error = 'macro definition ends in macro argument list'
++            yield MacroDefinition(macro_name, macro_args, tuple(body), error)
++            # No longer in a macro definition.
++            macro_name = None
++            macro_start = False
++            macro_args = None
++            in_macro_args = False
++            error = None
++            body.clear()
++            continue
++
++        if macro_start:
++            # First token after the macro name.
++            macro_start = False
++            if token.kind == 'PUNCTUATOR' and token.text == '(':
++                macro_args = []
++                in_macro_args = True
++            continue
++
++        if in_macro_args:
++            if token.kind == 'IDENT' \
++               or (token.kind == 'PUNCTUATOR' and token.text == '...'):
++                # Macro argument or ... placeholder.
++                macro_args.append(token)
++            if token.kind == 'PUNCTUATOR':
++                if token.text == ')':
++                    macro_args = tuple(macro_args)
++                    in_macro_args = False
++                elif token.text == ',':
++                    pass # Skip.  Not a full syntax check.
++                elif error is None:
++                    error = 'invalid punctuator in macro argument list: ' \
++                        + repr(token.text)
++            elif error is None:
++                error = 'invalid {} token in macro argument list'.format(
++                    token.kind)
++            continue
++
++        if token.kind not in ('WHITESPACE', 'BLOCK_COMMENT'):
++            body.append(token)
++
++    # Emit the macro in case the last line does not end with a newline.
++    if macro_name is not None:
++        if in_macro_args and error is None:
++            error = 'macro definition ends in macro argument list'
++        yield MacroDefinition(macro_name, macro_args, tuple(body), error)
++
++# Used to split UL etc. suffixes from numbers such as 123UL.
++RE_SPLIT_INTEGER_SUFFIX = re.compile(r'([^ullULL]+)([ullULL]*)')
++
++BINARY_OPERATORS = {
++    '+': operator.add,
++    '<<': operator.lshift,
++}
++
++# Use the general-purpose dict type if it is order-preserving.
++if (sys.version_info[0], sys.version_info[1]) <= (3, 6):
++    OrderedDict = collections.OrderedDict
++else:
++    OrderedDict = dict
++
++def macro_eval(macro_defs, reporter):
++    """Compute macro values
++
++    macro_defs is the output from macro_definitions.  reporter is an
++    object that accepts reporter.error(line_number, message) and
++    reporter.note(line_number, message) calls to report errors
++    and error context invocations.
++
++    The returned dict contains the values of macros which are not
++    function-like, pairing their names with their computed values.
++
++    The current implementation is incomplete.  It is deliberately not
++    entirely faithful to C, even in the implemented parts.  It checks
++    that macro replacements follow certain syntactic rules even if
++    they are never evaluated.
++
++    """
++
++    # Unevaluated macro definitions by name.
++    definitions = OrderedDict()
++    for md in macro_defs:
++        if md.name in definitions:
++            reporter.error(md.line, 'macro {} redefined'.format(md.name))
++            reporter.note(definitions[md.name].line,
++                          'location of previous definition')
++        else:
++            definitions[md.name] = md
++
++    # String to value mappings for fully evaluated macros.
++    evaluated = OrderedDict()
++
++    # String to macro definitions during evaluation.  Nice error
++    # reporting relies on determinstic iteration order.
++    stack = OrderedDict()
++
++    def eval_token(current, token):
++        """Evaluate one macro token.
++
++        Integers and strings are returned as such (the latter still
++        quoted).  Identifiers are expanded.
++
++        None indicates an empty expansion or an error.
++
++        """
++
++        if token.kind == 'PP_NUMBER':
++            value = None
++            m = RE_SPLIT_INTEGER_SUFFIX.match(token.text)
++            if m:
++                try:
++                    value = int(m.group(1), 0)
++                except ValueError:
++                    pass
++            if value is None:
++                reporter.error(token.line,
++                    'invalid number {!r} in definition of {}'.format(
++                        token.text, current.name))
++            return value
++
++        if token.kind == 'STRING':
++            return token.text
++
++        if token.kind == 'CHARCONST' and len(token.text) == 3:
++            return ord(token.text[1])
++
++        if token.kind == 'IDENT':
++            name = token.text
++            result = eval1(current, name)
++            if name not in evaluated:
++                evaluated[name] = result
++            return result
++
++        reporter.error(token.line,
++            'unrecognized {!r} in definition of {}'.format(
++                token.text, current.name))
++        return None
++
++
++    def eval1(current, name):
++        """Evaluate one name.
++
++        The name is looked up and the macro definition evaluated
++        recursively if necessary.  The current argument is the macro
++        definition being evaluated.
++
++        None as a return value indicates an error.
++
++        """
++
++        # Fast path if the value has already been evaluated.
++        if name in evaluated:
++            return evaluated[name]
++
++        try:
++            md = definitions[name]
++        except KeyError:
++            reporter.error(current.line,
++                'reference to undefined identifier {} in definition of {}'
++                           .format(name, current.name))
++            return None
++
++        if md.name in stack:
++            # Recursive macro definition.
++            md = stack[name]
++            reporter.error(md.line,
++                'macro definition {} refers to itself'.format(md.name))
++            for md1 in reversed(list(stack.values())):
++                if md1 is md:
++                    break
++                reporter.note(md1.line,
++                              'evaluated from {}'.format(md1.name))
++            return None
++
++        stack[md.name] = md
++        if md.function:
++            reporter.error(current.line,
++                'attempt to evaluate function-like macro {}'.format(name))
++            reporter.note(md.line, 'definition of {}'.format(md.name))
++            return None
++
++        try:
++            body = md.body
++            if len(body) == 0:
++                # Empty expansion.
++                return None
++
++            # Remove surrounding ().
++            if body[0].text == '(' and body[-1].text == ')':
++                body = body[1:-1]
++                had_parens = True
++            else:
++                had_parens = False
++
++            if len(body) == 1:
++                return eval_token(md, body[0])
++
++            # Minimal expression evaluator for binary operators.
++            op = body[1].text
++            if len(body) == 3 and op in BINARY_OPERATORS:
++                if not had_parens:
++                    reporter.error(body[1].line,
++                        'missing parentheses around {} expression'.format(op))
++                    reporter.note(md.line,
++                                  'in definition of macro {}'.format(md.name))
++
++                left = eval_token(md, body[0])
++                right = eval_token(md, body[2])
++
++                if type(left) != type(1):
++                    reporter.error(left.line,
++                        'left operand of {} is not an integer'.format(op))
++                    reporter.note(md.line,
++                                  'in definition of macro {}'.format(md.name))
++                if type(right) != type(1):
++                    reporter.error(left.line,
++                        'right operand of {} is not an integer'.format(op))
++                    reporter.note(md.line,
++                                  'in definition of macro {}'.format(md.name))
++                return BINARY_OPERATORS[op](left, right)
++
++            reporter.error(md.line,
++                'uninterpretable macro token sequence: {}'.format(
++                    ' '.join(md.body_lowered)))
++            return None
++        finally:
++            del stack[md.name]
++
++    # Start of main body of macro_eval.
++    for md in definitions.values():
++        name = md.name
++        if name not in evaluated and not md.function:
++            evaluated[name] = eval1(md, name)
++    return evaluated
+diff --git a/support/Makefile b/support/Makefile
+index 09b41b0d57e9239a..7749ac24f1ac3622 100644
+--- a/support/Makefile
++++ b/support/Makefile
+@@ -223,11 +223,11 @@ $(objpfx)true-container : $(libsupport)
+ tests = \
+   README-testing \
+   tst-support-namespace \
++  tst-support-process_state \
+   tst-support_blob_repeat \
+   tst-support_capture_subprocess \
+   tst-support_descriptors \
+   tst-support_format_dns_packet \
+-  tst-support-process_state \
+   tst-support_quote_blob \
+   tst-support_quote_string \
+   tst-support_record_failure \
+@@ -248,6 +248,12 @@ $(objpfx)tst-support_record_failure-2.out: tst-support_record_failure-2.sh \
+ 	$(evaluate-test)
+ endif
+ 
++tests-special += $(objpfx)tst-glibcpp.out
++
++$(objpfx)tst-glibcpp.out: tst-glibcpp.py $(..)scripts/glibcpp.py
++	PYTHONPATH=$(..)scripts $(PYTHON) tst-glibcpp.py > $@ 2>&1; \
++	$(evaluate-test)
++
+ $(objpfx)tst-support_format_dns_packet: $(common-objpfx)resolv/libresolv.so
+ 
+ tst-support_capture_subprocess-ARGS = -- $(host-test-program-cmd)
+diff --git a/support/tst-glibcpp.py b/support/tst-glibcpp.py
+new file mode 100644
+index 0000000000000000..a2db1916ccfce3c3
+--- /dev/null
++++ b/support/tst-glibcpp.py
+@@ -0,0 +1,217 @@
++#! /usr/bin/python3
++# Tests for scripts/glibcpp.py
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++import inspect
++import sys
++
++import glibcpp
++
++# Error counter.
++errors = 0
++
++class TokenizerErrors:
++    """Used as the error reporter during tokenization."""
++
++    def __init__(self):
++        self.errors = []
++
++    def error(self, token, message):
++        self.errors.append((token, message))
++
++def check_macro_definitions(source, expected):
++    reporter = TokenizerErrors()
++    tokens = glibcpp.tokenize_c(source, reporter)
++
++    actual = []
++    for md in glibcpp.macro_definitions(tokens):
++        if md.function:
++            md_name = '{}({})'.format(md.name, ','.join(md.args_lowered))
++        else:
++            md_name = md.name
++        actual.append((md_name, md.body_lowered))
++
++    if actual != expected or reporter.errors:
++        global errors
++        errors += 1
++        # Obtain python source line information.
++        frame = inspect.stack(2)[1]
++        print('{}:{}: error: macro definition mismatch, actual definitions:'
++              .format(frame[1], frame[2]))
++        for md in actual:
++            print('note: {} {!r}'.format(md[0], md[1]))
++
++        if reporter.errors:
++            for err in reporter.errors:
++                print('note: tokenizer error: {}: {}'.format(
++                    err[0].line, err[1]))
++
++def check_macro_eval(source, expected, expected_errors=''):
++    reporter = TokenizerErrors()
++    tokens = list(glibcpp.tokenize_c(source, reporter))
++
++    if reporter.errors:
++        # Obtain python source line information.
++        frame = inspect.stack(2)[1]
++        for err in reporter.errors:
++            print('{}:{}: tokenizer error: {}: {}'.format(
++                frame[1], frame[2], err[0].line, err[1]))
++        return
++
++    class EvalReporter:
++        """Used as the error reporter during evaluation."""
++
++        def __init__(self):
++            self.lines = []
++
++        def error(self, line, message):
++            self.lines.append('{}: error: {}\n'.format(line, message))
++
++        def note(self, line, message):
++            self.lines.append('{}: note: {}\n'.format(line, message))
++
++    reporter = EvalReporter()
++    actual = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), reporter)
++    actual_errors = ''.join(reporter.lines)
++    if actual != expected or actual_errors != expected_errors:
++        global errors
++        errors += 1
++        # Obtain python source line information.
++        frame = inspect.stack(2)[1]
++        print('{}:{}: error: macro evaluation mismatch, actual results:'
++              .format(frame[1], frame[2]))
++        for k, v in actual.items():
++            print('  {}: {!r}'.format(k, v))
++        for msg in reporter.lines:
++            sys.stdout.write('  | ' + msg)
++
++# Individual test cases follow.
++
++check_macro_definitions('', [])
++check_macro_definitions('int main()\n{\n{\n', [])
++check_macro_definitions("""
++#define A 1
++#define B 2 /* ignored */
++#define C 3 // also ignored
++#define D \
++ 4
++#define STRING "string"
++#define FUNCLIKE(a, b) (a + b)
++#define FUNCLIKE2(a, b) (a + \
++ b)
++""", [('A', ['1']),
++      ('B', ['2']),
++      ('C', ['3']),
++      ('D', ['4']),
++      ('STRING', ['"string"']),
++      ('FUNCLIKE(a,b)', list('(a+b)')),
++      ('FUNCLIKE2(a,b)', list('(a+b)')),
++      ])
++check_macro_definitions('#define MACRO', [('MACRO', [])])
++check_macro_definitions('#define MACRO\n', [('MACRO', [])])
++check_macro_definitions('#define MACRO()', [('MACRO()', [])])
++check_macro_definitions('#define MACRO()\n', [('MACRO()', [])])
++
++check_macro_eval('#define A 1', {'A': 1})
++check_macro_eval('#define A (1)', {'A': 1})
++check_macro_eval('#define A (1 + 1)', {'A': 2})
++check_macro_eval('#define A (1U << 31)', {'A': 1 << 31})
++check_macro_eval('''\
++#define A (B + 1)
++#define B 10
++#define F(x) ignored
++#define C "not ignored"
++''', {
++    'A': 11,
++    'B': 10,
++    'C': '"not ignored"',
++})
++
++# Checking for evaluation errors.
++check_macro_eval('''\
++#define A 1
++#define A 2
++''', {
++    'A': 1,
++}, '''\
++2: error: macro A redefined
++1: note: location of previous definition
++''')
++
++check_macro_eval('''\
++#define A A
++#define B 1
++''', {
++    'A': None,
++    'B': 1,
++}, '''\
++1: error: macro definition A refers to itself
++''')
++
++check_macro_eval('''\
++#define A B
++#define B A
++''', {
++    'A': None,
++    'B': None,
++}, '''\
++1: error: macro definition A refers to itself
++2: note: evaluated from B
++''')
++
++check_macro_eval('''\
++#define A B
++#define B C
++#define C A
++''', {
++    'A': None,
++    'B': None,
++    'C': None,
++}, '''\
++1: error: macro definition A refers to itself
++3: note: evaluated from C
++2: note: evaluated from B
++''')
++
++check_macro_eval('''\
++#define A 1 +
++''', {
++    'A': None,
++}, '''\
++1: error: uninterpretable macro token sequence: 1 +
++''')
++
++check_macro_eval('''\
++#define A 3*5
++''', {
++    'A': None,
++}, '''\
++1: error: uninterpretable macro token sequence: 3 * 5
++''')
++
++check_macro_eval('''\
++#define A 3 + 5
++''', {
++    'A': 8,
++}, '''\
++1: error: missing parentheses around + expression
++1: note: in definition of macro A
++''')
++
++if errors:
++    sys.exit(1)
diff --git a/SOURCES/glibc-rh2109510-2.patch b/SOURCES/glibc-rh2109510-2.patch
new file mode 100644
index 0000000..3aba395
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-2.patch
@@ -0,0 +1,208 @@
+Partial backport of:
+
+commit 7e1d42400c1b8f03316fe14176133c8853cd3bbe
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Fri Nov 30 15:20:41 2018 +0000
+
+    Replace gen-as-const.awk by gen-as-const.py.
+    
+    This patch replaces gen-as-const.awk, and some fragments of the
+    Makefile code that used it, by a Python script.  The point is not such
+    much that awk is problematic for this particular script, as that I'd
+    like to build up a general Python infrastructure for extracting
+    information from C headers, for use in writing tests of such headers.
+    Thus, although this patch does not set up such infrastructure, the
+    compute_c_consts function in gen-as-const.py might be moved to a
+    separate Python module in a subsequent patch as a starting point for
+    such infrastructure.
+    
+    The general idea of the code is the same as in the awk version, but no
+    attempt is made to make the output files textually identical.  When
+    generating a header, a dict of constant names and values is generated
+    internally then defines are printed in sorted order (rather than the
+    order in the .sym file, which would have been used before).  When
+    generating a test that the values computed match those from a normal
+    header inclusion, the test code is made into a compilation test using
+    _Static_assert, where previously the comparisons were done only when
+    the test was executed.  One fragment of test generation (converting
+    the previously generated header to use asconst_* prefixes on its macro
+    names) is still in awk code in the makefiles; only the .sym processing
+    and subsequent execution of the compiler to extract constants have
+    moved to the Python script.
+    
+    Tested for x86_64, and with build-many-glibcs.py.
+    
+            * scripts/gen-as-const.py: New file.
+            * scripts/gen-as-const.awk: Remove.
+            * Makerules ($(common-objpfx)%.h $(common-objpfx)%.h.d): Use
+            gen-as-const.py.
+            ($(objpfx)test-as-const-%.c): Likewise.
+
+In the downstream version, scripts/gen-as-const.awk is not removed and
+still used in Makerules.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+new file mode 100644
+index 0000000000000000..b7a5744bb192dd67
+--- /dev/null
++++ b/scripts/gen-as-const.py
+@@ -0,0 +1,159 @@
++#!/usr/bin/python3
++# Produce headers of assembly constants from C expressions.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++# The input to this script looks like:
++#       #cpp-directive ...
++#       NAME1
++#       NAME2 expression ...
++# A line giving just a name implies an expression consisting of just that name.
++
++import argparse
++import os.path
++import re
++import subprocess
++import tempfile
++
++
++def compute_c_consts(sym_data, cc):
++    """Compute the values of some C constants.
++
++    The first argument is a list whose elements are either strings
++    (preprocessor directives) or pairs of strings (a name and a C
++    expression for the corresponding value).  Preprocessor directives
++    in the middle of the list may be used to select which constants
++    end up being evaluated using which expressions.
++
++    """
++    out_lines = []
++    started = False
++    for arg in sym_data:
++        if isinstance(arg, str):
++            out_lines.append(arg)
++            continue
++        name = arg[0]
++        value = arg[1]
++        if not started:
++            out_lines.append('void\ndummy (void)\n{')
++            started = True
++        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
++                         ': : \"i\" ((long int) (%s)));'
++                         % (name, value))
++    if started:
++        out_lines.append('}')
++    out_lines.append('')
++    out_text = '\n'.join(out_lines)
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        s_file_name = os.path.join(temp_dir, 'test.s')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(out_text)
++        # Compilation has to be from stdin to avoid the temporary file
++        # name being written into the generated dependencies.
++        cmd = ('%s -S -o %s -x c - < %s' % (cc, s_file_name, c_file_name))
++        subprocess.check_call(cmd, shell=True)
++        consts = {}
++        with open(s_file_name, 'r') as s_file:
++            for line in s_file:
++                match = re.search('@@@name@@@([^@]*)'
++                                  '@@@value@@@[^0-9Xxa-fA-F-]*'
++                                  '([0-9Xxa-fA-F-]+).*@@@end@@@', line)
++                if match:
++                    if (match.group(1) in consts
++                        and match.group(2) != consts[match.group(1)]):
++                        raise ValueError('duplicate constant %s'
++                                         % match.group(1))
++                    consts[match.group(1)] = match.group(2)
++        return consts
++
++
++def gen_test(sym_data):
++    """Generate a test for the values of some C constants.
++
++    The first argument is as for compute_c_consts.
++
++    """
++    out_lines = []
++    started = False
++    for arg in sym_data:
++        if isinstance(arg, str):
++            out_lines.append(arg)
++            continue
++        name = arg[0]
++        value = arg[1]
++        if not started:
++            out_lines.append('#include <stdint.h>\n'
++                             '#include <stdio.h>\n'
++                             '#include <bits/wordsize.h>\n'
++                             '#if __WORDSIZE == 64\n'
++                             'typedef uint64_t c_t;\n'
++                             '# define U(n) UINT64_C (n)\n'
++                             '#else\n'
++                             'typedef uint32_t c_t;\n'
++                             '# define U(n) UINT32_C (n)\n'
++                             '#endif\n'
++                             'static int\n'
++                             'do_test (void)\n'
++                             '{\n'
++                             # Compilation test only, using static assertions.
++                             '  return 0;\n'
++                             '}\n'
++                             '#include <support/test-driver.c>')
++            started = True
++        out_lines.append('_Static_assert (U (asconst_%s) == (c_t) (%s), '
++                         '"value of %s");'
++                         % (name, value, name))
++    return '\n'.join(out_lines)
++
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description='Produce headers of assembly constants.')
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    parser.add_argument('--test', action='store_true',
++                        help='Generate test case instead of header')
++    parser.add_argument('sym_file',
++                        help='.sym file to process')
++    args = parser.parse_args()
++    sym_data = []
++    with open(args.sym_file, 'r') as sym_file:
++        for line in sym_file:
++            line = line.strip()
++            if line == '':
++                continue
++            # Pass preprocessor directives through.
++            if line.startswith('#'):
++                sym_data.append(line)
++                continue
++            words = line.split(maxsplit=1)
++            # Separator.
++            if words[0] == '--':
++                continue
++            name = words[0]
++            value = words[1] if len(words) > 1 else words[0]
++            sym_data.append((name, value))
++    if args.test:
++        print(gen_test(sym_data))
++    else:
++        consts = compute_c_consts(sym_data, args.cc)
++        print('\n'.join('#define %s %s' % c for c in sorted(consts.items())))
++
++if __name__ == '__main__':
++    main()
diff --git a/SOURCES/glibc-rh2109510-20.patch b/SOURCES/glibc-rh2109510-20.patch
new file mode 100644
index 0000000..1007e9d
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-20.patch
@@ -0,0 +1,36 @@
+commit 29eb7961197bee68470730aecfdda4d0e206812e
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon Sep 5 12:11:19 2022 +0200
+
+    elf.h: Remove duplicate definition of VER_FLG_WEAK
+    
+    This did not cause a warning before because the token sequence for
+    the two definitions was identical.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/elf.h b/elf/elf.h
+index d6506ea1c7160dea..ec09040be639a52a 100644
+--- a/elf/elf.h
++++ b/elf/elf.h
+@@ -1027,7 +1027,8 @@ typedef struct
+ 
+ /* Legal values for vd_flags (version information flags).  */
+ #define VER_FLG_BASE	0x1		/* Version definition of file itself */
+-#define VER_FLG_WEAK	0x2		/* Weak version identifier */
++#define VER_FLG_WEAK	0x2		/* Weak version identifier.  Also
++					   used by vna_flags below.  */
+ 
+ /* Versym symbol index values.  */
+ #define	VER_NDX_LOCAL		0	/* Symbol is local.  */
+@@ -1105,10 +1106,6 @@ typedef struct
+ } Elf64_Vernaux;
+ 
+ 
+-/* Legal values for vna_flags.  */
+-#define VER_FLG_WEAK	0x2		/* Weak version identifier */
+-
+-
+ /* Auxiliary vector.  */
+ 
+ /* This vector is normally only used by the program interpreter.  The
diff --git a/SOURCES/glibc-rh2109510-21.patch b/SOURCES/glibc-rh2109510-21.patch
new file mode 100644
index 0000000..5e58123
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-21.patch
@@ -0,0 +1,1295 @@
+commit 340097d0b50eff9d3058e06c6989ae398c653d4a
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Sep 22 12:10:41 2022 +0200
+
+    elf: Extract glibcelf constants from <elf.h>
+    
+    The need to maintain elf/elf.h and scripts/glibcelf.py in parallel
+    results in a backporting hazard: they need to be kept in sync to
+    avoid elf/tst-glibcelf consistency check failures.  glibcelf (unlike
+    tst-glibcelf) does not use the C implementation to extract constants.
+    This applies the additional glibcpp syntax checks to <elf.h>.
+    
+    This change replaces the types derived from Python enum types with
+    custom types _TypedConstant, _IntConstant, and _FlagConstant.  These
+    types have fewer safeguards, but this also allows incremental
+    construction and greater flexibility for grouping constants among
+    the types.  Architecture-specific named constants are now added
+    as members into their superclasses (but value-based lookup is
+    still restricted to generic constants only).
+    
+    Consequently, check_duplicates in elf/tst-glibcelf has been adjusted
+    to accept differently-named constants of the same value if their
+    subtypes are distinct.  The ordering check for named constants
+    has been dropped because they are no longer strictly ordered.
+    
+    Further test adjustments: Some of the type names are different.
+    The new types do not support iteration (because it is unclear
+    whether iteration should cover all the named values (including
+    architecture-specific constants), or only the generic named values),
+    so elf/tst-glibcelf now uses by_name explicit (to get all constants).
+    PF_HP_SBP and PF_PARISC_SBP are now of distinct types (PfHP and
+    PfPARISC), so they are now both present on the Python side.  EM_NUM
+    and PT_NUM are filtered (which was an oversight in the old
+    conversion).
+    
+    The new version of glibcelf should also be compatible with earlier
+    Python versions because it no longer depends on the enum module and its
+    advanced features.
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+index e5026e2289df206b..a5bff45eae55edea 100644
+--- a/elf/tst-glibcelf.py
++++ b/elf/tst-glibcelf.py
+@@ -18,7 +18,6 @@
+ # <https://www.gnu.org/licenses/>.
+ 
+ import argparse
+-import enum
+ import sys
+ 
+ import glibcelf
+@@ -45,11 +44,57 @@ def find_constant_prefix(name):
+ 
+ def find_enum_types():
+     """A generator for OpenIntEnum and IntFlag classes in glibcelf."""
++    classes = set((glibcelf._TypedConstant, glibcelf._IntConstant,
++                   glibcelf._FlagConstant))
+     for obj in vars(glibcelf).values():
+-        if isinstance(obj, type) and obj.__bases__[0] in (
+-                glibcelf._OpenIntEnum, enum.Enum, enum.IntFlag):
++        if isinstance(obj, type) and obj not in classes \
++           and obj.__bases__[0] in classes:
+             yield obj
+ 
++def check_basic():
++    """Check basic functionality of the constant classes."""
++
++    if glibcelf.Pt.PT_NULL is not glibcelf.Pt(0):
++        error('Pt(0) not interned')
++    if glibcelf.Pt(17609) is glibcelf.Pt(17609):
++        error('Pt(17609) unexpectedly interned')
++    if glibcelf.Pt(17609) == glibcelf.Pt(17609):
++        pass
++    else:
++        error('Pt(17609) equality')
++    if glibcelf.Pt(17610) == glibcelf.Pt(17609):
++        error('Pt(17610) equality')
++
++    if str(glibcelf.Pt.PT_NULL) != 'PT_NULL':
++        error('str(PT_NULL)')
++    if str(glibcelf.Pt(17609)) != '17609':
++        error('str(Pt(17609))')
++
++    if repr(glibcelf.Pt.PT_NULL) != 'PT_NULL':
++        error('repr(PT_NULL)')
++    if repr(glibcelf.Pt(17609)) != 'Pt(17609)':
++        error('repr(Pt(17609))')
++
++    if glibcelf.Pt('PT_AARCH64_MEMTAG_MTE') \
++       is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++        error('PT_AARCH64_MEMTAG_MTE identity')
++    if glibcelf.Pt(0x70000002) is glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++        error('Pt(0x70000002) identity')
++    if glibcelf.PtAARCH64(0x70000002) is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++        error('PtAARCH64(0x70000002) identity')
++    if glibcelf.Pt.PT_AARCH64_MEMTAG_MTE.short_name != 'AARCH64_MEMTAG_MTE':
++        error('PT_AARCH64_MEMTAG_MTE short name')
++
++    # Special cases for int-like Shn.
++    if glibcelf.Shn(32) == glibcelf.Shn.SHN_XINDEX:
++        error('Shn(32)')
++    if glibcelf.Shn(32) + 0 != 32:
++        error('Shn(32) + 0')
++    if 32 in glibcelf.Shn:
++        error('32 in Shn')
++    if 0 not in glibcelf.Shn:
++        error('0 not in Shn')
++
+ def check_duplicates():
+     """Verifies that enum types do not have duplicate values.
+ 
+@@ -59,17 +104,16 @@ def check_duplicates():
+     global_seen = {}
+     for typ in find_enum_types():
+         seen = {}
+-        last = None
+-        for (name, e) in typ.__members__.items():
++        for (name, e) in typ.by_name.items():
+             if e.value in seen:
+-                error('{} has {}={} and {}={}'.format(
+-                    typ, seen[e.value], e.value, name, e.value))
+-                last = e
++                other = seen[e.value]
++                # Value conflicts only count if they are between
++                # the same base type.
++                if e.__class__ is typ and other.__class__ is typ:
++                    error('{} has {}={} and {}={}'.format(
++                        typ, other, e.value, name, e.value))
+             else:
+                 seen[e.value] = name
+-                if last is not None and last.value > e.value:
+-                    error('{} has {}={} after {}={}'.format(
+-                        typ, name, e.value, last.name, last.value))
+                 if name in global_seen:
+                     error('{} used in {} and {}'.format(
+                         name, global_seen[name], typ))
+@@ -81,7 +125,7 @@ def check_constant_prefixes():
+     seen = set()
+     for typ in find_enum_types():
+         typ_prefix = None
+-        for val in typ:
++        for val in typ.by_name.values():
+             prefix = find_constant_prefix(val.name)
+             if prefix is None:
+                 error('constant {!r} for {} has unknown prefix'.format(
+@@ -113,7 +157,6 @@ def find_elf_h_constants(cc):
+ # used in <elf.h>.
+ glibcelf_skipped_aliases = (
+     ('EM_ARC_A5', 'EM_ARC_COMPACT'),
+-    ('PF_PARISC_SBP', 'PF_HP_SBP')
+ )
+ 
+ # Constants that provide little value and are not included in
+@@ -146,6 +189,7 @@ DT_VALRNGLO
+ DT_VERSIONTAGNUM
+ ELFCLASSNUM
+ ELFDATANUM
++EM_NUM
+ ET_HIOS
+ ET_HIPROC
+ ET_LOOS
+@@ -159,6 +203,7 @@ PT_HISUNW
+ PT_LOOS
+ PT_LOPROC
+ PT_LOSUNW
++PT_NUM
+ SHF_MASKOS
+ SHF_MASKPROC
+ SHN_HIOS
+@@ -193,7 +238,7 @@ def check_constant_values(cc):
+     """Checks the values of <elf.h> constants against glibcelf."""
+ 
+     glibcelf_constants = {
+-        e.name: e for typ in find_enum_types() for e in typ}
++        e.name: e for typ in find_enum_types() for e in typ.by_name.values()}
+     elf_h_constants = find_elf_h_constants(cc=cc)
+ 
+     missing_in_glibcelf = (set(elf_h_constants) - set(glibcelf_constants)
+@@ -229,12 +274,13 @@ def check_constant_values(cc):
+     for name in sorted(set(glibcelf_constants) & set(elf_h_constants)):
+         glibcelf_value = glibcelf_constants[name].value
+         elf_h_value = int(elf_h_constants[name])
+-        # On 32-bit architectures <elf.h> as some constants that are
++        # On 32-bit architectures <elf.h> has some constants that are
+         # parsed as signed, while they are unsigned in glibcelf.  So
+         # far, this only affects some flag constants, so special-case
+         # them here.
+         if (glibcelf_value != elf_h_value
+-            and not (isinstance(glibcelf_constants[name], enum.IntFlag)
++            and not (isinstance(glibcelf_constants[name],
++                                glibcelf._FlagConstant)
+                      and glibcelf_value == 1 << 31
+                      and elf_h_value == -(1 << 31))):
+             error('{}: glibcelf has {!r}, <elf.h> has {!r}'.format(
+@@ -266,6 +312,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+ 
++    check_basic()
+     check_duplicates()
+     check_constant_prefixes()
+     check_constant_values(cc=args.cc)
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 5c8f46f590722384..420cb21943b28bba 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -25,711 +25,445 @@ parsing it.
+ """
+ 
+ import collections
+-import enum
++import functools
++import os
+ import struct
+ 
+-if not hasattr(enum, 'IntFlag'):
+-    import sys
+-    sys.stdout.write(
+-        'warning: glibcelf.py needs Python 3.6 for enum support\n')
+-    sys.exit(77)
++import glibcpp
++
++class _MetaNamedValue(type):
++    """Used to set up _NamedValue subclasses."""
+ 
+-class _OpenIntEnum(enum.IntEnum):
+-    """Integer enumeration that supports arbitrary int values."""
+     @classmethod
+-    def _missing_(cls, value):
+-        # See enum.IntFlag._create_pseudo_member_.  This allows
+-        # creating of enum constants with arbitrary integer values.
+-        pseudo_member = int.__new__(cls, value)
+-        pseudo_member._name_ = None
+-        pseudo_member._value_ = value
+-        return pseudo_member
++    def __prepare__(metacls, cls, bases, **kwds):
++        # Indicates an int-based class.  Needed for types like Shn.
++        int_based = False
++        for base in bases:
++            if issubclass(base, int):
++                int_based = int
++                break
++        return dict(by_value={},
++                    by_name={},
++                    prefix=None,
++                    _int_based=int_based)
+ 
+-    def __repr__(self):
+-        name = self._name_
+-        if name is not None:
+-            # The names have prefixes like SHT_, implying their type.
+-            return name
+-        return '{}({})'.format(self.__class__.__name__, self._value_)
++    def __contains__(self, other):
++        return other in self.by_value
++
++class _NamedValue(metaclass=_MetaNamedValue):
++    """Typed, named integer constants.
++
++    Constants have the following instance attributes:
++
++    name: The full name of the constant (e.g., "PT_NULL").
++    short_name: The name with of the constant without the prefix ("NULL").
++    value: The integer value of the constant.
++
++    The following class attributes are available:
++
++    by_value: A dict mapping integers to constants.
++    by_name: A dict mapping strings to constants.
++    prefix: A string that is removed from the start of short names, or None.
++
++    """
++
++    def __new__(cls, arg0, arg1=None):
++        """Instance creation.
++
++        For the one-argument form, the argument must be a string, an
++        int, or an instance of this class.  Strings are looked up via
++        by_name.  Values are looked up via by_value; if value lookup
++        fails, a new unnamed instance is returned.  Instances of this
++        class a re returned as-is.
++
++        The two-argument form expects the name (a string) and the
++        value (an integer).  A new instance is created in this case.
++        The instance is not registered in the by_value/by_name
++        dictionaries (but the caller can do that).
++
++        """
++
++        typ0 = type(arg0)
++        if arg1 is None:
++            if isinstance(typ0, cls):
++                # Re-use the existing object.
++                return arg0
++            if typ0 is int:
++                by_value = cls.by_value
++                try:
++                    return by_value[arg0]
++                except KeyError:
++                    # Create a new object of the requested value.
++                    if cls._int_based:
++                        result = int.__new__(cls, arg0)
++                    else:
++                        result = object.__new__(cls)
++                    result.value = arg0
++                    result.name = None
++                    return result
++            if typ0 is str:
++                by_name = cls.by_name
++                try:
++                    return by_name[arg0]
++                except KeyError:
++                    raise ValueError('unknown {} constant: {!r}'.format(
++                        cls.__name__, arg0))
++        else:
++            # Types for the two-argument form are rigid.
++            if typ0 is not str and typ0 is not None:
++                raise ValueError('type {} of name {!r} should be str'.format(
++                    typ0.__name__, arg0))
++            if type(arg1) is not int:
++                raise ValueError('type {} of value {!r} should be int'.format(
++                    type(arg1).__name__, arg1))
++            # Create a new named constants.
++            if cls._int_based:
++                result = int.__new__(cls, arg1)
++            else:
++                result = object.__new__(cls)
++            result.value = arg1
++            result.name = arg0
++            # Set up the short_name attribute.
++            prefix = cls.prefix
++            if prefix and arg0.startswith(prefix):
++                result.short_name = arg0[len(prefix):]
++            else:
++                result.short_name = arg0
++            return result
+ 
+     def __str__(self):
+-        name = self._name_
+-        if name is not None:
++        name = self.name
++        if name:
++            return name
++        else:
++            return str(self.value)
++
++    def __repr__(self):
++        name = self.name
++        if name:
+             return name
+-        return str(self._value_)
++        else:
++            return '{}({})'.format(self.__class__.__name__, self.value)
++
++    def __setattr__(self, name, value):
++        # Prevent modification of the critical attributes once they
++        # have been set.
++        if name in ('name', 'value', 'short_name') and hasattr(self, name):
++            raise AttributeError('can\'t set attribute {}'.format(name))
++        object.__setattr__(self, name, value)
++
++@functools.total_ordering
++class _TypedConstant(_NamedValue):
++    """Base class for integer-valued optionally named constants.
++
++    This type is not an integer type.
++
++    """
++
++    def __eq__(self, other):
++        return isinstance(other, self.__class__) and self.value == other.value
++
++    def __lt__(self, other):
++        return isinstance(other, self.__class__) and self.value <= other.value
++
++    def __hash__(self):
++        return hash(self.value)
++
++class _IntConstant(_NamedValue, int):
++    """Base class for integer-like optionally named constants.
++
++    Instances compare equal to the integer of the same value, and can
++    be used in integer arithmetic.
++
++    """
+ 
+-class ElfClass(_OpenIntEnum):
++    pass
++
++class _FlagConstant(_TypedConstant, int):
++    pass
++
++def _parse_elf_h():
++    """Read ../elf/elf.h and return a dict with the constants in it."""
++
++    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
++                        '..', 'elf', 'elf.h')
++    class TokenizerReporter:
++        """Report tokenizer errors to standard output."""
++
++        def __init__(self):
++            self.errors = 0
++
++        def error(self, token, message):
++            self.errors += 1
++            print('{}:{}:{}: error: {}'.format(
++                path, token.line, token.column, message))
++
++    reporter = TokenizerReporter()
++    with open(path) as inp:
++        tokens = glibcpp.tokenize_c(inp.read(), reporter)
++    if reporter.errors:
++        raise IOError('parse error in elf.h')
++
++    class MacroReporter:
++        """Report macro errors to standard output."""
++
++        def __init__(self):
++            self.errors = 0
++
++        def error(self, line, message):
++            errors += 1
++            print('{}:{}: error: {}'.format(path, line, message))
++
++        def note(self, line, message):
++            print('{}:{}: note: {}'.format(path, line, message))
++
++    reporter = MacroReporter()
++    result = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), reporter)
++    if reporter.errors:
++        raise IOError('parse error in elf.h')
++
++    return result
++_elf_h = _parse_elf_h()
++del _parse_elf_h
++_elf_h_processed = set()
++
++def _register_elf_h(cls, prefix=None, skip=(), ranges=False, parent=None):
++    prefix = prefix or cls.prefix
++    if not prefix:
++        raise ValueError('missing prefix for {}'.format(cls.__name__))
++    by_value = cls.by_value
++    by_name = cls.by_name
++    processed = _elf_h_processed
++
++    skip = set(skip)
++    skip.add(prefix + 'NUM')
++    if ranges:
++        skip.add(prefix + 'LOOS')
++        skip.add(prefix + 'HIOS')
++        skip.add(prefix + 'LOPROC')
++        skip.add(prefix + 'HIPROC')
++        cls.os_range = (_elf_h[prefix + 'LOOS'], _elf_h[prefix + 'HIOS'])
++        cls.proc_range = (_elf_h[prefix + 'LOPROC'], _elf_h[prefix + 'HIPROC'])
++
++    # Inherit the prefix from the parent if not set.
++    if parent and cls.prefix is None and parent.prefix is not None:
++        cls.prefix = parent.prefix
++
++    processed_len_start = len(processed)
++    for name, value in _elf_h.items():
++        if name in skip or name in processed:
++            continue
++        if name.startswith(prefix):
++            processed.add(name)
++            if value in by_value:
++                raise ValueError('duplicate value {}: {}, {}'.format(
++                    value, name, by_value[value]))
++            obj = cls(name, value)
++            by_value[value] = obj
++            by_name[name] = obj
++            setattr(cls, name, obj)
++            if parent:
++                # Make the symbolic name available through the parent as well.
++                parent.by_name[name] = obj
++                setattr(parent, name, obj)
++
++    if len(processed) == processed_len_start:
++        raise ValueError('nothing matched prefix {!r}'.format(prefix))
++
++class ElfClass(_TypedConstant):
+     """ELF word size.  Type of EI_CLASS values."""
+-    ELFCLASSNONE = 0
+-    ELFCLASS32 = 1
+-    ELFCLASS64 = 2
++_register_elf_h(ElfClass, prefix='ELFCLASS')
+ 
+-class ElfData(_OpenIntEnum):
++class ElfData(_TypedConstant):
+     """ELF endianess.  Type of EI_DATA values."""
+-    ELFDATANONE = 0
+-    ELFDATA2LSB = 1
+-    ELFDATA2MSB = 2
++_register_elf_h(ElfData, prefix='ELFDATA')
+ 
+-class Machine(_OpenIntEnum):
++class Machine(_TypedConstant):
+     """ELF machine type.  Type of values in Ehdr.e_machine field."""
+-    EM_NONE = 0
+-    EM_M32 = 1
+-    EM_SPARC = 2
+-    EM_386 = 3
+-    EM_68K = 4
+-    EM_88K = 5
+-    EM_IAMCU = 6
+-    EM_860 = 7
+-    EM_MIPS = 8
+-    EM_S370 = 9
+-    EM_MIPS_RS3_LE = 10
+-    EM_PARISC = 15
+-    EM_VPP500 = 17
+-    EM_SPARC32PLUS = 18
+-    EM_960 = 19
+-    EM_PPC = 20
+-    EM_PPC64 = 21
+-    EM_S390 = 22
+-    EM_SPU = 23
+-    EM_V800 = 36
+-    EM_FR20 = 37
+-    EM_RH32 = 38
+-    EM_RCE = 39
+-    EM_ARM = 40
+-    EM_FAKE_ALPHA = 41
+-    EM_SH = 42
+-    EM_SPARCV9 = 43
+-    EM_TRICORE = 44
+-    EM_ARC = 45
+-    EM_H8_300 = 46
+-    EM_H8_300H = 47
+-    EM_H8S = 48
+-    EM_H8_500 = 49
+-    EM_IA_64 = 50
+-    EM_MIPS_X = 51
+-    EM_COLDFIRE = 52
+-    EM_68HC12 = 53
+-    EM_MMA = 54
+-    EM_PCP = 55
+-    EM_NCPU = 56
+-    EM_NDR1 = 57
+-    EM_STARCORE = 58
+-    EM_ME16 = 59
+-    EM_ST100 = 60
+-    EM_TINYJ = 61
+-    EM_X86_64 = 62
+-    EM_PDSP = 63
+-    EM_PDP10 = 64
+-    EM_PDP11 = 65
+-    EM_FX66 = 66
+-    EM_ST9PLUS = 67
+-    EM_ST7 = 68
+-    EM_68HC16 = 69
+-    EM_68HC11 = 70
+-    EM_68HC08 = 71
+-    EM_68HC05 = 72
+-    EM_SVX = 73
+-    EM_ST19 = 74
+-    EM_VAX = 75
+-    EM_CRIS = 76
+-    EM_JAVELIN = 77
+-    EM_FIREPATH = 78
+-    EM_ZSP = 79
+-    EM_MMIX = 80
+-    EM_HUANY = 81
+-    EM_PRISM = 82
+-    EM_AVR = 83
+-    EM_FR30 = 84
+-    EM_D10V = 85
+-    EM_D30V = 86
+-    EM_V850 = 87
+-    EM_M32R = 88
+-    EM_MN10300 = 89
+-    EM_MN10200 = 90
+-    EM_PJ = 91
+-    EM_OPENRISC = 92
+-    EM_ARC_COMPACT = 93
+-    EM_XTENSA = 94
+-    EM_VIDEOCORE = 95
+-    EM_TMM_GPP = 96
+-    EM_NS32K = 97
+-    EM_TPC = 98
+-    EM_SNP1K = 99
+-    EM_ST200 = 100
+-    EM_IP2K = 101
+-    EM_MAX = 102
+-    EM_CR = 103
+-    EM_F2MC16 = 104
+-    EM_MSP430 = 105
+-    EM_BLACKFIN = 106
+-    EM_SE_C33 = 107
+-    EM_SEP = 108
+-    EM_ARCA = 109
+-    EM_UNICORE = 110
+-    EM_EXCESS = 111
+-    EM_DXP = 112
+-    EM_ALTERA_NIOS2 = 113
+-    EM_CRX = 114
+-    EM_XGATE = 115
+-    EM_C166 = 116
+-    EM_M16C = 117
+-    EM_DSPIC30F = 118
+-    EM_CE = 119
+-    EM_M32C = 120
+-    EM_TSK3000 = 131
+-    EM_RS08 = 132
+-    EM_SHARC = 133
+-    EM_ECOG2 = 134
+-    EM_SCORE7 = 135
+-    EM_DSP24 = 136
+-    EM_VIDEOCORE3 = 137
+-    EM_LATTICEMICO32 = 138
+-    EM_SE_C17 = 139
+-    EM_TI_C6000 = 140
+-    EM_TI_C2000 = 141
+-    EM_TI_C5500 = 142
+-    EM_TI_ARP32 = 143
+-    EM_TI_PRU = 144
+-    EM_MMDSP_PLUS = 160
+-    EM_CYPRESS_M8C = 161
+-    EM_R32C = 162
+-    EM_TRIMEDIA = 163
+-    EM_QDSP6 = 164
+-    EM_8051 = 165
+-    EM_STXP7X = 166
+-    EM_NDS32 = 167
+-    EM_ECOG1X = 168
+-    EM_MAXQ30 = 169
+-    EM_XIMO16 = 170
+-    EM_MANIK = 171
+-    EM_CRAYNV2 = 172
+-    EM_RX = 173
+-    EM_METAG = 174
+-    EM_MCST_ELBRUS = 175
+-    EM_ECOG16 = 176
+-    EM_CR16 = 177
+-    EM_ETPU = 178
+-    EM_SLE9X = 179
+-    EM_L10M = 180
+-    EM_K10M = 181
+-    EM_AARCH64 = 183
+-    EM_AVR32 = 185
+-    EM_STM8 = 186
+-    EM_TILE64 = 187
+-    EM_TILEPRO = 188
+-    EM_MICROBLAZE = 189
+-    EM_CUDA = 190
+-    EM_TILEGX = 191
+-    EM_CLOUDSHIELD = 192
+-    EM_COREA_1ST = 193
+-    EM_COREA_2ND = 194
+-    EM_ARCV2 = 195
+-    EM_OPEN8 = 196
+-    EM_RL78 = 197
+-    EM_VIDEOCORE5 = 198
+-    EM_78KOR = 199
+-    EM_56800EX = 200
+-    EM_BA1 = 201
+-    EM_BA2 = 202
+-    EM_XCORE = 203
+-    EM_MCHP_PIC = 204
+-    EM_INTELGT = 205
+-    EM_KM32 = 210
+-    EM_KMX32 = 211
+-    EM_EMX16 = 212
+-    EM_EMX8 = 213
+-    EM_KVARC = 214
+-    EM_CDP = 215
+-    EM_COGE = 216
+-    EM_COOL = 217
+-    EM_NORC = 218
+-    EM_CSR_KALIMBA = 219
+-    EM_Z80 = 220
+-    EM_VISIUM = 221
+-    EM_FT32 = 222
+-    EM_MOXIE = 223
+-    EM_AMDGPU = 224
+-    EM_RISCV = 243
+-    EM_BPF = 247
+-    EM_CSKY = 252
+-    EM_LOONGARCH = 258
+-    EM_NUM = 259
+-    EM_ALPHA = 0x9026
+-
+-class Et(_OpenIntEnum):
++    prefix = 'EM_'
++_register_elf_h(Machine, skip=('EM_ARC_A5',))
++
++class Et(_TypedConstant):
+     """ELF file type.  Type of ET_* values and the Ehdr.e_type field."""
+-    ET_NONE = 0
+-    ET_REL = 1
+-    ET_EXEC = 2
+-    ET_DYN = 3
+-    ET_CORE = 4
++    prefix = 'ET_'
++_register_elf_h(Et, ranges=True)
+ 
+-class Shn(_OpenIntEnum):
++class Shn(_IntConstant):
+     """ELF reserved section indices."""
+-    SHN_UNDEF = 0
+-    SHN_BEFORE = 0xff00
+-    SHN_AFTER = 0xff01
+-    SHN_ABS = 0xfff1
+-    SHN_COMMON = 0xfff2
+-    SHN_XINDEX = 0xffff
+-
+-class ShnMIPS(enum.Enum):
++    prefix = 'SHN_'
++class ShnMIPS(Shn):
+     """Supplemental SHN_* constants for EM_MIPS."""
+-    SHN_MIPS_ACOMMON = 0xff00
+-    SHN_MIPS_TEXT = 0xff01
+-    SHN_MIPS_DATA = 0xff02
+-    SHN_MIPS_SCOMMON = 0xff03
+-    SHN_MIPS_SUNDEFINED = 0xff04
+-
+-class ShnPARISC(enum.Enum):
++class ShnPARISC(Shn):
+     """Supplemental SHN_* constants for EM_PARISC."""
+-    SHN_PARISC_ANSI_COMMON = 0xff00
+-    SHN_PARISC_HUGE_COMMON = 0xff01
++_register_elf_h(ShnMIPS, prefix='SHN_MIPS_', parent=Shn)
++_register_elf_h(ShnPARISC, prefix='SHN_PARISC_', parent=Shn)
++_register_elf_h(Shn, skip='SHN_LORESERVE SHN_HIRESERVE'.split(), ranges=True)
+ 
+-class Sht(_OpenIntEnum):
++class Sht(_TypedConstant):
+     """ELF section types.  Type of SHT_* values."""
+-    SHT_NULL = 0
+-    SHT_PROGBITS = 1
+-    SHT_SYMTAB = 2
+-    SHT_STRTAB = 3
+-    SHT_RELA = 4
+-    SHT_HASH = 5
+-    SHT_DYNAMIC = 6
+-    SHT_NOTE = 7
+-    SHT_NOBITS = 8
+-    SHT_REL = 9
+-    SHT_SHLIB = 10
+-    SHT_DYNSYM = 11
+-    SHT_INIT_ARRAY = 14
+-    SHT_FINI_ARRAY = 15
+-    SHT_PREINIT_ARRAY = 16
+-    SHT_GROUP = 17
+-    SHT_SYMTAB_SHNDX = 18
+-    SHT_RELR = 19
+-    SHT_GNU_ATTRIBUTES = 0x6ffffff5
+-    SHT_GNU_HASH = 0x6ffffff6
+-    SHT_GNU_LIBLIST = 0x6ffffff7
+-    SHT_CHECKSUM = 0x6ffffff8
+-    SHT_SUNW_move = 0x6ffffffa
+-    SHT_SUNW_COMDAT = 0x6ffffffb
+-    SHT_SUNW_syminfo = 0x6ffffffc
+-    SHT_GNU_verdef = 0x6ffffffd
+-    SHT_GNU_verneed = 0x6ffffffe
+-    SHT_GNU_versym = 0x6fffffff
+-
+-class ShtALPHA(enum.Enum):
++    prefix = 'SHT_'
++class ShtALPHA(Sht):
+     """Supplemental SHT_* constants for EM_ALPHA."""
+-    SHT_ALPHA_DEBUG = 0x70000001
+-    SHT_ALPHA_REGINFO = 0x70000002
+-
+-class ShtARM(enum.Enum):
++class ShtARM(Sht):
+     """Supplemental SHT_* constants for EM_ARM."""
+-    SHT_ARM_EXIDX = 0x70000001
+-    SHT_ARM_PREEMPTMAP = 0x70000002
+-    SHT_ARM_ATTRIBUTES = 0x70000003
+-
+-class ShtCSKY(enum.Enum):
++class ShtCSKY(Sht):
+     """Supplemental SHT_* constants for EM_CSKY."""
+-    SHT_CSKY_ATTRIBUTES = 0x70000001
+-
+-class ShtIA_64(enum.Enum):
++class ShtIA_64(Sht):
+     """Supplemental SHT_* constants for EM_IA_64."""
+-    SHT_IA_64_EXT = 0x70000000
+-    SHT_IA_64_UNWIND = 0x70000001
+-
+-class ShtMIPS(enum.Enum):
++class ShtMIPS(Sht):
+     """Supplemental SHT_* constants for EM_MIPS."""
+-    SHT_MIPS_LIBLIST = 0x70000000
+-    SHT_MIPS_MSYM = 0x70000001
+-    SHT_MIPS_CONFLICT = 0x70000002
+-    SHT_MIPS_GPTAB = 0x70000003
+-    SHT_MIPS_UCODE = 0x70000004
+-    SHT_MIPS_DEBUG = 0x70000005
+-    SHT_MIPS_REGINFO = 0x70000006
+-    SHT_MIPS_PACKAGE = 0x70000007
+-    SHT_MIPS_PACKSYM = 0x70000008
+-    SHT_MIPS_RELD = 0x70000009
+-    SHT_MIPS_IFACE = 0x7000000b
+-    SHT_MIPS_CONTENT = 0x7000000c
+-    SHT_MIPS_OPTIONS = 0x7000000d
+-    SHT_MIPS_SHDR = 0x70000010
+-    SHT_MIPS_FDESC = 0x70000011
+-    SHT_MIPS_EXTSYM = 0x70000012
+-    SHT_MIPS_DENSE = 0x70000013
+-    SHT_MIPS_PDESC = 0x70000014
+-    SHT_MIPS_LOCSYM = 0x70000015
+-    SHT_MIPS_AUXSYM = 0x70000016
+-    SHT_MIPS_OPTSYM = 0x70000017
+-    SHT_MIPS_LOCSTR = 0x70000018
+-    SHT_MIPS_LINE = 0x70000019
+-    SHT_MIPS_RFDESC = 0x7000001a
+-    SHT_MIPS_DELTASYM = 0x7000001b
+-    SHT_MIPS_DELTAINST = 0x7000001c
+-    SHT_MIPS_DELTACLASS = 0x7000001d
+-    SHT_MIPS_DWARF = 0x7000001e
+-    SHT_MIPS_DELTADECL = 0x7000001f
+-    SHT_MIPS_SYMBOL_LIB = 0x70000020
+-    SHT_MIPS_EVENTS = 0x70000021
+-    SHT_MIPS_TRANSLATE = 0x70000022
+-    SHT_MIPS_PIXIE = 0x70000023
+-    SHT_MIPS_XLATE = 0x70000024
+-    SHT_MIPS_XLATE_DEBUG = 0x70000025
+-    SHT_MIPS_WHIRL = 0x70000026
+-    SHT_MIPS_EH_REGION = 0x70000027
+-    SHT_MIPS_XLATE_OLD = 0x70000028
+-    SHT_MIPS_PDR_EXCEPTION = 0x70000029
+-    SHT_MIPS_XHASH = 0x7000002b
+-
+-class ShtPARISC(enum.Enum):
++class ShtPARISC(Sht):
+     """Supplemental SHT_* constants for EM_PARISC."""
+-    SHT_PARISC_EXT = 0x70000000
+-    SHT_PARISC_UNWIND = 0x70000001
+-    SHT_PARISC_DOC = 0x70000002
+-
+-class ShtRISCV(enum.Enum):
++class ShtRISCV(Sht):
+     """Supplemental SHT_* constants for EM_RISCV."""
+-    SHT_RISCV_ATTRIBUTES = 0x70000003
+-
+-class Pf(enum.IntFlag):
++_register_elf_h(ShtALPHA, prefix='SHT_ALPHA_', parent=Sht)
++_register_elf_h(ShtARM, prefix='SHT_ARM_', parent=Sht)
++_register_elf_h(ShtCSKY, prefix='SHT_CSKY_', parent=Sht)
++_register_elf_h(ShtIA_64, prefix='SHT_IA_64_', parent=Sht)
++_register_elf_h(ShtMIPS, prefix='SHT_MIPS_', parent=Sht)
++_register_elf_h(ShtPARISC, prefix='SHT_PARISC_', parent=Sht)
++_register_elf_h(ShtRISCV, prefix='SHT_RISCV_', parent=Sht)
++_register_elf_h(Sht, ranges=True,
++                skip='SHT_LOSUNW SHT_HISUNW SHT_LOUSER SHT_HIUSER'.split())
++
++class Pf(_FlagConstant):
+     """Program header flags.  Type of Phdr.p_flags values."""
+-    PF_X = 1
+-    PF_W = 2
+-    PF_R = 4
+-
+-class PfARM(enum.IntFlag):
++    prefix = 'PF_'
++class PfARM(Pf):
+     """Supplemental PF_* flags for EM_ARM."""
+-    PF_ARM_SB = 0x10000000
+-    PF_ARM_PI = 0x20000000
+-    PF_ARM_ABS = 0x40000000
+-
+-class PfPARISC(enum.IntFlag):
+-    """Supplemental PF_* flags for EM_PARISC."""
+-    PF_HP_PAGE_SIZE = 0x00100000
+-    PF_HP_FAR_SHARED = 0x00200000
+-    PF_HP_NEAR_SHARED = 0x00400000
+-    PF_HP_CODE = 0x01000000
+-    PF_HP_MODIFY = 0x02000000
+-    PF_HP_LAZYSWAP = 0x04000000
+-    PF_HP_SBP = 0x08000000
+-
+-class PfIA_64(enum.IntFlag):
++class PfHP(Pf):
++    """Supplemental PF_* flags for HP-UX."""
++class PfIA_64(Pf):
+     """Supplemental PF_* flags for EM_IA_64."""
+-    PF_IA_64_NORECOV = 0x80000000
+-
+-class PfMIPS(enum.IntFlag):
++class PfMIPS(Pf):
+     """Supplemental PF_* flags for EM_MIPS."""
+-    PF_MIPS_LOCAL = 0x10000000
+-
+-class Shf(enum.IntFlag):
++class PfPARISC(Pf):
++    """Supplemental PF_* flags for EM_PARISC."""
++_register_elf_h(PfARM, prefix='PF_ARM_', parent=Pf)
++_register_elf_h(PfHP, prefix='PF_HP_', parent=Pf)
++_register_elf_h(PfIA_64, prefix='PF_IA_64_', parent=Pf)
++_register_elf_h(PfMIPS, prefix='PF_MIPS_', parent=Pf)
++_register_elf_h(PfPARISC, prefix='PF_PARISC_', parent=Pf)
++_register_elf_h(Pf, skip='PF_MASKOS PF_MASKPROC'.split())
++
++class Shf(_FlagConstant):
+     """Section flags.  Type of Shdr.sh_type values."""
+-    SHF_WRITE = 1 << 0
+-    SHF_ALLOC = 1 << 1
+-    SHF_EXECINSTR = 1 << 2
+-    SHF_MERGE = 1 << 4
+-    SHF_STRINGS = 1 << 5
+-    SHF_INFO_LINK = 1 << 6
+-    SHF_LINK_ORDER = 1 << 7
+-    SHF_OS_NONCONFORMING = 256
+-    SHF_GROUP = 1 << 9
+-    SHF_TLS = 1 << 10
+-    SHF_COMPRESSED = 1 << 11
+-    SHF_GNU_RETAIN = 1 << 21
+-    SHF_ORDERED = 1 << 30
+-    SHF_EXCLUDE = 1 << 31
+-
+-class ShfALPHA(enum.IntFlag):
++    prefix = 'SHF_'
++class ShfALPHA(Shf):
+     """Supplemental SHF_* constants for EM_ALPHA."""
+-    SHF_ALPHA_GPREL = 0x10000000
+-
+-class ShfARM(enum.IntFlag):
++class ShfARM(Shf):
+     """Supplemental SHF_* constants for EM_ARM."""
+-    SHF_ARM_ENTRYSECT = 0x10000000
+-    SHF_ARM_COMDEF = 0x80000000
+-
+-class ShfIA_64(enum.IntFlag):
++class ShfIA_64(Shf):
+     """Supplemental SHF_* constants for EM_IA_64."""
+-    SHF_IA_64_SHORT  = 0x10000000
+-    SHF_IA_64_NORECOV = 0x20000000
+-
+-class ShfMIPS(enum.IntFlag):
++class ShfMIPS(Shf):
+     """Supplemental SHF_* constants for EM_MIPS."""
+-    SHF_MIPS_GPREL = 0x10000000
+-    SHF_MIPS_MERGE = 0x20000000
+-    SHF_MIPS_ADDR = 0x40000000
+-    SHF_MIPS_STRINGS = 0x80000000
+-    SHF_MIPS_NOSTRIP = 0x08000000
+-    SHF_MIPS_LOCAL = 0x04000000
+-    SHF_MIPS_NAMES = 0x02000000
+-    SHF_MIPS_NODUPE = 0x01000000
+-
+-class ShfPARISC(enum.IntFlag):
++class ShfPARISC(Shf):
+     """Supplemental SHF_* constants for EM_PARISC."""
+-    SHF_PARISC_SHORT = 0x20000000
+-    SHF_PARISC_HUGE = 0x40000000
+-    SHF_PARISC_SBP = 0x80000000
+-
+-class Stb(_OpenIntEnum):
++_register_elf_h(ShfALPHA, prefix='SHF_ALPHA_', parent=Shf)
++_register_elf_h(ShfARM, prefix='SHF_ARM_', parent=Shf)
++_register_elf_h(ShfIA_64, prefix='SHF_IA_64_', parent=Shf)
++_register_elf_h(ShfMIPS, prefix='SHF_MIPS_', parent=Shf)
++_register_elf_h(ShfPARISC, prefix='SHF_PARISC_', parent=Shf)
++_register_elf_h(Shf, skip='SHF_MASKOS SHF_MASKPROC'.split())
++
++class Stb(_TypedConstant):
+     """ELF symbol binding type."""
+-    STB_LOCAL = 0
+-    STB_GLOBAL = 1
+-    STB_WEAK = 2
+-    STB_GNU_UNIQUE = 10
+-    STB_MIPS_SPLIT_COMMON = 13
++    prefix = 'STB_'
++_register_elf_h(Stb, ranges=True)
+ 
+-class Stt(_OpenIntEnum):
++class Stt(_TypedConstant):
+     """ELF symbol type."""
+-    STT_NOTYPE = 0
+-    STT_OBJECT = 1
+-    STT_FUNC = 2
+-    STT_SECTION = 3
+-    STT_FILE = 4
+-    STT_COMMON = 5
+-    STT_TLS = 6
+-    STT_GNU_IFUNC = 10
+-
+-class SttARM(enum.Enum):
++    prefix = 'STT_'
++class SttARM(Sht):
+     """Supplemental STT_* constants for EM_ARM."""
+-    STT_ARM_TFUNC = 13
+-    STT_ARM_16BIT = 15
+-
+-class SttPARISC(enum.Enum):
++class SttPARISC(Sht):
+     """Supplemental STT_* constants for EM_PARISC."""
+-    STT_HP_OPAQUE = 11
+-    STT_HP_STUB = 12
+-    STT_PARISC_MILLICODE = 13
+-
+-class SttSPARC(enum.Enum):
++class SttSPARC(Sht):
+     """Supplemental STT_* constants for EM_SPARC."""
+     STT_SPARC_REGISTER = 13
+-
+-class SttX86_64(enum.Enum):
++class SttX86_64(Sht):
+     """Supplemental STT_* constants for EM_X86_64."""
+-    SHT_X86_64_UNWIND = 0x70000001
++_register_elf_h(SttARM, prefix='STT_ARM_', parent=Stt)
++_register_elf_h(SttPARISC, prefix='STT_PARISC_', parent=Stt)
++_register_elf_h(SttSPARC, prefix='STT_SPARC_', parent=Stt)
++_register_elf_h(Stt, ranges=True)
++
+ 
+-class Pt(_OpenIntEnum):
++class Pt(_TypedConstant):
+     """ELF program header types.  Type of Phdr.p_type."""
+-    PT_NULL = 0
+-    PT_LOAD = 1
+-    PT_DYNAMIC = 2
+-    PT_INTERP = 3
+-    PT_NOTE = 4
+-    PT_SHLIB = 5
+-    PT_PHDR = 6
+-    PT_TLS = 7
+-    PT_NUM = 8
+-    PT_GNU_EH_FRAME = 0x6474e550
+-    PT_GNU_STACK = 0x6474e551
+-    PT_GNU_RELRO = 0x6474e552
+-    PT_GNU_PROPERTY = 0x6474e553
+-    PT_SUNWBSS = 0x6ffffffa
+-    PT_SUNWSTACK = 0x6ffffffb
+-
+-class PtAARCH64(enum.Enum):
++    prefix = 'PT_'
++class PtAARCH64(Pt):
+     """Supplemental PT_* constants for EM_AARCH64."""
+-    PT_AARCH64_MEMTAG_MTE = 0x70000002
+-
+-class PtARM(enum.Enum):
++class PtARM(Pt):
+     """Supplemental PT_* constants for EM_ARM."""
+-    PT_ARM_EXIDX = 0x70000001
+-
+-class PtIA_64(enum.Enum):
++class PtHP(Pt):
++    """Supplemental PT_* constants for HP-U."""
++class PtIA_64(Pt):
+     """Supplemental PT_* constants for EM_IA_64."""
+-    PT_IA_64_HP_OPT_ANOT = 0x60000012
+-    PT_IA_64_HP_HSL_ANOT = 0x60000013
+-    PT_IA_64_HP_STACK = 0x60000014
+-    PT_IA_64_ARCHEXT = 0x70000000
+-    PT_IA_64_UNWIND = 0x70000001
+-
+-class PtMIPS(enum.Enum):
++class PtMIPS(Pt):
+     """Supplemental PT_* constants for EM_MIPS."""
+-    PT_MIPS_REGINFO = 0x70000000
+-    PT_MIPS_RTPROC = 0x70000001
+-    PT_MIPS_OPTIONS = 0x70000002
+-    PT_MIPS_ABIFLAGS = 0x70000003
+-
+-class PtPARISC(enum.Enum):
++class PtPARISC(Pt):
+     """Supplemental PT_* constants for EM_PARISC."""
+-    PT_HP_TLS = 0x60000000
+-    PT_HP_CORE_NONE = 0x60000001
+-    PT_HP_CORE_VERSION = 0x60000002
+-    PT_HP_CORE_KERNEL = 0x60000003
+-    PT_HP_CORE_COMM = 0x60000004
+-    PT_HP_CORE_PROC = 0x60000005
+-    PT_HP_CORE_LOADABLE = 0x60000006
+-    PT_HP_CORE_STACK = 0x60000007
+-    PT_HP_CORE_SHM = 0x60000008
+-    PT_HP_CORE_MMF = 0x60000009
+-    PT_HP_PARALLEL = 0x60000010
+-    PT_HP_FASTBIND = 0x60000011
+-    PT_HP_OPT_ANNOT = 0x60000012
+-    PT_HP_HSL_ANNOT = 0x60000013
+-    PT_HP_STACK = 0x60000014
+-    PT_PARISC_ARCHEXT = 0x70000000
+-    PT_PARISC_UNWIND = 0x70000001
+-
+-class PtRISCV(enum.Enum):
++class PtRISCV(Pt):
+     """Supplemental PT_* constants for EM_RISCV."""
+-    PT_RISCV_ATTRIBUTES = 0x70000003
+-
+-class Dt(_OpenIntEnum):
++_register_elf_h(PtAARCH64, prefix='PT_AARCH64_', parent=Pt)
++_register_elf_h(PtARM, prefix='PT_ARM_', parent=Pt)
++_register_elf_h(PtHP, prefix='PT_HP_', parent=Pt)
++_register_elf_h(PtIA_64, prefix='PT_IA_64_', parent=Pt)
++_register_elf_h(PtMIPS, prefix='PT_MIPS_', parent=Pt)
++_register_elf_h(PtPARISC, prefix='PT_PARISC_', parent=Pt)
++_register_elf_h(PtRISCV, prefix='PT_RISCV_', parent=Pt)
++_register_elf_h(Pt, skip='PT_LOSUNW PT_HISUNW'.split(), ranges=True)
++
++class Dt(_TypedConstant):
+     """ELF dynamic segment tags.  Type of Dyn.d_val."""
+-    DT_NULL = 0
+-    DT_NEEDED = 1
+-    DT_PLTRELSZ = 2
+-    DT_PLTGOT = 3
+-    DT_HASH = 4
+-    DT_STRTAB = 5
+-    DT_SYMTAB = 6
+-    DT_RELA = 7
+-    DT_RELASZ = 8
+-    DT_RELAENT = 9
+-    DT_STRSZ = 10
+-    DT_SYMENT = 11
+-    DT_INIT = 12
+-    DT_FINI = 13
+-    DT_SONAME = 14
+-    DT_RPATH = 15
+-    DT_SYMBOLIC = 16
+-    DT_REL = 17
+-    DT_RELSZ = 18
+-    DT_RELENT = 19
+-    DT_PLTREL = 20
+-    DT_DEBUG = 21
+-    DT_TEXTREL = 22
+-    DT_JMPREL = 23
+-    DT_BIND_NOW = 24
+-    DT_INIT_ARRAY = 25
+-    DT_FINI_ARRAY = 26
+-    DT_INIT_ARRAYSZ = 27
+-    DT_FINI_ARRAYSZ = 28
+-    DT_RUNPATH = 29
+-    DT_FLAGS = 30
+-    DT_PREINIT_ARRAY = 32
+-    DT_PREINIT_ARRAYSZ = 33
+-    DT_SYMTAB_SHNDX = 34
+-    DT_RELRSZ = 35
+-    DT_RELR = 36
+-    DT_RELRENT = 37
+-    DT_GNU_PRELINKED = 0x6ffffdf5
+-    DT_GNU_CONFLICTSZ = 0x6ffffdf6
+-    DT_GNU_LIBLISTSZ = 0x6ffffdf7
+-    DT_CHECKSUM = 0x6ffffdf8
+-    DT_PLTPADSZ = 0x6ffffdf9
+-    DT_MOVEENT = 0x6ffffdfa
+-    DT_MOVESZ = 0x6ffffdfb
+-    DT_FEATURE_1 = 0x6ffffdfc
+-    DT_POSFLAG_1 = 0x6ffffdfd
+-    DT_SYMINSZ = 0x6ffffdfe
+-    DT_SYMINENT = 0x6ffffdff
+-    DT_GNU_HASH = 0x6ffffef5
+-    DT_TLSDESC_PLT = 0x6ffffef6
+-    DT_TLSDESC_GOT = 0x6ffffef7
+-    DT_GNU_CONFLICT = 0x6ffffef8
+-    DT_GNU_LIBLIST = 0x6ffffef9
+-    DT_CONFIG = 0x6ffffefa
+-    DT_DEPAUDIT = 0x6ffffefb
+-    DT_AUDIT = 0x6ffffefc
+-    DT_PLTPAD = 0x6ffffefd
+-    DT_MOVETAB = 0x6ffffefe
+-    DT_SYMINFO = 0x6ffffeff
+-    DT_VERSYM = 0x6ffffff0
+-    DT_RELACOUNT = 0x6ffffff9
+-    DT_RELCOUNT = 0x6ffffffa
+-    DT_FLAGS_1 = 0x6ffffffb
+-    DT_VERDEF = 0x6ffffffc
+-    DT_VERDEFNUM = 0x6ffffffd
+-    DT_VERNEED = 0x6ffffffe
+-    DT_VERNEEDNUM = 0x6fffffff
+-    DT_AUXILIARY = 0x7ffffffd
+-    DT_FILTER = 0x7fffffff
+-
+-class DtAARCH64(enum.Enum):
++    prefix = 'DT_'
++class DtAARCH64(Dt):
+     """Supplemental DT_* constants for EM_AARCH64."""
+-    DT_AARCH64_BTI_PLT = 0x70000001
+-    DT_AARCH64_PAC_PLT = 0x70000003
+-    DT_AARCH64_VARIANT_PCS = 0x70000005
+-
+-class DtALPHA(enum.Enum):
++class DtALPHA(Dt):
+     """Supplemental DT_* constants for EM_ALPHA."""
+-    DT_ALPHA_PLTRO = 0x70000000
+-
+-class DtALTERA_NIOS2(enum.Enum):
++class DtALTERA_NIOS2(Dt):
+     """Supplemental DT_* constants for EM_ALTERA_NIOS2."""
+-    DT_NIOS2_GP = 0x70000002
+-
+-class DtIA_64(enum.Enum):
++class DtIA_64(Dt):
+     """Supplemental DT_* constants for EM_IA_64."""
+-    DT_IA_64_PLT_RESERVE = 0x70000000
+-
+-class DtMIPS(enum.Enum):
++class DtMIPS(Dt):
+     """Supplemental DT_* constants for EM_MIPS."""
+-    DT_MIPS_RLD_VERSION = 0x70000001
+-    DT_MIPS_TIME_STAMP = 0x70000002
+-    DT_MIPS_ICHECKSUM = 0x70000003
+-    DT_MIPS_IVERSION = 0x70000004
+-    DT_MIPS_FLAGS = 0x70000005
+-    DT_MIPS_BASE_ADDRESS = 0x70000006
+-    DT_MIPS_MSYM = 0x70000007
+-    DT_MIPS_CONFLICT = 0x70000008
+-    DT_MIPS_LIBLIST = 0x70000009
+-    DT_MIPS_LOCAL_GOTNO = 0x7000000a
+-    DT_MIPS_CONFLICTNO = 0x7000000b
+-    DT_MIPS_LIBLISTNO = 0x70000010
+-    DT_MIPS_SYMTABNO = 0x70000011
+-    DT_MIPS_UNREFEXTNO = 0x70000012
+-    DT_MIPS_GOTSYM = 0x70000013
+-    DT_MIPS_HIPAGENO = 0x70000014
+-    DT_MIPS_RLD_MAP = 0x70000016
+-    DT_MIPS_DELTA_CLASS = 0x70000017
+-    DT_MIPS_DELTA_CLASS_NO = 0x70000018
+-    DT_MIPS_DELTA_INSTANCE = 0x70000019
+-    DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a
+-    DT_MIPS_DELTA_RELOC = 0x7000001b
+-    DT_MIPS_DELTA_RELOC_NO = 0x7000001c
+-    DT_MIPS_DELTA_SYM = 0x7000001d
+-    DT_MIPS_DELTA_SYM_NO = 0x7000001e
+-    DT_MIPS_DELTA_CLASSSYM = 0x70000020
+-    DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021
+-    DT_MIPS_CXX_FLAGS = 0x70000022
+-    DT_MIPS_PIXIE_INIT = 0x70000023
+-    DT_MIPS_SYMBOL_LIB = 0x70000024
+-    DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025
+-    DT_MIPS_LOCAL_GOTIDX = 0x70000026
+-    DT_MIPS_HIDDEN_GOTIDX = 0x70000027
+-    DT_MIPS_PROTECTED_GOTIDX = 0x70000028
+-    DT_MIPS_OPTIONS = 0x70000029
+-    DT_MIPS_INTERFACE = 0x7000002a
+-    DT_MIPS_DYNSTR_ALIGN = 0x7000002b
+-    DT_MIPS_INTERFACE_SIZE = 0x7000002c
+-    DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d
+-    DT_MIPS_PERF_SUFFIX = 0x7000002e
+-    DT_MIPS_COMPACT_SIZE = 0x7000002f
+-    DT_MIPS_GP_VALUE = 0x70000030
+-    DT_MIPS_AUX_DYNAMIC = 0x70000031
+-    DT_MIPS_PLTGOT = 0x70000032
+-    DT_MIPS_RWPLT = 0x70000034
+-    DT_MIPS_RLD_MAP_REL = 0x70000035
+-    DT_MIPS_XHASH = 0x70000036
+-
+-class DtPPC(enum.Enum):
++class DtPPC(Dt):
+     """Supplemental DT_* constants for EM_PPC."""
+-    DT_PPC_GOT = 0x70000000
+-    DT_PPC_OPT = 0x70000001
+-
+-class DtPPC64(enum.Enum):
++class DtPPC64(Dt):
+     """Supplemental DT_* constants for EM_PPC64."""
+-    DT_PPC64_GLINK = 0x70000000
+-    DT_PPC64_OPD = 0x70000001
+-    DT_PPC64_OPDSZ = 0x70000002
+-    DT_PPC64_OPT = 0x70000003
+-
+-class DtRISCV(enum.Enum):
++class DtRISCV(Dt):
+     """Supplemental DT_* constants for EM_RISCV."""
+-    DT_RISCV_VARIANT_CC = 0x70000001
+-
+-class DtSPARC(enum.Enum):
++class DtSPARC(Dt):
+     """Supplemental DT_* constants for EM_SPARC."""
+-    DT_SPARC_REGISTER = 0x70000001
++_dt_skip = '''
++DT_ENCODING DT_PROCNUM
++DT_ADDRRNGLO DT_ADDRRNGHI DT_ADDRNUM
++DT_VALRNGLO DT_VALRNGHI DT_VALNUM
++DT_VERSIONTAGNUM DT_EXTRANUM
++DT_AARCH64_NUM
++DT_ALPHA_NUM
++DT_IA_64_NUM
++DT_MIPS_NUM
++DT_PPC_NUM
++DT_PPC64_NUM
++DT_SPARC_NUM
++'''.strip().split()
++_register_elf_h(DtAARCH64, prefix='DT_AARCH64_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtALPHA, prefix='DT_ALPHA_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtALTERA_NIOS2, prefix='DT_NIOS2_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtIA_64, prefix='DT_IA_64_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtMIPS, prefix='DT_MIPS_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtPPC, prefix='DT_PPC_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtPPC64, prefix='DT_PPC64_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtRISCV, prefix='DT_RISCV_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtSPARC, prefix='DT_SPARC_', skip=_dt_skip, parent=Dt)
++_register_elf_h(Dt, skip=_dt_skip, ranges=True)
++del _dt_skip
++
++# Constant extraction is complete.
++del _register_elf_h
++del _elf_h
+ 
+ class StInfo:
+     """ELF symbol binding and type.  Type of the Sym.st_info field."""
diff --git a/SOURCES/glibc-rh2109510-22.patch b/SOURCES/glibc-rh2109510-22.patch
new file mode 100644
index 0000000..e87b99f
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-22.patch
@@ -0,0 +1,34 @@
+commit d33705c0b020632274318323931695a99753b5be
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Nov 3 12:24:17 2022 +0100
+
+    scripts/glibcelf.py: Properly report <elf.h> parsing failures
+    
+    Without this change, parse failures result in an exception:
+    
+    Traceback (most recent call last):
+      File "tst-glibcelf.py", line 23, in <module>
+        import glibcelf
+      File "/path/to/git/scripts/glibcelf.py", line 226, in <module>
+        _elf_h = _parse_elf_h()
+      File "/path/to/git/scripts/glibcelf.py", line 221, in _parse_elf_h
+        result = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), reporter)
+      File "/path/to/git/scripts/glibcpp.py", line 379, in macro_eval
+        reporter.error(md.line, 'macro {} redefined'.format(md.name))
+      File "/path/to/git/scripts/glibcelf.py", line 214, in error
+        errors += 1
+    UnboundLocalError: local variable 'errors' referenced before assignment
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 420cb21943b28bba..59aab56ecf9deb3e 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -211,7 +211,7 @@ def _parse_elf_h():
+             self.errors = 0
+ 
+         def error(self, line, message):
+-            errors += 1
++            self.errors += 1
+             print('{}:{}: error: {}'.format(path, line, message))
+ 
+         def note(self, line, message):
diff --git a/SOURCES/glibc-rh2109510-23.patch b/SOURCES/glibc-rh2109510-23.patch
new file mode 100644
index 0000000..7823014
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-23.patch
@@ -0,0 +1,108 @@
+Downstream-only adjustments to scripts/glibcelf.py.  We do not have
+CSKY nor RISC-V constants in <elf.h>, so glibcelf cannot extract
+those.  PT_AARCH64_* constants are missing as well.
+
+Adjust elf/tst-glibcelf.py to use PT_MIPS_OPTIONS instead of
+PT_AARCH64_MEMTAG_MTE for testing.  It has the same numeric value
+(0x70000002).
+
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+index a5bff45eae55edea..9cb0861589d6ae2e 100644
+--- a/elf/tst-glibcelf.py
++++ b/elf/tst-glibcelf.py
+@@ -75,15 +75,17 @@ def check_basic():
+     if repr(glibcelf.Pt(17609)) != 'Pt(17609)':
+         error('repr(Pt(17609))')
+ 
+-    if glibcelf.Pt('PT_AARCH64_MEMTAG_MTE') \
+-       is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
+-        error('PT_AARCH64_MEMTAG_MTE identity')
+-    if glibcelf.Pt(0x70000002) is glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++    # Note: Upstream uses PT_AARCH64_MEMTAG_MTE instead of PT_MIPS_OPTIONS.
++    # PT_AARCH64_MEMTAG_MTE is not yet available downstream.
++    if glibcelf.Pt('PT_MIPS_OPTIONS') \
++       is not glibcelf.Pt.PT_MIPS_OPTIONS:
++        error('PT_MIPS_OPTIONS identity')
++    if glibcelf.Pt(0x70000002) is glibcelf.Pt.PT_MIPS_OPTIONS:
+         error('Pt(0x70000002) identity')
+-    if glibcelf.PtAARCH64(0x70000002) is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
+-        error('PtAARCH64(0x70000002) identity')
+-    if glibcelf.Pt.PT_AARCH64_MEMTAG_MTE.short_name != 'AARCH64_MEMTAG_MTE':
+-        error('PT_AARCH64_MEMTAG_MTE short name')
++    if glibcelf.PtMIPS(0x70000002) is not glibcelf.Pt.PT_MIPS_OPTIONS:
++        error('PtMIPS(0x70000002) identity')
++    if glibcelf.Pt.PT_MIPS_OPTIONS.short_name != 'MIPS_OPTIONS':
++        error('PT_MIPS_OPTIONS short name')
+ 
+     # Special cases for int-like Shn.
+     if glibcelf.Shn(32) == glibcelf.Shn.SHN_XINDEX:
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 59aab56ecf9deb3e..5980d7cc906005e2 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -306,23 +306,17 @@ class ShtALPHA(Sht):
+     """Supplemental SHT_* constants for EM_ALPHA."""
+ class ShtARM(Sht):
+     """Supplemental SHT_* constants for EM_ARM."""
+-class ShtCSKY(Sht):
+-    """Supplemental SHT_* constants for EM_CSKY."""
+ class ShtIA_64(Sht):
+     """Supplemental SHT_* constants for EM_IA_64."""
+ class ShtMIPS(Sht):
+     """Supplemental SHT_* constants for EM_MIPS."""
+ class ShtPARISC(Sht):
+     """Supplemental SHT_* constants for EM_PARISC."""
+-class ShtRISCV(Sht):
+-    """Supplemental SHT_* constants for EM_RISCV."""
+ _register_elf_h(ShtALPHA, prefix='SHT_ALPHA_', parent=Sht)
+ _register_elf_h(ShtARM, prefix='SHT_ARM_', parent=Sht)
+-_register_elf_h(ShtCSKY, prefix='SHT_CSKY_', parent=Sht)
+ _register_elf_h(ShtIA_64, prefix='SHT_IA_64_', parent=Sht)
+ _register_elf_h(ShtMIPS, prefix='SHT_MIPS_', parent=Sht)
+ _register_elf_h(ShtPARISC, prefix='SHT_PARISC_', parent=Sht)
+-_register_elf_h(ShtRISCV, prefix='SHT_RISCV_', parent=Sht)
+ _register_elf_h(Sht, ranges=True,
+                 skip='SHT_LOSUNW SHT_HISUNW SHT_LOUSER SHT_HIUSER'.split())
+ 
+@@ -392,8 +386,6 @@ _register_elf_h(Stt, ranges=True)
+ class Pt(_TypedConstant):
+     """ELF program header types.  Type of Phdr.p_type."""
+     prefix = 'PT_'
+-class PtAARCH64(Pt):
+-    """Supplemental PT_* constants for EM_AARCH64."""
+ class PtARM(Pt):
+     """Supplemental PT_* constants for EM_ARM."""
+ class PtHP(Pt):
+@@ -404,15 +396,11 @@ class PtMIPS(Pt):
+     """Supplemental PT_* constants for EM_MIPS."""
+ class PtPARISC(Pt):
+     """Supplemental PT_* constants for EM_PARISC."""
+-class PtRISCV(Pt):
+-    """Supplemental PT_* constants for EM_RISCV."""
+-_register_elf_h(PtAARCH64, prefix='PT_AARCH64_', parent=Pt)
+ _register_elf_h(PtARM, prefix='PT_ARM_', parent=Pt)
+ _register_elf_h(PtHP, prefix='PT_HP_', parent=Pt)
+ _register_elf_h(PtIA_64, prefix='PT_IA_64_', parent=Pt)
+ _register_elf_h(PtMIPS, prefix='PT_MIPS_', parent=Pt)
+ _register_elf_h(PtPARISC, prefix='PT_PARISC_', parent=Pt)
+-_register_elf_h(PtRISCV, prefix='PT_RISCV_', parent=Pt)
+ _register_elf_h(Pt, skip='PT_LOSUNW PT_HISUNW'.split(), ranges=True)
+ 
+ class Dt(_TypedConstant):
+@@ -432,8 +420,6 @@ class DtPPC(Dt):
+     """Supplemental DT_* constants for EM_PPC."""
+ class DtPPC64(Dt):
+     """Supplemental DT_* constants for EM_PPC64."""
+-class DtRISCV(Dt):
+-    """Supplemental DT_* constants for EM_RISCV."""
+ class DtSPARC(Dt):
+     """Supplemental DT_* constants for EM_SPARC."""
+ _dt_skip = '''
+@@ -456,7 +442,6 @@ _register_elf_h(DtIA_64, prefix='DT_IA_64_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtMIPS, prefix='DT_MIPS_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtPPC, prefix='DT_PPC_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtPPC64, prefix='DT_PPC64_', skip=_dt_skip, parent=Dt)
+-_register_elf_h(DtRISCV, prefix='DT_RISCV_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtSPARC, prefix='DT_SPARC_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(Dt, skip=_dt_skip, ranges=True)
+ del _dt_skip
diff --git a/SOURCES/glibc-rh2109510-3.patch b/SOURCES/glibc-rh2109510-3.patch
new file mode 100644
index 0000000..59496a7
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-3.patch
@@ -0,0 +1,32 @@
+commit 7b36d26b22d147ffc347f427f9fd584700578a94
+Author: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Date:   Mon Dec 3 14:40:48 2018 +0100
+
+    Fix test-as-const-jmp_buf-ssp.c generation on gnu-i386
+    
+    hurd's jmp_buf-ssp.sym does not define any symbol.
+    scripts/gen-as-const.py currently was emitting an empty line in that
+    case, and the gawk invocation was prepending "asconst_" to it, ending up
+    with:
+    
+    .../build/glibc/setjmp/test-as-const-jmp_buf-ssp.c:1:2: error: expected « = », « , », « ; », « asm » or
+    « __attribute__ » at end of input
+        1 |  asconst_
+          |  ^~~~~~~~
+    
+            * scripts/gen-as-const.py (main): Avoid emitting empty line when
+            there is no element in `consts'.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index b7a5744bb192dd67..cabf401ed15e8367 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -153,7 +153,7 @@ def main():
+         print(gen_test(sym_data))
+     else:
+         consts = compute_c_consts(sym_data, args.cc)
+-        print('\n'.join('#define %s %s' % c for c in sorted(consts.items())))
++        print(''.join('#define %s %s\n' % c for c in sorted(consts.items())), end='')
+ 
+ if __name__ == '__main__':
+     main()
diff --git a/SOURCES/glibc-rh2109510-4.patch b/SOURCES/glibc-rh2109510-4.patch
new file mode 100644
index 0000000..a56943a
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-4.patch
@@ -0,0 +1,157 @@
+commit 477a02f63751c4b759ddd9454d17f2a7ad120ee3
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 3 22:08:50 2018 +0000
+
+    Make gen-as-const.py handle '--' consistently with awk script.
+    
+    It was reported in
+    <https://sourceware.org/ml/libc-alpha/2018-12/msg00045.html> that
+    gen-as-const.py fails to generate test code in the case where a .sym
+    file has no symbols in it, so resulting in a test failing to link for
+    Hurd.
+    
+    The relevant difference from the old awk script is that the old script
+    treated '--' lines as indicating that the text to do at the start of
+    the test (or file used to compute constants) should be output at that
+    point if not already output, as well as treating lines with actual
+    entries for constants like that.  This patch changes gen-as-const.py
+    accordingly, making it the sole responsibility of the code parsing
+    .sym files to determine when such text should be output and ensuring
+    it's always output at some point even if there are no symbols and no
+    '--' lines, since not outputting it means the test fails to link.
+    Handling '--' like that also avoids any problems that would arise if
+    the first entry for a symbol were inside #ifdef (since the text in
+    question must not be output inside #ifdef).
+    
+    Tested for x86_64, and with build-many-glibcs.py for i686-gnu.  Note
+    that there are still compilation test failures for i686-gnu
+    (linknamespace tests, possibly arising from recent posix_spawn-related
+    changes).
+    
+            * scripts/gen-as-const.py (compute_c_consts): Take an argument
+            'START' to indicate that start text should be output.
+            (gen_test): Likewise.
+            (main): Generate 'START' for first symbol or '--' line, or at end
+            of input if not previously generated.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index cabf401ed15e8367..eb85ef1aa0f4934d 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -34,28 +34,28 @@ def compute_c_consts(sym_data, cc):
+     """Compute the values of some C constants.
+ 
+     The first argument is a list whose elements are either strings
+-    (preprocessor directives) or pairs of strings (a name and a C
++    (preprocessor directives, or the special string 'START' to
++    indicate this function should insert its initial boilerplate text
++    in the output there) or pairs of strings (a name and a C
+     expression for the corresponding value).  Preprocessor directives
+     in the middle of the list may be used to select which constants
+     end up being evaluated using which expressions.
+ 
+     """
+     out_lines = []
+-    started = False
+     for arg in sym_data:
+         if isinstance(arg, str):
+-            out_lines.append(arg)
++            if arg == 'START':
++                out_lines.append('void\ndummy (void)\n{')
++            else:
++                out_lines.append(arg)
+             continue
+         name = arg[0]
+         value = arg[1]
+-        if not started:
+-            out_lines.append('void\ndummy (void)\n{')
+-            started = True
+         out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
+                          ': : \"i\" ((long int) (%s)));'
+                          % (name, value))
+-    if started:
+-        out_lines.append('}')
++    out_lines.append('}')
+     out_lines.append('')
+     out_text = '\n'.join(out_lines)
+     with tempfile.TemporaryDirectory() as temp_dir:
+@@ -89,32 +89,32 @@ def gen_test(sym_data):
+ 
+     """
+     out_lines = []
+-    started = False
+     for arg in sym_data:
+         if isinstance(arg, str):
+-            out_lines.append(arg)
++            if arg == 'START':
++                out_lines.append('#include <stdint.h>\n'
++                                 '#include <stdio.h>\n'
++                                 '#include <bits/wordsize.h>\n'
++                                 '#if __WORDSIZE == 64\n'
++                                 'typedef uint64_t c_t;\n'
++                                 '# define U(n) UINT64_C (n)\n'
++                                 '#else\n'
++                                 'typedef uint32_t c_t;\n'
++                                 '# define U(n) UINT32_C (n)\n'
++                                 '#endif\n'
++                                 'static int\n'
++                                 'do_test (void)\n'
++                                 '{\n'
++                                 # Compilation test only, using static
++                                 # assertions.
++                                 '  return 0;\n'
++                                 '}\n'
++                                 '#include <support/test-driver.c>')
++            else:
++                out_lines.append(arg)
+             continue
+         name = arg[0]
+         value = arg[1]
+-        if not started:
+-            out_lines.append('#include <stdint.h>\n'
+-                             '#include <stdio.h>\n'
+-                             '#include <bits/wordsize.h>\n'
+-                             '#if __WORDSIZE == 64\n'
+-                             'typedef uint64_t c_t;\n'
+-                             '# define U(n) UINT64_C (n)\n'
+-                             '#else\n'
+-                             'typedef uint32_t c_t;\n'
+-                             '# define U(n) UINT32_C (n)\n'
+-                             '#endif\n'
+-                             'static int\n'
+-                             'do_test (void)\n'
+-                             '{\n'
+-                             # Compilation test only, using static assertions.
+-                             '  return 0;\n'
+-                             '}\n'
+-                             '#include <support/test-driver.c>')
+-            started = True
+         out_lines.append('_Static_assert (U (asconst_%s) == (c_t) (%s), '
+                          '"value of %s");'
+                          % (name, value, name))
+@@ -134,6 +134,7 @@ def main():
+     args = parser.parse_args()
+     sym_data = []
+     with open(args.sym_file, 'r') as sym_file:
++        started = False
+         for line in sym_file:
+             line = line.strip()
+             if line == '':
+@@ -143,12 +144,17 @@ def main():
+                 sym_data.append(line)
+                 continue
+             words = line.split(maxsplit=1)
++            if not started:
++                sym_data.append('START')
++                started = True
+             # Separator.
+             if words[0] == '--':
+                 continue
+             name = words[0]
+             value = words[1] if len(words) > 1 else words[0]
+             sym_data.append((name, value))
++        if not started:
++            sym_data.append('START')
+     if args.test:
+         print(gen_test(sym_data))
+     else:
diff --git a/SOURCES/glibc-rh2109510-5.patch b/SOURCES/glibc-rh2109510-5.patch
new file mode 100644
index 0000000..3e93b78
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-5.patch
@@ -0,0 +1,483 @@
+commit a8110b727e508f7ddf34f940af622e6f95435201
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 10 22:27:13 2018 +0000
+
+    Move tst-signal-numbers to Python.
+    
+    This patch converts the tst-signal-numbers test from shell + awk to
+    Python.
+    
+    As with gen-as-const, the point is not so much that shell and awk are
+    problematic for this code, as that it's useful to build up general
+    infrastructure in Python for use of a range of code involving
+    extracting values from C headers.  This patch moves some code from
+    gen-as-const.py to a new glibcextract.py, which also gains functions
+    relating to listing macros, and comparing the values of a set of
+    macros from compiling two different pieces of code.
+    
+    It's not just signal numbers that should have such tests; pretty much
+    any case where glibc copies constants from Linux kernel headers should
+    have such tests that the values and sets of constants agree except
+    where differences are known to be OK.  Much the same also applies to
+    structure layouts (although testing those without hardcoding lists of
+    fields to test will be more complicated).
+    
+    Given this patch, another test for a set of macros would essentially
+    be just a call to glibcextract.compare_macro_consts (plus boilerplate
+    code - and we could move to having separate text files defining such
+    tests, like the .sym inputs to gen-as-const, so that only a single
+    Python script is needed for most such tests).  Some such tests would
+    of course need new features, e.g. where the set of macros changes in
+    new kernel versions (so you need to allow new macro names on the
+    kernel side if the kernel headers are newer than the version known to
+    glibc, and extra macros on the glibc side if the kernel headers are
+    older).  tst-syscall-list.sh could become a Python script that uses
+    common code to generate lists of macros but does other things with its
+    own custom logic.
+    
+    There are a few differences from the existing shell + awk test.
+    Because the new test evaluates constants using the compiler, no
+    special handling is needed any more for one signal name being defined
+    to another.  Because asm/signal.h now needs to pass through the
+    compiler, not just the preprocessor, stddef.h is included as well
+    (given the asm/signal.h issue that it requires an externally provided
+    definition of size_t).  The previous code defined __ASSEMBLER__ with
+    asm/signal.h; this is removed (__ASSEMBLY__, a different macro,
+    eliminates the requirement for stddef.h on some but not all
+    architectures).
+    
+    Tested for x86_64, and with build-many-glibcs.py.
+    
+            * scripts/glibcextract.py: New file.
+            * scripts/gen-as-const.py: Do not import os.path, re, subprocess
+            or tempfile.  Import glibcexctract.
+            (compute_c_consts): Remove.  Moved to glibcextract.py.
+            (gen_test): Update reference to compute_c_consts.
+            (main): Likewise.
+            * sysdeps/unix/sysv/linux/tst-signal-numbers.py: New file.
+            * sysdeps/unix/sysv/linux/tst-signal-numbers.sh: Remove.
+            * sysdeps/unix/sysv/linux/Makefile
+            ($(objpfx)tst-signal-numbers.out): Use tst-signal-numbers.py.
+            Redirect stderr as well as stdout.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index eb85ef1aa0f4934d..f85e359394acb1a4 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -24,68 +24,14 @@
+ # A line giving just a name implies an expression consisting of just that name.
+ 
+ import argparse
+-import os.path
+-import re
+-import subprocess
+-import tempfile
+ 
+-
+-def compute_c_consts(sym_data, cc):
+-    """Compute the values of some C constants.
+-
+-    The first argument is a list whose elements are either strings
+-    (preprocessor directives, or the special string 'START' to
+-    indicate this function should insert its initial boilerplate text
+-    in the output there) or pairs of strings (a name and a C
+-    expression for the corresponding value).  Preprocessor directives
+-    in the middle of the list may be used to select which constants
+-    end up being evaluated using which expressions.
+-
+-    """
+-    out_lines = []
+-    for arg in sym_data:
+-        if isinstance(arg, str):
+-            if arg == 'START':
+-                out_lines.append('void\ndummy (void)\n{')
+-            else:
+-                out_lines.append(arg)
+-            continue
+-        name = arg[0]
+-        value = arg[1]
+-        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
+-                         ': : \"i\" ((long int) (%s)));'
+-                         % (name, value))
+-    out_lines.append('}')
+-    out_lines.append('')
+-    out_text = '\n'.join(out_lines)
+-    with tempfile.TemporaryDirectory() as temp_dir:
+-        c_file_name = os.path.join(temp_dir, 'test.c')
+-        s_file_name = os.path.join(temp_dir, 'test.s')
+-        with open(c_file_name, 'w') as c_file:
+-            c_file.write(out_text)
+-        # Compilation has to be from stdin to avoid the temporary file
+-        # name being written into the generated dependencies.
+-        cmd = ('%s -S -o %s -x c - < %s' % (cc, s_file_name, c_file_name))
+-        subprocess.check_call(cmd, shell=True)
+-        consts = {}
+-        with open(s_file_name, 'r') as s_file:
+-            for line in s_file:
+-                match = re.search('@@@name@@@([^@]*)'
+-                                  '@@@value@@@[^0-9Xxa-fA-F-]*'
+-                                  '([0-9Xxa-fA-F-]+).*@@@end@@@', line)
+-                if match:
+-                    if (match.group(1) in consts
+-                        and match.group(2) != consts[match.group(1)]):
+-                        raise ValueError('duplicate constant %s'
+-                                         % match.group(1))
+-                    consts[match.group(1)] = match.group(2)
+-        return consts
++import glibcextract
+ 
+ 
+ def gen_test(sym_data):
+     """Generate a test for the values of some C constants.
+ 
+-    The first argument is as for compute_c_consts.
++    The first argument is as for glibcextract.compute_c_consts.
+ 
+     """
+     out_lines = []
+@@ -158,7 +104,7 @@ def main():
+     if args.test:
+         print(gen_test(sym_data))
+     else:
+-        consts = compute_c_consts(sym_data, args.cc)
++        consts = glibcextract.compute_c_consts(sym_data, args.cc)
+         print(''.join('#define %s %s\n' % c for c in sorted(consts.items())), end='')
+ 
+ if __name__ == '__main__':
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+new file mode 100644
+index 0000000000000000..ecc4d5b6cc387c7d
+--- /dev/null
++++ b/scripts/glibcextract.py
+@@ -0,0 +1,162 @@
++#!/usr/bin/python3
++# Extract information from C headers.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++import os.path
++import re
++import subprocess
++import tempfile
++
++
++def compute_c_consts(sym_data, cc):
++    """Compute the values of some C constants.
++
++    The first argument is a list whose elements are either strings
++    (preprocessor directives, or the special string 'START' to
++    indicate this function should insert its initial boilerplate text
++    in the output there) or pairs of strings (a name and a C
++    expression for the corresponding value).  Preprocessor directives
++    in the middle of the list may be used to select which constants
++    end up being evaluated using which expressions.
++
++    """
++    out_lines = []
++    for arg in sym_data:
++        if isinstance(arg, str):
++            if arg == 'START':
++                out_lines.append('void\ndummy (void)\n{')
++            else:
++                out_lines.append(arg)
++            continue
++        name = arg[0]
++        value = arg[1]
++        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
++                         ': : \"i\" ((long int) (%s)));'
++                         % (name, value))
++    out_lines.append('}')
++    out_lines.append('')
++    out_text = '\n'.join(out_lines)
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        s_file_name = os.path.join(temp_dir, 'test.s')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(out_text)
++        # Compilation has to be from stdin to avoid the temporary file
++        # name being written into the generated dependencies.
++        cmd = ('%s -S -o %s -x c - < %s' % (cc, s_file_name, c_file_name))
++        subprocess.check_call(cmd, shell=True)
++        consts = {}
++        with open(s_file_name, 'r') as s_file:
++            for line in s_file:
++                match = re.search('@@@name@@@([^@]*)'
++                                  '@@@value@@@[^0-9Xxa-fA-F-]*'
++                                  '([0-9Xxa-fA-F-]+).*@@@end@@@', line)
++                if match:
++                    if (match.group(1) in consts
++                        and match.group(2) != consts[match.group(1)]):
++                        raise ValueError('duplicate constant %s'
++                                         % match.group(1))
++                    consts[match.group(1)] = match.group(2)
++        return consts
++
++
++def list_macros(source_text, cc):
++    """List the preprocessor macros defined by the given source code.
++
++    The return value is a pair of dicts, the first one mapping macro
++    names to their expansions and the second one mapping macro names
++    to lists of their arguments, or to None for object-like macros.
++
++    """
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        i_file_name = os.path.join(temp_dir, 'test.i')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(source_text)
++        cmd = ('%s -E -dM -o %s %s' % (cc, i_file_name, c_file_name))
++        subprocess.check_call(cmd, shell=True)
++        macros_exp = {}
++        macros_args = {}
++        with open(i_file_name, 'r') as i_file:
++            for line in i_file:
++                match = re.fullmatch('#define ([0-9A-Za-z_]+)(.*)\n', line)
++                if not match:
++                    raise ValueError('bad -dM output line: %s' % line)
++                name = match.group(1)
++                value = match.group(2)
++                if value.startswith(' '):
++                    value = value[1:]
++                    args = None
++                elif value.startswith('('):
++                    match = re.fullmatch(r'\((.*?)\) (.*)', value)
++                    if not match:
++                        raise ValueError('bad -dM output line: %s' % line)
++                    args = match.group(1).split(',')
++                    value = match.group(2)
++                else:
++                    raise ValueError('bad -dM output line: %s' % line)
++                if name in macros_exp:
++                    raise ValueError('duplicate macro: %s' % line)
++                macros_exp[name] = value
++                macros_args[name] = args
++    return macros_exp, macros_args
++
++
++def compute_macro_consts(source_text, cc, macro_re, exclude_re=None):
++    """Compute the integer constant values of macros defined by source_text.
++
++    Macros must match the regular expression macro_re, and if
++    exclude_re is defined they must not match exclude_re.  Values are
++    computed with compute_c_consts.
++
++    """
++    macros_exp, macros_args = list_macros(source_text, cc)
++    macros_set = {m for m in macros_exp
++                  if (macros_args[m] is None
++                      and re.fullmatch(macro_re, m)
++                      and (exclude_re is None
++                           or not re.fullmatch(exclude_re, m)))}
++    sym_data = [source_text, 'START']
++    sym_data.extend(sorted((m, m) for m in macros_set))
++    return compute_c_consts(sym_data, cc)
++
++
++def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None):
++    """Compare the values of macros defined by two different sources.
++
++    The sources would typically be includes of a glibc header and a
++    kernel header.  Return 1 if there were any differences, 0 if the
++    macro values were the same.
++
++    """
++    macros_1 = compute_macro_consts(source_1, cc, macro_re, exclude_re)
++    macros_2 = compute_macro_consts(source_2, cc, macro_re, exclude_re)
++    if macros_1 == macros_2:
++        return 0
++    print('First source:\n%s\n' % source_1)
++    print('Second source:\n%s\n' % source_2)
++    for name, value in sorted(macros_1.items()):
++        if name not in macros_2:
++            print('Only in first source: %s' % name)
++        elif macros_1[name] != macros_2[name]:
++            print('Different values for %s: %s != %s'
++                  % (name, macros_1[name], macros_2[name]))
++    for name in sorted(macros_2.keys()):
++        if name not in macros_1:
++            print('Only in second source: %s' % name)
++    return 1
+diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile
+index bb055f9d6b841ff5..9c10ee53b26e1b1b 100644
+--- a/sysdeps/unix/sysv/linux/Makefile
++++ b/sysdeps/unix/sysv/linux/Makefile
+@@ -113,11 +113,14 @@ tests-special += $(objpfx)tst-signal-numbers.out
+ # in this context, but signal.c includes signal.h and not much else so it'll
+ # be conservatively correct.
+ $(objpfx)tst-signal-numbers.out: \
+-		../sysdeps/unix/sysv/linux/tst-signal-numbers.sh \
++		../sysdeps/unix/sysv/linux/tst-signal-numbers.py \
+ 		$(objpfx)signal.o*
+-	AWK=$(AWK) $(SHELL) ../sysdeps/unix/sysv/linux/tst-signal-numbers.sh \
+-	$(CC) $(patsubst -DMODULE_NAME=%,-DMODULE_NAME=testsuite,$(CPPFLAGS)) \
+-	< /dev/null > $@; $(evaluate-test)
++	PYTHONPATH=../scripts \
++	$(PYTHON) ../sysdeps/unix/sysv/linux/tst-signal-numbers.py \
++		   --cc="$(CC) $(patsubst -DMODULE_NAME=%, \
++					  -DMODULE_NAME=testsuite, \
++					  $(CPPFLAGS))" \
++	< /dev/null > $@ 2>&1; $(evaluate-test)
+ endif
+ 
+ ifeq ($(subdir),socket)
+diff --git a/sysdeps/unix/sysv/linux/tst-signal-numbers.py b/sysdeps/unix/sysv/linux/tst-signal-numbers.py
+new file mode 100644
+index 0000000000000000..48c63d1218e8303d
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/tst-signal-numbers.py
+@@ -0,0 +1,48 @@
++#!/usr/bin/python3
++# Test that glibc's signal numbers match the kernel's.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++import argparse
++import sys
++
++import glibcextract
++
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description="Test that glibc's signal numbers match the kernel's.")
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    args = parser.parse_args()
++    sys.exit(glibcextract.compare_macro_consts(
++        '#define _GNU_SOURCE 1\n'
++        '#include <signal.h>\n',
++        '#define _GNU_SOURCE 1\n'
++        '#include <stddef.h>\n'
++        '#include <asm/signal.h>\n',
++        args.cc,
++        # Filter out constants that aren't signal numbers.
++        'SIG[A-Z]+',
++        # Discard obsolete signal numbers and unrelated constants:
++        #    SIGCLD, SIGIOT, SIGSWI, SIGUNUSED.
++        #    SIGSTKSZ, SIGRTMIN, SIGRTMAX.
++        'SIG(CLD|IOT|RT(MIN|MAX)|STKSZ|SWI|UNUSED)'))
++
++if __name__ == '__main__':
++    main()
+diff --git a/sysdeps/unix/sysv/linux/tst-signal-numbers.sh b/sysdeps/unix/sysv/linux/tst-signal-numbers.sh
+deleted file mode 100644
+index e1f7be0337c720a6..0000000000000000
+--- a/sysdeps/unix/sysv/linux/tst-signal-numbers.sh
++++ /dev/null
+@@ -1,86 +0,0 @@
+-#! /bin/sh
+-# Test that glibc's signal numbers match the kernel's.
+-# Copyright (C) 2017-2018 Free Software Foundation, Inc.
+-# This file is part of the GNU C Library.
+-
+-# The GNU C Library is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU Lesser General Public
+-# License as published by the Free Software Foundation; either
+-# version 2.1 of the License, or (at your option) any later version.
+-
+-# The GNU C Library is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+-# Lesser General Public License for more details.
+-
+-# You should have received a copy of the GNU Lesser General Public
+-# License along with the GNU C Library; if not, see
+-# <http://www.gnu.org/licenses/>.
+-
+-set -e
+-if [ -n "$BASH_VERSION" ]; then set -o pipefail; fi
+-LC_ALL=C; export LC_ALL
+-
+-# We cannot use Linux's asm/signal.h to define signal numbers, because
+-# it isn't sufficiently namespace-clean.  Instead, this test checks
+-# that our signal numbers match the kernel's.  This script expects
+-# "$@" to be $(CC) $(CPPFLAGS) as set by glibc's Makefiles, and $AWK
+-# to be set in the environment.
+-
+-# Before doing anything else, fail if the compiler doesn't work.
+-"$@" -E -xc -dM - < /dev/null > /dev/null
+-
+-tmpG=`mktemp -t signums_glibc.XXXXXXXXX`
+-tmpK=`mktemp -t signums_kernel.XXXXXXXXX`
+-trap "rm -f '$tmpG' '$tmpK'" 0
+-
+-# Filter out constants that aren't signal numbers.
+-# If SIGPOLL is defined as SIGIO, swap it around so SIGIO is defined as
+-# SIGPOLL. Similarly for SIGABRT and SIGIOT.
+-# Discard obsolete signal numbers and unrelated constants:
+-#    SIGCLD, SIGIOT, SIGSWI, SIGUNUSED.
+-#    SIGSTKSZ, SIGRTMIN, SIGRTMAX.
+-# Then sort the list.
+-filter_defines ()
+-{
+-    $AWK '
+-/^#define SIG[A-Z]+ ([0-9]+|SIG[A-Z0-9]+)$/ { signals[$2] = $3 }
+-END {
+-  if ("SIGPOLL" in signals && "SIGIO" in signals &&
+-      signals["SIGPOLL"] == "SIGIO") {
+-    signals["SIGPOLL"] = signals["SIGIO"]
+-    signals["SIGIO"] = "SIGPOLL"
+-  }
+-  if ("SIGABRT" in signals && "SIGIOT" in signals &&
+-      signals["SIGABRT"] == "SIGIOT") {
+-    signals["SIGABRT"] = signals["SIGIOT"]
+-    signals["SIGIOT"] = "SIGABRT"
+-  }
+-  for (sig in signals) {
+-    if (sig !~ /^SIG(CLD|IOT|RT(MIN|MAX)|STKSZ|SWI|UNUSED)$/) {
+-      printf("#define %s %s\n", sig, signals[sig])
+-    }
+-  }
+-}' | sort
+-}
+-
+-# $CC may contain command-line switches, so it should be word-split.
+-printf '%s' '#define _GNU_SOURCE 1
+-#include <signal.h>
+-' |
+-    "$@" -E -xc -dM - |
+-    filter_defines > "$tmpG"
+-
+-printf '%s' '#define _GNU_SOURCE 1
+-#define __ASSEMBLER__ 1
+-#include <asm/signal.h>
+-' |
+-    "$@" -E -xc -dM - |
+-    filter_defines > "$tmpK"
+-
+-if cmp -s "$tmpG" "$tmpK"; then
+-    exit 0
+-else
+-    diff -u "$tmpG" "$tmpK"
+-    exit 1
+-fi
diff --git a/SOURCES/glibc-rh2109510-6.patch b/SOURCES/glibc-rh2109510-6.patch
new file mode 100644
index 0000000..61251dc
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-6.patch
@@ -0,0 +1,98 @@
+Partial backport of:
+
+commit cb7be1590e9b18e272e72eb4e910a7ad06a53bd0
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 10 22:56:59 2018 +0000
+
+    Use gen-as-const.py to process .pysym files.
+    
+    This patch eliminates the gen-py-const.awk variant of gen-as-const,
+    switching to use of gnu-as-const.py (with a new --python option) to
+    process .pysym files (i.e., to generate nptl_lock_constants.py), as
+    the syntax of those files is identical to that of .sym files.
+    
+    Note that the generated nptl_lock_constants.py is *not* identical to
+    the version generated by the awk script.  Apart from the trivial
+    changes (comment referencing the new script, and output being sorted),
+    the constant FUTEX_WAITERS, PTHREAD_MUTEXATTR_FLAG_BITS,
+    PTHREAD_MUTEXATTR_FLAG_PSHARED and PTHREAD_MUTEX_PRIO_CEILING_MASK are
+    now output as positive rather than negative constants (on x86_64
+    anyway; maybe not necessarily on 32-bit systems):
+    
+    < FUTEX_WAITERS = -2147483648
+    ---
+    > FUTEX_WAITERS = 2147483648
+    
+    < PTHREAD_MUTEXATTR_FLAG_BITS = -251662336
+    < PTHREAD_MUTEXATTR_FLAG_PSHARED = -2147483648
+    ---
+    > PTHREAD_MUTEXATTR_FLAG_BITS = 4043304960
+    > PTHREAD_MUTEXATTR_FLAG_PSHARED = 2147483648
+    
+    < PTHREAD_MUTEX_PRIO_CEILING_MASK = -524288
+    ---
+    > PTHREAD_MUTEX_PRIO_CEILING_MASK = 4294443008
+    
+    This is because gen-as-const has a cast of the constant value to long
+    int, which gen-py-const lacks.
+    
+    I think the positive values are more logically correct, since the
+    constants in question are in fact unsigned in C.  But to reliably
+    produce gen-as-const.py output for constants that always (in C and
+    Python) reflects the signedness of values with the high bit of "long
+    int" set would mean more complicated logic needs to be used in
+    computing values.
+    
+    The more correct positive values by themselves produce a failure of
+    nptl/test-mutexattr-printers, because masking with
+    ~PTHREAD_MUTEXATTR_FLAG_BITS & ~PTHREAD_MUTEX_NO_ELISION_NP now leaves
+    a bit -1 << 32 in the Python value, resulting in a KeyError exception.
+    To avoid that, places masking with ~ of one of the constants in
+    question are changed to mask with 0xffffffff as well (this reflects
+    how ~ in Python applies to an infinite-precision integer whereas ~ in
+    C does not do any promotions beyond the width of int).
+    
+    Tested for x86_64.
+    
+            * scripts/gen-as-const.py (main): Handle --python option.
+            * scripts/gen-py-const.awk: Remove.
+            * Makerules (py-const-script): Use gen-as-const.py.
+            ($(py-const)): Likewise.
+            * nptl/nptl-printers.py (MutexPrinter.read_status_no_robust): Mask
+            with 0xffffffff together with ~(PTHREAD_MUTEX_PRIO_CEILING_MASK).
+            (MutexAttributesPrinter.read_values): Mask with 0xffffffff
+            together with ~PTHREAD_MUTEXATTR_FLAG_BITS and
+            ~PTHREAD_MUTEX_NO_ELISION_NP.
+            * manual/README.pretty-printers: Update reference to
+            gen-py-const.awk.
+
+Only the gen-as-const.py changes are included downstream.  We keep using
+gen-py-const.awk for the build.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index f85e359394acb1a4..2f1dff092b98e044 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -75,6 +75,8 @@ def main():
+                         help='C compiler (including options) to use')
+     parser.add_argument('--test', action='store_true',
+                         help='Generate test case instead of header')
++    parser.add_argument('--python', action='store_true',
++                        help='Generate Python file instead of header')
+     parser.add_argument('sym_file',
+                         help='.sym file to process')
+     args = parser.parse_args()
+@@ -103,6 +105,13 @@ def main():
+             sym_data.append('START')
+     if args.test:
+         print(gen_test(sym_data))
++    elif args.python:
++        consts = glibcextract.compute_c_consts(sym_data, args.cc)
++        print('# GENERATED FILE\n'
++              '\n'
++              '# Constant definitions.\n'
++              '# See gen-as-const.py for details.\n')
++        print(''.join('%s = %s\n' % c for c in sorted(consts.items())), end='')
+     else:
+         consts = glibcextract.compute_c_consts(sym_data, args.cc)
+         print(''.join('#define %s %s\n' % c for c in sorted(consts.items())), end='')
diff --git a/SOURCES/glibc-rh2109510-7.patch b/SOURCES/glibc-rh2109510-7.patch
new file mode 100644
index 0000000..3da8337
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-7.patch
@@ -0,0 +1,178 @@
+commit df648905e7d8340bb3e78813fd25e2077b9685d9
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 17 18:29:36 2018 +0000
+
+    Add test that MAP_* constants agree with kernel.
+    
+    Continuing the process of building up and using Python infrastructure
+    for extracting and using values in headers, this patch adds a test
+    that MAP_* constants from sys/mman.h agree with those in the Linux
+    kernel headers.  (Other sys/mman.h constants could be added to the
+    test separately.)
+    
+    This set of constants has grown over time, so the generic code is
+    enhanced to allow saying extra constants are OK on either side of the
+    comparison (where the caller sets those parameters based on the Linux
+    kernel headers version, compared with the version the headers were
+    last updated from).  Although the test is a custom Python file, my
+    intention is to move in future to a single Python script for such
+    tests and text files it takes as inputs, once there are enough
+    examples to provide a guide to the common cases in such tests (I'd
+    like to end up with most or all such sets of constants copied from
+    kernel headers having such tests, and likewise for structure layouts
+    from the kernel).
+    
+    The Makefile code is essentially the same as for tst-signal-numbers,
+    but I didn't try to find an object file to depend on to represent the
+    dependency on the headers used by the test (the conform/ tests don't
+    try to represent such header dependencies at all, for example).
+    
+    Tested with build-many-glibcs.py, and also for x86_64 with older
+    kernel headers.
+    
+            * scripts/glibcextract.py (compare_macro_consts): Take parameters
+            to allow extra macros from first or second sources.
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py: New file.
+            * sysdeps/unix/sysv/linux/Makefile [$(subdir) = misc]
+            (tests-special): Add $(objpfx)tst-mman-consts.out.
+            ($(objpfx)tst-mman-consts.out): New makefile target.
+
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+index ecc4d5b6cc387c7d..06f712ad115e0f9e 100644
+--- a/scripts/glibcextract.py
++++ b/scripts/glibcextract.py
+@@ -136,12 +136,19 @@ def compute_macro_consts(source_text, cc, macro_re, exclude_re=None):
+     return compute_c_consts(sym_data, cc)
+ 
+ 
+-def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None):
++def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None,
++                         allow_extra_1=False, allow_extra_2=False):
+     """Compare the values of macros defined by two different sources.
+ 
+     The sources would typically be includes of a glibc header and a
+-    kernel header.  Return 1 if there were any differences, 0 if the
+-    macro values were the same.
++    kernel header.  If allow_extra_1, the first source may define
++    extra macros (typically if the kernel headers are older than the
++    version glibc has taken definitions from); if allow_extra_2, the
++    second source may define extra macros (typically if the kernel
++    headers are newer than the version glibc has taken definitions
++    from).  Return 1 if there were any differences other than those
++    allowed, 0 if the macro values were the same apart from any
++    allowed differences.
+ 
+     """
+     macros_1 = compute_macro_consts(source_1, cc, macro_re, exclude_re)
+@@ -150,13 +157,19 @@ def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None):
+         return 0
+     print('First source:\n%s\n' % source_1)
+     print('Second source:\n%s\n' % source_2)
++    ret = 0
+     for name, value in sorted(macros_1.items()):
+         if name not in macros_2:
+             print('Only in first source: %s' % name)
++            if not allow_extra_1:
++                ret = 1
+         elif macros_1[name] != macros_2[name]:
+             print('Different values for %s: %s != %s'
+                   % (name, macros_1[name], macros_2[name]))
++            ret = 1
+     for name in sorted(macros_2.keys()):
+         if name not in macros_1:
+             print('Only in second source: %s' % name)
+-    return 1
++            if not allow_extra_2:
++                ret = 1
++    return ret
+diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile
+index 9c10ee53b26e1b1b..863ed80c2a2713d3 100644
+--- a/sysdeps/unix/sysv/linux/Makefile
++++ b/sysdeps/unix/sysv/linux/Makefile
+@@ -98,6 +98,15 @@ $(objpfx)tst-sysconf-iov_max: $(objpfx)tst-sysconf-iov_max-uapi.o
+ 
+ $(objpfx)tst-pkey: $(shared-thread-library)
+ 
++tests-special += $(objpfx)tst-mman-consts.out
++$(objpfx)tst-mman-consts.out: ../sysdeps/unix/sysv/linux/tst-mman-consts.py
++	PYTHONPATH=../scripts \
++	$(PYTHON) ../sysdeps/unix/sysv/linux/tst-mman-consts.py \
++		   --cc="$(CC) $(patsubst -DMODULE_NAME=%, \
++					  -DMODULE_NAME=testsuite, \
++					  $(CPPFLAGS))" \
++	< /dev/null > $@ 2>&1; $(evaluate-test)
++
+ endif # $(subdir) == misc
+ 
+ ifeq ($(subdir),time)
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+new file mode 100644
+index 0000000000000000..1a613beec0da16fb
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -0,0 +1,65 @@
++#!/usr/bin/python3
++# Test that glibc's sys/mman.h constants match the kernel's.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++import argparse
++import sys
++
++import glibcextract
++
++
++def linux_kernel_version(cc):
++    """Return the (major, minor) version of the Linux kernel headers."""
++    sym_data = ['#include <linux/version.h>', 'START',
++                ('LINUX_VERSION_CODE', 'LINUX_VERSION_CODE')]
++    val = glibcextract.compute_c_consts(sym_data, cc)['LINUX_VERSION_CODE']
++    val = int(val)
++    return ((val & 0xff0000) >> 16, (val & 0xff00) >> 8)
++
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description="Test that glibc's sys/mman.h constants "
++        "match the kernel's.")
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    args = parser.parse_args()
++    linux_version_headers = linux_kernel_version(args.cc)
++    linux_version_glibc = (4, 19)
++    sys.exit(glibcextract.compare_macro_consts(
++        '#define _GNU_SOURCE 1\n'
++        '#include <sys/mman.h>\n',
++        '#define _GNU_SOURCE 1\n'
++        '#include <linux/mman.h>\n',
++        args.cc,
++        'MAP_.*',
++        # A series of MAP_HUGE_<size> macros are defined by the kernel
++        # but not by glibc.  MAP_UNINITIALIZED is kernel-only.
++        # MAP_FAILED is not a MAP_* flag and is glibc-only, as is the
++        # MAP_ANON alias for MAP_ANONYMOUS.  MAP_RENAME, MAP_AUTOGROW,
++        # MAP_LOCAL and MAP_AUTORSRV are in the kernel header for
++        # MIPS, marked as "not used by linux"; SPARC has MAP_INHERIT
++        # in the kernel header, but does not use it.
++        'MAP_HUGE_[0-9].*|MAP_UNINITIALIZED|MAP_FAILED|MAP_ANON'
++        '|MAP_RENAME|MAP_AUTOGROW|MAP_LOCAL|MAP_AUTORSRV|MAP_INHERIT',
++        linux_version_glibc > linux_version_headers,
++        linux_version_headers > linux_version_glibc))
++
++if __name__ == '__main__':
++    main()
diff --git a/SOURCES/glibc-rh2109510-8.patch b/SOURCES/glibc-rh2109510-8.patch
new file mode 100644
index 0000000..120abed
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-8.patch
@@ -0,0 +1,23 @@
+commit 46baeb61e16511f26db1b255e19dc9163f590367
+Author: Fangrui Song <maskray@google.com>
+Date:   Tue Oct 19 09:58:16 2021 -0700
+
+    glibcextract.py: Place un-assemblable @@@ in a comment
+    
+    Unlike GCC, Clang parses asm statements and verifies they are valid
+    instructions/directives. Place the magic @@@ into a comment to avoid
+    a parse error.
+
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+index 06f712ad115e0f9e..8f2246aae6a9dfb7 100644
+--- a/scripts/glibcextract.py
++++ b/scripts/glibcextract.py
+@@ -45,7 +45,7 @@ def compute_c_consts(sym_data, cc):
+             continue
+         name = arg[0]
+         value = arg[1]
+-        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
++        out_lines.append('asm ("/* @@@name@@@%s@@@value@@@%%0@@@end@@@ */" '
+                          ': : \"i\" ((long int) (%s)));'
+                          % (name, value))
+     out_lines.append('}')
diff --git a/SOURCES/glibc-rh2109510-9.patch b/SOURCES/glibc-rh2109510-9.patch
new file mode 100644
index 0000000..289f6df
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-9.patch
@@ -0,0 +1,45 @@
+commit 841afa116e32b3c7195475769c26bf46fd870d32
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Wed Aug 10 16:24:06 2022 -0300
+
+    glibcextract.py: Add compile_c_snippet
+    
+    It might be used on tests to check if a snippet build with the provided
+    compiler and flags.
+    
+    Reviewed-by: Florian Weimer <fweimer@redhat.com>
+
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+index 8f2246aae6a9dfb7..0fb50dc8f9c4f7f9 100644
+--- a/scripts/glibcextract.py
++++ b/scripts/glibcextract.py
+@@ -17,6 +17,7 @@
+ # License along with the GNU C Library; if not, see
+ # <http://www.gnu.org/licenses/>.
+ 
++import collections
+ import os.path
+ import re
+ import subprocess
+@@ -173,3 +174,21 @@ def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None,
+             if not allow_extra_2:
+                 ret = 1
+     return ret
++
++CompileResult = collections.namedtuple("CompileResult", "returncode output")
++
++def compile_c_snippet(snippet, cc, extra_cc_args=''):
++    """Compile and return whether the SNIPPET can be build with CC along
++       EXTRA_CC_ARGS compiler flags.  Return a CompileResult with RETURNCODE
++       being 0 for success, or the failure value and the compiler output.
++    """
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        obj_file_name = os.path.join(temp_dir, 'test.o')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(snippet + '\n')
++        cmd = cc.split() + extra_cc_args.split() + ['-c', '-o', obj_file_name,
++                c_file_name]
++        r = subprocess.run(cmd, check=False, stdout=subprocess.PIPE,
++                stderr=subprocess.STDOUT)
++        return CompileResult(r.returncode, r.stdout)
diff --git a/SOURCES/glibc-rh2139875-1.patch b/SOURCES/glibc-rh2139875-1.patch
new file mode 100644
index 0000000..32091ab
--- /dev/null
+++ b/SOURCES/glibc-rh2139875-1.patch
@@ -0,0 +1,32 @@
+commit acb55dcb892d4321ada6fd9b663b28fada432682
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Wed Jan 2 18:35:50 2019 +0000
+
+    Update Linux kernel version in tst-mman-consts.py.
+    
+    This patch updates the Linux kernel version in tst-mman-consts.py to
+    4.20 (meaning that's the version for which glibc is expected to have
+    the same constants as the kernel, up to the exceptions listed in the
+    test).  (Once we have more such tests sharing common infrastructure, I
+    expect the kernel version will be something set in the infrastructure
+    shared by all such tests, rather than something needing updating
+    separately for each test for each new kernel version.)
+    
+    Tested with build-many-glibcs.py.
+    
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py (main): Expect
+            constants to match with Linux 4.20.
+
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+index 1a613beec0da16fb..4a2ddd49c4c7282b 100644
+--- a/sysdeps/unix/sysv/linux/tst-mman-consts.py
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -41,7 +41,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+     linux_version_headers = linux_kernel_version(args.cc)
+-    linux_version_glibc = (4, 19)
++    linux_version_glibc = (4, 20)
+     sys.exit(glibcextract.compare_macro_consts(
+         '#define _GNU_SOURCE 1\n'
+         '#include <sys/mman.h>\n',
diff --git a/SOURCES/glibc-rh2139875-2.patch b/SOURCES/glibc-rh2139875-2.patch
new file mode 100644
index 0000000..1c3ac5b
--- /dev/null
+++ b/SOURCES/glibc-rh2139875-2.patch
@@ -0,0 +1,31 @@
+commit c7a26cba2ab949216ac9ef245ca78696815ea4c4
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Fri Aug 2 11:36:07 2019 +0000
+
+    Update Linux kernel version number in tst-mman-consts.py to 5.2.
+    
+    The tst-mman-consts.py test includes a kernel version number, to avoid
+    failures because of newly added constants in the kernel (if kernel
+    headers are newer than this version of glibc) or missing constants in
+    the kernel (if kernel headers are older than this version of glibc).
+    This patch updates it to 5.2 to reflect that the MAP_* constants in
+    glibc are still current as of that kernel version.
+    
+    Tested with build-many-glibcs.py.
+    
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py (main): Update Linux
+            kernel version number to 5.2.
+
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+index 4a2ddd49c4c7282b..9e326b1f31799a72 100644
+--- a/sysdeps/unix/sysv/linux/tst-mman-consts.py
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -41,7 +41,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+     linux_version_headers = linux_kernel_version(args.cc)
+-    linux_version_glibc = (4, 20)
++    linux_version_glibc = (5, 2)
+     sys.exit(glibcextract.compare_macro_consts(
+         '#define _GNU_SOURCE 1\n'
+         '#include <sys/mman.h>\n',
diff --git a/SOURCES/glibc-rh2139875-3.patch b/SOURCES/glibc-rh2139875-3.patch
new file mode 100644
index 0000000..6c48115
--- /dev/null
+++ b/SOURCES/glibc-rh2139875-3.patch
@@ -0,0 +1,61 @@
+commit 71bdf29ac1de04efcce96bc5ce50af3263851ac7
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Sep 30 15:49:25 2019 +0000
+
+    Update bits/mman.h constants and tst-mman-consts.py for Linux 5.3.
+    
+    The Linux 5.3 uapi headers have some rearrangement relating to MAP_*
+    constants, which includes the effect of adding definitions of MAP_SYNC
+    on powerpc and sparc.  This patch updates the corresponding glibc
+    bits/mman.h headers accordingly, and updates the Linux kernel version
+    number in tst-mman-consts.py to reflect that these constants are now
+    current with that kernel version.
+    
+    Tested with build-many-glibcs.py.
+    
+            * sysdeps/unix/sysv/linux/powerpc/bits/mman.h [__USE_MISC]
+            (MAP_SYNC): New macro.
+            * sysdeps/unix/sysv/linux/sparc/bits/mman.h [__USE_MISC]
+            (MAP_SYNC): Likewise.
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py (main): Update Linux
+            kernel version number to 5.3.
+
+diff --git a/sysdeps/unix/sysv/linux/powerpc/bits/mman.h b/sysdeps/unix/sysv/linux/powerpc/bits/mman.h
+index e652467c8c091381..0e7fa647793ed585 100644
+--- a/sysdeps/unix/sysv/linux/powerpc/bits/mman.h
++++ b/sysdeps/unix/sysv/linux/powerpc/bits/mman.h
+@@ -36,6 +36,8 @@
+ # define MAP_NONBLOCK	0x10000		/* Do not block on IO.  */
+ # define MAP_STACK	0x20000		/* Allocation is for a stack.  */
+ # define MAP_HUGETLB	0x40000		/* Create huge page mapping.  */
++# define MAP_SYNC	0x80000		/* Perform synchronous page
++					   faults for the mapping.  */
+ # define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED but do not unmap
+ 					   underlying mapping.  */
+ #endif
+diff --git a/sysdeps/unix/sysv/linux/sparc/bits/mman.h b/sysdeps/unix/sysv/linux/sparc/bits/mman.h
+index 3a3ffb994631e2b6..03f6f732bb5efbe2 100644
+--- a/sysdeps/unix/sysv/linux/sparc/bits/mman.h
++++ b/sysdeps/unix/sysv/linux/sparc/bits/mman.h
+@@ -36,6 +36,8 @@
+ # define MAP_NONBLOCK	0x10000		/* Do not block on IO.  */
+ # define MAP_STACK	0x20000		/* Allocation is for a stack.  */
+ # define MAP_HUGETLB	0x40000		/* Create huge page mapping.  */
++# define MAP_SYNC	0x80000		/* Perform synchronous page
++					   faults for the mapping.  */
+ # define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED but do not unmap
+ 					   underlying mapping.  */
+ #endif
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+index 9e326b1f31799a72..42914e4e0ba84712 100644
+--- a/sysdeps/unix/sysv/linux/tst-mman-consts.py
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -41,7 +41,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+     linux_version_headers = linux_kernel_version(args.cc)
+-    linux_version_glibc = (5, 2)
++    linux_version_glibc = (5, 3)
+     sys.exit(glibcextract.compare_macro_consts(
+         '#define _GNU_SOURCE 1\n'
+         '#include <sys/mman.h>\n',
diff --git a/SPECS/glibc.spec b/SPECS/glibc.spec
index 4e9f654..79a3282 100644
--- a/SPECS/glibc.spec
+++ b/SPECS/glibc.spec
@@ -1,6 +1,6 @@
 %define glibcsrcdir glibc-2.28
 %define glibcversion 2.28
-%define glibcrelease 221%{?dist}
+%define glibcrelease 222%{?dist}
 # Pre-release tarballs are pulled in from git using a command that is
 # effectively:
 #
@@ -986,6 +986,32 @@ Patch793: glibc-rh2122501-5.patch
 Patch794: glibc-rh2121746-1.patch
 Patch795: glibc-rh2121746-2.patch
 Patch796: glibc-rh2116938.patch
+Patch797: glibc-rh2109510-1.patch
+Patch798: glibc-rh2109510-2.patch
+Patch799: glibc-rh2109510-3.patch
+Patch800: glibc-rh2109510-4.patch
+Patch801: glibc-rh2109510-5.patch
+Patch802: glibc-rh2109510-6.patch
+Patch803: glibc-rh2109510-7.patch
+Patch804: glibc-rh2109510-8.patch
+Patch805: glibc-rh2109510-9.patch
+Patch806: glibc-rh2109510-10.patch
+Patch807: glibc-rh2109510-11.patch
+Patch808: glibc-rh2109510-12.patch
+Patch809: glibc-rh2109510-13.patch
+Patch810: glibc-rh2109510-14.patch
+Patch811: glibc-rh2109510-15.patch
+Patch812: glibc-rh2109510-16.patch
+Patch813: glibc-rh2109510-17.patch
+Patch814: glibc-rh2109510-18.patch
+Patch815: glibc-rh2109510-19.patch
+Patch816: glibc-rh2109510-20.patch
+Patch817: glibc-rh2109510-21.patch
+Patch818: glibc-rh2109510-22.patch
+Patch819: glibc-rh2109510-23.patch
+Patch820: glibc-rh2139875-1.patch
+Patch821: glibc-rh2139875-2.patch
+Patch822: glibc-rh2139875-3.patch
 
 # Intel Optimizations
 Patch10001: glibc-sw24097-1.patch
@@ -2931,6 +2957,10 @@ fi
 %files -f compat-libpthread-nonshared.filelist -n compat-libpthread-nonshared
 
 %changelog
+* Thu Nov  3 2022 Florian Weimer <fweimer@redhat.com> - 2.28-222
+- Explicitly switch to --with-default-link=no (#2109510)
+- Define MAP_SYNC on ppc64le (#2139875)
+
 * Mon Oct 24 2022 Arjun Shankar <arjun@redhat.com> - 2.28-221  
 - Fix -Wstrict-overflow warning when using CMSG_NXTHDR macro (#2116938)