diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ddbc9dd --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +SOURCES/cmake-3.4.3.tar.gz +SOURCES/llvm-3.9.1.src.tar.xz diff --git a/.mesa-private-llvm.metadata b/.mesa-private-llvm.metadata new file mode 100644 index 0000000..d7a792d --- /dev/null +++ b/.mesa-private-llvm.metadata @@ -0,0 +1,2 @@ +49e4f05d46d4752e514b19ba36bf97d20a7da66a SOURCES/cmake-3.4.3.tar.gz +ce801cf456b8dacd565ce8df8288b4d90e7317ff SOURCES/llvm-3.9.1.src.tar.xz diff --git a/README.md b/README.md deleted file mode 100644 index 0e7897f..0000000 --- a/README.md +++ /dev/null @@ -1,5 +0,0 @@ -The master branch has no content - -Look at the c7 branch if you are working with CentOS-7, or the c4/c5/c6 branch for CentOS-4, 5 or 6 - -If you find this file in a distro specific branch, it means that no content has been checked in yet diff --git a/SOURCES/0001-Fix-R_AARCH64_MOVW_UABS_G3-relocation.patch b/SOURCES/0001-Fix-R_AARCH64_MOVW_UABS_G3-relocation.patch new file mode 100644 index 0000000..c5517ad --- /dev/null +++ b/SOURCES/0001-Fix-R_AARCH64_MOVW_UABS_G3-relocation.patch @@ -0,0 +1,435 @@ +From 29cf3bd00fe84ddab138c9311fe288bb9da8a273 Mon Sep 17 00:00:00 2001 +From: root +Date: Thu, 9 Mar 2017 12:22:48 -0600 +Subject: [PATCH] Fix R_AARCH64_MOVW_UABS_G3 relocation + +Summary: The relocation is missing mask so an address that +has non-zero bits in 47:43 may overwrite the register +number. (Frequently shows up as target register changed +to xzr....) + +Reviewers: t.p.northover, lhames +Subscribers: davide, aemerson, rengolin, llvm-commits +Differential Revision: https://reviews.llvm.org/D27609 +--- + llvm-3.9.1.src/include/llvm/Object/ELFObjectFile.h | 2 +- + llvm-3.9.1.src/include/llvm/Object/RelocVisitor.h | 1 + + .../ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp | 67 +++++++++----- + .../RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s | 102 +++++++++++++++++++++ + .../RuntimeDyld/AArch64/ELF_ARM64_relocations.s | 99 ++++++++++++++++++++ + 5 files changed, 249 insertions(+), 22 deletions(-) + create mode 100644 llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s + create mode 100644 llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s + +diff --git a/llvm-3.9.1.src/include/llvm/Object/ELFObjectFile.h b/llvm-3.9.1.src/include/llvm/Object/ELFObjectFile.h +index 07c6364..d3b83f9 100644 +--- a/llvm-3.9.1.src/include/llvm/Object/ELFObjectFile.h ++++ b/llvm-3.9.1.src/include/llvm/Object/ELFObjectFile.h +@@ -907,7 +907,7 @@ unsigned ELFObjectFile::getArch() const { + case ELF::EM_X86_64: + return Triple::x86_64; + case ELF::EM_AARCH64: +- return Triple::aarch64; ++ return IsLittleEndian ? 
Triple::aarch64 : Triple::aarch64_be; + case ELF::EM_ARM: + return Triple::arm; + case ELF::EM_AVR: +diff --git a/llvm-3.9.1.src/include/llvm/Object/RelocVisitor.h b/llvm-3.9.1.src/include/llvm/Object/RelocVisitor.h +index 5e0df98..b59e8ec 100644 +--- a/llvm-3.9.1.src/include/llvm/Object/RelocVisitor.h ++++ b/llvm-3.9.1.src/include/llvm/Object/RelocVisitor.h +@@ -86,6 +86,7 @@ private: + return RelocToApply(); + } + case Triple::aarch64: ++ case Triple::aarch64_be: + switch (RelocType) { + case llvm::ELF::R_AARCH64_ABS32: + return visitELF_AARCH64_ABS32(R, Value); +diff --git a/llvm-3.9.1.src/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm-3.9.1.src/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +index 9cbdb13..9e04b5d 100644 +--- a/llvm-3.9.1.src/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp ++++ b/llvm-3.9.1.src/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +@@ -309,6 +309,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, + uint32_t *TargetPtr = + reinterpret_cast(Section.getAddressWithOffset(Offset)); + uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); ++ // Data should use target endian. Code should always use little endian. ++ bool isBE = Arch == Triple::aarch64_be; + + DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x" + << format("%llx", Section.getAddressWithOffset(Offset)) +@@ -324,14 +326,22 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, + case ELF::R_AARCH64_ABS64: { + uint64_t *TargetPtr = + reinterpret_cast(Section.getAddressWithOffset(Offset)); +- *TargetPtr = Value + Addend; ++ if (isBE) ++ support::ubig64_t::ref{TargetPtr} = Value + Addend; ++ else ++ support::ulittle64_t::ref{TargetPtr} = Value + Addend; + break; + } + case ELF::R_AARCH64_PREL32: { + uint64_t Result = Value + Addend - FinalAddress; + assert(static_cast(Result) >= INT32_MIN && + static_cast(Result) <= UINT32_MAX); +- *TargetPtr = static_cast(Result & 0xffffffffU); ++ if (isBE) ++ support::ubig32_t::ref{TargetPtr} = ++ static_cast(Result & 0xffffffffU); ++ else ++ support::ulittle32_t::ref{TargetPtr} = ++ static_cast(Result & 0xffffffffU); + break; + } + case ELF::R_AARCH64_CALL26: // fallthrough +@@ -339,6 +349,7 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, + // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the + // calculation. + uint64_t BranchImm = Value + Addend - FinalAddress; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // "Check that -2^27 <= result < 2^27". + assert(isInt<28>(BranchImm)); +@@ -352,91 +363,105 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, + } + case ELF::R_AARCH64_MOVW_UABS_G3: { + uint64_t Result = Value + Addend; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. 
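For illustration only, a minimal standalone sketch (not part of the patch) of the masked versus unmasked R_AARCH64_MOVW_UABS_G3 update shown in this hunk; the helper name, the un-relocated MOVZ word and the symbol value are assumptions taken from the tests added later in this patch:

#include <cstdint>
#include <cstdio>

// Sketch: place the "lsl #48" chunk of an absolute address into the imm16
// field (bits 20:5) of a MOVZ/MOVK word.
static uint32_t patchG3(uint32_t Insn, uint64_t Result, bool Masked) {
  Insn &= 0xffe0001fU;  // clear imm16, keep opcode, shift and Rd
  if (Masked)
    Insn |= (Result & 0xffff000000000000ULL) >> (48 - 5);  // only bits 63:48 of Result
  else
    Insn |= Result >> (48 - 5);  // buggy: bits 47:43 of Result spill into Rd (bits 4:0);
                                 // if those bits are all ones, Rd silently becomes xzr
  return Insn;
}

int main() {
  // movz x0, #:abs_g3:f as emitted by llvm-mc (imm16 still zero, Rd = x0), with
  // f = 0x0123456789abcdef, the dummy-extern value used by the tests below.
  uint64_t F = 0x0123456789abcdefULL;
  std::printf("masked:   0x%08x\n", patchG3(0xd2e00000U, F, true));   // 0xd2e02460, what the test expects
  std::printf("unmasked: 0x%08x\n", patchG3(0xd2e00000U, F, false));  // 0xd2e02468, Rd corrupted to x8
  return 0;
}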
+- *TargetPtr &= 0xffe0001fU; ++ TargetValue &= 0xffe0001fU; + // Immediate goes in bits 20:5 of MOVZ/MOVK instruction +- *TargetPtr |= Result >> (48 - 5); ++ TargetValue |= ((Result & 0xffff000000000000ULL) >> (48 - 5)); + // Shift must be "lsl #48", in bits 22:21 +- assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation"); ++ assert((TargetValue >> 21 & 0x3) == 3 && "invalid shift for relocation"); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + case ELF::R_AARCH64_MOVW_UABS_G2_NC: { + uint64_t Result = Value + Addend; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. +- *TargetPtr &= 0xffe0001fU; ++ TargetValue &= 0xffe0001fU; + // Immediate goes in bits 20:5 of MOVZ/MOVK instruction +- *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5)); ++ TargetValue |= ((Result & 0xffff00000000ULL) >> (32 - 5)); + // Shift must be "lsl #32", in bits 22:21 +- assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation"); ++ assert((TargetValue >> 21 & 0x3) == 2 && "invalid shift for relocation"); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + case ELF::R_AARCH64_MOVW_UABS_G1_NC: { + uint64_t Result = Value + Addend; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. +- *TargetPtr &= 0xffe0001fU; ++ TargetValue &= 0xffe0001fU; + // Immediate goes in bits 20:5 of MOVZ/MOVK instruction +- *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5)); ++ TargetValue |= ((Result & 0xffff0000U) >> (16 - 5)); + // Shift must be "lsl #16", in bits 22:2 +- assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation"); ++ assert((TargetValue >> 21 & 0x3) == 1 && "invalid shift for relocation"); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + case ELF::R_AARCH64_MOVW_UABS_G0_NC: { + uint64_t Result = Value + Addend; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. +- *TargetPtr &= 0xffe0001fU; ++ TargetValue &= 0xffe0001fU; + // Immediate goes in bits 20:5 of MOVZ/MOVK instruction +- *TargetPtr |= ((Result & 0xffffU) << 5); ++ TargetValue |= ((Result & 0xffffU) << 5); + // Shift must be "lsl #0", in bits 22:21. +- assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation"); ++ assert((TargetValue >> 21 & 0x3) == 0 && "invalid shift for relocation"); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + case ELF::R_AARCH64_ADR_PREL_PG_HI21: { + // Operation: Page(S+A) - Page(P) + uint64_t Result = + ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL); ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // Check that -2^32 <= X < 2^32 + assert(isInt<33>(Result) && "overflow check failed for relocation"); + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. +- *TargetPtr &= 0x9f00001fU; ++ TargetValue &= 0x9f00001fU; + // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken + // from bits 32:12 of X. 
+- *TargetPtr |= ((Result & 0x3000U) << (29 - 12)); +- *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5)); ++ TargetValue |= ((Result & 0x3000U) << (29 - 12)); ++ TargetValue |= ((Result & 0x1ffffc000ULL) >> (14 - 5)); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + case ELF::R_AARCH64_LDST32_ABS_LO12_NC: { + // Operation: S + A + uint64_t Result = Value + Addend; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. +- *TargetPtr &= 0xffc003ffU; ++ TargetValue &= 0xffc003ffU; + // Immediate goes in bits 21:10 of LD/ST instruction, taken + // from bits 11:2 of X +- *TargetPtr |= ((Result & 0xffc) << (10 - 2)); ++ TargetValue |= ((Result & 0xffc) << (10 - 2)); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + case ELF::R_AARCH64_LDST64_ABS_LO12_NC: { + // Operation: S + A + uint64_t Result = Value + Addend; ++ uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; + + // AArch64 code is emitted with .rela relocations. The data already in any + // bits affected by the relocation on entry is garbage. +- *TargetPtr &= 0xffc003ffU; ++ TargetValue &= 0xffc003ffU; + // Immediate goes in bits 21:10 of LD/ST instruction, taken + // from bits 11:3 of X +- *TargetPtr |= ((Result & 0xff8) << (10 - 3)); ++ TargetValue |= ((Result & 0xff8) << (10 - 3)); ++ support::ulittle32_t::ref{TargetPtr} = TargetValue; + break; + } + } +diff --git a/llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s b/llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s +new file mode 100644 +index 0000000..01d01e5 +--- /dev/null ++++ b/llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s +@@ -0,0 +1,102 @@ ++# RUN: llvm-mc -triple=aarch64_be-none-linux-gnu -filetype=obj -o %T/be-reloc.o %s ++# RUN: llvm-rtdyld -triple=aarch64_be-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/be-reloc.o ++ ++ .text ++ .globl g ++ .p2align 2 ++ .type g,@function ++g: ++# R_AARCH64_MOVW_UABS_G3 ++ movz x0, #:abs_g3:f ++# R_AARCH64_MOVW_UABS_G2_NC ++ movk x0, #:abs_g2_nc:f ++# R_AARCH64_MOVW_UABS_G1_NC ++ movk x0, #:abs_g1_nc:f ++# R_AARCH64_MOVW_UABS_G0_NC ++ movk x0, #:abs_g0_nc:f ++ ret ++ .Lfunc_end0: ++ .size g, .Lfunc_end0-g ++ ++ .type k,@object ++ .data ++ .globl k ++ .p2align 3 ++k: ++ .xword f ++ .size k, 8 ++ ++# LE instructions read as BE ++# rtdyld-check: *{4}(g) = 0x6024e0d2 ++# rtdyld-check: *{4}(g + 4) = 0xe0acc8f2 ++# rtdyld-check: *{4}(g + 8) = 0x6035b1f2 ++# rtdyld-check: *{4}(g + 12) = 0xe0bd99f2 ++# rtdyld-check: *{8}k = f ++# RUN: llvm-mc -triple=aarch64_be-none-linux-gnu -filetype=obj -o %T/be-reloc.o %s ++# RUN: llvm-rtdyld -triple=aarch64_be-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/be-reloc.o ++ ++ .text ++ .globl g ++ .p2align 2 ++ .type g,@function ++g: ++# R_AARCH64_MOVW_UABS_G3 ++ movz x0, #:abs_g3:f ++# R_AARCH64_MOVW_UABS_G2_NC ++ movk x0, #:abs_g2_nc:f ++# R_AARCH64_MOVW_UABS_G1_NC ++ movk x0, #:abs_g1_nc:f ++# R_AARCH64_MOVW_UABS_G0_NC ++ movk x0, #:abs_g0_nc:f ++ ret ++ .Lfunc_end0: ++ .size g, .Lfunc_end0-g ++ ++ .type k,@object ++ .data ++ .globl k ++ .p2align 3 ++k: ++ .xword f ++ .size k, 8 ++ ++# LE instructions read as BE ++# rtdyld-check: *{4}(g) = 0x6024e0d2 ++# rtdyld-check: *{4}(g + 4) = 0xe0acc8f2 ++# rtdyld-check: *{4}(g + 8) = 0x6035b1f2 ++# rtdyld-check: *{4}(g + 12) 
= 0xe0bd99f2 ++# rtdyld-check: *{8}k = f ++# RUN: llvm-mc -triple=aarch64_be-none-linux-gnu -filetype=obj -o %T/be-reloc.o %s ++# RUN: llvm-rtdyld -triple=aarch64_be-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/be-reloc.o ++ ++ .text ++ .globl g ++ .p2align 2 ++ .type g,@function ++g: ++# R_AARCH64_MOVW_UABS_G3 ++ movz x0, #:abs_g3:f ++# R_AARCH64_MOVW_UABS_G2_NC ++ movk x0, #:abs_g2_nc:f ++# R_AARCH64_MOVW_UABS_G1_NC ++ movk x0, #:abs_g1_nc:f ++# R_AARCH64_MOVW_UABS_G0_NC ++ movk x0, #:abs_g0_nc:f ++ ret ++ .Lfunc_end0: ++ .size g, .Lfunc_end0-g ++ ++ .type k,@object ++ .data ++ .globl k ++ .p2align 3 ++k: ++ .xword f ++ .size k, 8 ++ ++# LE instructions read as BE ++# rtdyld-check: *{4}(g) = 0x6024e0d2 ++# rtdyld-check: *{4}(g + 4) = 0xe0acc8f2 ++# rtdyld-check: *{4}(g + 8) = 0x6035b1f2 ++# rtdyld-check: *{4}(g + 12) = 0xe0bd99f2 ++# rtdyld-check: *{8}k = f +diff --git a/llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s b/llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s +new file mode 100644 +index 0000000..e07fa97 +--- /dev/null ++++ b/llvm-3.9.1.src/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s +@@ -0,0 +1,99 @@ ++# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/reloc.o %s ++# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/reloc.o ++ ++ .text ++ .globl g ++ .p2align 2 ++ .type g,@function ++g: ++# R_AARCH64_MOVW_UABS_G3 ++ movz x0, #:abs_g3:f ++# R_AARCH64_MOVW_UABS_G2_NC ++ movk x0, #:abs_g2_nc:f ++# R_AARCH64_MOVW_UABS_G1_NC ++ movk x0, #:abs_g1_nc:f ++# R_AARCH64_MOVW_UABS_G0_NC ++ movk x0, #:abs_g0_nc:f ++ ret ++ .Lfunc_end0: ++ .size g, .Lfunc_end0-g ++ ++ .type k,@object ++ .data ++ .globl k ++ .p2align 3 ++k: ++ .xword f ++ .size k, 8 ++ ++# rtdyld-check: *{4}(g) = 0xd2e02460 ++# rtdyld-check: *{4}(g + 4) = 0xf2c8ace0 ++# rtdyld-check: *{4}(g + 8) = 0xf2b13560 ++# rtdyld-check: *{4}(g + 12) = 0xf299bde0 ++# rtdyld-check: *{8}k = f ++# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/reloc.o %s ++# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/reloc.o ++ ++ .text ++ .globl g ++ .p2align 2 ++ .type g,@function ++g: ++# R_AARCH64_MOVW_UABS_G3 ++ movz x0, #:abs_g3:f ++# R_AARCH64_MOVW_UABS_G2_NC ++ movk x0, #:abs_g2_nc:f ++# R_AARCH64_MOVW_UABS_G1_NC ++ movk x0, #:abs_g1_nc:f ++# R_AARCH64_MOVW_UABS_G0_NC ++ movk x0, #:abs_g0_nc:f ++ ret ++ .Lfunc_end0: ++ .size g, .Lfunc_end0-g ++ ++ .type k,@object ++ .data ++ .globl k ++ .p2align 3 ++k: ++ .xword f ++ .size k, 8 ++ ++# rtdyld-check: *{4}(g) = 0xd2e02460 ++# rtdyld-check: *{4}(g + 4) = 0xf2c8ace0 ++# rtdyld-check: *{4}(g + 8) = 0xf2b13560 ++# rtdyld-check: *{4}(g + 12) = 0xf299bde0 ++# rtdyld-check: *{8}k = f ++# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/reloc.o %s ++# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/reloc.o ++ ++ .text ++ .globl g ++ .p2align 2 ++ .type g,@function ++g: ++# R_AARCH64_MOVW_UABS_G3 ++ movz x0, #:abs_g3:f ++# R_AARCH64_MOVW_UABS_G2_NC ++ movk x0, #:abs_g2_nc:f ++# R_AARCH64_MOVW_UABS_G1_NC ++ movk x0, #:abs_g1_nc:f ++# R_AARCH64_MOVW_UABS_G0_NC ++ movk x0, #:abs_g0_nc:f ++ ret ++ .Lfunc_end0: ++ .size g, .Lfunc_end0-g ++ ++ .type k,@object ++ .data ++ .globl k ++ .p2align 3 ++k: ++ .xword f ++ .size k, 8 ++ ++# rtdyld-check: *{4}(g) = 0xd2e02460 ++# rtdyld-check: *{4}(g + 4) = 0xf2c8ace0 
++# rtdyld-check: *{4}(g + 8) = 0xf2b13560 ++# rtdyld-check: *{4}(g + 12) = 0xf299bde0 ++# rtdyld-check: *{8}k = f +-- +2.12.0 + diff --git a/SOURCES/0001-Revert-InstCombine-transform-bitcasted-bitwise-logic.patch b/SOURCES/0001-Revert-InstCombine-transform-bitcasted-bitwise-logic.patch new file mode 100644 index 0000000..dc3c292 --- /dev/null +++ b/SOURCES/0001-Revert-InstCombine-transform-bitcasted-bitwise-logic.patch @@ -0,0 +1,164 @@ +From 6674146ac94c8744c807ed06e1bdb99cef87b2fe Mon Sep 17 00:00:00 2001 +From: root +Date: Fri, 14 Apr 2017 16:06:58 -0400 +Subject: [PATCH] Revert "[InstCombine] transform bitcasted bitwise logic ops + with constants (PR26702)" + +This reverts commit 76b12c4bf0bbc5c70def7b5d083a8a70547ea4e3. + +Conflicts: + lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +--- + lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 34 +++++----------------- + test/Transforms/InstCombine/bitcast-bigendian.ll | 14 +++++---- + test/Transforms/InstCombine/bitcast.ll | 14 +++++---- + 3 files changed, 23 insertions(+), 39 deletions(-) + +diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +index 1a6459b..36c2136 100644 +--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp ++++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +@@ -1201,41 +1201,21 @@ Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) { + + Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); + CastInst *Cast0 = dyn_cast(Op0); +- if (!Cast0) +- return nullptr; +- +- // This must be a cast from an integer or integer vector source type to allow +- // transformation of the logic operation to the source type. +- Type *DestTy = I.getType(); +- Type *SrcTy = Cast0->getSrcTy(); +- if (!SrcTy->isIntOrIntVectorTy()) +- return nullptr; +- +- // If one operand is a bitcast and the other is a constant, move the logic +- // operation ahead of the bitcast. That is, do the logic operation in the +- // original type. This can eliminate useless bitcasts and allow normal +- // combines that would otherwise be impeded by the bitcast. Canonicalization +- // ensures that if there is a constant operand, it will be the second operand. +- Value *BC = nullptr; +- Constant *C = nullptr; +- if ((match(Op0, m_BitCast(m_Value(BC))) && match(Op1, m_Constant(C)))) { +- Value *NewConstant = ConstantExpr::getBitCast(C, SrcTy); +- Value *NewOp = Builder->CreateBinOp(LogicOpc, BC, NewConstant, I.getName()); +- return CastInst::CreateBitOrPointerCast(NewOp, DestTy); +- } +- + CastInst *Cast1 = dyn_cast(Op1); +- if (!Cast1) ++ if (!Cast0 || !Cast1) + return nullptr; + +- // Both operands of the logic operation are casts. The casts must be of the +- // same type for reduction. ++ // The casts must be of the same type, and this must be a cast from an integer ++ // or integer vector source type. 
+ auto CastOpcode = Cast0->getOpcode(); +- if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy()) ++ Type *SrcTy = Cast0->getSrcTy(); ++ if ((CastOpcode != Cast1->getOpcode()) || (SrcTy != Cast1->getSrcTy()) || ++ !SrcTy->isIntOrIntVectorTy()) + return nullptr; + + Value *Cast0Src = Cast0->getOperand(0); + Value *Cast1Src = Cast1->getOperand(0); ++ Type *DestTy = I.getType(); + + // fold (logic (cast A), (cast B)) -> (cast (logic A, B)) + +diff --git a/test/Transforms/InstCombine/bitcast-bigendian.ll b/test/Transforms/InstCombine/bitcast-bigendian.ll +index f558ecc..4042dad 100644 +--- a/test/Transforms/InstCombine/bitcast-bigendian.ll ++++ b/test/Transforms/InstCombine/bitcast-bigendian.ll +@@ -90,6 +90,8 @@ define <2 x float> @test6(float %A){ + ; CHECK: ret + } + ++; FIXME: Do the logic in the original type for the following 3 tests. ++ + ; Verify that 'xor' of vector and constant is done as a vector bitwise op before the bitcast. + + define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) { +@@ -98,8 +100,8 @@ define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) { + ret <2 x i32> %t2 + + ; CHECK-LABEL: @xor_bitcast_vec_to_vec( +-; CHECK-NEXT: %t21 = xor <1 x i64> %a, +-; CHECK-NEXT: %t2 = bitcast <1 x i64> %t21 to <2 x i32> ++; CHECK-NEXT: %t1 = bitcast <1 x i64> %a to <2 x i32> ++; CHECK-NEXT: %t2 = xor <2 x i32> %t1, + ; CHECK-NEXT: ret <2 x i32> %t2 + } + +@@ -111,8 +113,8 @@ define i64 @and_bitcast_vec_to_int(<2 x i32> %a) { + ret i64 %t2 + + ; CHECK-LABEL: @and_bitcast_vec_to_int( +-; CHECK-NEXT: %t21 = and <2 x i32> %a, +-; CHECK-NEXT: %t2 = bitcast <2 x i32> %t21 to i64 ++; CHECK-NEXT: %t1 = bitcast <2 x i32> %a to i64 ++; CHECK-NEXT: %t2 = and i64 %t1, 3 + ; CHECK-NEXT: ret i64 %t2 + } + +@@ -124,8 +126,8 @@ define <2 x i32> @or_bitcast_int_to_vec(i64 %a) { + ret <2 x i32> %t2 + + ; CHECK-LABEL: @or_bitcast_int_to_vec( +-; CHECK-NEXT: %t21 = or i64 %a, 4294967298 +-; CHECK-NEXT: %t2 = bitcast i64 %t21 to <2 x i32> ++; CHECK-NEXT: %t1 = bitcast i64 %a to <2 x i32> ++; CHECK-NEXT: %t2 = or <2 x i32> %t1, + ; CHECK-NEXT: ret <2 x i32> %t2 + } + +diff --git a/test/Transforms/InstCombine/bitcast.ll b/test/Transforms/InstCombine/bitcast.ll +index 7495859..34e9206 100644 +--- a/test/Transforms/InstCombine/bitcast.ll ++++ b/test/Transforms/InstCombine/bitcast.ll +@@ -30,6 +30,8 @@ define <2 x i32> @xor_two_vector_bitcasts(<1 x i64> %a, <1 x i64> %b) { + ; CHECK-NEXT: ret <2 x i32> %t3 + } + ++; FIXME: Do the logic in the original type for the following 3 tests. ++ + ; Verify that 'xor' of vector and constant is done as a vector bitwise op before the bitcast. 
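An aside on the constants in these reverted CHECK lines: the i64 value differs between the two test files (4294967298 in bitcast-bigendian.ll above, 8589934593 in bitcast.ll below) because bitcasting a <2 x i32> constant, <i32 1, i32 2> in the upstream tests, to i64 depends on lane order and therefore on target endianness. A small host-side sketch, illustrative only and not InstCombine code:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // The two i32 lanes of <i32 1, i32 2>, laid out in memory and re-read as one i64.
  uint32_t Lanes[2] = {1, 2};
  uint64_t AsI64 = 0;
  std::memcpy(&AsI64, Lanes, sizeof(AsI64));
  // Little-endian lane order yields 0x0000000200000001 = 8589934593 (bitcast.ll);
  // big-endian lane order yields 0x0000000100000002 = 4294967298 (bitcast-bigendian.ll).
  std::printf("%llu\n", (unsigned long long)AsI64);
  return 0;
}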
+ + define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) { +@@ -38,8 +40,8 @@ define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) { + ret <2 x i32> %t2 + + ; CHECK-LABEL: @xor_bitcast_vec_to_vec( +-; CHECK-NEXT: %t21 = xor <1 x i64> %a, +-; CHECK-NEXT: %t2 = bitcast <1 x i64> %t21 to <2 x i32> ++; CHECK-NEXT: %t1 = bitcast <1 x i64> %a to <2 x i32> ++; CHECK-NEXT: %t2 = xor <2 x i32> %t1, + ; CHECK-NEXT: ret <2 x i32> %t2 + } + +@@ -51,8 +53,8 @@ define i64 @and_bitcast_vec_to_int(<2 x i32> %a) { + ret i64 %t2 + + ; CHECK-LABEL: @and_bitcast_vec_to_int( +-; CHECK-NEXT: %t21 = and <2 x i32> %a, +-; CHECK-NEXT: %t2 = bitcast <2 x i32> %t21 to i64 ++; CHECK-NEXT: %t1 = bitcast <2 x i32> %a to i64 ++; CHECK-NEXT: %t2 = and i64 %t1, 3 + ; CHECK-NEXT: ret i64 %t2 + } + +@@ -64,8 +66,8 @@ define <2 x i32> @or_bitcast_int_to_vec(i64 %a) { + ret <2 x i32> %t2 + + ; CHECK-LABEL: @or_bitcast_int_to_vec( +-; CHECK-NEXT: %t21 = or i64 %a, 8589934593 +-; CHECK-NEXT: %t2 = bitcast i64 %t21 to <2 x i32> ++; CHECK-NEXT: %t1 = bitcast i64 %a to <2 x i32> ++; CHECK-NEXT: %t2 = or <2 x i32> %t1, + ; CHECK-NEXT: ret <2 x i32> %t2 + } + +-- +1.8.3.1 + diff --git a/SOURCES/0001-Revert-Merging-r280589.patch b/SOURCES/0001-Revert-Merging-r280589.patch new file mode 100644 index 0000000..ef950f4 --- /dev/null +++ b/SOURCES/0001-Revert-Merging-r280589.patch @@ -0,0 +1,119 @@ +From 95b15b3d2f180b15267032e16c947c0f9b8a112d Mon Sep 17 00:00:00 2001 +From: Dave Airlie +Date: Wed, 1 Mar 2017 13:02:38 +1000 +Subject: [PATCH] Revert "Merging r280589:" + +This reverts commit 25e2616626caafb896517e18cd8aa724fba2b200. +--- + lib/Target/AMDGPU/SIInstructions.td | 1 - + lib/Target/AMDGPU/SIWholeQuadMode.cpp | 7 +++++ + test/CodeGen/AMDGPU/wqm.ll | 49 +++-------------------------------- + 3 files changed, 11 insertions(+), 46 deletions(-) + +diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td +index dde5f2f..18b7d5d 100644 +--- a/lib/Target/AMDGPU/SIInstructions.td ++++ b/lib/Target/AMDGPU/SIInstructions.td +@@ -2029,7 +2029,6 @@ def SI_RETURN : PseudoInstSI < + let hasSideEffects = 1; + let SALU = 1; + let hasNoSchedulingInfo = 1; +- let DisableWQM = 1; + } + + let Uses = [EXEC], Defs = [EXEC, VCC, M0], +diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp +index 1534d58..b200c15 100644 +--- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp ++++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp +@@ -219,6 +219,13 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF, + markInstruction(MI, Flags, Worklist); + GlobalFlags |= Flags; + } ++ ++ if (WQMOutputs && MBB.succ_empty()) { ++ // This is a prolog shader. Make sure we go back to exact mode at the end. 
++ Blocks[&MBB].OutNeeds = StateExact; ++ Worklist.push_back(&MBB); ++ GlobalFlags |= StateExact; ++ } + } + + return GlobalFlags; +diff --git a/test/CodeGen/AMDGPU/wqm.ll b/test/CodeGen/AMDGPU/wqm.ll +index 41e4264..809a7ba 100644 +--- a/test/CodeGen/AMDGPU/wqm.ll ++++ b/test/CodeGen/AMDGPU/wqm.ll +@@ -17,18 +17,17 @@ main_body: + ;CHECK-LABEL: {{^}}test2: + ;CHECK-NEXT: ; %main_body + ;CHECK-NEXT: s_wqm_b64 exec, exec ++;CHECK: image_sample + ;CHECK-NOT: exec +-define amdgpu_ps void @test2(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <4 x i32> %c) { ++;CHECK: _load_dword v0, ++define amdgpu_ps float @test2(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <4 x i32> %c) { + main_body: + %c.1 = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0) + %c.2 = bitcast <4 x float> %c.1 to <4 x i32> + %c.3 = extractelement <4 x i32> %c.2, i32 0 + %gep = getelementptr float, float addrspace(1)* %ptr, i32 %c.3 + %data = load float, float addrspace(1)* %gep +- +- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %data, float undef, float undef, float undef) +- +- ret void ++ ret float %data + } + + ; ... but disabled for stores (and, in this simple case, not re-enabled). +@@ -415,46 +414,6 @@ entry: + ret void + } + +-; Must return to exact at the end of a non-void returning shader, +-; otherwise the EXEC mask exported by the epilog will be wrong. This is true +-; even if the shader has no kills, because a kill could have happened in a +-; previous shader fragment. +-; +-; CHECK-LABEL: {{^}}test_nonvoid_return: +-; CHECK: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec +-; CHECK: s_wqm_b64 exec, exec +-; +-; CHECK: s_and_b64 exec, exec, [[LIVE]] +-; CHECK-NOT: exec +-define amdgpu_ps <4 x float> @test_nonvoid_return() nounwind { +- %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0) +- %tex.i = bitcast <4 x float> %tex to <4 x i32> +- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0) +- ret <4 x float> %dtex +-} +- +-; CHECK-LABEL: {{^}}test_nonvoid_return_unreachable: +-; CHECK: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec +-; CHECK: s_wqm_b64 exec, exec +-; +-; CHECK: s_and_b64 exec, exec, [[LIVE]] +-; CHECK-NOT: exec +-define amdgpu_ps <4 x float> @test_nonvoid_return_unreachable(i32 inreg %c) nounwind { +-entry: +- %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0) +- %tex.i = bitcast <4 x float> %tex to <4 x i32> +- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0) +- +- %cc = icmp sgt i32 %c, 0 +- br i1 %cc, label %if, label %else +- +-if: +- store volatile <4 x float> %dtex, <4 x float>* undef +- unreachable +- +-else: +- ret <4 x float> %dtex +-} + + declare void @llvm.amdgcn.image.store.v4i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1 + declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #1 +-- +2.9.3 + diff --git a/SOURCES/fix-cmake-include.patch b/SOURCES/fix-cmake-include.patch new file mode 100644 index 0000000..842b5c1 --- /dev/null 
+++ b/SOURCES/fix-cmake-include.patch @@ -0,0 +1,41 @@ +diff -up llvm-3.8.0rc2.src/CMakeLists.txt.fixinc llvm-3.8.0rc2.src/CMakeLists.txt +--- llvm-3.8.0rc2.src/CMakeLists.txt.fixinc 2016-01-14 05:03:44.000000000 +1000 ++++ llvm-3.8.0rc2.src/CMakeLists.txt 2016-02-26 10:21:44.477295728 +1000 +@@ -192,6 +192,7 @@ else() + endif() + + # Each of them corresponds to llvm-config's. ++# + set(LLVM_TOOLS_BINARY_DIR ${LLVM_RUNTIME_OUTPUT_INTDIR}) # --bindir + set(LLVM_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}) # --libdir + set(LLVM_MAIN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR} ) # --src-root +@@ -558,6 +559,11 @@ set( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${LL + set( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} ) + set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} ) + ++if(INCLUDE_INSTALL_DIR) ++else() ++set(INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/include) ++endif() ++ + set(CMAKE_BUILD_WITH_INSTALL_RPATH ON) + if (APPLE) + set(CMAKE_INSTALL_NAME_DIR "@rpath") +@@ -728,7 +734,7 @@ add_subdirectory(cmake/modules) + + if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY) + install(DIRECTORY include/llvm include/llvm-c +- DESTINATION include ++ DESTINATION "${INCLUDE_INSTALL_DIR}" + COMPONENT llvm-headers + FILES_MATCHING + PATTERN "*.def" +@@ -740,7 +746,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY) + ) + + install(DIRECTORY ${LLVM_INCLUDE_DIR}/llvm +- DESTINATION include ++ DESTINATION "${INCLUDE_INSTALL_DIR}" + COMPONENT llvm-headers + FILES_MATCHING + PATTERN "*.def" diff --git a/SOURCES/llvm-config.h b/SOURCES/llvm-config.h new file mode 100644 index 0000000..2fa08c9 --- /dev/null +++ b/SOURCES/llvm-config.h @@ -0,0 +1,9 @@ +#include + +#if __WORDSIZE == 32 +#include "llvm-config-32.h" +#elif __WORDSIZE == 64 +#include "llvm-config-64.h" +#else +#error "Unknown word size" +#endif diff --git a/SPECS/llvm.spec b/SPECS/llvm.spec new file mode 100644 index 0000000..5f332c0 --- /dev/null +++ b/SPECS/llvm.spec @@ -0,0 +1,256 @@ +# Components enabled if supported by target architecture: +%ifarch %ix86 x86_64 + %bcond_without gold +%else + %bcond_with gold +%endif + +%if 0%{?rhel} == 6 +%define rhel6 1 +%endif + +# llvm works on the 64-bit versions of these, but not the 32 versions. +# consequently we build swrast on them instead of llvmpipe. +ExcludeArch: ppc s390 %{?rhel6:s390x} + +%ifarch s390x +%global host_target SystemZ +%endif +%ifarch ppc64 ppc64le +%global host_target PowerPC +%endif +%ifarch %ix86 x86_64 +%global host_target X86 +%endif +%ifarch aarch64 +%global host_target AArch64 +%endif +%ifarch %{arm} +%global host_target ARM +%endif + +%ifnarch s390x +%global amdgpu ;AMDGPU +%endif + +Name: mesa-private-llvm +Version: 3.9.1 +Release: 3.p1%{?dist} +Summary: llvm engine for Mesa + +Group: System Environment/Libraries +License: NCSA +URL: http://llvm.org +Source0: http://llvm.org/releases/%{version}/llvm-%{version}.src.tar.xz +Source1: cmake-3.4.3.tar.gz +Source100: llvm-config.h + +Patch1: fix-cmake-include.patch +Patch2: 0001-Revert-Merging-r280589.patch +Patch3: 0001-Revert-InstCombine-transform-bitcasted-bitwise-logic.patch +Patch4: 0001-Fix-R_AARCH64_MOVW_UABS_G3-relocation.patch + +BuildRequires: cmake +BuildRequires: zlib-devel +%if %{with gold} +BuildRequires: binutils-devel +%endif +BuildRequires: libstdc++-static +BuildRequires: python + +%description +This package contains the LLVM-based runtime support for Mesa. It is not a +fully-featured build of LLVM, and use by any package other than Mesa is not +supported. 
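A brief note on the llvm-config.h wrapper shown above: it only dispatches on the glibc __WORDSIZE macro to whichever of llvm-config-32.h or llvm-config-64.h this build installs, which is what keeps the devel package multilib-clean. A hypothetical Mesa-side consumer, assuming the compiler's include path contains the %{_includedir}/mesa-private prefix configured later in this spec, would see it roughly as:

// Hypothetical translation unit on the Mesa side; the include root is assumed to
// be %{_includedir}/mesa-private, as set via INCLUDE_INSTALL_DIR in %build below.
#include "llvm/Config/llvm-config.h"  // picks llvm-config-32.h or llvm-config-64.h via __WORDSIZE
#include <cstdio>

int main() {
  // LLVM_VERSION_MAJOR/MINOR come from the per-wordsize header selected above.
  std::printf("mesa-private LLVM %d.%d\n", LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR);
  return 0;
}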
+ +%package devel +Summary: Libraries and header files for LLVM +Requires: %{name}%{?_isa} = %{version}-%{release} + +%description devel +This package contains library and header files needed to build the LLVM +support in Mesa. + +%prep +%setup -q -n llvm-%{version}.src + +tar xf %{SOURCE1} + +%patch1 -p1 -b .fixinc +%patch2 -p1 -b .radeonsi-fix +%patch3 -p1 -b .bigendian-fix +%patch4 -p2 -b .aarch64-reloc + +%build + +BUILD_DIR=`pwd`/cmake_build +cd cmake-3.4.3 +cmake . -DCMAKE_INSTALL_PREFIX=$BUILD_DIR +make +make install +cd - + + +sed -i 's|ActiveIncludeDir = ActivePrefix + "/include|&/mesa-private|g' tools/llvm-config/llvm-config.cpp + +mkdir -p _build +cd _build + +export PATH=$BUILD_DIR/bin:$PATH +%global __cmake $BUILD_DIR/bin/cmake +# force off shared libs as cmake macros turns it on. +%cmake .. \ + -DINCLUDE_INSTALL_DIR=%{_includedir}/mesa-private \ + -DLLVM_VERSION_SUFFIX="-mesa" \ + -DBUILD_SHARED_LIBS:BOOL=OFF \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DCMAKE_SHARED_LINKER_FLAGS="-Wl,-Bsymbolic -static-libstdc++" \ +%if 0%{?__isa_bits} == 64 + -DLLVM_LIBDIR_SUFFIX=64 \ +%else + -DLLVM_LIBDIR_SUFFIX= \ +%endif + \ + -DLLVM_TARGETS_TO_BUILD="%{host_target}%{?amdgpu}" \ + -DLLVM_ENABLE_LIBCXX:BOOL=OFF \ + -DLLVM_ENABLE_ZLIB:BOOL=ON \ + -DLLVM_ENABLE_FFI:BOOL=OFF \ + -DLLVM_ENABLE_RTTI:BOOL=OFF \ +%if %{with gold} + -DLLVM_BINUTILS_INCDIR=%{_includedir} \ +%endif + \ + -DLLVM_BUILD_RUNTIME:BOOL=ON \ + \ + -DLLVM_INCLUDE_TOOLS:BOOL=ON \ + -DLLVM_BUILD_TOOLS:BOOL=ON \ + \ + -DLLVM_INCLUDE_TESTS:BOOL=ON \ + -DLLVM_BUILD_TESTS:BOOL=ON \ + \ + -DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \ + -DLLVM_BUILD_EXAMPLES:BOOL=OFF \ + \ + -DLLVM_INCLUDE_UTILS:BOOL=ON \ + -DLLVM_INSTALL_UTILS:BOOL=OFF \ + \ + -DLLVM_INCLUDE_DOCS:BOOL=OFF \ + -DLLVM_BUILD_DOCS:BOOL=OFF \ + -DLLVM_ENABLE_SPHINX:BOOL=OFF \ + -DLLVM_ENABLE_DOXYGEN:BOOL=OFF \ + \ + -DLLVM_BUILD_LLVM_DYLIB:BOOL=ON \ + -DLLVM_DYLIB_EXPORT_ALL:BOOL=ON \ + -DLLVM_LINK_LLVM_DYLIB:BOOL=ON \ + -DLLVM_BUILD_EXTERNAL_COMPILER_RT:BOOL=ON \ + -DLLVM_INSTALL_TOOLCHAIN_ONLY:BOOL=OFF + +make %{?_smp_mflags} VERBOSE=1 + +%install +cd _build +make install DESTDIR=%{buildroot} + +# fix multi-lib +mv -v %{buildroot}%{_bindir}/llvm-config %{buildroot}%{_bindir}/%{name}-config-%{__isa_bits} +mv -v %{buildroot}%{_includedir}/mesa-private/llvm/Config/llvm-config{,-%{__isa_bits}}.h +install -m 0644 %{SOURCE100} %{buildroot}%{_includedir}/mesa-private/llvm/Config/llvm-config.h + +rm -f %{buildroot}%{_libdir}/*.a + +rm -f %{buildroot}%{_libdir}/libLLVM.so + +# remove documentation makefiles: +# they require the build directory to work +find examples -name 'Makefile' | xargs -0r rm -f + +# RHEL: strip out most binaries, most libs, and man pages +ls %{buildroot}%{_bindir}/* | grep -v bin/mesa-private | xargs rm -f +ls %{buildroot}%{_libdir}/* | grep -v libLLVM | xargs rm -f +rm -rf %{buildroot}%{_mandir}/man1 + +# RHEL: Strip out some headers Mesa doesn't need +rm -rf %{buildroot}%{_includedir}/mesa-private/llvm/{Assembly} +rm -rf %{buildroot}%{_includedir}/mesa-private/llvm/Option +rm -rf %{buildroot}%{_includedir}/mesa-private/llvm/TableGen +rm -rf %{buildroot}%{_includedir}/llvm-c/lto.h + +# RHEL: Strip out cmake build foo +rm -rf %{buildroot}%{_datadir}/llvm/cmake +rm -rf %{buildroot}%{_libdir}/cmake/llvm + +%check +cd _build +# 3.8.1 note: skx failures are XFAIL. the skylake backport does not wire +# up AVX512 for skylake, but the tests are from code that expects that. +# safe to ignore. 
+make check-all || :
+
+%post -p /sbin/ldconfig
+%postun -p /sbin/ldconfig
+
+%files
+%doc LICENSE.TXT
+%{_libdir}/libLLVM-3.9*-mesa.so
+
+%files devel
+%{_bindir}/%{name}-config-%{__isa_bits}
+%{_includedir}/mesa-private/llvm
+%{_includedir}/mesa-private/llvm-c
+
+%changelog
+* Mon Jun 19 2017 Yaakov Selkowitz - 3.9.1-3.p1
+- Fix relocations on AArch64 (#1461815)
+
+* Wed May 03 2017 Lyude Paul - 3.9.1-3
+- Add temporary revert for #1445423
+
+* Fri Mar 24 2017 Tom Stellard - 3.9.1-2
+- Add fix for radeonsi regression
+
+* Tue Jan 10 2017 Jeff Law - 3.9.1-1
+- Update to 3.9.1
+
+* Wed Jul 13 2016 Adam Jackson - 3.8.1-1
+- Update to 3.8.1
+- Sync some x86 getHostCPUName updates from trunk
+
+* Tue Jun 14 2016 Dave Airlie - 3.8.0-2
+- drop private cmake build
+
+* Thu Mar 10 2016 Dave Airlie 3.8.0-1
+- llvm 3.8.0 final release
+
+* Thu Mar 03 2016 Dave Airlie 3.8.0-0.2
+- llvm 3.8.0 rc3 release
+
+* Fri Feb 19 2016 Dave Airlie 3.8.0-0.1
+- llvm 3.8.0 rc2 release
+
+* Tue Feb 16 2016 Dan Horák 3.7.1-7
+- recognize s390 as SystemZ when configuring build
+
+* Sat Feb 13 2016 Dave Airlie 3.7.1-6
+- export C++ API for mesa.
+
+* Sat Feb 13 2016 Dave Airlie 3.7.1-5
+- reintroduce llvm-static, clang needs it currently.
+
+* Fri Feb 12 2016 Dave Airlie 3.7.1-4
+- jump back to single llvm library, the split libs aren't working very well.
+
+* Fri Feb 05 2016 Dave Airlie 3.7.1-3
+- add missing obsoletes (#1303497)
+
+* Thu Feb 04 2016 Fedora Release Engineering - 3.7.1-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Thu Jan 07 2016 Jan Vcelak 3.7.1-1
+- new upstream release
+- enable gold linker
+
+* Wed Nov 04 2015 Jan Vcelak 3.7.0-100
+- fix Requires for subpackages on the main package
+
+* Tue Oct 06 2015 Jan Vcelak 3.7.0-100
+- initial version using cmake build system