From e94d6442f45f7a020c285f0aab96429245084c17 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Oct 19 2017 13:25:18 +0000 Subject: import valgrind-3.12.0-9.el7_4 --- diff --git a/SOURCES/valgrind-3.12.0-ll-sc-fallback1.patch b/SOURCES/valgrind-3.12.0-ll-sc-fallback1.patch new file mode 100644 index 0000000..4b08704 --- /dev/null +++ b/SOURCES/valgrind-3.12.0-ll-sc-fallback1.patch @@ -0,0 +1,611 @@ +Only arm64. Removed the MIPS part. + +commit 6b72dc54b722af5f6a87ebe258d3da6bcba059b7 +Author: Julian Seward +Date: Mon Apr 24 09:23:43 2017 +0000 + + Bug 369459 - valgrind on arm64 violates the ARMv8 spec (ldxr/stxr) + + This implements a fallback LL/SC implementation as described in bug 344524. + + The fallback implementation is not enabled by default, and there is no + auto-detection for when it should be used. To use it, run with the + flag --sim-hints=fallback-llsc. This commit also allows the existing + MIPS fallback implementation to be enabled with that flag. + + VEX side changes: + + * priv/main_main.c, pub/libvex.h + + Adds new field guest__use_fallback_LLSC to VexAbiInfo + + * pub/libvex_guest_arm64.h priv/guest_arm64_toIR.c + + add front end support, new guest state fields + guest_LLSC_{SIZE,ADDR,DATA}, also documentation of the scheme + + * priv/guest_mips_toIR.c + + allow manual selection of fallback implementation via + --sim-hints=fallback-llsc + + * priv/host_arm64_defs.c priv/host_arm64_defs.h priv/host_arm64_isel.c + + Add support for generating CAS on arm64, as needed by the front end changes + + + + git-svn-id: svn://svn.valgrind.org/vex/trunk@3352 + +diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c +index 088af55..421db37 100644 +--- a/VEX/priv/guest_arm64_toIR.c ++++ b/VEX/priv/guest_arm64_toIR.c +@@ -1147,6 +1147,10 @@ static IRExpr* narrowFrom64 ( IRType dstTy, IRExpr* e ) + #define OFFB_CMSTART offsetof(VexGuestARM64State,guest_CMSTART) + #define OFFB_CMLEN offsetof(VexGuestARM64State,guest_CMLEN) + ++#define OFFB_LLSC_SIZE offsetof(VexGuestARM64State,guest_LLSC_SIZE) ++#define OFFB_LLSC_ADDR offsetof(VexGuestARM64State,guest_LLSC_ADDR) ++#define OFFB_LLSC_DATA offsetof(VexGuestARM64State,guest_LLSC_DATA) ++ + + /* ---------------- Integer registers ---------------- */ + +@@ -4702,7 +4706,9 @@ const HChar* nameArr_Q_SZ ( UInt bitQ, UInt size ) + + + static +-Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn) ++Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn, ++ const VexAbiInfo* abiinfo ++) + { + # define INSN(_bMax,_bMin) SLICE_UInt(insn, (_bMax), (_bMin)) + +@@ -6457,6 +6463,32 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn) + sz 001000 000 s 0 11111 n t STX{R,RH,RB} Ws, Rt, [Xn|SP] + sz 001000 000 s 1 11111 n t STLX{R,RH,RB} Ws, Rt, [Xn|SP] + */ ++ /* For the "standard" implementation we pass through the LL and SC to ++ the host. 
For the "fallback" implementation, for details see ++ https://bugs.kde.org/show_bug.cgi?id=344524 and ++ https://bugs.kde.org/show_bug.cgi?id=369459, ++ but in short: ++ ++ LoadLinked(addr) ++ gs.LLsize = load_size // 1, 2, 4 or 8 ++ gs.LLaddr = addr ++ gs.LLdata = zeroExtend(*addr) ++ ++ StoreCond(addr, data) ++ tmp_LLsize = gs.LLsize ++ gs.LLsize = 0 // "no transaction" ++ if tmp_LLsize != store_size -> fail ++ if addr != gs.LLaddr -> fail ++ if zeroExtend(*addr) != gs.LLdata -> fail ++ cas_ok = CAS(store_size, addr, gs.LLdata -> data) ++ if !cas_ok -> fail ++ succeed ++ ++ When thread scheduled ++ gs.LLsize = 0 // "no transaction" ++ (coregrind/m_scheduler/scheduler.c, run_thread_for_a_while() ++ has to do this bit) ++ */ + if (INSN(29,23) == BITS7(0,0,1,0,0,0,0) + && (INSN(23,21) & BITS3(1,0,1)) == BITS3(0,0,0) + && INSN(14,10) == BITS5(1,1,1,1,1)) { +@@ -6478,29 +6510,99 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn) + + if (isLD && ss == BITS5(1,1,1,1,1)) { + IRTemp res = newTemp(ty); +- stmt(IRStmt_LLSC(Iend_LE, res, mkexpr(ea), NULL/*LL*/)); +- putIReg64orZR(tt, widenUto64(ty, mkexpr(res))); ++ if (abiinfo->guest__use_fallback_LLSC) { ++ // Do the load first so we don't update any guest state ++ // if it faults. ++ IRTemp loaded_data64 = newTemp(Ity_I64); ++ assign(loaded_data64, widenUto64(ty, loadLE(ty, mkexpr(ea)))); ++ stmt( IRStmt_Put( OFFB_LLSC_DATA, mkexpr(loaded_data64) )); ++ stmt( IRStmt_Put( OFFB_LLSC_ADDR, mkexpr(ea) )); ++ stmt( IRStmt_Put( OFFB_LLSC_SIZE, mkU64(szB) )); ++ putIReg64orZR(tt, mkexpr(loaded_data64)); ++ } else { ++ stmt(IRStmt_LLSC(Iend_LE, res, mkexpr(ea), NULL/*LL*/)); ++ putIReg64orZR(tt, widenUto64(ty, mkexpr(res))); ++ } + if (isAcqOrRel) { + stmt(IRStmt_MBE(Imbe_Fence)); + } +- DIP("ld%sx%s %s, [%s]\n", isAcqOrRel ? "a" : "", suffix[szBlg2], +- nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn)); ++ DIP("ld%sx%s %s, [%s] %s\n", isAcqOrRel ? "a" : "", suffix[szBlg2], ++ nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn), ++ abiinfo->guest__use_fallback_LLSC ++ ? "(fallback implementation)" : ""); + return True; + } + if (!isLD) { + if (isAcqOrRel) { + stmt(IRStmt_MBE(Imbe_Fence)); + } +- IRTemp res = newTemp(Ity_I1); + IRExpr* data = narrowFrom64(ty, getIReg64orZR(tt)); +- stmt(IRStmt_LLSC(Iend_LE, res, mkexpr(ea), data)); +- /* IR semantics: res is 1 if store succeeds, 0 if it fails. +- Need to set rS to 1 on failure, 0 on success. */ +- putIReg64orZR(ss, binop(Iop_Xor64, unop(Iop_1Uto64, mkexpr(res)), +- mkU64(1))); +- DIP("st%sx%s %s, %s, [%s]\n", isAcqOrRel ? "a" : "", suffix[szBlg2], ++ if (abiinfo->guest__use_fallback_LLSC) { ++ // This is really ugly, since we don't have any way to do ++ // proper if-then-else. First, set up as if the SC failed, ++ // and jump forwards if it really has failed. ++ ++ // Continuation address ++ IRConst* nia = IRConst_U64(guest_PC_curr_instr + 4); ++ ++ // "the SC failed". Any non-zero value means failure. 
++ putIReg64orZR(ss, mkU64(1)); ++ ++ IRTemp tmp_LLsize = newTemp(Ity_I64); ++ assign(tmp_LLsize, IRExpr_Get(OFFB_LLSC_SIZE, Ity_I64)); ++ stmt( IRStmt_Put( OFFB_LLSC_SIZE, mkU64(0) // "no transaction" ++ )); ++ // Fail if no or wrong-size transaction ++ vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1); ++ stmt( IRStmt_Exit( ++ binop(Iop_CmpNE64, mkexpr(tmp_LLsize), mkU64(szB)), ++ Ijk_Boring, nia, OFFB_PC ++ )); ++ // Fail if the address doesn't match the LL address ++ stmt( IRStmt_Exit( ++ binop(Iop_CmpNE64, mkexpr(ea), ++ IRExpr_Get(OFFB_LLSC_ADDR, Ity_I64)), ++ Ijk_Boring, nia, OFFB_PC ++ )); ++ // Fail if the data doesn't match the LL data ++ IRTemp llsc_data64 = newTemp(Ity_I64); ++ assign(llsc_data64, IRExpr_Get(OFFB_LLSC_DATA, Ity_I64)); ++ stmt( IRStmt_Exit( ++ binop(Iop_CmpNE64, widenUto64(ty, loadLE(ty, mkexpr(ea))), ++ mkexpr(llsc_data64)), ++ Ijk_Boring, nia, OFFB_PC ++ )); ++ // Try to CAS the new value in. ++ IRTemp old = newTemp(ty); ++ IRTemp expd = newTemp(ty); ++ assign(expd, narrowFrom64(ty, mkexpr(llsc_data64))); ++ stmt( IRStmt_CAS(mkIRCAS(/*oldHi*/IRTemp_INVALID, old, ++ Iend_LE, mkexpr(ea), ++ /*expdHi*/NULL, mkexpr(expd), ++ /*dataHi*/NULL, data ++ ))); ++ // Fail if the CAS failed (viz, old != expd) ++ stmt( IRStmt_Exit( ++ binop(Iop_CmpNE64, ++ widenUto64(ty, mkexpr(old)), ++ widenUto64(ty, mkexpr(expd))), ++ Ijk_Boring, nia, OFFB_PC ++ )); ++ // Otherwise we succeeded (!) ++ putIReg64orZR(ss, mkU64(0)); ++ } else { ++ IRTemp res = newTemp(Ity_I1); ++ stmt(IRStmt_LLSC(Iend_LE, res, mkexpr(ea), data)); ++ /* IR semantics: res is 1 if store succeeds, 0 if it fails. ++ Need to set rS to 1 on failure, 0 on success. */ ++ putIReg64orZR(ss, binop(Iop_Xor64, unop(Iop_1Uto64, mkexpr(res)), ++ mkU64(1))); ++ } ++ DIP("st%sx%s %s, %s, [%s] %s\n", isAcqOrRel ? "a" : "", suffix[szBlg2], + nameIRegOrZR(False, ss), +- nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn)); ++ nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn), ++ abiinfo->guest__use_fallback_LLSC ++ ? "(fallback implementation)" : ""); + return True; + } + /* else fall through */ +@@ -6589,7 +6691,8 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn) + + static + Bool dis_ARM64_branch_etc(/*MB_OUT*/DisResult* dres, UInt insn, +- const VexArchInfo* archinfo) ++ const VexArchInfo* archinfo, ++ const VexAbiInfo* abiinfo) + { + # define INSN(_bMax,_bMin) SLICE_UInt(insn, (_bMax), (_bMin)) + +@@ -7048,7 +7151,11 @@ Bool dis_ARM64_branch_etc(/*MB_OUT*/DisResult* dres, UInt insn, + /* AFAICS, this simply cancels a (all?) reservations made by a + (any?) preceding LDREX(es). Arrange to hand it through to + the back end. 
*/ +- stmt( IRStmt_MBE(Imbe_CancelReservation) ); ++ if (abiinfo->guest__use_fallback_LLSC) { ++ stmt( IRStmt_Put( OFFB_LLSC_SIZE, mkU64(0) )); // "no transaction" ++ } else { ++ stmt( IRStmt_MBE(Imbe_CancelReservation) ); ++ } + DIP("clrex #%u\n", mm); + return True; + } +@@ -14411,12 +14518,12 @@ Bool disInstr_ARM64_WRK ( + break; + case BITS4(1,0,1,0): case BITS4(1,0,1,1): + // Branch, exception generation and system instructions +- ok = dis_ARM64_branch_etc(dres, insn, archinfo); ++ ok = dis_ARM64_branch_etc(dres, insn, archinfo, abiinfo); + break; + case BITS4(0,1,0,0): case BITS4(0,1,1,0): + case BITS4(1,1,0,0): case BITS4(1,1,1,0): + // Loads and stores +- ok = dis_ARM64_load_store(dres, insn); ++ ok = dis_ARM64_load_store(dres, insn, abiinfo); + break; + case BITS4(0,1,0,1): case BITS4(1,1,0,1): + // Data processing - register +diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c +index cc7c832..c9affbd 100644 +--- a/VEX/priv/host_arm64_defs.c ++++ b/VEX/priv/host_arm64_defs.c +@@ -1005,6 +1005,13 @@ ARM64Instr* ARM64Instr_StrEX ( Int szB ) { + vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1); + return i; + } ++ARM64Instr* ARM64Instr_CAS ( Int szB ) { ++ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr)); ++ i->tag = ARM64in_CAS; ++ i->ARM64in.CAS.szB = szB; ++ vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1); ++ return i; ++} + ARM64Instr* ARM64Instr_MFence ( void ) { + ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr)); + i->tag = ARM64in_MFence; +@@ -1569,6 +1576,10 @@ void ppARM64Instr ( const ARM64Instr* i ) { + sz, i->ARM64in.StrEX.szB == 8 ? 'x' : 'w'); + return; + } ++ case ARM64in_CAS: { ++ vex_printf("x1 = cas(%dbit)(x3, x5 -> x7)", 8 * i->ARM64in.CAS.szB); ++ return; ++ } + case ARM64in_MFence: + vex_printf("(mfence) dsb sy; dmb sy; isb"); + return; +@@ -2064,6 +2075,14 @@ void getRegUsage_ARM64Instr ( HRegUsage* u, const ARM64Instr* i, Bool mode64 ) + addHRegUse(u, HRmWrite, hregARM64_X0()); + addHRegUse(u, HRmRead, hregARM64_X2()); + return; ++ case ARM64in_CAS: ++ addHRegUse(u, HRmRead, hregARM64_X3()); ++ addHRegUse(u, HRmRead, hregARM64_X5()); ++ addHRegUse(u, HRmRead, hregARM64_X7()); ++ addHRegUse(u, HRmWrite, hregARM64_X1()); ++ /* Pointless to state this since X8 is not available to RA. */ ++ addHRegUse(u, HRmWrite, hregARM64_X8()); ++ break; + case ARM64in_MFence: + return; + case ARM64in_ClrEX: +@@ -2326,6 +2345,8 @@ void mapRegs_ARM64Instr ( HRegRemap* m, ARM64Instr* i, Bool mode64 ) + return; + case ARM64in_StrEX: + return; ++ case ARM64in_CAS: ++ return; + case ARM64in_MFence: + return; + case ARM64in_ClrEX: +@@ -3803,6 +3824,61 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc, + } + goto bad; + } ++ case ARM64in_CAS: { ++ /* This isn't simple. For an explanation see the comment in ++ host_arm64_defs.h on the the definition of ARM64Instr case ++ CAS. 
*/ ++ /* Generate: ++ -- one of: ++ mov x8, x5 // AA0503E8 ++ and x8, x5, #0xFFFFFFFF // 92407CA8 ++ and x8, x5, #0xFFFF // 92403CA8 ++ and x8, x5, #0xFF // 92401CA8 ++ ++ -- one of: ++ ldxr x1, [x3] // C85F7C61 ++ ldxr w1, [x3] // 885F7C61 ++ ldxrh w1, [x3] // 485F7C61 ++ ldxrb w1, [x3] // 085F7C61 ++ ++ -- always: ++ cmp x1, x8 // EB08003F ++ bne out // 54000061 ++ ++ -- one of: ++ stxr w1, x7, [x3] // C8017C67 ++ stxr w1, w7, [x3] // 88017C67 ++ stxrh w1, w7, [x3] // 48017C67 ++ stxrb w1, w7, [x3] // 08017C67 ++ ++ -- always: ++ eor x1, x5, x1 // CA0100A1 ++ out: ++ */ ++ switch (i->ARM64in.CAS.szB) { ++ case 8: *p++ = 0xAA0503E8; break; ++ case 4: *p++ = 0x92407CA8; break; ++ case 2: *p++ = 0x92403CA8; break; ++ case 1: *p++ = 0x92401CA8; break; ++ default: vassert(0); ++ } ++ switch (i->ARM64in.CAS.szB) { ++ case 8: *p++ = 0xC85F7C61; break; ++ case 4: *p++ = 0x885F7C61; break; ++ case 2: *p++ = 0x485F7C61; break; ++ case 1: *p++ = 0x085F7C61; break; ++ } ++ *p++ = 0xEB08003F; ++ *p++ = 0x54000061; ++ switch (i->ARM64in.CAS.szB) { ++ case 8: *p++ = 0xC8017C67; break; ++ case 4: *p++ = 0x88017C67; break; ++ case 2: *p++ = 0x48017C67; break; ++ case 1: *p++ = 0x08017C67; break; ++ } ++ *p++ = 0xCA0100A1; ++ goto done; ++ } + case ARM64in_MFence: { + *p++ = 0xD5033F9F; /* DSB sy */ + *p++ = 0xD5033FBF; /* DMB sy */ +diff --git a/VEX/priv/host_arm64_defs.h b/VEX/priv/host_arm64_defs.h +index 62b25fd..92d247e 100644 +--- a/VEX/priv/host_arm64_defs.h ++++ b/VEX/priv/host_arm64_defs.h +@@ -481,6 +481,7 @@ typedef + ARM64in_Mul, + ARM64in_LdrEX, + ARM64in_StrEX, ++ ARM64in_CAS, + ARM64in_MFence, + ARM64in_ClrEX, + /* ARM64in_V*: scalar ops involving vector registers */ +@@ -668,6 +669,32 @@ typedef + struct { + Int szB; /* 1, 2, 4 or 8 */ + } StrEX; ++ /* x1 = CAS(x3(addr), x5(expected) -> x7(new)), ++ where x1[8*szB-1 : 0] == x5[8*szB-1 : 0] indicates success, ++ x1[8*szB-1 : 0] != x5[8*szB-1 : 0] indicates failure. ++ Uses x8 as scratch (but that's not allocatable). ++ Hence: RD x3, x5, x7; WR x1 ++ ++ (szB=8) mov x8, x5 ++ (szB=4) and x8, x5, #0xFFFFFFFF ++ (szB=2) and x8, x5, #0xFFFF ++ (szB=1) and x8, x5, #0xFF ++ -- x8 is correctly zero-extended expected value ++ ldxr x1, [x3] ++ -- x1 is correctly zero-extended actual value ++ cmp x1, x8 ++ bne after ++ -- if branch taken, failure; x1[[8*szB-1 : 0] holds old value ++ -- attempt to store ++ stxr w1, x7, [x3] ++ -- if store successful, x1==0, so the eor is "x1 := x5" ++ -- if store failed, x1==1, so the eor makes x1 != x5 ++ eor x1, x5, x1 ++ after: ++ */ ++ struct { ++ Int szB; /* 1, 2, 4 or 8 */ ++ } CAS; + /* Mem fence. An insn which fences all loads and stores as + much as possible before continuing. 
On ARM64 we emit the + sequence "dsb sy ; dmb sy ; isb sy", which is probably +@@ -912,6 +939,7 @@ extern ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR, + ARM64MulOp op ); + extern ARM64Instr* ARM64Instr_LdrEX ( Int szB ); + extern ARM64Instr* ARM64Instr_StrEX ( Int szB ); ++extern ARM64Instr* ARM64Instr_CAS ( Int szB ); + extern ARM64Instr* ARM64Instr_MFence ( void ); + extern ARM64Instr* ARM64Instr_ClrEX ( void ); + extern ARM64Instr* ARM64Instr_VLdStH ( Bool isLoad, HReg sD, HReg rN, +diff --git a/VEX/priv/host_arm64_isel.c b/VEX/priv/host_arm64_isel.c +index 42748e4..07ce87a 100644 +--- a/VEX/priv/host_arm64_isel.c ++++ b/VEX/priv/host_arm64_isel.c +@@ -1383,12 +1383,13 @@ static ARM64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) + || e->Iex.Binop.op == Iop_CmpLT64S + || e->Iex.Binop.op == Iop_CmpLT64U + || e->Iex.Binop.op == Iop_CmpLE64S +- || e->Iex.Binop.op == Iop_CmpLE64U)) { ++ || e->Iex.Binop.op == Iop_CmpLE64U ++ || e->Iex.Binop.op == Iop_CasCmpEQ64)) { + HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1); + ARM64RIA* argR = iselIntExpr_RIA(env, e->Iex.Binop.arg2); + addInstr(env, ARM64Instr_Cmp(argL, argR, True/*is64*/)); + switch (e->Iex.Binop.op) { +- case Iop_CmpEQ64: return ARM64cc_EQ; ++ case Iop_CmpEQ64: case Iop_CasCmpEQ64: return ARM64cc_EQ; + case Iop_CmpNE64: return ARM64cc_NE; + case Iop_CmpLT64S: return ARM64cc_LT; + case Iop_CmpLT64U: return ARM64cc_CC; +@@ -1405,12 +1406,13 @@ static ARM64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) + || e->Iex.Binop.op == Iop_CmpLT32S + || e->Iex.Binop.op == Iop_CmpLT32U + || e->Iex.Binop.op == Iop_CmpLE32S +- || e->Iex.Binop.op == Iop_CmpLE32U)) { ++ || e->Iex.Binop.op == Iop_CmpLE32U ++ || e->Iex.Binop.op == Iop_CasCmpEQ32)) { + HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1); + ARM64RIA* argR = iselIntExpr_RIA(env, e->Iex.Binop.arg2); + addInstr(env, ARM64Instr_Cmp(argL, argR, False/*!is64*/)); + switch (e->Iex.Binop.op) { +- case Iop_CmpEQ32: return ARM64cc_EQ; ++ case Iop_CmpEQ32: case Iop_CasCmpEQ32: return ARM64cc_EQ; + case Iop_CmpNE32: return ARM64cc_NE; + case Iop_CmpLT32S: return ARM64cc_LT; + case Iop_CmpLT32U: return ARM64cc_CC; +@@ -1420,6 +1422,34 @@ static ARM64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) + } + } + ++ /* --- Cmp*16*(x,y) --- */ ++ if (e->tag == Iex_Binop ++ && (e->Iex.Binop.op == Iop_CasCmpEQ16)) { ++ HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1); ++ HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2); ++ HReg argL2 = widen_z_16_to_64(env, argL); ++ HReg argR2 = widen_z_16_to_64(env, argR); ++ addInstr(env, ARM64Instr_Cmp(argL2, ARM64RIA_R(argR2), True/*is64*/)); ++ switch (e->Iex.Binop.op) { ++ case Iop_CasCmpEQ16: return ARM64cc_EQ; ++ default: vpanic("iselCondCode(arm64): CmpXX16"); ++ } ++ } ++ ++ /* --- Cmp*8*(x,y) --- */ ++ if (e->tag == Iex_Binop ++ && (e->Iex.Binop.op == Iop_CasCmpEQ8)) { ++ HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1); ++ HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2); ++ HReg argL2 = widen_z_8_to_64(env, argL); ++ HReg argR2 = widen_z_8_to_64(env, argR); ++ addInstr(env, ARM64Instr_Cmp(argL2, ARM64RIA_R(argR2), True/*is64*/)); ++ switch (e->Iex.Binop.op) { ++ case Iop_CasCmpEQ8: return ARM64cc_EQ; ++ default: vpanic("iselCondCode(arm64): CmpXX8"); ++ } ++ } ++ + ppIRExpr(e); + vpanic("iselCondCode"); + } +@@ -3833,6 +3863,57 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) + break; + } + ++ /* --------- ACAS --------- */ ++ case Ist_CAS: { ++ if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) { ++ /* "normal" 
singleton CAS */ ++ UChar sz; ++ IRCAS* cas = stmt->Ist.CAS.details; ++ IRType ty = typeOfIRExpr(env->type_env, cas->dataLo); ++ switch (ty) { ++ case Ity_I64: sz = 8; break; ++ case Ity_I32: sz = 4; break; ++ case Ity_I16: sz = 2; break; ++ case Ity_I8: sz = 1; break; ++ default: goto unhandled_cas; ++ } ++ HReg rAddr = iselIntExpr_R(env, cas->addr); ++ HReg rExpd = iselIntExpr_R(env, cas->expdLo); ++ HReg rData = iselIntExpr_R(env, cas->dataLo); ++ vassert(cas->expdHi == NULL); ++ vassert(cas->dataHi == NULL); ++ addInstr(env, ARM64Instr_MovI(hregARM64_X3(), rAddr)); ++ addInstr(env, ARM64Instr_MovI(hregARM64_X5(), rExpd)); ++ addInstr(env, ARM64Instr_MovI(hregARM64_X7(), rData)); ++ addInstr(env, ARM64Instr_CAS(sz)); ++ /* Now we have the lowest szB bytes of x1 are either equal to ++ the lowest szB bytes of x5, indicating success, or they ++ aren't, indicating failure. The IR semantics actually ++ require us to return the old value at the location, ++ regardless of success or failure, but in the case of ++ failure it's not clear how to do this, since ++ ARM64Instr_CAS can't provide that. Instead we'll just ++ return the relevant bit of x1, since that's at least ++ guaranteed to be different from the lowest bits of x5 on ++ failure. */ ++ HReg rResult = hregARM64_X1(); ++ switch (sz) { ++ case 8: break; ++ case 4: rResult = widen_z_32_to_64(env, rResult); break; ++ case 2: rResult = widen_z_16_to_64(env, rResult); break; ++ case 1: rResult = widen_z_8_to_64(env, rResult); break; ++ default: vassert(0); ++ } ++ // "old" in this case is interpreted somewhat liberally, per ++ // the previous comment. ++ HReg rOld = lookupIRTemp(env, cas->oldLo); ++ addInstr(env, ARM64Instr_MovI(rOld, rResult)); ++ return; ++ } ++ unhandled_cas: ++ break; ++ } ++ + /* --------- MEM FENCE --------- */ + case Ist_MBE: + switch (stmt->Ist.MBE.event) { +diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c +index 8c4845e..26e9880 100644 +--- a/VEX/priv/main_main.c ++++ b/VEX/priv/main_main.c +@@ -1556,6 +1556,7 @@ void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi ) + vbi->guest_amd64_assume_gs_is_const = False; + vbi->guest_ppc_zap_RZ_at_blr = False; + vbi->guest_ppc_zap_RZ_at_bl = NULL; ++ vbi->guest__use_fallback_LLSC = False; + vbi->host_ppc_calls_use_fndescrs = False; + } + +diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h +index 8ac3d9f..cbbb1ad 100644 +--- a/VEX/pub/libvex.h ++++ b/VEX/pub/libvex.h +@@ -369,6 +369,11 @@ void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai ); + guest is ppc32-linux ==> const False + guest is other ==> inapplicable + ++ guest__use_fallback_LLSC ++ guest is mips32 ==> applicable, default True ++ guest is mips64 ==> applicable, default True ++ guest is arm64 ==> applicable, default False ++ + host_ppc_calls_use_fndescrs: + host is ppc32-linux ==> False + host is ppc64-linux ==> True +@@ -401,11 +406,17 @@ typedef + is assumed equivalent to a fn which always returns False. */ + Bool (*guest_ppc_zap_RZ_at_bl)(Addr); + ++ /* Potentially for all guests that use LL/SC: use the fallback ++ (synthesised) implementation rather than passing LL/SC on to ++ the host? */ ++ Bool guest__use_fallback_LLSC; ++ + /* PPC32/PPC64 HOSTS only: does '&f' give us a pointer to a + function descriptor on the host, or to the function code + itself? True => descriptor, False => code. */ + Bool host_ppc_calls_use_fndescrs; + ++ /* ??? Description ??? 
*/ + Bool guest_mips_fp_mode64; + } + VexAbiInfo; +diff --git a/VEX/pub/libvex_guest_arm64.h b/VEX/pub/libvex_guest_arm64.h +index c438c1e..8b62cdd 100644 +--- a/VEX/pub/libvex_guest_arm64.h ++++ b/VEX/pub/libvex_guest_arm64.h +@@ -159,9 +159,14 @@ typedef + note of bits 23 and 22. */ + UInt guest_FPCR; + ++ /* Fallback LL/SC support. See bugs 344524 and 369459. */ ++ ULong guest_LLSC_SIZE; // 0==no current transaction, else 1,2,4 or 8. ++ ULong guest_LLSC_ADDR; // Address of transaction. ++ ULong guest_LLSC_DATA; // Original value at _ADDR, zero-extended. ++ + /* Padding to make it have an 16-aligned size */ + /* UInt pad_end_0; */ +- /* ULong pad_end_1; */ ++ ULong pad_end_1; + } + VexGuestARM64State; + diff --git a/SOURCES/valgrind-3.12.0-ll-sc-fallback2.patch b/SOURCES/valgrind-3.12.0-ll-sc-fallback2.patch new file mode 100644 index 0000000..4389a78 --- /dev/null +++ b/SOURCES/valgrind-3.12.0-ll-sc-fallback2.patch @@ -0,0 +1,181 @@ +commit 9d6e165ea7cf9da0086b9b107d6dd2498f1af6d0 +Author: Julian Seward +Date: Mon Apr 24 09:24:57 2017 +0000 + + Bug 369459 - valgrind on arm64 violates the ARMv8 spec (ldxr/stxr) + + This implements a fallback LL/SC implementation as described in bug 344524. + + Valgrind side changes: + + * Command line plumbing for --sim-hints=fallback-llsc + + * memcheck: handle new arm64 guest state in memcheck/mc_machine.c + + + + git-svn-id: svn://svn.valgrind.org/valgrind/trunk@16309 + +diff --git a/coregrind/m_main.c b/coregrind/m_main.c +index 9a4b60e..424daf7 100644 +--- a/coregrind/m_main.c ++++ b/coregrind/m_main.c +@@ -187,7 +187,7 @@ static void usage_NORETURN ( Bool debug_help ) + " --sim-hints=hint1,hint2,... activate unusual sim behaviours [none] \n" + " where hint is one of:\n" + " lax-ioctls lax-doors fuse-compatible enable-outer\n" +-" no-inner-prefix no-nptl-pthread-stackcache none\n" ++" no-inner-prefix no-nptl-pthread-stackcache fallback-llsc none\n" + " --fair-sched=no|yes|try schedule threads fairly on multicore systems [no]\n" + " --kernel-variant=variant1,variant2,...\n" + " handle non-standard kernel variants [none]\n" +@@ -417,7 +417,7 @@ static void early_process_cmd_line_options ( /*OUT*/Int* need_help ) + else if VG_USETX_CLO (str, "--sim-hints", + "lax-ioctls,lax-doors,fuse-compatible," + "enable-outer,no-inner-prefix," +- "no-nptl-pthread-stackcache", ++ "no-nptl-pthread-stackcache,fallback-llsc", + VG_(clo_sim_hints)) {} + } + +diff --git a/coregrind/m_scheduler/scheduler.c b/coregrind/m_scheduler/scheduler.c +index 9ae3f21..96a24f8 100644 +--- a/coregrind/m_scheduler/scheduler.c ++++ b/coregrind/m_scheduler/scheduler.c +@@ -925,6 +925,14 @@ void run_thread_for_a_while ( /*OUT*/HWord* two_words, + tst->arch.vex.host_EvC_FAILADDR + = (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) ); + ++ /* Invalidate any in-flight LL/SC transactions, in the case that we're ++ using the fallback LL/SC implementation. See bugs 344524 and 369459. 
*/ ++# if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) ++ tst->arch.vex.guest_LLaddr = (HWord)(-1); ++# elif defined(VGP_arm64_linux) ++ tst->arch.vex.guest_LLSC_SIZE = 0; ++# endif ++ + if (0) { + vki_sigset_t m; + Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m); +diff --git a/coregrind/m_translate.c b/coregrind/m_translate.c +index 2d6d3ba..c467e33 100644 +--- a/coregrind/m_translate.c ++++ b/coregrind/m_translate.c +@@ -1663,30 +1663,51 @@ Bool VG_(translate) ( ThreadId tid, + vex_abiinfo.guest_amd64_assume_fs_is_const = True; + vex_abiinfo.guest_amd64_assume_gs_is_const = True; + # endif ++ + # if defined(VGP_amd64_darwin) + vex_abiinfo.guest_amd64_assume_gs_is_const = True; + # endif ++ ++# if defined(VGP_amd64_solaris) ++ vex_abiinfo.guest_amd64_assume_fs_is_const = True; ++# endif ++ + # if defined(VGP_ppc32_linux) + vex_abiinfo.guest_ppc_zap_RZ_at_blr = False; + vex_abiinfo.guest_ppc_zap_RZ_at_bl = NULL; + # endif ++ + # if defined(VGP_ppc64be_linux) + vex_abiinfo.guest_ppc_zap_RZ_at_blr = True; + vex_abiinfo.guest_ppc_zap_RZ_at_bl = const_True; + vex_abiinfo.host_ppc_calls_use_fndescrs = True; + # endif ++ + # if defined(VGP_ppc64le_linux) + vex_abiinfo.guest_ppc_zap_RZ_at_blr = True; + vex_abiinfo.guest_ppc_zap_RZ_at_bl = const_True; + vex_abiinfo.host_ppc_calls_use_fndescrs = False; + # endif +-# if defined(VGP_amd64_solaris) +- vex_abiinfo.guest_amd64_assume_fs_is_const = True; +-# endif ++ + # if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) + ThreadArchState* arch = &VG_(threads)[tid].arch; + vex_abiinfo.guest_mips_fp_mode64 = + !!(arch->vex.guest_CP0_status & MIPS_CP0_STATUS_FR); ++ /* Compute guest__use_fallback_LLSC, overiding any settings of ++ VG_(clo_fallback_llsc) that we know would cause the guest to ++ fail (loop). */ ++ if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) { ++ /* We must use the fallback scheme. */ ++ vex_abiinfo.guest__use_fallback_LLSC = True; ++ } else { ++ vex_abiinfo.guest__use_fallback_LLSC ++ = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints)); ++ } ++# endif ++ ++# if defined(VGP_arm64_linux) ++ vex_abiinfo.guest__use_fallback_LLSC ++ = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints)); + # endif + + /* Set up closure args. */ +diff --git a/coregrind/pub_core_options.h b/coregrind/pub_core_options.h +index ba27127..703d08a 100644 +--- a/coregrind/pub_core_options.h ++++ b/coregrind/pub_core_options.h +@@ -222,14 +222,15 @@ typedef + SimHint_fuse_compatible, + SimHint_enable_outer, + SimHint_no_inner_prefix, +- SimHint_no_nptl_pthread_stackcache ++ SimHint_no_nptl_pthread_stackcache, ++ SimHint_fallback_llsc + } + SimHint; + + // Build mask to check or set SimHint a membership + #define SimHint2S(a) (1 << (a)) + // SimHint h is member of the Set s ? +-#define SimHintiS(h,s) ((s) & SimHint2S(h)) ++#define SimHintiS(h,s) (((s) & SimHint2S(h)) != 0) + extern UInt VG_(clo_sim_hints); + + /* Show symbols in the form 'name+offset' ? 
Default: NO */ +diff --git a/memcheck/mc_machine.c b/memcheck/mc_machine.c +index f6acc0b..608a374 100644 +--- a/memcheck/mc_machine.c ++++ b/memcheck/mc_machine.c +@@ -1040,6 +1040,10 @@ static Int get_otrack_shadow_offset_wrk ( Int offset, Int szB ) + if (o == GOF(CMSTART) && sz == 8) return -1; // untracked + if (o == GOF(CMLEN) && sz == 8) return -1; // untracked + ++ if (o == GOF(LLSC_SIZE) && sz == 8) return -1; // untracked ++ if (o == GOF(LLSC_ADDR) && sz == 8) return o; ++ if (o == GOF(LLSC_DATA) && sz == 8) return o; ++ + VG_(printf)("MC_(get_otrack_shadow_offset)(arm64)(off=%d,sz=%d)\n", + offset,szB); + tl_assert(0); +diff --git a/none/tests/cmdline1.stdout.exp b/none/tests/cmdline1.stdout.exp +index 4e8bca4..a4d8175 100644 +--- a/none/tests/cmdline1.stdout.exp ++++ b/none/tests/cmdline1.stdout.exp +@@ -101,7 +101,7 @@ usage: valgrind [options] prog-and-args + --sim-hints=hint1,hint2,... activate unusual sim behaviours [none] + where hint is one of: + lax-ioctls lax-doors fuse-compatible enable-outer +- no-inner-prefix no-nptl-pthread-stackcache none ++ no-inner-prefix no-nptl-pthread-stackcache fallback-llsc none + --fair-sched=no|yes|try schedule threads fairly on multicore systems [no] + --kernel-variant=variant1,variant2,... + handle non-standard kernel variants [none] +diff --git a/none/tests/cmdline2.stdout.exp b/none/tests/cmdline2.stdout.exp +index 644013c..461ad2d 100644 +--- a/none/tests/cmdline2.stdout.exp ++++ b/none/tests/cmdline2.stdout.exp +@@ -101,7 +101,7 @@ usage: valgrind [options] prog-and-args + --sim-hints=hint1,hint2,... activate unusual sim behaviours [none] + where hint is one of: + lax-ioctls lax-doors fuse-compatible enable-outer +- no-inner-prefix no-nptl-pthread-stackcache none ++ no-inner-prefix no-nptl-pthread-stackcache fallback-llsc none + --fair-sched=no|yes|try schedule threads fairly on multicore systems [no] + --kernel-variant=variant1,variant2,... + handle non-standard kernel variants [none] diff --git a/SOURCES/valgrind-3.12.0-ll-sc-fallback3.patch b/SOURCES/valgrind-3.12.0-ll-sc-fallback3.patch new file mode 100644 index 0000000..eedd891 --- /dev/null +++ b/SOURCES/valgrind-3.12.0-ll-sc-fallback3.patch @@ -0,0 +1,132 @@ +commit 306353a8d233c3d7c60e7b02799b8675e745d5c2 +Author: Julian Seward +Date: Tue May 16 05:35:23 2017 +0000 + + arm64-linux: detect Cavium CPUs (implementer = 0x43) and enable the + fallback LLSC implementation in that case. Pertains to bug #369459. + + + + git-svn-id: svn://svn.valgrind.org/valgrind/trunk@16380 + +diff --git a/coregrind/m_machine.c b/coregrind/m_machine.c +index 93bdd72..eac1c16 100644 +--- a/coregrind/m_machine.c ++++ b/coregrind/m_machine.c +@@ -634,7 +634,7 @@ static UInt VG_(get_machine_model)(void) + return model; + } + +-#endif /* VGA_s390x */ ++#endif /* defined(VGA_s390x) */ + + #if defined(VGA_mips32) || defined(VGA_mips64) + +@@ -755,12 +755,65 @@ static Bool VG_(parse_cpuinfo)(void) + return True; + } + +-#endif ++#endif /* defined(VGA_mips32) || defined(VGA_mips64) */ ++ ++#if defined(VGP_arm64_linux) ++ ++/* Check to see whether we are running on a Cavium core, and if so auto-enable ++ the fallback LLSC implementation. See #369459. 
*/ ++ ++static Bool VG_(parse_cpuinfo)(void) ++{ ++ const char *search_Cavium_str = "CPU implementer\t: 0x43"; ++ ++ Int n, fh; ++ SysRes fd; ++ SizeT num_bytes, file_buf_size; ++ HChar *file_buf; ++ ++ /* Slurp contents of /proc/cpuinfo into FILE_BUF */ ++ fd = VG_(open)( "/proc/cpuinfo", 0, VKI_S_IRUSR ); ++ if ( sr_isError(fd) ) return False; ++ ++ fh = sr_Res(fd); ++ ++ /* Determine the size of /proc/cpuinfo. ++ Work around broken-ness in /proc file system implementation. ++ fstat returns a zero size for /proc/cpuinfo although it is ++ claimed to be a regular file. */ ++ num_bytes = 0; ++ file_buf_size = 1000; ++ file_buf = VG_(malloc)("cpuinfo", file_buf_size + 1); ++ while (42) { ++ n = VG_(read)(fh, file_buf, file_buf_size); ++ if (n < 0) break; ++ ++ num_bytes += n; ++ if (n < file_buf_size) break; /* reached EOF */ ++ } ++ ++ if (n < 0) num_bytes = 0; /* read error; ignore contents */ ++ ++ if (num_bytes > file_buf_size) { ++ VG_(free)( file_buf ); ++ VG_(lseek)( fh, 0, VKI_SEEK_SET ); ++ file_buf = VG_(malloc)( "cpuinfo", num_bytes + 1 ); ++ n = VG_(read)( fh, file_buf, num_bytes ); ++ if (n < 0) num_bytes = 0; ++ } + +-/* Determine what insn set and insn set variant the host has, and +- record it. To be called once at system startup. Returns False if +- this a CPU incapable of running Valgrind. +- Also determine information about the caches on this host. */ ++ file_buf[num_bytes] = '\0'; ++ VG_(close)(fh); ++ ++ /* Parse file */ ++ if (VG_(strstr)(file_buf, search_Cavium_str) != NULL) ++ vai.arm64_requires_fallback_LLSC = True; ++ ++ VG_(free)(file_buf); ++ return True; ++} ++ ++#endif /* defined(VGP_arm64_linux) */ + + Bool VG_(machine_get_hwcaps)( void ) + { +@@ -1588,6 +1641,11 @@ Bool VG_(machine_get_hwcaps)( void ) + + VG_(machine_get_cache_info)(&vai); + ++ /* Check whether we need to use the fallback LLSC implementation. ++ If the check fails, give up. */ ++ if (! VG_(parse_cpuinfo)()) ++ return False; ++ + /* 0 denotes 'not set'. The range of legitimate values here, + after being set that is, is 2 though 17 inclusive. */ + vg_assert(vai.arm64_dMinLine_lg2_szB == 0); +@@ -1600,6 +1658,8 @@ Bool VG_(machine_get_hwcaps)( void ) + "ctr_el0.iMinLine_szB = %d\n", + 1 << vai.arm64_dMinLine_lg2_szB, + 1 << vai.arm64_iMinLine_lg2_szB); ++ VG_(debugLog)(1, "machine", "ARM64: requires_fallback_LLSC: %s\n", ++ vai.arm64_requires_fallback_LLSC ? "yes" : "no"); + + return True; + } +diff --git a/coregrind/m_translate.c b/coregrind/m_translate.c +index 2f0ceac..55c845d 100644 +--- a/coregrind/m_translate.c ++++ b/coregrind/m_translate.c +@@ -1707,7 +1707,10 @@ Bool VG_(translate) ( ThreadId tid, + + # if defined(VGP_arm64_linux) + vex_abiinfo.guest__use_fallback_LLSC +- = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints)); ++ = /* The user asked explicitly */ ++ SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints)) ++ || /* we autodetected that it is necessary */ ++ vex_archinfo.arm64_requires_fallback_LLSC; + # endif + + /* Set up closure args. */ diff --git a/SOURCES/valgrind-3.12.0-ll-sc-fallback4.patch b/SOURCES/valgrind-3.12.0-ll-sc-fallback4.patch new file mode 100644 index 0000000..ce60328 --- /dev/null +++ b/SOURCES/valgrind-3.12.0-ll-sc-fallback4.patch @@ -0,0 +1,37 @@ +commit b1983ee86743f987e28d9fdb363d460bc5f3b23f +Author: Julian Seward +Date: Tue May 16 06:26:48 2017 +0000 + + arm64-linux: detect Cavium CPUs (implementer = 0x43) and enable the + fallback LLSC implementation in that case. Pertains to bug #369459. 
+ (VEX side changes) + + + git-svn-id: svn://svn.valgrind.org/vex/trunk@3371 + +diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c +index d4b142d..7c125ce 100644 +--- a/VEX/priv/main_main.c ++++ b/VEX/priv/main_main.c +@@ -1468,6 +1468,7 @@ void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai ) + vai->ppc_dcbzl_szB = 0; + vai->arm64_dMinLine_lg2_szB = 0; + vai->arm64_iMinLine_lg2_szB = 0; ++ vai->arm64_requires_fallback_LLSC = False; + vai->hwcache_info.num_levels = 0; + vai->hwcache_info.num_caches = 0; + vai->hwcache_info.caches = NULL; +diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h +index b0ce1da..d75919d 100644 +--- a/VEX/pub/libvex.h ++++ b/VEX/pub/libvex.h +@@ -323,6 +323,9 @@ typedef + line size of 64 bytes would be encoded here as 6. */ + UInt arm64_dMinLine_lg2_szB; + UInt arm64_iMinLine_lg2_szB; ++ /* ARM64: does the host require us to use the fallback LLSC ++ implementation? */ ++ Bool arm64_requires_fallback_LLSC; + } + VexArchInfo; + diff --git a/SPECS/valgrind.spec b/SPECS/valgrind.spec index bb63bdb..a59435e 100644 --- a/SPECS/valgrind.spec +++ b/SPECS/valgrind.spec @@ -3,7 +3,7 @@ Summary: Tool for finding memory management bugs in programs Name: %{?scl_prefix}valgrind Version: 3.12.0 -Release: 8%{?dist} +Release: 9%{?dist} Epoch: 1 License: GPLv2+ URL: http://www.valgrind.org/ @@ -133,6 +133,12 @@ Patch19: valgrind-3.12.0-powerpc-register-pair.patch # KDE#377478 PPC64: ISA 3.0 setup fixes Patch20: valgrind-3.12.0-ppc64-isa-3_00.patch +# KDE#369459 valgrind on arm64 violates the ARMv8 spec (ldxr/stxr) +Patch21: valgrind-3.12.0-ll-sc-fallback1.patch +Patch22: valgrind-3.12.0-ll-sc-fallback2.patch +Patch23: valgrind-3.12.0-ll-sc-fallback3.patch +Patch24: valgrind-3.12.0-ll-sc-fallback4.patch + # RHEL7 specific patches. # RHBZ#996927 Ignore PPC floating point phased out category. @@ -270,6 +276,12 @@ Valgrind User Manual for details. %patch18 -p1 %patch19 -p1 %patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +# Remove patch artifacts from tests to pacify makefile consistency checker. +rm -f none/tests/cmdline?.stdout.exp.orig # RHEL7 specific patches %patch7001 -p1 @@ -467,7 +479,10 @@ echo ===============END TESTING=============== %endif %changelog -* Tue Mar 28 2017 Mark Wielaard +* Thu Sep 21 2017 Mark Wielaard - 3.12.0-9 +- Add valgrind-3.12.0-ll-sc-fallback[1234].patch (#1492753) + +* Tue Mar 28 2017 Mark Wielaard - 3.12.0-8 - Add valgrind-3.12.0-powerpc-register-pair.patch (#1437030) - Add valgrind-3.12.0-ppc64-isa-3_00.patch (#1437032)
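
For reference, the kind of guest code these patches are aimed at is an LDXR/STXR
retry loop. Below is a minimal standalone sketch (assumptions: arm64 Linux with
GCC or Clang inline assembly; the file name llsc_demo.c and the helper
atomic_add64 are illustrative only and are not part of the patches). Under the
default pass-through LL/SC translation such a loop can fail to make progress on
some cores, which is what bug 369459 reports; running the program as
valgrind --sim-hints=fallback-llsc ./a.out selects the CAS-based fallback added
here, and on Cavium parts (CPU implementer 0x43) patch 3 enables it automatically.

    /* llsc_demo.c -- illustrative sketch, not part of the valgrind patches. */
    #include <stdint.h>
    #include <stdio.h>

    /* Classic load-exclusive/store-exclusive increment, the instruction
       pattern that bug 369459 is about.  Returns the value seen before
       the add. */
    static uint64_t atomic_add64(uint64_t *p, uint64_t v)
    {
        uint64_t old, sum;
        uint32_t fail;                      /* STXR status: 0 = store succeeded */
        do {
            __asm__ __volatile__(
                "ldxr  %0, [%3]      \n\t"  /* LL: old = *p, open the transaction */
                "add   %1, %0, %4    \n\t"
                "stxr  %w2, %1, [%3] \n\t"  /* SC: *p = sum if still exclusive    */
                : "=&r"(old), "=&r"(sum), "=&r"(fail)
                : "r"(p), "r"(v)
                : "memory");
        } while (fail != 0);                /* retry until the store-exclusive wins */
        return old;
    }

    int main(void)
    {
        uint64_t x = 40;
        atomic_add64(&x, 2);
        printf("x = %llu\n", (unsigned long long)x);   /* expect 42 */
        return 0;
    }

Building this with gcc -O2 llsc_demo.c on an arm64 machine and running it under
valgrind --sim-hints=fallback-llsc exercises the fallback path added by these
patches (LDXR records size/address/data in the guest state, STXR is checked
against that record and completed with a real CAS).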