diff --git a/valgrind-3.14.0-ppc64-lxvb16x.patch b/valgrind-3.14.0-ppc64-lxvb16x.patch
new file mode 100644
index 0000000..e821d81
--- /dev/null
+++ b/valgrind-3.14.0-ppc64-lxvb16x.patch
@@ -0,0 +1,88 @@
+commit 5c00e04a1b61475a7f731f8cfede114201815e0a
+Author: Mark Wielaard
+Date:   Sun Dec 9 23:25:05 2018 +0100
+
+    Implement ppc64 lxvb16x as 128-bit vector load with reversed double words.
+
+    This makes it possible for memcheck to know which part of the 128bit
+    vector is defined, even if the load is partly beyond an addressable block.
+
+    Partially resolves bug 386945.
+
+diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
+index 7af4973..ec2f90a 100644
+--- a/VEX/priv/guest_ppc_toIR.c
++++ b/VEX/priv/guest_ppc_toIR.c
+@@ -20702,54 +20702,29 @@ dis_vx_load ( UInt theInstr )
+    {
+       DIP("lxvb16x %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ 
+-      IRTemp byte[16];
+-      int i;
+-      UInt ea_off = 0;
+-      IRExpr* irx_addr;
+-      IRTemp tmp_low[9];
+-      IRTemp tmp_hi[9];
++      /* The result of lxvb16x should be the same on big and little
++         endian systems. We do a host load, then reverse the bytes in
++         the double words. If the host load was little endian we swap
++         them around again. */
+ 
+-      tmp_low[0] = newTemp( Ity_I64 );
+-      tmp_hi[0] = newTemp( Ity_I64 );
+-      assign( tmp_low[0], mkU64( 0 ) );
+-      assign( tmp_hi[0], mkU64( 0 ) );
+-
+-      for ( i = 0; i < 8; i++ ) {
+-         byte[i] = newTemp( Ity_I64 );
+-         tmp_low[i+1] = newTemp( Ity_I64 );
+-
+-         irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
+-                           ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
+-         ea_off += 1;
+-
+-         assign( byte[i], binop( Iop_Shl64,
+-                                 unop( Iop_8Uto64,
+-                                       load( Ity_I8, irx_addr ) ),
+-                                 mkU8( 8 * ( 7 - i ) ) ) );
++      IRTemp high = newTemp(Ity_I64);
++      IRTemp high_rev = newTemp(Ity_I64);
++      IRTemp low = newTemp(Ity_I64);
++      IRTemp low_rev = newTemp(Ity_I64);
+ 
+-         assign( tmp_low[i+1],
+-                 binop( Iop_Or64,
+-                        mkexpr( byte[i] ), mkexpr( tmp_low[i] ) ) );
+-      }
++      IRExpr *t128 = load( Ity_V128, mkexpr( EA ) );
+ 
+-      for ( i = 0; i < 8; i++ ) {
+-         byte[i + 8] = newTemp( Ity_I64 );
+-         tmp_hi[i+1] = newTemp( Ity_I64 );
++      assign( high, unop(Iop_V128HIto64, t128) );
++      assign( high_rev, unop(Iop_Reverse8sIn64_x1, mkexpr(high)) );
++      assign( low, unop(Iop_V128to64, t128) );
++      assign( low_rev, unop(Iop_Reverse8sIn64_x1, mkexpr(low)) );
+ 
+-         irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
+-                           ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
+-         ea_off += 1;
++      if (host_endness == VexEndnessLE)
++         t128 = binop( Iop_64HLtoV128, mkexpr (low_rev), mkexpr (high_rev) );
++      else
++         t128 = binop( Iop_64HLtoV128, mkexpr (high_rev), mkexpr (low_rev) );
+ 
+-         assign( byte[i+8], binop( Iop_Shl64,
+-                                   unop( Iop_8Uto64,
+-                                         load( Ity_I8, irx_addr ) ),
+-                                   mkU8( 8 * ( 7 - i ) ) ) );
+-         assign( tmp_hi[i+1], binop( Iop_Or64,
+-                                     mkexpr( byte[i+8] ),
+-                                     mkexpr( tmp_hi[i] ) ) );
+-      }
+-      putVSReg( XT, binop( Iop_64HLtoV128,
+-                           mkexpr( tmp_low[8] ), mkexpr( tmp_hi[8] ) ) );
++      putVSReg( XT, t128 );
+       break;
+    }
+ 
diff --git a/valgrind.spec b/valgrind.spec
index 6924a70..2c82094 100644
--- a/valgrind.spec
+++ b/valgrind.spec
@@ -141,6 +141,7 @@ Patch19: valgrind-3.14.0-ppc64-ldbrx.patch
 Patch20: valgrind-3.14.0-ppc64-unaligned-words.patch
 Patch21: valgrind-3.14.0-ppc64-lxvd2x.patch
 Patch22: valgrind-3.14.0-ppc64-unaligned-vecs.patch
+Patch23: valgrind-3.14.0-ppc64-lxvb16x.patch
 
 %if %{build_multilib}
 # Ensure glibc{,-devel} is installed for both multilib arches
@@ -296,6 +297,7 @@ Valgrind User Manual for details.
 %patch20 -p1
 %patch21 -p1
 %patch22 -p1
+%patch23 -p1
 
 %build
 CC=gcc
@@ -536,6 +538,7 @@ fi
 - Add valgrind-3.14.0-ppc64-unaligned-words.patch
 - Add valgrind-3.14.0-ppc64-lxvd2x.patch
 - Add valgrind-3.14.0-ppc64-unaligned-vecs.patch
+- Add valgrind-3.14.0-ppc64-lxvb16x.patch
 
 * Sat Dec 1 2018 Mark Wielaard - 3.14.0.5
 - Add valgrind-3.14.0-wcsncmp.patch (#1645971)
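
As a rough illustration of what the simplified IR above computes, here is a small
stand-alone C sketch of the little-endian host path: one 128-bit load, a byte
reversal within each 64-bit doubleword (Iop_Reverse8sIn64_x1), and a swap of the
two halves when reassembling with Iop_64HLtoV128. This is not Valgrind code; the
function name lxvb16x_le_sketch, the hi/lo output convention, and the use of
GCC/Clang's __builtin_bswap64 are choices made only for this example, and it
assumes it runs on a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch (little-endian host assumed) of the new IR sequence: one
   128-bit load, Iop_Reverse8sIn64_x1 on each 64-bit half, and the
   halves swapped when rebuilt with Iop_64HLtoV128. */
static void lxvb16x_le_sketch(const uint8_t *ea, uint64_t *hi, uint64_t *lo)
{
   uint64_t host_lo, host_hi;
   memcpy(&host_lo, ea,     8);        /* Iop_V128to64 of the host load   */
   memcpy(&host_hi, ea + 8, 8);        /* Iop_V128HIto64 of the host load */
   *hi = __builtin_bswap64(host_lo);   /* ea[0] becomes the most significant byte */
   *lo = __builtin_bswap64(host_hi);   /* ea[8] becomes the most significant byte */
}

int main(void)
{
   uint8_t mem[16];
   for (int i = 0; i < 16; i++)
      mem[i] = (uint8_t)i;

   uint64_t hi, lo;
   lxvb16x_le_sketch(mem, &hi, &lo);
   /* On a little-endian machine this prints
      0001020304050607 08090a0b0c0d0e0f: the byte at the lowest address
      ends up in the most significant byte of the result vector. */
   printf("%016llx %016llx\n", (unsigned long long)hi, (unsigned long long)lo);
   return 0;
}

That byte-0-to-most-significant-byte placement matches what the removed
byte-by-byte loop built up, but via a single wide load plus doubleword byte
reversals, which is the point of the patch.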