Blame 0001-target-ppc-fix-vcipher-vcipherlast-vncipherlast-and-.patch

c5e576
From: Aurelien Jarno <aurelien@aurel32.net>
c5e576
Date: Sun, 13 Sep 2015 23:03:44 +0200
cf8819
Subject: [PATCH] target-ppc: fix vcipher, vcipherlast, vncipherlast and
c5e576
 vpermxor
c5e576
c5e576
For vector instructions, the helpers get pointers to the vector register
c5e576
in arguments. Some operands might point to the same register, including
c5e576
the operand holding the result.
c5e576
c5e576
When emulating instructions which access the vector elements in a
c5e576
non-linear way, we need to store the result in a temporary variable.
c5e576
c5e576
This fixes openssl when emulating a POWER8 CPU.
c5e576
c5e576
Cc: Tom Musta <tommusta@gmail.com>
c5e576
Cc: Alexander Graf <agraf@suse.de>
c5e576
Cc: qemu-stable@nongnu.org
c5e576
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
c5e576
---
c5e576
 target-ppc/int_helper.c | 19 ++++++++++++++-----
c5e576
 1 file changed, 14 insertions(+), 5 deletions(-)
c5e576
c5e576
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
c5e576
index 0a55d5e..b122868 100644
c5e576
--- a/target-ppc/int_helper.c
c5e576
+++ b/target-ppc/int_helper.c
c5e576
@@ -2327,24 +2327,28 @@ void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
c5e576
 
c5e576
 void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
c5e576
 {
c5e576
+    ppc_avr_t result;
c5e576
     int i;
c5e576
 
c5e576
     VECTOR_FOR_INORDER_I(i, u32) {
c5e576
-        r->AVRW(i) = b->AVRW(i) ^
c5e576
+        result.AVRW(i) = b->AVRW(i) ^
c5e576
             (AES_Te0[a->AVRB(AES_shifts[4*i + 0])] ^
c5e576
              AES_Te1[a->AVRB(AES_shifts[4*i + 1])] ^
c5e576
              AES_Te2[a->AVRB(AES_shifts[4*i + 2])] ^
c5e576
              AES_Te3[a->AVRB(AES_shifts[4*i + 3])]);
c5e576
     }
c5e576
+    *r = result;
c5e576
 }
c5e576
 
c5e576
 void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
c5e576
 {
c5e576
+    ppc_avr_t result;
c5e576
     int i;
c5e576
 
c5e576
     VECTOR_FOR_INORDER_I(i, u8) {
c5e576
-        r->AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
c5e576
+        result.AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
c5e576
     }
c5e576
+    *r = result;
c5e576
 }
c5e576
 
c5e576
 void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
c5e576
@@ -2369,11 +2373,13 @@ void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
c5e576
 
c5e576
 void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
c5e576
 {
c5e576
+    ppc_avr_t result;
c5e576
     int i;
c5e576
 
c5e576
     VECTOR_FOR_INORDER_I(i, u8) {
c5e576
-        r->AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
c5e576
+        result.AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
c5e576
     }
c5e576
+    *r = result;
c5e576
 }
c5e576
 
c5e576
 #define ROTRu32(v, n) (((v) >> (n)) | ((v) << (32-n)))
c5e576
@@ -2460,16 +2466,19 @@ void helper_vshasigmad(ppc_avr_t *r,  ppc_avr_t *a, uint32_t st_six)
c5e576
 
c5e576
 void helper_vpermxor(ppc_avr_t *r,  ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
c5e576
 {
c5e576
+    ppc_avr_t result;
c5e576
     int i;
c5e576
+
c5e576
     VECTOR_FOR_INORDER_I(i, u8) {
c5e576
         int indexA = c->u8[i] >> 4;
c5e576
         int indexB = c->u8[i] & 0xF;
c5e576
 #if defined(HOST_WORDS_BIGENDIAN)
c5e576
-        r->u8[i] = a->u8[indexA] ^ b->u8[indexB];
c5e576
+        result.u8[i] = a->u8[indexA] ^ b->u8[indexB];
c5e576
 #else
c5e576
-        r->u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
c5e576
+        result.u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
c5e576
 #endif
c5e576
     }
c5e576
+    *r = result;
c5e576
 }
c5e576
 
c5e576
 #undef VECTOR_FOR_INORDER_I