Backport of
commit ce6615c9c686acd34672a9f4eba9bcf5553496f6
Author: Adhemerval Zanella <azanella@linux.vnet.ibm.com>
Date: Sun Jan 11 19:33:17 2015 -0600

powerpc: Fix POWER7/PPC64 performance regression on LE

This patch fixes a performance regression in the POWER7/PPC64 memcmp
port for little endian. The LE code uses the 'ldbrx' instruction to read
memory in byte-reversed form; however, ISA 2.06 provides only the indexed
form of that instruction, which takes the offset from a register instead
of a fixed value encoded in the instruction.
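
For example (an illustrative sketch using the register names the patch
introduces below), 'ld' accepts a displacement encoded directly in the
instruction, while 'ldbrx' must take its offset from a register:

    ld    rWORD1, 8(rSTR1)        /* Displacement 8 is encoded in the
                                     instruction itself.  */
    ldbrx rWORD1, rOFF8, rSTR1    /* Indexed form only: the offset comes
                                     from rOFF8, which holds the
                                     constant 8.  */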

The LE port therefore used r0 (always zero) as the index value and
updated the address registers on each compare loop iteration. For large
compare sizes this adds 8 more instructions per iteration, plus some more
depending on the trailing size. This patch fixes it by using
pre-calculated index registers, removing the address updates from the
loops and from the trailing-size code.
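
Schematically, each doubleword load pair in the LE loops went from

    ldbrx rWORD1, 0, rSTR1
    ldbrx rWORD2, 0, rSTR2
    addi  rSTR1, rSTR1, 8
    addi  rSTR2, rSTR2, 8

to the indexed form used in the hunks below, where a single update per
32-byte iteration replaces the per-load address bumps:

    LD    rWORD1, rOFF8, rSTR1    /* LD is ldbrx on LE, ldx on BE.  */
    LD    rWORD2, rOFF8, rSTR2
    /* ... rOFF16 and rOFF24 pairs elided ...  */
    LD    rWORD7, rOFF32, rSTR1
    LD    rWORD8, rOFF32, rSTR2
    addi  rSTR1, rSTR1, 32        /* One address update per iteration.  */
    addi  rSTR2, rSTR2, 32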

For large sizes it shows a considerable gain, with performance roughly
doubling and now matching BE.

ChangeLog:
2015-01-13  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>

	* sysdeps/powerpc/powerpc64/power7/memcmp.S (memcmp): Fix performance
	regression on LE.

diff --git a/sysdeps/powerpc/powerpc64/power7/memcmp.S b/sysdeps/powerpc/powerpc64/power7/memcmp.S
index 09bff69..98b9e54 100644
--- a/sysdeps/powerpc/powerpc64/power7/memcmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/memcmp.S
@@ -26,18 +26,48 @@
EALIGN (memcmp, 4, 0)
CALL_MCOUNT 3

-#define rRTN r3
-#define rSTR1 r3 /* first string arg */
-#define rSTR2 r4 /* second string arg */
-#define rN r5 /* max string length */
-#define rWORD1 r6 /* current word in s1 */
-#define rWORD2 r7 /* current word in s2 */
-#define rWORD3 r8 /* next word in s1 */
-#define rWORD4 r9 /* next word in s2 */
-#define rWORD5 r10 /* next word in s1 */
-#define rWORD6 r11 /* next word in s2 */
-#define rWORD7 r30 /* next word in s1 */
-#define rWORD8 r31 /* next word in s2 */
+#define rRTN r3
+#define rSTR1 r3 /* first string arg */
+#define rSTR2 r4 /* second string arg */
+#define rN r5 /* max string length */
+#define rWORD1 r6 /* current word in s1 */
+#define rWORD2 r7 /* current word in s2 */
+#define rWORD3 r8 /* next word in s1 */
+#define rWORD4 r9 /* next word in s2 */
+#define rWORD5 r10 /* next word in s1 */
+#define rWORD6 r11 /* next word in s2 */
+
+#define rOFF8 r20 /* 8 bytes offset. */
+#define rOFF16 r21 /* 16 bytes offset. */
+#define rOFF24 r22 /* 24 bytes offset. */
+#define rOFF32 r23 /* 32 bytes offset. */
+#define rWORD6_SHIFT r24 /* Left rotation temp for rWORD8. */
+#define rWORD4_SHIFT r25 /* Left rotation temp for rWORD6. */
+#define rWORD2_SHIFT r26 /* Left rotation temp for rWORD4. */
+#define rWORD8_SHIFT r27 /* Left rotation temp for rWORD2. */
+#define rSHR r28 /* Unaligned shift right count. */
+#define rSHL r29 /* Unaligned shift left count. */
+#define rWORD7 r30 /* next word in s1 */
+#define rWORD8 r31 /* next word in s2 */
+
+#define rWORD8SAVE (-8)
+#define rWORD7SAVE (-16)
+#define rOFF8SAVE (-24)
+#define rOFF16SAVE (-32)
+#define rOFF24SAVE (-40)
+#define rOFF32SAVE (-48)
+#define rSHRSAVE (-56)
+#define rSHLSAVE (-64)
+#define rWORD8SHIFTSAVE (-72)
+#define rWORD2SHIFTSAVE (-80)
+#define rWORD4SHIFTSAVE (-88)
+#define rWORD6SHIFTSAVE (-96)
+
+#ifdef __LITTLE_ENDIAN__
+# define LD ldbrx
+#else
+# define LD ldx
+#endif

xor r0, rSTR2, rSTR1
cmpldi cr6, rN, 0
@@ -51,10 +81,24 @@ EALIGN (memcmp, 4, 0)
/* If less than 8 bytes or not aligned, use the unaligned
byte loop. */
blt cr1, L(bytealigned)
- std rWORD8, -8(r1)
- cfi_offset(rWORD8, -8)
- std rWORD7, -16(r1)
- cfi_offset(rWORD7, -16)
+ std rWORD8, rWORD8SAVE(r1)
+ cfi_offset(rWORD8, rWORD8SAVE)
+ std rWORD7, rWORD7SAVE(r1)
+ cfi_offset(rWORD7, rWORD7SAVE)
+ std rOFF8, rOFF8SAVE(r1)
+ cfi_offset(rWORD7, rOFF8SAVE)
+ std rOFF16, rOFF16SAVE(r1)
+ cfi_offset(rWORD7, rOFF16SAVE)
+ std rOFF24, rOFF24SAVE(r1)
+ cfi_offset(rWORD7, rOFF24SAVE)
+ std rOFF32, rOFF32SAVE(r1)
+ cfi_offset(rWORD7, rOFF32SAVE)
+
+ li rOFF8,8
+ li rOFF16,16
+ li rOFF24,24
+ li rOFF32,32
+
bne L(unaligned)
/* At this point we know both strings have the same alignment and the
compare length is at least 8 bytes. r12 contains the low order
@@ -79,15 +123,8 @@ L(samealignment):
sldi rWORD6, r12, 3
srdi r0, rN, 5 /* Divide by 32 */
andi. r12, rN, 24 /* Get the DW remainder */
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 0(rSTR1)
- ld rWORD2, 0(rSTR2)
-#endif
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
cmpldi cr1, r12, 16
cmpldi cr7, rN, 32
clrldi rN, rN, 61
@@ -104,15 +141,8 @@ L(dsP1):
cmpld cr5, rWORD5, rWORD6
blt cr7, L(dP1x)
/* Do something useful in this cycle since we have to branch anyway. */
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 8(rSTR1)
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
cmpld cr7, rWORD1, rWORD2
b L(dP1e)
/* Remainder is 16 */
@@ -123,15 +153,8 @@ L(dPs2):
cmpld cr6, rWORD5, rWORD6
blt cr7, L(dP2x)
/* Do something useful in this cycle since we have to branch anyway. */
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD7, 8(rSTR1)
- ld rWORD8, 8(rSTR2)
-#endif
+ LD rWORD7, rOFF8, rSTR1
+ LD rWORD8, rOFF8, rSTR2
cmpld cr5, rWORD7, rWORD8
b L(dP2e)
/* Remainder is 24 */
@@ -173,72 +196,43 @@ L(dP1):
change any on the early exit path. The key here is the non-early
exit path only cares about the condition code (cr5), not about which
register pair was used. */
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 0(rSTR1)
- ld rWORD6, 0(rSTR2)
-#endif
+ LD rWORD5, 0, rSTR1
+ LD rWORD6, 0, rSTR2
cmpld cr5, rWORD5, rWORD6
blt cr7, L(dP1x)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 8(rSTR1)
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
cmpld cr7, rWORD1, rWORD2
L(dP1e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 16(rSTR1)
- ld rWORD4, 16(rSTR2)
-#endif
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
cmpld cr1, rWORD3, rWORD4
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 24(rSTR1)
- ld rWORD6, 24(rSTR2)
-#endif
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
cmpld cr6, rWORD5, rWORD6
bne cr5, L(dLcr5x)
bne cr7, L(dLcr7x)

-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ldu rWORD7, 32(rSTR1)
- ldu rWORD8, 32(rSTR2)
-#endif
+ LD rWORD7, rOFF32, rSTR1
+ LD rWORD8, rOFF32, rSTR2
+ addi rSTR1, rSTR1, 32
+ addi rSTR2, rSTR2, 32
bne cr1, L(dLcr1)
cmpld cr5, rWORD7, rWORD8
bdnz L(dLoop)
bne cr6, L(dLcr6)
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
.align 3
L(dP1x):
sldi. r12, rN, 3
bne cr5, L(dLcr5x)
subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
bne L(d00)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 0
blr

@@ -246,79 +240,41 @@ L(dP1x):
.align 4
L(dP2):
mtctr r0
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 0(rSTR1)
- ld rWORD6, 0(rSTR2)
-#endif
+ LD rWORD5, 0, rSTR1
+ LD rWORD6, 0, rSTR2
cmpld cr6, rWORD5, rWORD6
blt cr7, L(dP2x)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD7, 8(rSTR1)
- ld rWORD8, 8(rSTR2)
-#endif
+ LD rWORD7, rOFF8, rSTR1
+ LD rWORD8, rOFF8, rSTR2
cmpld cr5, rWORD7, rWORD8
L(dP2e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 16(rSTR1)
- ld rWORD2, 16(rSTR2)
-#endif
+ LD rWORD1, rOFF16, rSTR1
+ LD rWORD2, rOFF16, rSTR2
cmpld cr7, rWORD1, rWORD2
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 24(rSTR1)
- ld rWORD4, 24(rSTR2)
-#endif
+ LD rWORD3, rOFF24, rSTR1
+ LD rWORD4, rOFF24, rSTR2
cmpld cr1, rWORD3, rWORD4
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 8
addi rSTR2, rSTR2, 8
-#endif
bne cr6, L(dLcr6)
bne cr5, L(dLcr5)
b L(dLoop2)
-/* Again we are on a early exit path (16-23 byte compare), we want to
- only use volatile registers and avoid restoring non-volatile
- registers. */
.align 4
L(dP2x):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 8(rSTR1)
- ld rWORD4, 8(rSTR2)
-#endif
+ LD rWORD3, rOFF8, rSTR1
+ LD rWORD4, rOFF8, rSTR2
cmpld cr1, rWORD3, rWORD4
sldi. r12, rN, 3
bne cr6, L(dLcr6x)
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 8
addi rSTR2, rSTR2, 8
-#endif
bne cr1, L(dLcr1x)
subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
bne L(d00)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 0
blr

@@ -326,52 +282,22 @@ L(dP2x):
.align 4
L(dP3):
mtctr r0
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 0(rSTR1)
- ld rWORD4, 0(rSTR2)
-#endif
+ LD rWORD3, 0, rSTR1
+ LD rWORD4, 0, rSTR2
cmpld cr1, rWORD3, rWORD4
L(dP3e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 8(rSTR1)
- ld rWORD6, 8(rSTR2)
-#endif
+ LD rWORD5, rOFF8, rSTR1
+ LD rWORD6, rOFF8, rSTR2
cmpld cr6, rWORD5, rWORD6
blt cr7, L(dP3x)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD7, 16(rSTR1)
- ld rWORD8, 16(rSTR2)
-#endif
+ LD rWORD7, rOFF16, rSTR1
+ LD rWORD8, rOFF16, rSTR2
cmpld cr5, rWORD7, rWORD8
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 24(rSTR1)
- ld rWORD2, 24(rSTR2)
-#endif
+ LD rWORD1, rOFF24, rSTR1
+ LD rWORD2, rOFF24, rSTR2
cmpld cr7, rWORD1, rWORD2
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 16
addi rSTR2, rSTR2, 16
-#endif
bne cr1, L(dLcr1)
bne cr6, L(dLcr6)
b L(dLoop1)
@@ -380,26 +306,21 @@ L(dP3e):
registers. */
.align 4
L(dP3x):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 16(rSTR1)
- ld rWORD2, 16(rSTR2)
-#endif
+ LD rWORD1, rOFF16, rSTR1
+ LD rWORD2, rOFF16, rSTR2
cmpld cr7, rWORD1, rWORD2
sldi. r12, rN, 3
bne cr1, L(dLcr1x)
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 16
addi rSTR2, rSTR2, 16
-#endif
bne cr6, L(dLcr6x)
subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
bne cr7, L(dLcr7x)
bne L(d00)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 0
blr

@@ -407,46 +328,20 @@ L(dP3x):
.align 4
L(dP4):
mtctr r0
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 0(rSTR1)
- ld rWORD2, 0(rSTR2)
-#endif
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
cmpld cr7, rWORD1, rWORD2
L(dP4e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 8(rSTR1)
- ld rWORD4, 8(rSTR2)
-#endif
+ LD rWORD3, rOFF8, rSTR1
+ LD rWORD4, rOFF8, rSTR2
cmpld cr1, rWORD3, rWORD4
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 16(rSTR1)
- ld rWORD6, 16(rSTR2)
-#endif
+ LD rWORD5, rOFF16, rSTR1
+ LD rWORD6, rOFF16, rSTR2
cmpld cr6, rWORD5, rWORD6
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ldu rWORD7, 24(rSTR1)
- ldu rWORD8, 24(rSTR2)
-#endif
+ LD rWORD7, rOFF24, rSTR1
+ LD rWORD8, rOFF24, rSTR2
+ addi rSTR1, rSTR1, 24
+ addi rSTR2, rSTR2, 24
cmpld cr5, rWORD7, rWORD8
bne cr7, L(dLcr7)
bne cr1, L(dLcr1)
@@ -454,51 +349,25 @@ L(dP4e):
/* This is the primary loop */
.align 4
L(dLoop):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 8(rSTR1)
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
cmpld cr1, rWORD3, rWORD4
bne cr6, L(dLcr6)
L(dLoop1):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 16(rSTR1)
- ld rWORD4, 16(rSTR2)
-#endif
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
cmpld cr6, rWORD5, rWORD6
bne cr5, L(dLcr5)
L(dLoop2):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 24(rSTR1)
- ld rWORD6, 24(rSTR2)
-#endif
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
cmpld cr5, rWORD7, rWORD8
bne cr7, L(dLcr7)
L(dLoop3):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ldu rWORD7, 32(rSTR1)
- ldu rWORD8, 32(rSTR2)
-#endif
+ LD rWORD7, rOFF32, rSTR1
+ LD rWORD8, rOFF32, rSTR2
+ addi rSTR1, rSTR1, 32
+ addi rSTR2, rSTR2, 32
bne cr1, L(dLcr1)
cmpld cr7, rWORD1, rWORD2
bdnz L(dLoop)
@@ -519,62 +388,75 @@ L(d14):
sldi. r12, rN, 3
bne cr5, L(dLcr5)
L(d04):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
- beq L(zeroLength)
+ beq L(duzeroLength)
/* At this point we have a remainder of 1 to 7 bytes to compare. Since
we are aligned it is safe to load the whole double word, and use
shift right double to eliminate bits beyond the compare length. */
L(d00):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 8(rSTR1)
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
srd rWORD1, rWORD1, rN
srd rWORD2, rWORD2, rN
cmpld cr7, rWORD1, rWORD2
bne cr7, L(dLcr7x)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 0
blr

.align 4
L(dLcr7):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
L(dLcr7x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 1
bgtlr cr7
li rRTN, -1
blr
.align 4
L(dLcr1):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
L(dLcr1x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 1
bgtlr cr1
li rRTN, -1
blr
.align 4
L(dLcr6):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
L(dLcr6x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 1
bgtlr cr6
li rRTN, -1
blr
.align 4
L(dLcr5):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
L(dLcr5x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 1
bgtlr cr5
li rRTN, -1
@@ -583,10 +465,6 @@ L(dLcr5x):
.align 4
L(bytealigned):
mtctr rN
-#if 0
-/* Huh? We've already branched on cr6! */
- beq cr6, L(zeroLength)
-#endif

/* We need to prime this loop. This loop is swing modulo scheduled
to avoid pipe delays. The dependent instruction latencies (load to
@@ -685,6 +563,7 @@ L(b11):
L(bx12):
sub rRTN, rWORD1, rWORD2
blr
+
.align 4
L(zeroLength):
li rRTN, 0
@@ -705,42 +584,36 @@ L(zeroLength):
we need to adjust the length (rN) and special case the loop
versioning for the first DW. This ensures that the loop count is
correct and the first DW (shifted) is in the expected resister pair. */
-#define rSHL r29 /* Unaligned shift left count. */
-#define rSHR r28 /* Unaligned shift right count. */
-#define rWORD8_SHIFT r27 /* Left rotation temp for rWORD2. */
-#define rWORD2_SHIFT r26 /* Left rotation temp for rWORD4. */
-#define rWORD4_SHIFT r25 /* Left rotation temp for rWORD6. */
-#define rWORD6_SHIFT r24 /* Left rotation temp for rWORD8. */
L(unaligned):
- std rSHL, -24(r1)
- cfi_offset(rSHL, -24)
+ std rSHL, rSHLSAVE(r1)
+ cfi_offset(rSHL, rSHLSAVE)
clrldi rSHL, rSTR2, 61
beq cr6, L(duzeroLength)
- std rSHR, -32(r1)
- cfi_offset(rSHR, -32)
+ std rSHR, rSHRSAVE(r1)
+ cfi_offset(rSHR, rSHRSAVE)
beq cr5, L(DWunaligned)
- std rWORD8_SHIFT, -40(r1)
- cfi_offset(rWORD8_SHIFT, -40)
+ std rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
/* Adjust the logical start of rSTR2 to compensate for the extra bits
in the 1st rSTR1 DW. */
sub rWORD8_SHIFT, rSTR2, r12
/* But do not attempt to address the DW before that DW that contains
the actual start of rSTR2. */
clrrdi rSTR2, rSTR2, 3
- std rWORD2_SHIFT, -48(r1)
- cfi_offset(rWORD2_SHIFT, -48)
+ std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
/* Compute the left/right shift counts for the unaligned rSTR2,
compensating for the logical (DW aligned) start of rSTR1. */
clrldi rSHL, rWORD8_SHIFT, 61
clrrdi rSTR1, rSTR1, 3
- std rWORD4_SHIFT, -56(r1)
- cfi_offset(rWORD4_SHIFT, -56)
+ std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
+ cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
sldi rSHL, rSHL, 3
cmpld cr5, rWORD8_SHIFT, rSTR2
add rN, rN, r12
sldi rWORD6, r12, 3
- std rWORD6_SHIFT, -64(r1)
- cfi_offset(rWORD6_SHIFT, -64)
+ std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
subfic rSHR, rSHL, 64
srdi r0, rN, 5 /* Divide by 32 */
andi. r12, rN, 24 /* Get the DW remainder */
@@ -750,25 +623,13 @@ L(unaligned):
this may cross a page boundary and cause a page fault. */
li rWORD8, 0
blt cr5, L(dus0)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD8, 0, rSTR2
+ LD rWORD8, 0, rSTR2
addi rSTR2, rSTR2, 8
-#else
- ld rWORD8, 0(rSTR2)
- addi rSTR2, rSTR2, 8
-#endif
sld rWORD8, rWORD8, rSHL

L(dus0):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 0(rSTR1)
- ld rWORD2, 0(rSTR2)
-#endif
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
cmpldi cr1, r12, 16
cmpldi cr7, rN, 32
srd r12, rWORD2, rSHR
@@ -796,12 +657,7 @@ L(dusP1):
beq L(duZeroReturn)
li r0, 0
ble cr7, L(dutrim)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD2, 0, rSTR2
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD2, rOFF8, rSTR2
srd r0, rWORD2, rSHR
b L(dutrim)
/* Remainder is 16 */
@@ -832,27 +688,21 @@ L(duPs4):
compare length is at least 8 bytes. */
.align 4
L(DWunaligned):
- std rWORD8_SHIFT, -40(r1)
- cfi_offset(rWORD8_SHIFT, -40)
+ std rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
clrrdi rSTR2, rSTR2, 3
- std rWORD2_SHIFT, -48(r1)
- cfi_offset(rWORD2_SHIFT, -48)
+ std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
srdi r0, rN, 5 /* Divide by 32 */
- std rWORD4_SHIFT, -56(r1)
- cfi_offset(rWORD4_SHIFT, -56)
+ std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
+ cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
andi. r12, rN, 24 /* Get the DW remainder */
- std rWORD6_SHIFT, -64(r1)
- cfi_offset(rWORD6_SHIFT, -64)
+ std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
sldi rSHL, rSHL, 3
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD6, 0, rSTR2
+ LD rWORD6, 0, rSTR2
+ LD rWORD8, rOFF8, rSTR2
addi rSTR2, rSTR2, 8
- ldbrx rWORD8, 0, rSTR2
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD6, 0(rSTR2)
- ldu rWORD8, 8(rSTR2)
-#endif
cmpldi cr1, r12, 16
cmpldi cr7, rN, 32
clrldi rN, rN, 61
@@ -867,52 +717,26 @@ L(DWunaligned):
.align 4
L(duP1):
srd r12, rWORD8, rSHR
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- addi rSTR1, rSTR1, 8
-#else
- ld rWORD7, 0(rSTR1)
-#endif
+ LD rWORD7, 0, rSTR1
sld rWORD8_SHIFT, rWORD8, rSHL
or rWORD8, r12, rWORD6_SHIFT
blt cr7, L(duP1x)
L(duP1e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 8(rSTR1)
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
cmpld cr5, rWORD7, rWORD8
srd r0, rWORD2, rSHR
sld rWORD2_SHIFT, rWORD2, rSHL
or rWORD2, r0, rWORD8_SHIFT
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 16(rSTR1)
- ld rWORD4, 16(rSTR2)
-#endif
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
cmpld cr7, rWORD1, rWORD2
srd r12, rWORD4, rSHR
sld rWORD4_SHIFT, rWORD4, rSHL
bne cr5, L(duLcr5)
or rWORD4, r12, rWORD2_SHIFT
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 24(rSTR1)
- ld rWORD6, 24(rSTR2)
-#endif
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
cmpld cr1, rWORD3, rWORD4
srd r0, rWORD6, rSHR
sld rWORD6_SHIFT, rWORD6, rSHL
@@ -932,82 +756,47 @@ L(duP1x):
beq L(duZeroReturn)
li r0, 0
ble cr7, L(dutrim)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD2, 0, rSTR2
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD2, rOFF8, rSTR2
srd r0, rWORD2, rSHR
b L(dutrim)
/* Remainder is 16 */
.align 4
L(duP2):
srd r0, rWORD8, rSHR
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- addi rSTR1, rSTR1, 8
-#else
- ld rWORD5, 0(rSTR1)
-#endif
+ LD rWORD5, 0, rSTR1
or rWORD6, r0, rWORD6_SHIFT
sld rWORD6_SHIFT, rWORD8, rSHL
L(duP2e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD7, 8(rSTR1)
- ld rWORD8, 8(rSTR2)
-#endif
+ LD rWORD7, rOFF8, rSTR1
+ LD rWORD8, rOFF8, rSTR2
cmpld cr6, rWORD5, rWORD6
srd r12, rWORD8, rSHR
sld rWORD8_SHIFT, rWORD8, rSHL
or rWORD8, r12, rWORD6_SHIFT
blt cr7, L(duP2x)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 16(rSTR1)
- ld rWORD2, 16(rSTR2)
-#endif
+ LD rWORD1, rOFF16, rSTR1
+ LD rWORD2, rOFF16, rSTR2
cmpld cr5, rWORD7, rWORD8
bne cr6, L(duLcr6)
srd r0, rWORD2, rSHR
sld rWORD2_SHIFT, rWORD2, rSHL
or rWORD2, r0, rWORD8_SHIFT
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 24(rSTR1)
- ld rWORD4, 24(rSTR2)
-#endif
+ LD rWORD3, rOFF24, rSTR1
+ LD rWORD4, rOFF24, rSTR2
cmpld cr7, rWORD1, rWORD2
bne cr5, L(duLcr5)
srd r12, rWORD4, rSHR
sld rWORD4_SHIFT, rWORD4, rSHL
or rWORD4, r12, rWORD2_SHIFT
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 8
addi rSTR2, rSTR2, 8
-#endif
cmpld cr1, rWORD3, rWORD4
b L(duLoop2)
.align 4
L(duP2x):
cmpld cr5, rWORD7, rWORD8
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 8
addi rSTR2, rSTR2, 8
-#endif
bne cr6, L(duLcr6)
sldi. rN, rN, 3
bne cr5, L(duLcr5)
@@ -1015,12 +804,7 @@ L(duP2x):
beq L(duZeroReturn)
li r0, 0
ble cr7, L(dutrim)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD2, 0, rSTR2
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD2, rOFF8, rSTR2
srd r0, rWORD2, rSHR
b L(dutrim)

@@ -1028,73 +812,39 @@ L(duP2x):
.align 4
L(duP3):
srd r12, rWORD8, rSHR
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- addi rSTR1, rSTR1, 8
-#else
- ld rWORD3, 0(rSTR1)
-#endif
+ LD rWORD3, 0, rSTR1
sld rWORD4_SHIFT, rWORD8, rSHL
or rWORD4, r12, rWORD6_SHIFT
L(duP3e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 8(rSTR1)
- ld rWORD6, 8(rSTR2)
-#endif
+ LD rWORD5, rOFF8, rSTR1
+ LD rWORD6, rOFF8, rSTR2
cmpld cr1, rWORD3, rWORD4
srd r0, rWORD6, rSHR
sld rWORD6_SHIFT, rWORD6, rSHL
or rWORD6, r0, rWORD4_SHIFT
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD7, 16(rSTR1)
- ld rWORD8, 16(rSTR2)
-#endif
+ LD rWORD7, rOFF16, rSTR1
+ LD rWORD8, rOFF16, rSTR2
cmpld cr6, rWORD5, rWORD6
bne cr1, L(duLcr1)
srd r12, rWORD8, rSHR
sld rWORD8_SHIFT, rWORD8, rSHL
or rWORD8, r12, rWORD6_SHIFT
blt cr7, L(duP3x)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 24(rSTR1)
- ld rWORD2, 24(rSTR2)
-#endif
+ LD rWORD1, rOFF24, rSTR1
+ LD rWORD2, rOFF24, rSTR2
cmpld cr5, rWORD7, rWORD8
bne cr6, L(duLcr6)
srd r0, rWORD2, rSHR
sld rWORD2_SHIFT, rWORD2, rSHL
or rWORD2, r0, rWORD8_SHIFT
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 16
addi rSTR2, rSTR2, 16
-#endif
cmpld cr7, rWORD1, rWORD2
b L(duLoop1)
.align 4
L(duP3x):
-#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 16
addi rSTR2, rSTR2, 16
-#endif
-#if 0
-/* Huh? We've already branched on cr1! */
- bne cr1, L(duLcr1)
-#endif
cmpld cr5, rWORD7, rWORD8
bne cr6, L(duLcr6)
sldi. rN, rN, 3
@@ -1103,12 +853,7 @@ L(duP3x):
beq L(duZeroReturn)
li r0, 0
ble cr7, L(dutrim)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD2, 0, rSTR2
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD2, rOFF8, rSTR2
srd r0, rWORD2, rSHR
b L(dutrim)

@@ -1117,51 +862,27 @@ L(duP3x):
L(duP4):
mtctr r0
srd r0, rWORD8, rSHR
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- addi rSTR1, rSTR1, 8
-#else
- ld rWORD1, 0(rSTR1)
-#endif
+ LD rWORD1, 0, rSTR1
sld rWORD2_SHIFT, rWORD8, rSHL
or rWORD2, r0, rWORD6_SHIFT
L(duP4e):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 8(rSTR1)
- ld rWORD4, 8(rSTR2)
-#endif
+ LD rWORD3, rOFF8, rSTR1
+ LD rWORD4, rOFF8, rSTR2
cmpld cr7, rWORD1, rWORD2
srd r12, rWORD4, rSHR
sld rWORD4_SHIFT, rWORD4, rSHL
or rWORD4, r12, rWORD2_SHIFT
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 16(rSTR1)
- ld rWORD6, 16(rSTR2)
-#endif
+ LD rWORD5, rOFF16, rSTR1
+ LD rWORD6, rOFF16, rSTR2
cmpld cr1, rWORD3, rWORD4
bne cr7, L(duLcr7)
srd r0, rWORD6, rSHR
sld rWORD6_SHIFT, rWORD6, rSHL
or rWORD6, r0, rWORD4_SHIFT
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ldu rWORD7, 24(rSTR1)
- ldu rWORD8, 24(rSTR2)
-#endif
+ LD rWORD7, rOFF24, rSTR1
+ LD rWORD8, rOFF24, rSTR2
+ addi rSTR1, rSTR1, 24
+ addi rSTR2, rSTR2, 24
cmpld cr6, rWORD5, rWORD6
bne cr1, L(duLcr1)
srd r12, rWORD8, rSHR
@@ -1172,60 +893,34 @@ L(duP4e):
/* This is the primary loop */
.align 4
L(duLoop):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
- ldbrx rWORD2, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD1, 8(rSTR1)
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
cmpld cr1, rWORD3, rWORD4
bne cr6, L(duLcr6)
srd r0, rWORD2, rSHR
sld rWORD2_SHIFT, rWORD2, rSHL
or rWORD2, r0, rWORD8_SHIFT
L(duLoop1):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD3, 0, rSTR1
- ldbrx rWORD4, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD3, 16(rSTR1)
- ld rWORD4, 16(rSTR2)
-#endif
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
cmpld cr6, rWORD5, rWORD6
bne cr5, L(duLcr5)
srd r12, rWORD4, rSHR
sld rWORD4_SHIFT, rWORD4, rSHL
or rWORD4, r12, rWORD2_SHIFT
L(duLoop2):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD5, 0, rSTR1
- ldbrx rWORD6, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD5, 24(rSTR1)
- ld rWORD6, 24(rSTR2)
-#endif
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
cmpld cr5, rWORD7, rWORD8
bne cr7, L(duLcr7)
srd r0, rWORD6, rSHR
sld rWORD6_SHIFT, rWORD6, rSHL
or rWORD6, r0, rWORD4_SHIFT
L(duLoop3):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD7, 0, rSTR1
- ldbrx rWORD8, 0, rSTR2
- addi rSTR1, rSTR1, 8
- addi rSTR2, rSTR2, 8
-#else
- ldu rWORD7, 32(rSTR1)
- ldu rWORD8, 32(rSTR2)
-#endif
+ LD rWORD7, rOFF32, rSTR1
+ LD rWORD8, rOFF32, rSTR2
+ addi rSTR1, rSTR1, 32
+ addi rSTR2, rSTR2, 32
cmpld cr7, rWORD1, rWORD2
bne cr1, L(duLcr1)
srd r12, rWORD8, rSHR
@@ -1234,10 +929,6 @@ L(duLoop3):
bdnz L(duLoop)

L(duL4):
-#if 0
-/* Huh? We've already branched on cr1! */
- bne cr1, L(duLcr1)
-#endif
cmpld cr1, rWORD3, rWORD4
bne cr6, L(duLcr6)
cmpld cr6, rWORD5, rWORD6
@@ -1264,99 +955,102 @@ L(du14):
beq L(duZeroReturn)
li r0, 0
ble cr7, L(dutrim)
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD2, 0, rSTR2
- addi rSTR2, rSTR2, 8
-#else
- ld rWORD2, 8(rSTR2)
-#endif
+ LD rWORD2, rOFF8, rSTR2
srd r0, rWORD2, rSHR
.align 4
L(dutrim):
-#ifdef __LITTLE_ENDIAN__
- ldbrx rWORD1, 0, rSTR1
-#else
- ld rWORD1, 8(rSTR1)
-#endif
+ LD rWORD1, rOFF8, rSTR1
ld rWORD8, -8(r1)
subfic rN, rN, 64 /* Shift count is 64 - (rN * 8). */
or rWORD2, r0, rWORD8_SHIFT
- ld rWORD7, -16(r1)
- ld rSHL, -24(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ ld rSHL, rSHLSAVE(r1)
srd rWORD1, rWORD1, rN
srd rWORD2, rWORD2, rN
- ld rSHR, -32(r1)
- ld rWORD8_SHIFT, -40(r1)
+ ld rSHR, rSHRSAVE(r1)
+ ld rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
li rRTN, 0
cmpld cr7, rWORD1, rWORD2
- ld rWORD2_SHIFT, -48(r1)
- ld rWORD4_SHIFT, -56(r1)
+ ld rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ ld rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
beq cr7, L(dureturn24)
li rRTN, 1
- ld rWORD6_SHIFT, -64(r1)
+ ld rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
bgtlr cr7
li rRTN, -1
blr
.align 4
L(duLcr7):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
li rRTN, 1
bgt cr7, L(dureturn29)
- ld rSHL, -24(r1)
- ld rSHR, -32(r1)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
li rRTN, -1
b L(dureturn27)
.align 4
L(duLcr1):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
li rRTN, 1
bgt cr1, L(dureturn29)
- ld rSHL, -24(r1)
- ld rSHR, -32(r1)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
li rRTN, -1
b L(dureturn27)
.align 4
L(duLcr6):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
li rRTN, 1
bgt cr6, L(dureturn29)
- ld rSHL, -24(r1)
- ld rSHR, -32(r1)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
li rRTN, -1
b L(dureturn27)
.align 4
L(duLcr5):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
li rRTN, 1
bgt cr5, L(dureturn29)
- ld rSHL, -24(r1)
- ld rSHR, -32(r1)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
li rRTN, -1
b L(dureturn27)
+
.align 3
L(duZeroReturn):
li rRTN, 0
.align 4
L(dureturn):
- ld rWORD8, -8(r1)
- ld rWORD7, -16(r1)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
L(dureturn29):
- ld rSHL, -24(r1)
- ld rSHR, -32(r1)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
L(dureturn27):
- ld rWORD8_SHIFT, -40(r1)
-L(dureturn26):
- ld rWORD2_SHIFT, -48(r1)
-L(dureturn25):
- ld rWORD4_SHIFT, -56(r1)
+ ld rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ ld rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ ld rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
L(dureturn24):
- ld rWORD6_SHIFT, -64(r1)
+ ld rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
blr
+
L(duzeroLength):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
li rRTN, 0
blr