# commit be1e5d311342e08ae1f8013342df27b7ded2c156
# Author: Anton Blanchard <anton@au1.ibm.com>
# Date:   Sat Aug 17 18:34:40 2013 +0930
# 
#     PowerPC LE setjmp/longjmp
#     http://sourceware.org/ml/libc-alpha/2013-08/msg00089.html
#     
#     Little-endian fixes for setjmp/longjmp.  When writing these I noticed
#     the setjmp code corrupts the non-volatile VMX registers when using an
#     unaligned buffer.  Anton fixed this, and also simplified it quite a
#     bit.
#     
#     The current code uses boilerplate for the case where we want to store
#     16 bytes to an unaligned address.  For that we have to do a
#     read/modify/write of two aligned 16-byte quantities.  In our case we
#     are storing a bunch of back-to-back data (consecutive VMX registers),
#     and only the start and end of the region need the read/modify/write.
#     
#         [BZ #15723]
#         * sysdeps/powerpc/jmpbuf-offsets.h: Comment fix.
#         * sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Correct
#         _dl_hwcap access for little-endian.
#         * sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Likewise.  Don't
#         destroy vmx regs when saving unaligned.
#         * sysdeps/powerpc/powerpc64/__longjmp-common.S: Correct CR load.
#         * sysdeps/powerpc/powerpc64/setjmp-common.S: Likewise CR save.  Don't
#         destroy vmx regs when saving unaligned.
# 
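#     For reference, a minimal C sketch of the idea described above (not
#     part of the patch; all names here are invented for illustration).
#     Every store is an aligned 16-byte block; only the first and last
#     blocks need a read/modify/write, while each interior block merges
#     the tail of one register with the head of the next.  The assembly
#     below does the merging with lvsr/lvsl and vperm rather than memcpy.
# 
#         #include <stdint.h>
#         #include <string.h>
# 
#         /* Stand-ins for 16-byte aligned vector load/store.  */
#         static void load16 (uint8_t v[16], const uint8_t *p) { memcpy (v, p, 16); }
#         static void store16 (uint8_t *p, const uint8_t v[16]) { memcpy (p, v, 16); }
# 
#         /* Save n consecutive 16-byte registers to a possibly unaligned dst.  */
#         static void
#         save_unaligned (uint8_t *dst, const uint8_t regs[][16], int n)
#         {
#           unsigned int mis = (uintptr_t) dst & 0xf;   /* bytes of misalignment */
#           uint8_t *base = dst - mis;                   /* first aligned block */
#           uint8_t blk[16];
#           int k;
# 
#           if (mis == 0)
#             {
#               for (k = 0; k < n; k++)
#                 store16 (dst + 16 * k, regs[k]);
#               return;
#             }
# 
#           /* n + 1 aligned blocks are touched.  Block k holds the tail of
#              regs[k-1] and the head of regs[k]; blocks 0 and n also hold
#              bytes that must be preserved, so read them first.  */
#           for (k = 0; k <= n; k++)
#             {
#               uint8_t *p = base + 16 * k;
#               if (k == 0 || k == n)
#                 load16 (blk, p);                       /* read/modify/write at the ends */
#               if (k > 0)
#                 memcpy (blk, regs[k - 1] + (16 - mis), mis);
#               if (k < n)
#                 memcpy (blk + mis, regs[k], 16 - mis);
#               store16 (p, blk);
#             }
#         }
# 
#     In the patch itself the rotation is set up once with lvsr (for the
#     register data) and lvsl (for the two end blocks), so each aligned
#     store is just a vperm followed by an stvx.
# 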
diff -urN glibc-2.17-c758a686.orig/sysdeps/powerpc/jmpbuf-offsets.h glibc-2.17-c758a686.diff/sysdeps/powerpc/jmpbuf-offsets.h
--- glibc-2.17-c758a686.orig/sysdeps/powerpc/jmpbuf-offsets.h	2014-05-27 22:55:23.000000000 -0500
+++ glibc-2.17-c758a686.diff/sysdeps/powerpc/jmpbuf-offsets.h	2014-05-27 22:55:27.000000000 -0500
@@ -21,12 +21,10 @@
 #define JB_LR     2  /* The address we will return to */
 #if __WORDSIZE == 64
 # define JB_GPRS   3  /* GPRs 14 through 31 are saved, 18*2 words total.  */
-# define JB_CR     21 /* Condition code registers with the VRSAVE at */
-                       /* offset 172 (low half of the double word.  */
+# define JB_CR     21 /* Shared dword with VRSAVE.  CR word at offset 172.  */
 # define JB_FPRS   22 /* FPRs 14 through 31 are saved, 18*2 words total.  */
 # define JB_SIZE   (64 * 8) /* As per PPC64-VMX ABI.  */
-# define JB_VRSAVE 21 /* VRSAVE shares a double word with the CR at offset */
-                       /* 168 (high half of the double word).  */
+# define JB_VRSAVE 21 /* Shared dword with CR.  VRSAVE word at offset 168.  */
 # define JB_VRS    40 /* VRs 20 through 31 are saved, 12*4 words total.  */
 #else
 # define JB_GPRS   3  /* GPRs 14 through 31 are saved, 18 in total.  */
diff -urN glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
--- glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S	2014-05-27 22:55:23.000000000 -0500
+++ glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S	2014-05-27 22:55:27.000000000 -0500
@@ -46,16 +46,16 @@
 #   endif
 	mtlr    r6
 	cfi_same_value (lr)
-	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+4(r5)
+	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+LOWORD(r5)
 #  else
 	lwz     r5,_dl_hwcap@got(r5)
 	mtlr    r6
 	cfi_same_value (lr)
-	lwz     r5,4(r5)
+	lwz     r5,LOWORD(r5)
 #  endif
 # else
-	lis	r5,(_dl_hwcap+4)@ha
-	lwz     r5,(_dl_hwcap+4)@l(r5)
+	lis	r5,(_dl_hwcap+LOWORD)@ha
+	lwz     r5,(_dl_hwcap+LOWORD)@l(r5)
 # endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
 	beq	L(no_vmx)
diff -urN glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
--- glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S	2014-05-27 22:55:23.000000000 -0500
+++ glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S	2014-05-27 22:55:27.000000000 -0500
@@ -97,14 +97,14 @@
 #   else
 	lwz     r5,_rtld_global_ro@got(r5)
 #   endif
-	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+4(r5)
+	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+LOWORD(r5)
 #  else
 	lwz     r5,_dl_hwcap@got(r5)
-	lwz     r5,4(r5)
+	lwz     r5,LOWORD(r5)
 #  endif
 # else
-	lis	r6,(_dl_hwcap+4)@ha
-	lwz     r5,(_dl_hwcap+4)@l(r6)
+	lis	r6,(_dl_hwcap+LOWORD)@ha
+	lwz     r5,(_dl_hwcap+LOWORD)@l(r6)
 # endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
 	beq	L(no_vmx)
@@ -114,44 +114,43 @@
 	stw	r0,((JB_VRSAVE)*4)(3)
 	addi	r6,r5,16
 	beq+	L(aligned_save_vmx)
+
 	lvsr	v0,0,r5
-	vspltisb v1,-1         /* set v1 to all 1's */
-	vspltisb v2,0          /* set v2 to all 0's */
-	vperm   v3,v2,v1,v0   /* v3 contains shift mask with num all 1 bytes on left = misalignment  */
-
-
-	/* Special case for v20 we need to preserve what is in save area below v20 before obliterating it */
-	lvx     v5,0,r5
-	vperm   v20,v20,v20,v0
-	vsel    v5,v5,v20,v3
-	vsel    v20,v20,v2,v3
-	stvx    v5,0,r5
-
-#define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
-	addi    addgpr,addgpr,32; \
-	vperm   savevr,savevr,savevr,shiftvr; \
-	vsel    hivr,prev_savevr,savevr,maskvr; \
-	stvx    hivr,0,savegpr;
-
-	save_2vmx_partial(v21,v20,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v22,v21,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v23,v22,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v24,v23,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v25,v24,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v26,v25,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v27,v26,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v28,v27,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v29,v28,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v30,v29,v5,v0,v3,r5,r6)
-
-	/* Special case for r31 we need to preserve what is in save area above v31 before obliterating it */
-	addi    r5,r5,32
-	vperm   v31,v31,v31,v0
-	lvx     v4,0,r5
-	vsel    v5,v30,v31,v3
-	stvx    v5,0,r6
-	vsel    v4,v31,v4,v3
-	stvx    v4,0,r5
+	lvsl	v1,0,r5
+	addi	r6,r5,-16
+
+# define save_misaligned_vmx(savevr,prevvr,shiftvr,tmpvr,savegpr,addgpr) \
+	addi	addgpr,addgpr,32;					 \
+	vperm	tmpvr,prevvr,savevr,shiftvr;				 \
+	stvx	tmpvr,0,savegpr
+
+	/*
+	 * We have to be careful not to corrupt the data below v20 and
+	 * above v31. To keep things simple we just rotate both ends in
+	 * the opposite direction to our main permute so we can use
+	 * the common macro.
+	 */
+
+	/* load and rotate data below v20 */
+	lvx	v2,0,r5
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v20,v2,v0,v3,r5,r6)
+	save_misaligned_vmx(v21,v20,v0,v3,r6,r5)
+	save_misaligned_vmx(v22,v21,v0,v3,r5,r6)
+	save_misaligned_vmx(v23,v22,v0,v3,r6,r5)
+	save_misaligned_vmx(v24,v23,v0,v3,r5,r6)
+	save_misaligned_vmx(v25,v24,v0,v3,r6,r5)
+	save_misaligned_vmx(v26,v25,v0,v3,r5,r6)
+	save_misaligned_vmx(v27,v26,v0,v3,r6,r5)
+	save_misaligned_vmx(v28,v27,v0,v3,r5,r6)
+	save_misaligned_vmx(v29,v28,v0,v3,r6,r5)
+	save_misaligned_vmx(v30,v29,v0,v3,r5,r6)
+	save_misaligned_vmx(v31,v30,v0,v3,r6,r5)
+	/* load and rotate data above v31 */
+	lvx	v2,0,r6
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v2,v31,v0,v3,r5,r6)
+
 	b	L(no_vmx)
 
 L(aligned_save_vmx):
diff -urN glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc64/__longjmp-common.S glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc64/__longjmp-common.S
--- glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc64/__longjmp-common.S	2014-05-27 22:55:23.000000000 -0500
+++ glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc64/__longjmp-common.S	2014-05-27 22:55:27.000000000 -0500
@@ -60,7 +60,7 @@
 	beq	L(no_vmx)
 	la	r5,((JB_VRS)*8)(3)
 	andi.	r6,r5,0xf
-	lwz	r0,((JB_VRSAVE)*8)(3)
+	lwz	r0,((JB_VRSAVE)*8)(3)	/* 32-bit VRSAVE.  */
 	mtspr	VRSAVE,r0
 	beq+	L(aligned_restore_vmx)
 	addi    r6,r5,16
@@ -156,7 +156,7 @@
 	lfd fp21,((JB_FPRS+7)*8)(r3)
 	ld r22,((JB_GPRS+8)*8)(r3)
 	lfd fp22,((JB_FPRS+8)*8)(r3)
-	ld r0,(JB_CR*8)(r3)
+	lwz r0,((JB_CR*8)+4)(r3)	/* 32-bit CR.  */
 	ld r23,((JB_GPRS+9)*8)(r3)
 	lfd fp23,((JB_FPRS+9)*8)(r3)
 	ld r24,((JB_GPRS+10)*8)(r3)
diff -urN glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc64/setjmp-common.S glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc64/setjmp-common.S
--- glibc-2.17-c758a686.orig/sysdeps/powerpc/powerpc64/setjmp-common.S	2014-05-27 22:55:23.000000000 -0500
+++ glibc-2.17-c758a686.diff/sysdeps/powerpc/powerpc64/setjmp-common.S	2014-05-27 22:55:27.000000000 -0500
@@ -98,7 +98,7 @@
 	mfcr r0
 	std  r16,((JB_GPRS+2)*8)(3)
 	stfd fp16,((JB_FPRS+2)*8)(3)
-	std  r0,(JB_CR*8)(3)
+	stw  r0,((JB_CR*8)+4)(3)	/* 32-bit CR.  */
 	std  r17,((JB_GPRS+3)*8)(3)
 	stfd fp17,((JB_FPRS+3)*8)(3)
 	std  r18,((JB_GPRS+4)*8)(3)
@@ -142,50 +142,46 @@
 	la	r5,((JB_VRS)*8)(3)
 	andi.	r6,r5,0xf
 	mfspr	r0,VRSAVE
-	stw	r0,((JB_VRSAVE)*8)(3)
+	stw	r0,((JB_VRSAVE)*8)(3)	/* 32-bit VRSAVE.  */
 	addi	r6,r5,16
 	beq+	L(aligned_save_vmx)
+
 	lvsr	v0,0,r5
-	vspltisb v1,-1         /* set v1 to all 1's */
-	vspltisb v2,0          /* set v2 to all 0's */
-	vperm   v3,v2,v1,v0   /* v3 contains shift mask with num all 1 bytes
-				 on left = misalignment  */
-
-
-	/* Special case for v20 we need to preserve what is in save area
-	   below v20 before obliterating it */
-	lvx     v5,0,r5
-	vperm   v20,v20,v20,v0
-	vsel    v5,v5,v20,v3
-	vsel    v20,v20,v2,v3
-	stvx    v5,0,r5
-
-# define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
-	addi    addgpr,addgpr,32; \
-	vperm   savevr,savevr,savevr,shiftvr; \
-	vsel    hivr,prev_savevr,savevr,maskvr; \
-	stvx    hivr,0,savegpr;
-
-	save_2vmx_partial(v21,v20,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v22,v21,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v23,v22,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v24,v23,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v25,v24,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v26,v25,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v27,v26,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v28,v27,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v29,v28,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v30,v29,v5,v0,v3,r5,r6)
-
-	/* Special case for r31 we need to preserve what is in save area
-	   above v31 before obliterating it */
-	addi    r5,r5,32
-	vperm   v31,v31,v31,v0
-	lvx     v4,0,r5
-	vsel    v5,v30,v31,v3
-	stvx    v5,0,r6
-	vsel    v4,v31,v4,v3
-	stvx    v4,0,r5
+	lvsl	v1,0,r5
+	addi	r6,r5,-16
+
+# define save_misaligned_vmx(savevr,prevvr,shiftvr,tmpvr,savegpr,addgpr) \
+	addi	addgpr,addgpr,32;					 \
+	vperm	tmpvr,prevvr,savevr,shiftvr;				 \
+	stvx	tmpvr,0,savegpr
+
+	/*
+	 * We have to be careful not to corrupt the data below v20 and
+	 * above v31. To keep things simple we just rotate both ends in
+	 * the opposite direction to our main permute so we can use
+	 * the common macro.
+	 */
+
+	/* load and rotate data below v20 */
+	lvx	v2,0,r5
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v20,v2,v0,v3,r5,r6)
+	save_misaligned_vmx(v21,v20,v0,v3,r6,r5)
+	save_misaligned_vmx(v22,v21,v0,v3,r5,r6)
+	save_misaligned_vmx(v23,v22,v0,v3,r6,r5)
+	save_misaligned_vmx(v24,v23,v0,v3,r5,r6)
+	save_misaligned_vmx(v25,v24,v0,v3,r6,r5)
+	save_misaligned_vmx(v26,v25,v0,v3,r5,r6)
+	save_misaligned_vmx(v27,v26,v0,v3,r6,r5)
+	save_misaligned_vmx(v28,v27,v0,v3,r5,r6)
+	save_misaligned_vmx(v29,v28,v0,v3,r6,r5)
+	save_misaligned_vmx(v30,v29,v0,v3,r5,r6)
+	save_misaligned_vmx(v31,v30,v0,v3,r6,r5)
+	/* load and rotate data above v31 */
+	lvx	v2,0,r6
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v2,v31,v0,v3,r5,r6)
+
 	b	L(no_vmx)
 
 L(aligned_save_vmx):