commit e941e0ae80626b7661c1db8953a673cafd3b8b19
Author: Tulio Magno Quites Machado Filho <tuliom@linux.ibm.com>
Date:   Fri Apr 30 18:12:08 2021 -0300

    powerpc64le: Optimize memcpy for POWER10

    This implementation is based on __memcpy_power8_cached and integrates
    suggestions from Anton Blanchard.  It takes advantage of load and store
    with length instructions for short copies and for the tail code, which
    simplifies the code.

    All unaligned memory accesses use instructions that do not generate
    alignment interrupts on POWER10, making it safe to use on
    caching-inhibited memory.

    The main loop has also been restructured to increase instruction
    throughput by reducing the dependency on updates from previous
    iterations.

    On average, this implementation is around 30% faster than
    __memcpy_power7 and 10% faster than __memcpy_power8_cached.
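
A note for readers (illustrative, not part of the patch): the "load and store
with length" instructions the message refers to are lxvl/stxvl, used below for
the head and tail copies.  A minimal C sketch of the same pattern, assuming
GCC's ISA 3.0 built-ins vec_xl_len/vec_xst_len (compile with -mcpu=power9 or
later); the helper name is made up for illustration:

    #include <altivec.h>
    #include <stddef.h>

    /* Copy LEN bytes (LEN <= 16) with one length-controlled load/store pair,
       mirroring the lxvl/stxvl pair at the top of the new memcpy.  */
    static inline void
    copy_le_16 (unsigned char *dst, const unsigned char *src, size_t len)
    {
      /* Exactly LEN bytes are accessed, so nothing past the buffers is
         touched even though a full vector register is involved.  */
      vector unsigned char v = vec_xl_len ((unsigned char *) src, len);
      vec_xst_len (v, dst, len);
    }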

diff --git a/sysdeps/powerpc/powerpc64/le/power10/memcpy.S b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
new file mode 100644
index 0000000000000000..ad1414db4a3a8b9f
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
@@ -0,0 +1,198 @@
+/* Optimized memcpy implementation for POWER10.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+	   Returns 'dst'.  */
+
+	.machine power9
+ENTRY_TOCLESS (MEMCPY, 5)
+	CALL_MCOUNT 3
+
+	/* Copy up to 16 bytes.  */
+	sldi	r6,r5,56	/* Prepare [l|st]xvl counter.  */
+	lxvl	v10,r4,r6
+	stxvl	v10,r3,r6
+	subic.	r6,r5,16	/* Return if len <= 16.  */
+	blelr
+
+	/* If len >= 256, assume nothing got copied before and copy
+	   again.  This might cause issues with overlapping memory, but
+	   memcpy is not required to handle overlapping buffers.  */
+	cmpdi	r5,256
+	bge	L(copy_ge_256)
+	/* 16 < len < 256 and the first 16 bytes have already been copied.  */
+	addi	r10,r3,16	/* Keep r3 intact as return value.  */
+	addi	r4,r4,16
+	subi	r5,r5,16
+	b	L(copy_lt_256)	/* Avoid the main loop if len < 256.  */
+
+	.p2align 5
+L(copy_ge_256):
+	mr	r10,r3		/* Keep r3 intact as return value.  */
+	/* Align dst to 16 bytes.  */
+	andi.	r9,r10,0xf
+	beq	L(dst_is_align_16)
+	lxv	v10,0(r4)
+	subfic	r12,r9,16
+	subf	r5,r12,r5
+	add	r4,r4,r12
+	stxv	v10,0(r3)
+	add	r10,r3,r12
+
+L(dst_is_align_16):
+	srdi	r9,r5,7		/* Divide by 128.  */
+	mtctr	r9
+	addi	r6,r4,64
+	addi	r7,r10,64
+
+
+	/* Main loop, copy 128 bytes per iteration.
+	   Use r6=src+64 and r7=dest+64 in order to reduce the dependency on
+	   r4 and r10.  */
+	.p2align 5
+L(copy_128):
+
+	lxv	v10, 0(r4)
+	lxv	v11, 16(r4)
+	lxv	v12, 32(r4)
+	lxv	v13, 48(r4)
+
+	addi	r4,r4,128
+
+	stxv	v10, 0(r10)
+	stxv	v11, 16(r10)
+	stxv	v12, 32(r10)
+	stxv	v13, 48(r10)
+
+	addi	r10,r10,128
+
+	lxv	v10, 0(r6)
+	lxv	v11, 16(r6)
+	lxv	v12, 32(r6)
+	lxv	v13, 48(r6)
+
+	addi	r6,r6,128
+
+	stxv	v10, 0(r7)
+	stxv	v11, 16(r7)
+	stxv	v12, 32(r7)
+	stxv	v13, 48(r7)
+
+	addi	r7,r7,128
+
+	bdnz	L(copy_128)
+
+	clrldi.	r5,r5,64-7	/* Have we copied everything?  */
+	beqlr
+
+	.p2align 5
+L(copy_lt_256):
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+	srdi.	r9,r5,5		/* Divide by 32.  */
+	beq	L(copy_lt_32)
+	mtctr	r9
+	/* Use r6=src+32, r7=dest+32, r8=src+64, r9=dest+64 in order to reduce
+	   the dependency on r4 and r10.  */
+	addi	r6,r4,32
+	addi	r7,r10,32
+	addi	r8,r4,64
+	addi	r9,r10,64
+
+	.p2align 5
+	/* Copy 32 bytes at a time, unaligned.
+	   The loop is unrolled 3 times in order to reduce the dependency on
+	   r4 and r10, copying up to 96 bytes per iteration.  */
+L(copy_32):
+	lxv	v10, 0(r4)
+	lxv	v11, 16(r4)
+	stxv	v10, 0(r10)
+	stxv	v11, 16(r10)
+	bdz	L(end_copy_32a)
+	addi	r4,r4,96
+	addi	r10,r10,96
+
+	lxv	v10, 0(r6)
+	lxv	v11, 16(r6)
+	addi	r6,r6,96
+	stxv	v10, 0(r7)
+	stxv	v11, 16(r7)
+	bdz	L(end_copy_32b)
+	addi	r7,r7,96
+
+	lxv	v12, 0(r8)
+	lxv	v13, 16(r8)
+	addi	r8,r8,96
+	stxv	v12, 0(r9)
+	stxv	v13, 16(r9)
+	addi	r9,r9,96
+	bdnz	L(copy_32)
+
+	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
+	beqlr
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+	b	L(copy_lt_32)
+
+	.p2align 5
+L(end_copy_32a):
+	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
+	beqlr
+	/* 32 bytes have been copied since the last update of r4 and r10.  */
+	addi	r4,r4,32
+	addi	r10,r10,32
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+	b	L(copy_lt_32)
+
+	.p2align 5
+L(end_copy_32b):
+	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
+	beqlr
+	/* The last iteration of the loop copied 64 bytes.  Update r4 and r10
+	   accordingly.  */
+	addi	r4,r4,-32
+	addi	r10,r10,-32
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+
+	.p2align 5
+L(copy_lt_32):
+	lxv	v10, 0(r4)
+	stxv	v10, 0(r10)
+	addi	r4,r4,16
+	addi	r10,r10,16
+	subi	r5,r5,16
+
+	.p2align 5
+L(copy_le_16):
+	sldi	r6,r5,56
+	lxvl	v10,r4,r6
+	stxvl	v10,r10,r6
+	blr
+
+
+END_GEN_TB (MEMCPY,TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
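
A note for readers (illustrative, not part of the patch): the main loop above
keeps two independent pointer pairs (r4/r10 and r6 = src + 64 / r7 = dst + 64)
so that the second group of loads and stores does not have to wait for the
pointer updates made for the first group.  A rough C equivalent of that
structure, assuming GCC's vec_xl/vec_xst VSX built-ins and a length that is
already a multiple of 128 (helper name and simplifications are made up for
illustration):

    #include <altivec.h>
    #include <stddef.h>

    typedef vector unsigned char vec_t;

    /* Copy LEN bytes (LEN % 128 == 0) in 128-byte chunks, using two pointer
       pairs so that each half of an iteration advances only its own pair.  */
    static void
    copy_128_loop (unsigned char *dst, const unsigned char *src, size_t len)
    {
      const unsigned char *src2 = src + 64;
      unsigned char *dst2 = dst + 64;

      for (size_t i = len / 128; i > 0; i--)
        {
          /* First 64 bytes: only src/dst are read and updated.  */
          vec_t a = vec_xl (0, src), b = vec_xl (16, src);
          vec_t c = vec_xl (32, src), d = vec_xl (48, src);
          src += 128;
          vec_xst (a, 0, dst); vec_xst (b, 16, dst);
          vec_xst (c, 32, dst); vec_xst (d, 48, dst);
          dst += 128;

          /* Second 64 bytes: only src2/dst2 are used, so these loads do not
             depend on the src/dst updates made just above.  */
          a = vec_xl (0, src2); b = vec_xl (16, src2);
          c = vec_xl (32, src2); d = vec_xl (48, src2);
          src2 += 128;
          vec_xst (a, 0, dst2); vec_xst (b, 16, dst2);
          vec_xst (c, 32, dst2); vec_xst (d, 48, dst2);
          dst2 += 128;
        }
    }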
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 66f8c6ace9824d4a..2e3c8f2e8a81cda4 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -32,7 +32,7 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
 		   strncase-power8
 
 ifneq (,$(filter %le,$(config-machine)))
-sysdep_routines += memmove-power10 \
+sysdep_routines += memcpy-power10 memmove-power10 \
 		   strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
 		   rawmemchr-power9 strlen-power9 strncpy-power9 stpncpy-power9 \
 		   strlen-power10
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 4ce04bc51574cca1..9d5a14e480c02171 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -51,6 +51,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/powerpc/powerpc64/multiarch/memcpy.c.  */
   IFUNC_IMPL (i, name, memcpy,
+#ifdef __LITTLE_ENDIAN__
+	      IFUNC_IMPL_ADD (array, i, memcpy,
+			      hwcap2 & PPC_FEATURE2_ARCH_3_1
+			      && hwcap & PPC_FEATURE_HAS_VSX,
+			      __memcpy_power10)
+#endif
 	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap2 & PPC_FEATURE2_ARCH_2_07,
 			      __memcpy_power8_cached)
 	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
new file mode 100644
index 0000000000000000..70e0fc3ed610cdc3
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
@@ -0,0 +1,26 @@
+/* Optimized memcpy implementation for POWER10.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define MEMCPY __memcpy_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/memcpy.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
index 44dea594f3770673..be0e47f32dde2ccf 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
@@ -36,8 +36,15 @@ extern __typeof (__redirect_memcpy) __memcpy_power6 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_a2 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_power7 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_power8_cached attribute_hidden;
+# if defined __LITTLE_ENDIAN__
+extern __typeof (__redirect_memcpy) __memcpy_power10 attribute_hidden;
+# endif
 
 libc_ifunc (__libc_memcpy,
+# if defined __LITTLE_ENDIAN__
+	    (hwcap2 & PPC_FEATURE2_ARCH_3_1 && hwcap & PPC_FEATURE_HAS_VSX)
+	    ? __memcpy_power10 :
+# endif
 	    ((hwcap2 & PPC_FEATURE2_ARCH_2_07) && use_cached_memopt)
 	    ? __memcpy_power8_cached :
 	      (hwcap & PPC_FEATURE_HAS_VSX)
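
A note for readers (illustrative, not part of the patch): the runtime condition
added above, PPC_FEATURE2_ARCH_3_1 together with PPC_FEATURE_HAS_VSX, can also
be checked from application code via getauxval, assuming a glibc and kernel
recent enough to define and report these bits:

    #include <stdio.h>
    #include <sys/auxv.h>	/* getauxval; pulls in the PPC_FEATURE_* bits.  */

    int
    main (void)
    {
      unsigned long hwcap = getauxval (AT_HWCAP);
      unsigned long hwcap2 = getauxval (AT_HWCAP2);

      /* The same test the ifunc selection above performs.  */
      if ((hwcap2 & PPC_FEATURE2_ARCH_3_1) && (hwcap & PPC_FEATURE_HAS_VSX))
        puts ("memcpy would resolve to __memcpy_power10");
      else
        puts ("memcpy would fall back to an older variant");
      return 0;
    }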