9bb5d6
commit dd59655e9371af86043b97e38953f43bd9496699
9bb5d6
Author: Lucas A. M. Magalhaes <lamm@linux.ibm.com>
9bb5d6
Date:   Fri Apr 30 18:12:08 2021 -0300
9bb5d6
9bb5d6
    powerpc64le: Optimized memmove for POWER10
9bb5d6
    
9bb5d6
    This patch was initially based on the __memmove_power7 with some ideas
9bb5d6
    from strncpy implementation for Power 9.
9bb5d6
    
9bb5d6
    Improvements from __memmove_power7:
9bb5d6
    
9bb5d6
    1. Use lxvl/stxvl for alignment code.
9bb5d6
    
9bb5d6
       The code for Power 7 uses branches when the input is not naturally
9bb5d6
       aligned to the width of a vector. The new implementation uses
9bb5d6
       lxvl/stxvl instead which reduces pressure on GPRs. It also allows
9bb5d6
       the removal of branch instructions, implicitly removing branch stalls
9bb5d6
       and mispredictions.
9bb5d6
    
9bb5d6
    2. Use of lxv/stxv and lxvl/stxvl pair is safe to use on Cache Inhibited
9bb5d6
       memory.
9bb5d6
    
9bb5d6
       On Power 10 vector load and stores are safe to use on CI memory for
9bb5d6
       addresses unaligned to 16B. This code takes advantage of this to
9bb5d6
       do unaligned loads.
9bb5d6
    
9bb5d6
       The unaligned loads don't have a significant performance impact by
9bb5d6
       themselves. However doing so decreases register pressure on GPRs
9bb5d6
       and interdependence stalls on load/store pairs. This also improved
9bb5d6
       readability as there are now fewer code paths for different alignments.
9bb5d6
       Finally this reduces the overall code size.
9bb5d6
    
9bb5d6
    3. Improved performance.
9bb5d6
    
9bb5d6
       This version runs on average about 30% better than memmove_power7
9bb5d6
       for lengths larger than 8KB. For input lengths shorter than 8KB
9bb5d6
       the improvement is smaller, it has on average about 17% better
9bb5d6
       performance.
9bb5d6
    
9bb5d6
       This version has a degradation of about 50% for input lengths
9bb5d6
       in the 0 to 31 bytes range when dest is unaligned.
9bb5d6
    
9bb5d6
    Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@linux.ibm.com>
9bb5d6
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/le/power10/memmove.S b/sysdeps/powerpc/powerpc64/le/power10/memmove.S
9bb5d6
new file mode 100644
9bb5d6
index 0000000000000000..7dfd57edeb37e8e4
9bb5d6
--- /dev/null
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/le/power10/memmove.S
9bb5d6
@@ -0,0 +1,320 @@
9bb5d6
+/* Optimized memmove implementation for POWER10.
9bb5d6
+   Copyright (C) 2021 Free Software Foundation, Inc.
9bb5d6
+   This file is part of the GNU C Library.
9bb5d6
+
9bb5d6
+   The GNU C Library is free software; you can redistribute it and/or
9bb5d6
+   modify it under the terms of the GNU Lesser General Public
9bb5d6
+   License as published by the Free Software Foundation; either
9bb5d6
+   version 2.1 of the License, or (at your option) any later version.
9bb5d6
+
9bb5d6
+   The GNU C Library is distributed in the hope that it will be useful,
9bb5d6
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
9bb5d6
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
9bb5d6
+   Lesser General Public License for more details.
9bb5d6
+
9bb5d6
+   You should have received a copy of the GNU Lesser General Public
9bb5d6
+   License along with the GNU C Library; if not, see
9bb5d6
+   <https://www.gnu.org/licenses/>.  */
9bb5d6
+
9bb5d6
+#include <sysdep.h>
9bb5d6
+
9bb5d6
+
9bb5d6
+/* void* [r3] memmove (void *dest [r3], const void *src [r4], size_t len [r5])
9bb5d6
+
9bb5d6
+   This optimization checks if 'src' and 'dst' overlap.  If they do not
9bb5d6
+   or 'src' is ahead of 'dest' then it copies forward.
9bb5d6
+   Otherwise, an optimized backward copy is used.  */
9bb5d6
+
9bb5d6
+#ifndef MEMMOVE
9bb5d6
+# define MEMMOVE memmove
9bb5d6
+#endif
9bb5d6
+	.machine power9
9bb5d6
+ENTRY_TOCLESS (MEMMOVE, 5)
9bb5d6
+	CALL_MCOUNT 3
9bb5d6
+
9bb5d6
+L(_memmove):
9bb5d6
+	.p2align 5
9bb5d6
+	/* Check if there is overlap, if so it will branch to backward copy.  */
9bb5d6
+	subf	r9,r4,r3
9bb5d6
+	cmpld	cr7,r9,r5
9bb5d6
+	blt	cr7,L(memmove_bwd)
9bb5d6
+
9bb5d6
+	/* Fast path for length shorter than 16 bytes.  */
9bb5d6
+	sldi	r7,r5,56
9bb5d6
+	lxvl	32+v2,r4,r7
9bb5d6
+	stxvl	32+v2,r3,r7
9bb5d6
+	subic.	r8,r5,16
9bb5d6
+	blelr
9bb5d6
+
9bb5d6
+	/* For shorter lengths aligning the dest address to 16 bytes either
9bb5d6
+	   decreases performance or is irrelevant.  I'm making use of this
9bb5d6
+	   comparison to skip the alignment code.  */
9bb5d6
+	cmpldi	cr6,r5,256
9bb5d6
+	bge	cr6,L(ge_256)
9bb5d6
+	/* Account for the first 16-byte copy.  */
9bb5d6
+	addi	r4,r4,16
9bb5d6
+	addi	r11,r3,16	/* use r11 to keep dest address on r3.  */
9bb5d6
+	subi	r5,r5,16
9bb5d6
+	b	L(loop_head)
9bb5d6
+
9bb5d6
+	.p2align 5
9bb5d6
+L(ge_256):
9bb5d6
+	/* Account for the first copy <= 16 bytes.  This is necessary for
9bb5d6
+	   memmove because at this point the src address can be in front of the
9bb5d6
+	   dest address.  */
9bb5d6
+	clrldi	r9,r5,56
9bb5d6
+	li	r8,16
9bb5d6
+	cmpldi	r9,16
9bb5d6
+	iselgt	r9,r8,r9
9bb5d6
+	add	r4,r4,r9
9bb5d6
+	add	r11,r3,r9	/* use r11 to keep dest address on r3.  */
9bb5d6
+	sub	r5,r5,r9
9bb5d6
+
9bb5d6
+	/* Align dest to 16 bytes.  */
9bb5d6
+	neg	r7,r3
9bb5d6
+	clrldi.	r9,r7,60
9bb5d6
+	beq	L(loop_head)
9bb5d6
+
9bb5d6
+	.p2align 5
9bb5d6
+	sldi	r6,r9,56
9bb5d6
+	lxvl	32+v0,r4,r6
9bb5d6
+	stxvl	32+v0,r11,r6
9bb5d6
+	sub	r5,r5,r9
9bb5d6
+	add	r4,r4,r9
9bb5d6
+	add	r11,r11,r9
9bb5d6
+
9bb5d6
+L(loop_head):
9bb5d6
+	cmpldi	r5,63
9bb5d6
+	ble	L(final_64)
9bb5d6
+
9bb5d6
+	srdi.	r7,r5,7
9bb5d6
+	beq	L(loop_tail)
9bb5d6
+
9bb5d6
+	mtctr	r7
9bb5d6
+
9bb5d6
+/* Main loop that copies 128 bytes each iteration.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(loop):
9bb5d6
+	addi	r9,r4,64
9bb5d6
+	addi	r10,r11,64
9bb5d6
+
9bb5d6
+	lxv	32+v0,0(r4)
9bb5d6
+	lxv	32+v1,16(r4)
9bb5d6
+	lxv	32+v2,32(r4)
9bb5d6
+	lxv	32+v3,48(r4)
9bb5d6
+
9bb5d6
+	stxv	32+v0,0(r11)
9bb5d6
+	stxv	32+v1,16(r11)
9bb5d6
+	stxv	32+v2,32(r11)
9bb5d6
+	stxv	32+v3,48(r11)
9bb5d6
+
9bb5d6
+	addi	r4,r4,128
9bb5d6
+	addi	r11,r11,128
9bb5d6
+
9bb5d6
+	lxv	32+v4,0(r9)
9bb5d6
+	lxv	32+v5,16(r9)
9bb5d6
+	lxv	32+v6,32(r9)
9bb5d6
+	lxv	32+v7,48(r9)
9bb5d6
+
9bb5d6
+	stxv	32+v4,0(r10)
9bb5d6
+	stxv	32+v5,16(r10)
9bb5d6
+	stxv	32+v6,32(r10)
9bb5d6
+	stxv	32+v7,48(r10)
9bb5d6
+
9bb5d6
+	bdnz	L(loop)
9bb5d6
+	clrldi.	r5,r5,57
9bb5d6
+	beqlr
9bb5d6
+
9bb5d6
+/* Copy 64 bytes.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(loop_tail):
9bb5d6
+	cmpldi 	cr5,r5,63
9bb5d6
+	ble	cr5,L(final_64)
9bb5d6
+
9bb5d6
+	lxv	32+v0,0(r4)
9bb5d6
+	lxv	32+v1,16(r4)
9bb5d6
+	lxv	32+v2,32(r4)
9bb5d6
+	lxv	32+v3,48(r4)
9bb5d6
+
9bb5d6
+	stxv	32+v0,0(r11)
9bb5d6
+	stxv	32+v1,16(r11)
9bb5d6
+	stxv	32+v2,32(r11)
9bb5d6
+	stxv	32+v3,48(r11)
9bb5d6
+
9bb5d6
+	addi	r4,r4,64
9bb5d6
+	addi	r11,r11,64
9bb5d6
+	subi	r5,r5,64
9bb5d6
+
9bb5d6
+/* Copies the last 1-63 bytes.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(final_64):
9bb5d6
+	/* r8 holds the number of bytes that will be copied with lxv/stxv.  */
9bb5d6
+	clrrdi.	r8,r5,4
9bb5d6
+	beq	L(tail1)
9bb5d6
+
9bb5d6
+	cmpldi  cr5,r5,32
9bb5d6
+	lxv	32+v0,0(r4)
9bb5d6
+	blt	cr5,L(tail2)
9bb5d6
+
9bb5d6
+	cmpldi	cr6,r5,48
9bb5d6
+	lxv	32+v1,16(r4)
9bb5d6
+	blt	cr6,L(tail3)
9bb5d6
+
9bb5d6
+	.p2align 5
9bb5d6
+	lxv	32+v2,32(r4)
9bb5d6
+	stxv	32+v2,32(r11)
9bb5d6
+L(tail3):
9bb5d6
+	stxv	32+v1,16(r11)
9bb5d6
+L(tail2):
9bb5d6
+	stxv	32+v0,0(r11)
9bb5d6
+	sub	r5,r5,r8
9bb5d6
+	add	r4,r4,r8
9bb5d6
+	add	r11,r11,r8
9bb5d6
+	.p2align 5
9bb5d6
+L(tail1):
9bb5d6
+	sldi	r6,r5,56
9bb5d6
+	lxvl	v4,r4,r6
9bb5d6
+	stxvl	v4,r11,r6
9bb5d6
+	blr
9bb5d6
+
9bb5d6
+/* If dest and src overlap, we should copy backwards.  */
9bb5d6
+L(memmove_bwd):
9bb5d6
+	add	r11,r3,r5
9bb5d6
+	add	r4,r4,r5
9bb5d6
+
9bb5d6
+	/* Optimization for length smaller than 16 bytes.  */
9bb5d6
+	cmpldi	cr5,r5,15
9bb5d6
+	ble	cr5,L(tail1_bwd)
9bb5d6
+
9bb5d6
+	/* For shorter lengths the alignment either slows down or is irrelevant.
9bb5d6
+	   The forward copy already needs a comparison against 256 for that.  Here
9bb5d6
+	   it's using 128 as it will reduce code and improve readability.  */
9bb5d6
+	cmpldi	cr7,r5,128
9bb5d6
+	blt	cr7,L(bwd_loop_tail)
9bb5d6
+
9bb5d6
+	/* Align dest address to 16 bytes.  */
9bb5d6
+	.p2align 5
9bb5d6
+	clrldi.	r9,r11,60
9bb5d6
+	beq	L(bwd_loop_head)
9bb5d6
+	sub	r4,r4,r9
9bb5d6
+	sub	r11,r11,r9
9bb5d6
+	lxv	32+v0,0(r4)
9bb5d6
+	sldi	r6,r9,56
9bb5d6
+	stxvl   32+v0,r11,r6
9bb5d6
+	sub	r5,r5,r9
9bb5d6
+
9bb5d6
+L(bwd_loop_head):
9bb5d6
+	srdi.	r7,r5,7
9bb5d6
+	beq	L(bwd_loop_tail)
9bb5d6
+
9bb5d6
+	mtctr	r7
9bb5d6
+
9bb5d6
+/* Main loop that copies 128 bytes every iteration.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(bwd_loop):
9bb5d6
+	addi	r9,r4,-64
9bb5d6
+	addi	r10,r11,-64
9bb5d6
+
9bb5d6
+	lxv	32+v0,-16(r4)
9bb5d6
+	lxv	32+v1,-32(r4)
9bb5d6
+	lxv	32+v2,-48(r4)
9bb5d6
+	lxv	32+v3,-64(r4)
9bb5d6
+
9bb5d6
+	stxv	32+v0,-16(r11)
9bb5d6
+	stxv	32+v1,-32(r11)
9bb5d6
+	stxv	32+v2,-48(r11)
9bb5d6
+	stxv	32+v3,-64(r11)
9bb5d6
+
9bb5d6
+	addi	r4,r4,-128
9bb5d6
+	addi	r11,r11,-128
9bb5d6
+
9bb5d6
+	lxv	32+v0,-16(r9)
9bb5d6
+	lxv	32+v1,-32(r9)
9bb5d6
+	lxv	32+v2,-48(r9)
9bb5d6
+	lxv	32+v3,-64(r9)
9bb5d6
+
9bb5d6
+	stxv	32+v0,-16(r10)
9bb5d6
+	stxv	32+v1,-32(r10)
9bb5d6
+	stxv	32+v2,-48(r10)
9bb5d6
+	stxv	32+v3,-64(r10)
9bb5d6
+
9bb5d6
+	bdnz	L(bwd_loop)
9bb5d6
+	clrldi.	r5,r5,57
9bb5d6
+	beqlr
9bb5d6
+
9bb5d6
+/* Copy 64 bytes.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(bwd_loop_tail):
9bb5d6
+	cmpldi 	cr5,r5,63
9bb5d6
+	ble	cr5,L(bwd_final_64)
9bb5d6
+
9bb5d6
+	addi	r4,r4,-64
9bb5d6
+	addi	r11,r11,-64
9bb5d6
+
9bb5d6
+	lxv	32+v0,0(r4)
9bb5d6
+	lxv	32+v1,16(r4)
9bb5d6
+	lxv	32+v2,32(r4)
9bb5d6
+	lxv	32+v3,48(r4)
9bb5d6
+
9bb5d6
+	stxv	32+v0,0(r11)
9bb5d6
+	stxv	32+v1,16(r11)
9bb5d6
+	stxv	32+v2,32(r11)
9bb5d6
+	stxv	32+v3,48(r11)
9bb5d6
+
9bb5d6
+	subi	r5,r5,64
9bb5d6
+
9bb5d6
+/* Copies the last 1-63 bytes.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(bwd_final_64):
9bb5d6
+	/* r8 holds the number of bytes that will be copied with lxv/stxv.  */
9bb5d6
+	clrrdi.	r8,r5,4
9bb5d6
+	beq	L(tail1_bwd)
9bb5d6
+
9bb5d6
+	cmpldi	cr5,r5,32
9bb5d6
+	lxv	32+v2,-16(r4)
9bb5d6
+	blt	cr5,L(tail2_bwd)
9bb5d6
+
9bb5d6
+	cmpldi	cr6,r5,48
9bb5d6
+	lxv	32+v1,-32(r4)
9bb5d6
+	blt	cr6,L(tail3_bwd)
9bb5d6
+
9bb5d6
+	.p2align 5
9bb5d6
+	lxv	32+v0,-48(r4)
9bb5d6
+	stxv	32+v0,-48(r11)
9bb5d6
+L(tail3_bwd):
9bb5d6
+	stxv	32+v1,-32(r11)
9bb5d6
+L(tail2_bwd):
9bb5d6
+	stxv	32+v2,-16(r11)
9bb5d6
+	sub	r4,r4,r5
9bb5d6
+	sub	r11,r11,r5
9bb5d6
+	sub	r5,r5,r8
9bb5d6
+	sldi	r6,r5,56
9bb5d6
+	lxvl	v4,r4,r6
9bb5d6
+	stxvl	v4,r11,r6
9bb5d6
+	blr
9bb5d6
+
9bb5d6
+/* Copy the last 0-15 bytes.  */
9bb5d6
+	.p2align 5
9bb5d6
+L(tail1_bwd):
9bb5d6
+	sub	r4,r4,r5
9bb5d6
+	sub	r11,r11,r5
9bb5d6
+	sldi	r6,r5,56
9bb5d6
+	lxvl	v4,r4,r6
9bb5d6
+	stxvl	v4,r11,r6
9bb5d6
+	blr
9bb5d6
+
9bb5d6
+END_GEN_TB (MEMMOVE,TB_TOCLESS)
9bb5d6
+libc_hidden_builtin_def (memmove)
9bb5d6
+
9bb5d6
+/* void bcopy(const void *src [r3], void *dest [r4], size_t n [r5])
9bb5d6
+   Implemented in this file to avoid the linker creating a stub function call
9bb5d6
+   in the branch to '_memmove'.  */
9bb5d6
+ENTRY_TOCLESS (__bcopy)
9bb5d6
+	mr	r6,r3
9bb5d6
+	mr	r3,r4
9bb5d6
+	mr	r4,r6
9bb5d6
+	b	L(_memmove)
9bb5d6
+END (__bcopy)
9bb5d6
+#ifndef __bcopy
9bb5d6
+weak_alias (__bcopy, bcopy)
9bb5d6
+#endif
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
9bb5d6
index 61652b65dd223018..66f8c6ace9824d4a 100644
9bb5d6
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
9bb5d6
@@ -32,7 +32,8 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
9bb5d6
 		   strncase-power8
9bb5d6
 
9bb5d6
 ifneq (,$(filter %le,$(config-machine)))
9bb5d6
-sysdep_routines += strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
9bb5d6
+sysdep_routines += memmove-power10 \
9bb5d6
+		   strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
9bb5d6
 		   rawmemchr-power9 strlen-power9 strncpy-power9 stpncpy-power9 \
9bb5d6
 		   strlen-power10
9bb5d6
 endif
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/multiarch/bcopy.c b/sysdeps/powerpc/powerpc64/multiarch/bcopy.c
9bb5d6
index 1c4a229b1fc5654a..705fef33d4e57557 100644
9bb5d6
--- a/sysdeps/powerpc/powerpc64/multiarch/bcopy.c
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/multiarch/bcopy.c
9bb5d6
@@ -22,8 +22,17 @@
9bb5d6
 extern __typeof (bcopy) __bcopy_ppc attribute_hidden;
9bb5d6
 /* __bcopy_power7 symbol is implemented at memmove-power7.S  */
9bb5d6
 extern __typeof (bcopy) __bcopy_power7 attribute_hidden;
9bb5d6
+#ifdef __LITTLE_ENDIAN__
9bb5d6
+extern __typeof (bcopy) __bcopy_power10 attribute_hidden;
9bb5d6
+#endif
9bb5d6
 
9bb5d6
 libc_ifunc (bcopy,
9bb5d6
+#ifdef __LITTLE_ENDIAN__
9bb5d6
+	     hwcap2 & (PPC_FEATURE2_ARCH_3_1 |
9bb5d6
+		       PPC_FEATURE2_HAS_ISEL)
9bb5d6
+	     && (hwcap & PPC_FEATURE_HAS_VSX)
9bb5d6
+	     ? __bcopy_power10 :
9bb5d6
+#endif
9bb5d6
             (hwcap & PPC_FEATURE_HAS_VSX)
9bb5d6
             ? __bcopy_power7
9bb5d6
             : __bcopy_ppc);
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
9bb5d6
index 46d5956adda72b86..4ce04bc51574cca1 100644
9bb5d6
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
9bb5d6
@@ -67,6 +67,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
9bb5d6
 
9bb5d6
   /* Support sysdeps/powerpc/powerpc64/multiarch/memmove.c.  */
9bb5d6
   IFUNC_IMPL (i, name, memmove,
9bb5d6
+#ifdef __LITTLE_ENDIAN__
9bb5d6
+	      IFUNC_IMPL_ADD (array, i, memmove,
9bb5d6
+			      hwcap2 & (PPC_FEATURE2_ARCH_3_1 |
9bb5d6
+					PPC_FEATURE2_HAS_ISEL)
9bb5d6
+			      && (hwcap & PPC_FEATURE_HAS_VSX),
9bb5d6
+			      __memmove_power10)
9bb5d6
+#endif
9bb5d6
 	      IFUNC_IMPL_ADD (array, i, memmove, hwcap & PPC_FEATURE_HAS_VSX,
9bb5d6
 			      __memmove_power7)
9bb5d6
 	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_ppc))
9bb5d6
@@ -186,6 +193,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
9bb5d6
 
9bb5d6
   /* Support sysdeps/powerpc/powerpc64/multiarch/bcopy.c.  */
9bb5d6
   IFUNC_IMPL (i, name, bcopy,
9bb5d6
+#ifdef __LITTLE_ENDIAN__
9bb5d6
+	      IFUNC_IMPL_ADD (array, i, bcopy,
9bb5d6
+			      hwcap2 & (PPC_FEATURE2_ARCH_3_1 |
9bb5d6
+					PPC_FEATURE2_HAS_ISEL)
9bb5d6
+			      && (hwcap & PPC_FEATURE_HAS_VSX),
9bb5d6
+			      __bcopy_power10)
9bb5d6
+#endif
9bb5d6
 	      IFUNC_IMPL_ADD (array, i, bcopy, hwcap & PPC_FEATURE_HAS_VSX,
9bb5d6
 			      __bcopy_power7)
9bb5d6
 	      IFUNC_IMPL_ADD (array, i, bcopy, 1, __bcopy_ppc))
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memmove-power10.S b/sysdeps/powerpc/powerpc64/multiarch/memmove-power10.S
9bb5d6
new file mode 100644
9bb5d6
index 0000000000000000..171b32921a0a4d47
9bb5d6
--- /dev/null
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/multiarch/memmove-power10.S
9bb5d6
@@ -0,0 +1,27 @@
9bb5d6
+/* Optimized memmove implementation for POWER10.
9bb5d6
+   Copyright (C) 2021 Free Software Foundation, Inc.
9bb5d6
+   This file is part of the GNU C Library.
9bb5d6
+
9bb5d6
+   The GNU C Library is free software; you can redistribute it and/or
9bb5d6
+   modify it under the terms of the GNU Lesser General Public
9bb5d6
+   License as published by the Free Software Foundation; either
9bb5d6
+   version 2.1 of the License, or (at your option) any later version.
9bb5d6
+
9bb5d6
+   The GNU C Library is distributed in the hope that it will be useful,
9bb5d6
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
9bb5d6
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
9bb5d6
+   Lesser General Public License for more details.
9bb5d6
+
9bb5d6
+   You should have received a copy of the GNU Lesser General Public
9bb5d6
+   License along with the GNU C Library; if not, see
9bb5d6
+   <https://www.gnu.org/licenses/>.  */
9bb5d6
+
9bb5d6
+#define MEMMOVE __memmove_power10
9bb5d6
+
9bb5d6
+#undef libc_hidden_builtin_def
9bb5d6
+#define libc_hidden_builtin_def(name)
9bb5d6
+
9bb5d6
+#undef __bcopy
9bb5d6
+#define __bcopy __bcopy_power10
9bb5d6
+
9bb5d6
+#include <sysdeps/powerpc/powerpc64/le/power10/memmove.S>
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S
9bb5d6
index 0b251d0f5f087874..fb5261ecda64d061 100644
9bb5d6
--- a/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S
9bb5d6
@@ -21,7 +21,7 @@
9bb5d6
 #undef libc_hidden_builtin_def
9bb5d6
 #define libc_hidden_builtin_def(name)
9bb5d6
 
9bb5d6
-#undef bcopy
9bb5d6
-#define bcopy __bcopy_power7
9bb5d6
+#undef __bcopy
9bb5d6
+#define __bcopy __bcopy_power7
9bb5d6
 
9bb5d6
 #include <sysdeps/powerpc/powerpc64/power7/memmove.S>
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memmove.c b/sysdeps/powerpc/powerpc64/multiarch/memmove.c
9bb5d6
index 39987155cc7d3624..2fd7b6d309e4bedd 100644
9bb5d6
--- a/sysdeps/powerpc/powerpc64/multiarch/memmove.c
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/multiarch/memmove.c
9bb5d6
@@ -28,14 +28,22 @@
9bb5d6
 # include "init-arch.h"
9bb5d6
 
9bb5d6
 extern __typeof (__redirect_memmove) __libc_memmove;
9bb5d6
-
9bb5d6
 extern __typeof (__redirect_memmove) __memmove_ppc attribute_hidden;
9bb5d6
 extern __typeof (__redirect_memmove) __memmove_power7 attribute_hidden;
9bb5d6
+#ifdef __LITTLE_ENDIAN__
9bb5d6
+extern __typeof (__redirect_memmove) __memmove_power10 attribute_hidden;
9bb5d6
+#endif
9bb5d6
 
9bb5d6
 libc_ifunc (__libc_memmove,
9bb5d6
-            (hwcap & PPC_FEATURE_HAS_VSX)
9bb5d6
-            ? __memmove_power7
9bb5d6
-            : __memmove_ppc);
9bb5d6
+#ifdef __LITTLE_ENDIAN__
9bb5d6
+	     hwcap2 & (PPC_FEATURE2_ARCH_3_1 |
9bb5d6
+		       PPC_FEATURE2_HAS_ISEL)
9bb5d6
+	     && (hwcap & PPC_FEATURE_HAS_VSX)
9bb5d6
+	     ? __memmove_power10 :
9bb5d6
+#endif
9bb5d6
+		     (hwcap & PPC_FEATURE_HAS_VSX)
9bb5d6
+		     ? __memmove_power7
9bb5d6
+		     : __memmove_ppc);
9bb5d6
 
9bb5d6
 #undef memmove
9bb5d6
 strong_alias (__libc_memmove, memmove);
9bb5d6
diff --git a/sysdeps/powerpc/powerpc64/power7/memmove.S b/sysdeps/powerpc/powerpc64/power7/memmove.S
9bb5d6
index b7f3dc28d1a8eac3..9e4cabb07ef9b732 100644
9bb5d6
--- a/sysdeps/powerpc/powerpc64/power7/memmove.S
9bb5d6
+++ b/sysdeps/powerpc/powerpc64/power7/memmove.S
9bb5d6
@@ -832,4 +832,6 @@ ENTRY_TOCLESS (__bcopy)
9bb5d6
 	mr	r4,r6
9bb5d6
 	b	L(_memmove)
9bb5d6
 END (__bcopy)
9bb5d6
+#ifndef __bcopy
9bb5d6
 weak_alias (__bcopy, bcopy)
9bb5d6
+#endif