commit 23fdf8178cce3c2ec320dd5eca8b544245bcaef0
Author: Raoni Fassina Firmino <raoni@linux.ibm.com>
Date:   Fri Apr 30 18:12:08 2021 -0300

    powerpc64le: Optimize memset for POWER10

    This implementation is based on __memset_power8 and integrates many
    suggestions from Anton Blanchard.

    The biggest difference is that it makes extensive use of stxvl in the
    alignment and tail code to avoid branches and small stores.  It has
    three main execution paths:

    a) "Short lengths" for lengths up to 64 bytes, avoiding as many
       branches as possible.

    b) "General case" for larger lengths: an alignment section using
       stxvl to avoid branches, a 128-byte loop, and then tail code,
       again using stxvl with few branches.

    c) "Zeroing cache blocks" for lengths of 256 bytes and up when the
       set value is zero.  It is mostly the __memset_power8 code, but
       the alignment phase was simplified because, at that point, the
       address is already 16-byte aligned; it was also changed to use
       vector stores.  The tail code was simplified to reuse the
       general-case tail.

    All unaligned stores use stxvl instructions, which do not generate
    alignment interrupts on POWER10, making the implementation safe to
    use on caching-inhibited memory.

    On average, this implementation provides around a 30% improvement
    over __memset_power8.

    Reviewed-by: Matheus Castanho <msc@linux.ibm.com>
    Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@linux.ibm.com>
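A minimal C model of the branchless "short lengths" path described in
the message above (an illustrative sketch, not code from the patch;
store_upto_16 and saturating_sub are made-up helper names).
store_upto_16 stands in for stxvl, which stores min(length, 16) bytes
and does nothing when the length is zero; saturating_sub models the
sub./isellt pair that clamps the remaining length at zero:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Models stxvl: stores min (len, 16) bytes; a no-op for len == 0.  */
    static void
    store_upto_16 (unsigned char *dst, uint64_t len, const unsigned char v[16])
    {
      memcpy (dst, v, len < 16 ? len : 16);
    }

    /* Models the sub./isellt pair: subtract, saturating at zero.  */
    static uint64_t
    saturating_sub (uint64_t a, uint64_t b)
    {
      return a > b ? a - b : 0;
    }

    /* memset for n <= 64: four unconditional stores, no branches on n.  */
    static void
    memset_short (unsigned char *p, int c, size_t n)
    {
      unsigned char v[16];
      memset (v, c, 16);               /* models mtvsrd + vspltb  */

      uint64_t len = n;                /* the asm keeps n << 56   */
      store_upto_16 (p, len, v);
      len = saturating_sub (len, 16);
      store_upto_16 (p + 16, len, v);
      len = saturating_sub (len, 16);
      store_upto_16 (p + 32, len, v);
      len = saturating_sub (len, 16);
      store_upto_16 (p + 48, len, v);
    }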
diff --git a/sysdeps/powerpc/powerpc64/le/power10/memset.S b/sysdeps/powerpc/powerpc64/le/power10/memset.S
new file mode 100644
index 0000000000000000..6b8e2cfdaf25fd30
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/memset.S
@@ -0,0 +1,256 @@
+/* Optimized memset implementation for POWER10 LE.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]);
+   Returns 's'.  */
+
+#ifndef MEMSET
+# define MEMSET memset
+#endif
+
+	.machine power9
+ENTRY_TOCLESS (MEMSET, 5)
+	CALL_MCOUNT 3
+
+L(_memset):
+	/* Assume memset of zero length is uncommon, and just let it go
+	   through the small path below.  */
+	cmpldi	r5,64
+
+	/* Replicate byte to quad word.  */
+	mtvsrd	v0+32,r4
+	vspltb	v0,v0,7
+
+	li	r7,16
+	sldi	r8,r7,56
+
+	bgt	L(large)
+
+	/* For short lengths we want to avoid as many branches as possible.
+	   We use store VSX vector with length instructions to do this.
+	   It takes advantage of the fact that if the length passed to stxvl
+	   is zero nothing is done, effectively a no-op.  */
+	sldi	r5,r5,56
+
+	addi	r10,r3,16
+
+	sub.	r11,r5,r8
+	isellt	r11,0,r11	/* Saturate the subtraction to zero.  */
+
+	stxvl	v0+32,r3,r5
+	stxvl	v0+32,r10,r11
+
+	addi	r9,r3,32
+	addi	r10,r3,48
+
+	sub.	r11,r11,r8
+	isellt	r11,0,r11
+
+	sub.	r5,r11,r8
+	isellt	r5,0,r5
+
+	stxvl	v0+32,r9,r11
+	stxvl	v0+32,r10,r5
+
+	blr
+
+	.balign	16
+L(large):
+	mr	r6,r3	/* Don't modify r3 since we need to return it.  */
+
+	/* Get dest 16B aligned.  */
+	neg	r0,r3
+	clrldi.	r7,r0,(64-4)
+	beq	L(aligned)
+	rldic	r9,r0,56,4	/* (~X & 0xf)<<56 "clrlsldi r9,r0,64-4,56".  */
+
+	stxvl	v0+32,r6,r9	/* Store up to 15B until aligned address.  */
+
+	add	r6,r6,r7
+	sub	r5,r5,r7
+
+	/* Go to tail if there is less than 64B left after alignment.  */
+	cmpldi	r5,64
+	blt	L(tail_64)
+
+	.balign	16
+L(aligned):
+	/* Go to tail if there is less than 128B left after alignment.  */
+	srdi.	r0,r5,7
+	beq	L(tail_128)
+
+	/* If c == 0 && n >= 256 use dcbz to zero out full cache blocks.  */
+	cmpldi	cr5,r5,255
+	cmpldi	cr6,r4,0
+	crand	27,26,21
+	bt	27,L(dcbz)
+
+	mtctr	r0
+
+	.balign	32
+L(loop):
+	stxv	v0+32,0(r6)
+	stxv	v0+32,16(r6)
+	stxv	v0+32,32(r6)
+	stxv	v0+32,48(r6)
+	stxv	v0+32,64(r6)
+	stxv	v0+32,80(r6)
+	stxv	v0+32,96(r6)
+	stxv	v0+32,112(r6)
+	addi	r6,r6,128
+	bdnz	L(loop)
+
+	.balign	16
+L(tail):
+	/* 127B or less left, finish the tail or return.  */
+	andi.	r5,r5,127
+	beqlr
+
+	cmpldi	r5,64
+	blt	L(tail_64)
+
+	.balign	16
+L(tail_128):
+	/* Stores a minimum of 64B and up to 128B and return.  */
+	stxv	v0+32,0(r6)
+	stxv	v0+32,16(r6)
+	stxv	v0+32,32(r6)
+	stxv	v0+32,48(r6)
+	addi	r6,r6,64
+	andi.	r5,r5,63
+	beqlr
+
+	.balign	16
+L(tail_64):
+	/* Stores up to 64B and return.  */
+	sldi	r5,r5,56
+
+	addi	r10,r6,16
+
+	sub.	r11,r5,r8
+	isellt	r11,0,r11
+
+	stxvl	v0+32,r6,r5
+	stxvl	v0+32,r10,r11
+
+	sub.	r11,r11,r8
+	blelr
+
+	addi	r9,r6,32
+	addi	r10,r6,48
+
+	isellt	r11,0,r11
+
+	sub.	r5,r11,r8
+	isellt	r5,0,r5
+
+	stxvl	v0+32,r9,r11
+	stxvl	v0+32,r10,r5
+
+	blr
+
+	.balign	16
+L(dcbz):
+	/* Special case when value is 0 and we have a long length to deal
+	   with.  Use dcbz to zero out a full cacheline of 128 bytes at a time.
+	   Before using dcbz though, we need to get the destination 128-byte
+	   aligned.  */
+	neg	r0,r6
+	clrldi.	r0,r0,(64-7)
+	beq	L(dcbz_aligned)
+
+	sub	r5,r5,r0
+	mtocrf	0x2,r0	/* Copy bits 57..59 to cr6, the ones for sizes 64,
+			   32 and 16 which need to be checked.  */
+
+	/* Write 16-128 bytes until DST is aligned to 128 bytes.  */
+64:	bf	25,32f
+	stxv	v0+32,0(r6)
+	stxv	v0+32,16(r6)
+	stxv	v0+32,32(r6)
+	stxv	v0+32,48(r6)
+	addi	r6,r6,64
+
+32:	bf	26,16f
+	stxv	v0+32,0(r6)
+	stxv	v0+32,16(r6)
+	addi	r6,r6,32
+
+16:	bf	27,L(dcbz_aligned)
+	stxv	v0+32,0(r6)
+	addi	r6,r6,16
+
+	.balign	16
+L(dcbz_aligned):
+	/* Set up dcbz unroll offsets and the iteration count.  */
+	srdi.	r0,r5,9
+	li	r9,128
+	beq	L(dcbz_tail)
+	li	r10,256
+	li	r11,384
+	mtctr	r0
+
+	.balign	16
+L(dcbz_loop):
+	/* Sets 512 bytes to zero in each iteration; the loop unrolling shows
+	   a throughput boost for large sizes (2048 bytes or higher).  */
+	dcbz	0,r6
+	dcbz	r9,r6
+	dcbz	r10,r6
+	dcbz	r11,r6
+	addi	r6,r6,512
+	bdnz	L(dcbz_loop)
+
+	andi.	r5,r5,511
+	beqlr
+
+	.balign	16
+L(dcbz_tail):
+	/* We have 1-511 bytes remaining.  */
+	srdi.	r0,r5,7
+	beq	L(tail)
+
+	mtocrf	0x1,r0
+
+256:	bf	30,128f
+	dcbz	0,r6
+	dcbz	r9,r6
+	addi	r6,r6,256
+
+128:	bf	31,L(tail)
+	dcbz	0,r6
+	addi	r6,r6,128
+
+	b	L(tail)
+
+END_GEN_TB (MEMSET,TB_TOCLESS)
+libc_hidden_builtin_def (memset)
+
+/* Copied from bzero.S to prevent the linker from inserting a stub
+   between bzero and memset.  */
+ENTRY_TOCLESS (__bzero)
+	CALL_MCOUNT 2
+	mr	r5,r4
+	li	r4,0
+	b	L(_memset)
+END (__bzero)
+#ifndef __bzero
+weak_alias (__bzero, bzero)
+#endif
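A minimal C model of the L(dcbz) path above, assuming a 128-byte cache
block and a caller that guarantees c == 0, a 16-byte-aligned pointer,
and n >= 256 (an illustrative sketch, not code from the patch; dcbz_line
and zero_large are made-up names, with dcbz_line standing in for the
dcbz instruction, which zeroes one full cache block):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define CACHE_LINE 128          /* POWER10 data cache block size.  */

    /* Models dcbz: zero one full, aligned cache block.  */
    static void
    dcbz_line (unsigned char *p)
    {
      memset (p, 0, CACHE_LINE);
    }

    static void
    zero_large (unsigned char *p, size_t n)
    {
      /* 16-byte vector stores until p is 128-byte aligned (the asm does
         this with mtocrf and three bit tests instead of a loop).  */
      size_t head = (size_t) (-(uintptr_t) p) & (CACHE_LINE - 1);
      memset (p, 0, head);
      p += head;
      n -= head;

      /* Unrolled loop: four dcbz per iteration, 512 bytes per pass.  */
      for (size_t i = n >> 9; i > 0; i--)
        {
          dcbz_line (p);
          dcbz_line (p + 128);
          dcbz_line (p + 256);
          dcbz_line (p + 384);
          p += 512;
        }
      n &= 511;

      /* 1-511 bytes left: up to three more cache blocks via dcbz, then
         the general-case tail stores the rest.  */
      while (n >= CACHE_LINE)
        {
          dcbz_line (p);
          p += CACHE_LINE;
          n -= CACHE_LINE;
        }
      memset (p, 0, n);
    }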
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 2e3c8f2e8a81cda4..1d517698429e1230 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -32,7 +32,7 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
 		   strncase-power8
 
 ifneq (,$(filter %le,$(config-machine)))
-sysdep_routines += memcpy-power10 memmove-power10 \
+sysdep_routines += memcpy-power10 memmove-power10 memset-power10 \
 		   strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
 		   rawmemchr-power9 strlen-power9 strncpy-power9 stpncpy-power9 \
 		   strlen-power10
diff --git a/sysdeps/powerpc/powerpc64/multiarch/bzero.c b/sysdeps/powerpc/powerpc64/multiarch/bzero.c
index f8cb05bea8a3505b..4ce98e324d12a31e 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/bzero.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/bzero.c
@@ -27,8 +27,16 @@ extern __typeof (bzero) __bzero_power4 attribute_hidden;
 extern __typeof (bzero) __bzero_power6 attribute_hidden;
 extern __typeof (bzero) __bzero_power7 attribute_hidden;
 extern __typeof (bzero) __bzero_power8 attribute_hidden;
+# ifdef __LITTLE_ENDIAN__
+extern __typeof (bzero) __bzero_power10 attribute_hidden;
+# endif
 
 libc_ifunc (__bzero,
+# ifdef __LITTLE_ENDIAN__
+	    (hwcap2 & (PPC_FEATURE2_ARCH_3_1 | PPC_FEATURE2_HAS_ISEL)
+	     && hwcap & PPC_FEATURE_HAS_VSX)
+	    ? __bzero_power10 :
+# endif
             (hwcap2 & PPC_FEATURE2_ARCH_2_07)
             ? __bzero_power8 :
             (hwcap & PPC_FEATURE_HAS_VSX)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 9d5a14e480c02171..11532f77d4d03b2a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -86,6 +86,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/powerpc/powerpc64/multiarch/memset.c.  */
   IFUNC_IMPL (i, name, memset,
+#ifdef __LITTLE_ENDIAN__
+	      IFUNC_IMPL_ADD (array, i, memset,
+			      hwcap2 & (PPC_FEATURE2_ARCH_3_1 |
+					PPC_FEATURE2_HAS_ISEL)
+			      && hwcap & PPC_FEATURE_HAS_VSX,
+			      __memset_power10)
+#endif
 	      IFUNC_IMPL_ADD (array, i, memset, hwcap2 & PPC_FEATURE2_ARCH_2_07,
 			      __memset_power8)
 	      IFUNC_IMPL_ADD (array, i, memset, hwcap & PPC_FEATURE_HAS_VSX,
@@ -187,6 +194,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/powerpc/powerpc64/multiarch/bzero.c.  */
   IFUNC_IMPL (i, name, bzero,
+#ifdef __LITTLE_ENDIAN__
+	      IFUNC_IMPL_ADD (array, i, bzero,
+			      hwcap2 & (PPC_FEATURE2_ARCH_3_1 |
+					PPC_FEATURE2_HAS_ISEL)
+			      && hwcap & PPC_FEATURE_HAS_VSX,
+			      __bzero_power10)
+#endif
 	      IFUNC_IMPL_ADD (array, i, bzero, hwcap2 & PPC_FEATURE2_ARCH_2_07,
 			      __bzero_power8)
 	      IFUNC_IMPL_ADD (array, i, bzero, hwcap & PPC_FEATURE_HAS_VSX,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset-power10.S b/sysdeps/powerpc/powerpc64/multiarch/memset-power10.S
new file mode 100644
index 0000000000000000..548e99789735296c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset-power10.S
@@ -0,0 +1,27 @@
+/* Optimized memset implementation for POWER10 LE.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define MEMSET __memset_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#undef __bzero
+#define __bzero __bzero_power10
+
+#include <sysdeps/powerpc/powerpc64/le/power10/memset.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset.c b/sysdeps/powerpc/powerpc64/multiarch/memset.c
index 1a7c46fecf78ab1f..4c97622c7d7eb8aa 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset.c
@@ -33,10 +33,18 @@ extern __typeof (__redirect_memset) __memset_power4 attribute_hidden;
 extern __typeof (__redirect_memset) __memset_power6 attribute_hidden;
 extern __typeof (__redirect_memset) __memset_power7 attribute_hidden;
 extern __typeof (__redirect_memset) __memset_power8 attribute_hidden;
+# ifdef __LITTLE_ENDIAN__
+extern __typeof (__redirect_memset) __memset_power10 attribute_hidden;
+# endif
 
 /* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
    ifunc symbol properly.  */
 libc_ifunc (__libc_memset,
+# ifdef __LITTLE_ENDIAN__
+	    (hwcap2 & (PPC_FEATURE2_ARCH_3_1 | PPC_FEATURE2_HAS_ISEL)
+	     && hwcap & PPC_FEATURE_HAS_VSX)
+	    ? __memset_power10 :
+# endif
             (hwcap2 & PPC_FEATURE2_ARCH_2_07)
             ? __memset_power8 :
             (hwcap & PPC_FEATURE_HAS_VSX)
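To check at run time whether a given machine satisfies the hwcap
predicate used by these ifunc selectors, the auxiliary vector can be
read with getauxval.  A small stand-alone probe (the fallback constant
below is an assumption for older headers; on powerpc glibc defines it
in bits/hwcap.h):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef PPC_FEATURE2_ARCH_3_1
    # define PPC_FEATURE2_ARCH_3_1 0x00040000   /* ISA 3.1 (POWER10).  */
    #endif

    int
    main (void)
    {
      unsigned long hwcap2 = getauxval (AT_HWCAP2);
      puts ((hwcap2 & PPC_FEATURE2_ARCH_3_1)
            ? "ISA 3.1 (POWER10): __memset_power10 would be eligible"
            : "pre-POWER10 CPU: an older memset variant is selected");
      return 0;
    }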