commit 10624a97e8e47004985740cbb04060a84cfada76
Author: Matheus Castanho <msc@linux.ibm.com>
Date:   Tue Sep 29 15:40:08 2020 -0300

    powerpc: Add optimized strlen for POWER10

    Improvements compared to POWER9 version:

    1. Take into account first 16B comparison for aligned strings

       The previous version compares the first 16B and increments r4 by the number
       of bytes until the address is 16B-aligned, then starts doing aligned loads at
       that address. For aligned strings, this causes the first 16B to be compared
       twice, because the increment is 0. Here we calculate the next 16B-aligned
       address differently, which avoids that issue.

    2. Use simple comparisons for the first ~192 bytes

       The main loop is good for big strings, but comparing 16B each time is better
       for smaller strings.  So after aligning the address to 16 Bytes, we check
       176B more in 16B chunks.  There may be some overlaps with the main loop for
       unaligned strings, but we avoid using the more aggressive strategy too soon,
       and also allow the loop to start at a 64B-aligned address.  This greatly
       benefits smaller strings and avoids overlapping checks if the string is
       already aligned at a 64B boundary.

    3. Reduce dependencies between load blocks caused by address calculation on loop

       Doing a precise time tracing on the code showed many loads in the loop were
       stalled waiting for updates to r4 from previous code blocks.  This
       implementation avoids that as much as possible by using 2 registers (r4 and
       r5) to hold addresses to be used by different parts of the code.

       Also, the previous code aligned the address to 16B, then to 64B by doing a
       few 48B loops (if needed) until the address was aligned. The main loop could
       not start until that 48B loop had finished and r4 was updated with the
       current address. Here we calculate the address used by the loop very early,
       so it can start sooner.

       The main loop now uses 2 pointers 128B apart to make pointer updates less
       frequent, and also unrolls 1 iteration to guarantee there is enough time
       between iterations to update the pointers, reducing stalled cycles.

    4. Use new P10 instructions

       lxvp is used to load 32B with a single instruction, reducing contention in
       the load queue.

       vextractbm allows simplifying the tail code for the loop, replacing
       vbpermq and avoiding having to generate a permute control vector.

    Reviewed-by: Paul E Murphy <murphyp@linux.ibm.com>
    Reviewed-by: Raphael M Zinsly <rzinsly@linux.ibm.com>
    Reviewed-by: Lucas A. M. Magalhaes <lamm@linux.ibm.com>
diff --git a/sysdeps/powerpc/powerpc64/le/power10/strlen.S b/sysdeps/powerpc/powerpc64/le/power10/strlen.S
new file mode 100644
index 0000000000000000..ca7e9eb3d84c9b00
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/strlen.S
@@ -0,0 +1,221 @@
+/* Optimized strlen implementation for POWER10 LE.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#ifndef STRLEN
+# define STRLEN __strlen
+# define DEFINE_STRLEN_HIDDEN_DEF 1
+#endif
+
+/* TODO: Replace macros by the actual instructions when minimum binutils becomes
+   >= 2.35.  This is used to keep compatibility with older versions.  */
+#define VEXTRACTBM(rt,vrb)	 \
+	.long(((4)<<(32-6))	 \
+	      | ((rt)<<(32-11))	 \
+	      | ((8)<<(32-16))	 \
+	      | ((vrb)<<(32-21)) \
+	      | 1602)
+
+#define LXVP(xtp,dq,ra)		   \
+	.long(((6)<<(32-6))		   \
+	      | ((((xtp)-32)>>1)<<(32-10)) \
+	      | ((1)<<(32-11))		   \
+	      | ((ra)<<(32-16))		   \
+	      | dq)
+
+#define CHECK16(vreg,offset,addr,label) \
+	lxv	  vreg+32,offset(addr);	\
+	vcmpequb. vreg,vreg,v18;	\
+	bne	  cr6,L(label);
+
+/* Load 4 quadwords, merge into one VR for speed and check for NULLs.  r6 has #
+   of bytes already checked.  */
+#define CHECK64(offset,addr,label)	    \
+	li	  r6,offset;		    \
+	LXVP(v4+32,offset,addr);	    \
+	LXVP(v6+32,offset+32,addr);	    \
+	vminub	  v14,v4,v5;		    \
+	vminub	  v15,v6,v7;		    \
+	vminub	  v16,v14,v15;		    \
+	vcmpequb. v0,v16,v18;		    \
+	bne	  cr6,L(label)
+
+#define TAIL(vreg,increment)	   \
+	vctzlsbb  r4,vreg;	   \
+	subf	  r3,r3,r5;	   \
+	addi	  r4,r4,increment; \
+	add	  r3,r3,r4;	   \
+	blr
+
+/* Implements the function
+
+   int [r3] strlen (const void *s [r3])
+
+   The implementation can load bytes past a matching byte, but only
+   up to the next 64B boundary, so it never crosses a page.  */
+
+.machine power9
+
+ENTRY_TOCLESS (STRLEN, 4)
+	CALL_MCOUNT 1
+
+	vspltisb  v18,0
+	vspltisb  v19,-1
+
+	/* Next 16B-aligned address. Prepare address for L(aligned).  */
+	addi	  r5,r3,16
+	clrrdi	  r5,r5,4
+
+	/* Align data and fill bytes not loaded with non matching char.	 */
+	lvx	  v0,0,r3
+	lvsr	  v1,0,r3
+	vperm	  v0,v19,v0,v1
+
+	vcmpequb. v6,v0,v18
+	beq	  cr6,L(aligned)
+
+	vctzlsbb  r3,v6
+	blr
+
+	/* Test next 176B, 16B at a time.  The main loop is optimized for longer
+	   strings, so checking the first bytes in 16B chunks benefits a lot
+	   small strings.  */
+	.p2align 5
+L(aligned):
+	/* Prepare address for the loop.  */
+	addi	  r4,r3,192
+	clrrdi	  r4,r4,6
+
+	CHECK16(v0,0,r5,tail1)
+	CHECK16(v1,16,r5,tail2)
+	CHECK16(v2,32,r5,tail3)
+	CHECK16(v3,48,r5,tail4)
+	CHECK16(v4,64,r5,tail5)
+	CHECK16(v5,80,r5,tail6)
+	CHECK16(v6,96,r5,tail7)
+	CHECK16(v7,112,r5,tail8)
+	CHECK16(v8,128,r5,tail9)
+	CHECK16(v9,144,r5,tail10)
+	CHECK16(v10,160,r5,tail11)
+
+	addi	  r5,r4,128
+
+	/* Switch to a more aggressive approach checking 64B each time.  Use 2
+	   pointers 128B apart and unroll the loop once to make the pointer
+	   updates and usages separated enough to avoid stalls waiting for
+	   address calculation.  */
+	.p2align 5
+L(loop):
+	CHECK64(0,r4,pre_tail_64b)
+	CHECK64(64,r4,pre_tail_64b)
+	addi	  r4,r4,256
+
+	CHECK64(0,r5,tail_64b)
+	CHECK64(64,r5,tail_64b)
+	addi	  r5,r5,256
+
+	b	  L(loop)
+
+	.p2align  5
+L(pre_tail_64b):
+	mr	r5,r4
+L(tail_64b):
+	/* OK, we found a null byte.  Let's look for it in the current 64-byte
+	   block and mark it in its corresponding VR.  lxvp vx,0(ry) puts the
+	   low 16B bytes into vx+1, and the high into vx, so the order here is
+	   v5, v4, v7, v6.  */
+	vcmpequb  v1,v5,v18
+	vcmpequb  v2,v4,v18
+	vcmpequb  v3,v7,v18
+	vcmpequb  v4,v6,v18
+
+	/* Take into account the other 64B blocks we had already checked.  */
+	add	r5,r5,r6
+
+	/* Extract first bit of each byte.  */
+	VEXTRACTBM(r7,v1)
+	VEXTRACTBM(r8,v2)
+	VEXTRACTBM(r9,v3)
+	VEXTRACTBM(r10,v4)
+
+	/* Shift each value into their corresponding position.  */
+	sldi	  r8,r8,16
+	sldi	  r9,r9,32
+	sldi	  r10,r10,48
+
+	/* Merge the results.  */
+	or	  r7,r7,r8
+	or	  r8,r9,r10
+	or	  r10,r8,r7
+
+	cnttzd	  r0,r10	  /* Count trailing zeros before the match.  */
+	subf	  r5,r3,r5
+	add	  r3,r5,r0	  /* Compute final length.  */
+	blr
+
+	.p2align  5
+L(tail1):
+	TAIL(v0,0)
+
+	.p2align  5
+L(tail2):
+	TAIL(v1,16)
+
+	.p2align  5
+L(tail3):
+	TAIL(v2,32)
+
+	.p2align  5
+L(tail4):
+	TAIL(v3,48)
+
+	.p2align  5
+L(tail5):
+	TAIL(v4,64)
+
+	.p2align  5
+L(tail6):
+	TAIL(v5,80)
+
+	.p2align  5
+L(tail7):
+	TAIL(v6,96)
+
+	.p2align  5
+L(tail8):
+	TAIL(v7,112)
+
+	.p2align  5
+L(tail9):
+	TAIL(v8,128)
+
+	.p2align  5
+L(tail10):
+	TAIL(v9,144)
+
+	.p2align  5
+L(tail11):
+	TAIL(v10,160)
+
+END (STRLEN)
+
+#ifdef DEFINE_STRLEN_HIDDEN_DEF
+weak_alias (__strlen, strlen)
+libc_hidden_builtin_def (strlen)
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index a9e13e05e90601cd..61652b65dd223018 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -33,7 +33,8 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
 
 ifneq (,$(filter %le,$(config-machine)))
 sysdep_routines += strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
-		   rawmemchr-power9 strlen-power9 strncpy-power9 stpncpy-power9
+		   rawmemchr-power9 strlen-power9 strncpy-power9 stpncpy-power9 \
+		   strlen-power10
 endif
 CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
 CFLAGS-strncase_l-power7.c += -mcpu=power7 -funroll-loops
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index b30bc53930fc0e36..46d5956adda72b86 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -112,6 +112,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/powerpc/powerpc64/multiarch/strlen.c.  */
   IFUNC_IMPL (i, name, strlen,
 #ifdef __LITTLE_ENDIAN__
+	      IFUNC_IMPL_ADD (array, i, strlen, hwcap2 & PPC_FEATURE2_ARCH_3_1,
+			      __strlen_power10)
 	      IFUNC_IMPL_ADD (array, i, strlen, hwcap2 & PPC_FEATURE2_ARCH_3_00,
 			      __strlen_power9)
 #endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strlen-power10.S b/sysdeps/powerpc/powerpc64/multiarch/strlen-power10.S
new file mode 100644
index 0000000000000000..6a774fad58c77179
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strlen-power10.S
@@ -0,0 +1,2 @@
+#define STRLEN __strlen_power10
+#include <sysdeps/powerpc/powerpc64/le/power10/strlen.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strlen.c b/sysdeps/powerpc/powerpc64/multiarch/strlen.c
index b7f0fbb13fb97783..11bdb96de2d2aa66 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strlen.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strlen.c
@@ -31,9 +31,12 @@ extern __typeof (__redirect_strlen) __strlen_ppc attribute_hidden;
 extern __typeof (__redirect_strlen) __strlen_power7 attribute_hidden;
 extern __typeof (__redirect_strlen) __strlen_power8 attribute_hidden;
 extern __typeof (__redirect_strlen) __strlen_power9 attribute_hidden;
+extern __typeof (__redirect_strlen) __strlen_power10 attribute_hidden;
 
 libc_ifunc (__libc_strlen,
 # ifdef __LITTLE_ENDIAN__
+	(hwcap2 & PPC_FEATURE2_ARCH_3_1)
+	? __strlen_power10 :
 	  (hwcap2 & PPC_FEATURE2_ARCH_3_00)
 	  ? __strlen_power9 :
 # endif