From f97e72cd3b822c04bfdda110dcf252b19afd2bcd Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Wed, 2 Mar 2022 14:29:44 -0800
Subject: [PATCH] x86: Optimize strlen-avx2.S

No bug. This commit optimizes strlen-avx2.S. The optimizations are
mostly small things, but they add up to roughly a 10-30% performance
improvement for strlen. The results for strnlen are a bit more
ambiguous. test-strlen, test-strnlen, test-wcslen, and test-wcsnlen
are all passing.
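
For reviewers unfamiliar with the structure of the rewritten assembly, the
core of the AVX2 kernel is unchanged: compare a 32-byte vector against zero,
turn the per-byte results into a bitmask with vpmovmskb, and use tzcnt to
locate the first NUL. The C sketch below is illustrative only and not part of
the patch; the function name is made up and the page-cross/strnlen handling is
deliberately omitted:

    #include <immintrin.h>
    #include <stddef.h>

    /* Sketch of the vector kernel in strlen-avx2.S: compare 32 bytes against
       zero, movemask the result, tzcnt the first set bit.  The real code also
       handles page crossing, alignment, and the strnlen length bound; this
       sketch assumes the unaligned loads never touch an unmapped page.  */
    static size_t
    strlen_avx2_sketch (const char *s)
    {
      const __m256i zero = _mm256_setzero_si256 ();
      for (const char *p = s;; p += 32)
        {
          __m256i v = _mm256_loadu_si256 ((const __m256i *) p);
          /* 0xff in every byte position that equals 0.  */
          __m256i eq = _mm256_cmpeq_epi8 (v, zero);
          unsigned int mask = (unsigned int) _mm256_movemask_epi8 (eq);
          if (mask != 0)
            return (size_t) (p - s) + (size_t) __builtin_ctz (mask);
        }
    }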
Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
(cherry picked from commit aaa23c35071537e2dcf5807e956802ed215210aa)
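
A note on the ifunc changes below: the AVX2 variants now also require BMI2
because the rewritten page-cross path strips the bytes that precede the string
from the comparison mask with sarx, a BMI2 shift that takes its count from a
register rather than %cl. A hedged C sketch of that masking step (names are
illustrative, not from the patch):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch of the cross_page_boundary path: load the whole aligned 32-byte
       block the string starts in, then shift away the mask bits that belong
       to bytes before the string.  Like sarx, the shift count only uses the
       low bits of the address.  */
    static unsigned int
    cross_page_mask_sketch (const char *s)
    {
      uintptr_t addr = (uintptr_t) s;
      const __m256i *block = (const __m256i *) (addr & ~(uintptr_t) 31);
      __m256i eq = _mm256_cmpeq_epi8 (_mm256_load_si256 (block),
                                      _mm256_setzero_si256 ());
      unsigned int mask = (unsigned int) _mm256_movemask_epi8 (eq);
      return mask >> (addr & 31);   /* bits for s[0], s[1], ...  */
    }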
---
 sysdeps/x86_64/multiarch/ifunc-impl-list.c |  16 +-
 sysdeps/x86_64/multiarch/strlen-avx2.S     | 532 +++++++++++++--------
 2 files changed, 334 insertions(+), 214 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index cbfc1a5d..f1a6460a 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -285,10 +285,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strlen.c.  */
   IFUNC_IMPL (i, name, strlen,
 	      IFUNC_IMPL_ADD (array, i, strlen,
-			      CPU_FEATURE_USABLE (AVX2),
+			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)),
 			      __strlen_avx2)
 	      IFUNC_IMPL_ADD (array, i, strlen,
 			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __strlen_avx2_rtm)
 	      IFUNC_IMPL_ADD (array, i, strlen,
@@ -301,10 +303,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strnlen.c.  */
   IFUNC_IMPL (i, name, strnlen,
 	      IFUNC_IMPL_ADD (array, i, strnlen,
-			      CPU_FEATURE_USABLE (AVX2),
+			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)),
 			      __strnlen_avx2)
 	      IFUNC_IMPL_ADD (array, i, strnlen,
 			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __strnlen_avx2_rtm)
 	      IFUNC_IMPL_ADD (array, i, strnlen,
@@ -640,10 +644,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/wcslen.c.  */
   IFUNC_IMPL (i, name, wcslen,
 	      IFUNC_IMPL_ADD (array, i, wcslen,
-			      CPU_FEATURE_USABLE (AVX2),
+			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)),
 			      __wcslen_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcslen,
 			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __wcslen_avx2_rtm)
 	      IFUNC_IMPL_ADD (array, i, wcslen,
@@ -656,10 +662,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/wcsnlen.c.  */
   IFUNC_IMPL (i, name, wcsnlen,
 	      IFUNC_IMPL_ADD (array, i, wcsnlen,
-			      CPU_FEATURE_USABLE (AVX2),
+			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)),
 			      __wcsnlen_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcsnlen,
 			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (BMI2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __wcsnlen_avx2_rtm)
 	      IFUNC_IMPL_ADD (array, i, wcsnlen,
diff --git a/sysdeps/x86_64/multiarch/strlen-avx2.S b/sysdeps/x86_64/multiarch/strlen-avx2.S
index 82826e10..be8a5db5 100644
--- a/sysdeps/x86_64/multiarch/strlen-avx2.S
+++ b/sysdeps/x86_64/multiarch/strlen-avx2.S
@@ -27,9 +27,11 @@
 # ifdef USE_AS_WCSLEN
 #  define VPCMPEQ	vpcmpeqd
 #  define VPMINU	vpminud
+#  define CHAR_SIZE	4
 # else
 #  define VPCMPEQ	vpcmpeqb
 #  define VPMINU	vpminub
+#  define CHAR_SIZE	1
 # endif
 
 # ifndef VZEROUPPER
@@ -41,349 +43,459 @@
 # endif
 
 # define VEC_SIZE 32
+# define PAGE_SIZE 4096
 
 	.section SECTION(.text),"ax",@progbits
 ENTRY (STRLEN)
 # ifdef USE_AS_STRNLEN
-	/* Check for zero length.  */
+	/* Check zero length.  */
 	test	%RSI_LP, %RSI_LP
 	jz	L(zero)
+	/* Store max len in R8_LP before adjusting if using WCSLEN.  */
+	mov	%RSI_LP, %R8_LP
 #  ifdef USE_AS_WCSLEN
 	shl	$2, %RSI_LP
 #  elif defined __ILP32__
 	/* Clear the upper 32 bits.  */
 	movl	%esi, %esi
 #  endif
-	mov	%RSI_LP, %R8_LP
 # endif
-	movl	%edi, %ecx
+	movl	%edi, %eax
 	movq	%rdi, %rdx
 	vpxor	%xmm0, %xmm0, %xmm0
-
+	/* Clear high bits from edi. Only keeping bits relevant to page
+	   cross check.  */
+	andl	$(PAGE_SIZE - 1), %eax
 	/* Check if we may cross page boundary with one vector load.  */
-	andl	$(2 * VEC_SIZE - 1), %ecx
-	cmpl	$VEC_SIZE, %ecx
-	ja	L(cros_page_boundary)
+	cmpl	$(PAGE_SIZE - VEC_SIZE), %eax
+	ja	L(cross_page_boundary)
 
 	/* Check the first VEC_SIZE bytes.  */
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-
+	VPCMPEQ	(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
 # ifdef USE_AS_STRNLEN
-	jnz	L(first_vec_x0_check)
-	/* Adjust length and check the end of data.  */
-	subq	$VEC_SIZE, %rsi
-	jbe	L(max)
-# else
-	jnz	L(first_vec_x0)
+	/* If length < VEC_SIZE handle special.  */
+	cmpq	$VEC_SIZE, %rsi
+	jbe	L(first_vec_x0)
 # endif
-
-	/* Align data for aligned loads in the loop.  */
-	addq	$VEC_SIZE, %rdi
-	andl	$(VEC_SIZE - 1), %ecx
-	andq	$-VEC_SIZE, %rdi
+	/* If empty continue to aligned_more. Otherwise return bit
+	   position of first match.  */
+	testl	%eax, %eax
+	jz	L(aligned_more)
+	tzcntl	%eax, %eax
+# ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+# endif
+	VZEROUPPER_RETURN
 
 # ifdef USE_AS_STRNLEN
-	/* Adjust length.  */
-	addq	%rcx, %rsi
+L(zero):
+	xorl	%eax, %eax
+	ret
 
-	subq	$(VEC_SIZE * 4), %rsi
-	jbe	L(last_4x_vec_or_less)
+	.p2align 4
+L(first_vec_x0):
+	/* Set bit for max len so that tzcnt will return min of max len
+	   and position of first match.  */
+	btsq	%rsi, %rax
+	tzcntl	%eax, %eax
+#  ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+#  endif
+	VZEROUPPER_RETURN
 # endif
-	jmp	L(more_4x_vec)
 
 	.p2align 4
-L(cros_page_boundary):
-	andl	$(VEC_SIZE - 1), %ecx
-	andq	$-VEC_SIZE, %rdi
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	/* Remove the leading bytes.  */
-	sarl	%cl, %eax
-	testl	%eax, %eax
-	jz	L(aligned_more)
+L(first_vec_x1):
 	tzcntl	%eax, %eax
+	/* Safe to use 32 bit instructions as these are only called for
+	   size = [1, 159].  */
 # ifdef USE_AS_STRNLEN
-	/* Check the end of data.  */
-	cmpq	%rax, %rsi
-	jbe	L(max)
+	/* Use ecx which was computed earlier to compute correct value.
+	 */
+	subl	$(VEC_SIZE * 4 + 1), %ecx
+	addl	%ecx, %eax
+# else
+	subl	%edx, %edi
+	incl	%edi
+	addl	%edi, %eax
 # endif
-	addq	%rdi, %rax
-	addq	%rcx, %rax
-	subq	%rdx, %rax
 # ifdef USE_AS_WCSLEN
-	shrq	$2, %rax
+	shrl	$2, %eax
 # endif
-L(return_vzeroupper):
-	ZERO_UPPER_VEC_REGISTERS_RETURN
+	VZEROUPPER_RETURN
 
 	.p2align 4
-L(aligned_more):
+L(first_vec_x2):
+	tzcntl	%eax, %eax
+	/* Safe to use 32 bit instructions as these are only called for
+	   size = [1, 159].  */
 # ifdef USE_AS_STRNLEN
-        /* "rcx" is less than VEC_SIZE.  Calculate "rdx + rcx - VEC_SIZE"
-	    with "rdx - (VEC_SIZE - rcx)" instead of "(rdx + rcx) - VEC_SIZE"
-	    to void possible addition overflow.  */
-	negq	%rcx
-	addq	$VEC_SIZE, %rcx
-
-	/* Check the end of data.  */
-	subq	%rcx, %rsi
-	jbe	L(max)
+	/* Use ecx which was computed earlier to compute correct value.
+	 */
+	subl	$(VEC_SIZE * 3 + 1), %ecx
+	addl	%ecx, %eax
+# else
+	subl	%edx, %edi
+	addl	$(VEC_SIZE + 1), %edi
+	addl	%edi, %eax
 # endif
+# ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+# endif
+	VZEROUPPER_RETURN
 
-	addq	$VEC_SIZE, %rdi
+	.p2align 4
+L(first_vec_x3):
+	tzcntl	%eax, %eax
+	/* Safe to use 32 bit instructions as these are only called for
+	   size = [1, 159].  */
+# ifdef USE_AS_STRNLEN
+	/* Use ecx which was computed earlier to compute correct value.
+	 */
+	subl	$(VEC_SIZE * 2 + 1), %ecx
+	addl	%ecx, %eax
+# else
+	subl	%edx, %edi
+	addl	$(VEC_SIZE * 2 + 1), %edi
+	addl	%edi, %eax
+# endif
+# ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+# endif
+	VZEROUPPER_RETURN
 
+	.p2align 4
+L(first_vec_x4):
+	tzcntl	%eax, %eax
+	/* Safe to use 32 bit instructions as these are only called for
+	   size = [1, 159].  */
 # ifdef USE_AS_STRNLEN
-	subq	$(VEC_SIZE * 4), %rsi
-	jbe	L(last_4x_vec_or_less)
+	/* Use ecx which was computed earlier to compute correct value.
+	 */
+	subl	$(VEC_SIZE + 1), %ecx
+	addl	%ecx, %eax
+# else
+	subl	%edx, %edi
+	addl	$(VEC_SIZE * 3 + 1), %edi
+	addl	%edi, %eax
 # endif
+# ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+# endif
+	VZEROUPPER_RETURN
 
-L(more_4x_vec):
+	.p2align 5
+L(aligned_more):
+	/* Align data to VEC_SIZE - 1. This is the same number of
+	   instructions as using andq with -VEC_SIZE but saves 4 bytes of
+	   code on the x4 check.  */
+	orq	$(VEC_SIZE - 1), %rdi
+L(cross_page_continue):
 	/* Check the first 4 * VEC_SIZE.  Only one VEC_SIZE at a time
 	   since data is only aligned to VEC_SIZE.  */
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x0)
-
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+# ifdef USE_AS_STRNLEN
+	/* + 1 because rdi is aligned to VEC_SIZE - 1. + CHAR_SIZE because
+	   it simplifies the logic in last_4x_vec_or_less.  */
+	leaq	(VEC_SIZE * 4 + CHAR_SIZE + 1)(%rdi), %rcx
+	subq	%rdx, %rcx
+# endif
+	/* Load first VEC regardless.  */
+	VPCMPEQ	1(%rdi), %ymm0, %ymm1
+# ifdef USE_AS_STRNLEN
+	/* Adjust length. If near end handle specially.  */
+	subq	%rcx, %rsi
+	jb	L(last_4x_vec_or_less)
+# endif
+	vpmovmskb	%ymm1, %eax
 	testl	%eax, %eax
 	jnz	L(first_vec_x1)
 
-	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
 	testl	%eax, %eax
 	jnz	L(first_vec_x2)
 
-	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+	VPCMPEQ	(VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
 	testl	%eax, %eax
 	jnz	L(first_vec_x3)
 
-	addq	$(VEC_SIZE * 4), %rdi
-
-# ifdef USE_AS_STRNLEN
-	subq	$(VEC_SIZE * 4), %rsi
-	jbe	L(last_4x_vec_or_less)
-# endif
-
-	/* Align data to 4 * VEC_SIZE.  */
-	movq	%rdi, %rcx
-	andl	$(4 * VEC_SIZE - 1), %ecx
-	andq	$-(4 * VEC_SIZE), %rdi
+	VPCMPEQ	(VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
+	testl	%eax, %eax
+	jnz	L(first_vec_x4)
 
+	/* Align data to VEC_SIZE * 4 - 1.  */
 # ifdef USE_AS_STRNLEN
-	/* Adjust length.  */
+	/* Before adjusting length check if at last VEC_SIZE * 4.  */
+	cmpq	$(VEC_SIZE * 4 - 1), %rsi
+	jbe	L(last_4x_vec_or_less_load)
+	incq	%rdi
+	movl	%edi, %ecx
+	orq	$(VEC_SIZE * 4 - 1), %rdi
+	andl	$(VEC_SIZE * 4 - 1), %ecx
+	/* Readjust length.  */
 	addq	%rcx, %rsi
+# else
+	incq	%rdi
+	orq	$(VEC_SIZE * 4 - 1), %rdi
 # endif
-
+	/* Compare 4 * VEC at a time forward.  */
 	.p2align 4
 L(loop_4x_vec):
-	/* Compare 4 * VEC at a time forward.  */
-	vmovdqa (%rdi), %ymm1
-	vmovdqa	VEC_SIZE(%rdi), %ymm2
-	vmovdqa	(VEC_SIZE * 2)(%rdi), %ymm3
-	vmovdqa	(VEC_SIZE * 3)(%rdi), %ymm4
-	VPMINU	%ymm1, %ymm2, %ymm5
-	VPMINU	%ymm3, %ymm4, %ymm6
-	VPMINU	%ymm5, %ymm6, %ymm5
-
-	VPCMPEQ	%ymm5, %ymm0, %ymm5
-	vpmovmskb %ymm5, %eax
-	testl	%eax, %eax
-	jnz	L(4x_vec_end)
-
-	addq	$(VEC_SIZE * 4), %rdi
-
-# ifndef USE_AS_STRNLEN
-	jmp	L(loop_4x_vec)
-# else
+# ifdef USE_AS_STRNLEN
+	/* Break if at end of length.  */
 	subq	$(VEC_SIZE * 4), %rsi
-	ja	L(loop_4x_vec)
-
-L(last_4x_vec_or_less):
-	/* Less than 4 * VEC and aligned to VEC_SIZE.  */
-	addl	$(VEC_SIZE * 2), %esi
-	jle	L(last_2x_vec)
+	jb	L(last_4x_vec_or_less_cmpeq)
+# endif
+	/* Save some code size by microfusing VPMINU with the load. Since
+	   the matches in ymm2/ymm4 can only be returned if there were no
+	   matches in ymm1/ymm3 respectively, there is no issue with overlap.
+	 */
+	vmovdqa	1(%rdi), %ymm1
+	VPMINU	(VEC_SIZE + 1)(%rdi), %ymm1, %ymm2
+	vmovdqa	(VEC_SIZE * 2 + 1)(%rdi), %ymm3
+	VPMINU	(VEC_SIZE * 3 + 1)(%rdi), %ymm3, %ymm4
+
+	VPMINU	%ymm2, %ymm4, %ymm5
+	VPCMPEQ	%ymm5, %ymm0, %ymm5
+	vpmovmskb	%ymm5, %ecx
 
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x0)
+	subq	$-(VEC_SIZE * 4), %rdi
+	testl	%ecx, %ecx
+	jz	L(loop_4x_vec)
 
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x1)
 
-	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+	VPCMPEQ	%ymm1, %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
+	subq	%rdx, %rdi
 	testl	%eax, %eax
+	jnz	L(last_vec_return_x0)
 
-	jnz	L(first_vec_x2_check)
-	subl	$VEC_SIZE, %esi
-	jle	L(max)
-
-	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+	VPCMPEQ	%ymm2, %ymm0, %ymm2
+	vpmovmskb	%ymm2, %eax
 	testl	%eax, %eax
-
-	jnz	L(first_vec_x3_check)
-	movq	%r8, %rax
-#  ifdef USE_AS_WCSLEN
+	jnz	L(last_vec_return_x1)
+
+	/* Combine last 2 VEC.  */
+	VPCMPEQ	%ymm3, %ymm0, %ymm3
+	vpmovmskb	%ymm3, %eax
+	/* rcx has combined result from all 4 VEC. It will only be used if
+	   the first 3 other VEC all did not contain a match.  */
+	salq	$32, %rcx
+	orq	%rcx, %rax
+	tzcntq	%rax, %rax
+	subq	$(VEC_SIZE * 2 - 1), %rdi
+	addq	%rdi, %rax
+# ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
-#  endif
+# endif
 	VZEROUPPER_RETURN
 
+
+# ifdef USE_AS_STRNLEN
 	.p2align 4
-L(last_2x_vec):
-	addl	$(VEC_SIZE * 2), %esi
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
+L(last_4x_vec_or_less_load):
+	/* Depending on entry adjust rdi / prepare first VEC in ymm1.  */
+	subq	$-(VEC_SIZE * 4), %rdi
+L(last_4x_vec_or_less_cmpeq):
+	VPCMPEQ	1(%rdi), %ymm0, %ymm1
+L(last_4x_vec_or_less):
 
-	jnz	L(first_vec_x0_check)
-	subl	$VEC_SIZE, %esi
-	jle	L(max)
+	vpmovmskb	%ymm1, %eax
+	/* If remaining length > VEC_SIZE * 2. This works if esi is off by
+	   VEC_SIZE * 4.  */
+	testl	$(VEC_SIZE * 2), %esi
+	jnz	L(last_4x_vec)
 
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+	/* length may have been negative or positive by an offset of
+	   VEC_SIZE * 4 depending on where this was called from. This fixes
+	   that.  */
+	andl	$(VEC_SIZE * 4 - 1), %esi
 	testl	%eax, %eax
-	jnz	L(first_vec_x1_check)
-	movq	%r8, %rax
-#  ifdef USE_AS_WCSLEN
-	shrq	$2, %rax
-#  endif
-	VZEROUPPER_RETURN
+	jnz	L(last_vec_x1_check)
 
-	.p2align 4
-L(first_vec_x0_check):
+	subl	$VEC_SIZE, %esi
+	jb	L(max)
+
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
 	tzcntl	%eax, %eax
 	/* Check the end of data.  */
-	cmpq	%rax, %rsi
-	jbe	L(max)
+	cmpl	%eax, %esi
+	jb	L(max)
+	subq	%rdx, %rdi
+	addl	$(VEC_SIZE + 1), %eax
 	addq	%rdi, %rax
-	subq	%rdx, %rax
 #  ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
 #  endif
 	VZEROUPPER_RETURN
+# endif
 
 	.p2align 4
-L(first_vec_x1_check):
+L(last_vec_return_x0):
 	tzcntl	%eax, %eax
-	/* Check the end of data.  */
-	cmpq	%rax, %rsi
-	jbe	L(max)
-	addq	$VEC_SIZE, %rax
+	subq	$(VEC_SIZE * 4 - 1), %rdi
 	addq	%rdi, %rax
-	subq	%rdx, %rax
-#  ifdef USE_AS_WCSLEN
+# ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
-#  endif
+# endif
 	VZEROUPPER_RETURN
 
 	.p2align 4
-L(first_vec_x2_check):
+L(last_vec_return_x1):
 	tzcntl	%eax, %eax
-	/* Check the end of data.  */
-	cmpq	%rax, %rsi
-	jbe	L(max)
-	addq	$(VEC_SIZE * 2), %rax
+	subq	$(VEC_SIZE * 3 - 1), %rdi
 	addq	%rdi, %rax
-	subq	%rdx, %rax
-#  ifdef USE_AS_WCSLEN
+# ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
-#  endif
+# endif
 	VZEROUPPER_RETURN
 
+# ifdef USE_AS_STRNLEN
 	.p2align 4
-L(first_vec_x3_check):
+L(last_vec_x1_check):
+
 	tzcntl	%eax, %eax
 	/* Check the end of data.  */
-	cmpq	%rax, %rsi
-	jbe	L(max)
-	addq	$(VEC_SIZE * 3), %rax
+	cmpl	%eax, %esi
+	jb	L(max)
+	subq	%rdx, %rdi
+	incl	%eax
 	addq	%rdi, %rax
-	subq	%rdx, %rax
 #  ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
 #  endif
 	VZEROUPPER_RETURN
 
-	.p2align 4
 L(max):
 	movq	%r8, %rax
+	VZEROUPPER_RETURN
+
+	.p2align 4
+L(last_4x_vec):
+	/* Test first 2x VEC normally.  */
+	testl	%eax, %eax
+	jnz	L(last_vec_x1)
+
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
+	testl	%eax, %eax
+	jnz	L(last_vec_x2)
+
+	/* Normalize length.  */
+	andl	$(VEC_SIZE * 4 - 1), %esi
+	VPCMPEQ	(VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
+	testl	%eax, %eax
+	jnz	L(last_vec_x3)
+
+	subl	$(VEC_SIZE * 3), %esi
+	jb	L(max)
+
+	VPCMPEQ	(VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
+	tzcntl	%eax, %eax
+	/* Check the end of data.  */
+	cmpl	%eax, %esi
+	jb	L(max)
+	subq	%rdx, %rdi
+	addl	$(VEC_SIZE * 3 + 1), %eax
+	addq	%rdi, %rax
 #  ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
 #  endif
 	VZEROUPPER_RETURN
 
-	.p2align 4
-L(zero):
-	xorl	%eax, %eax
-	ret
-# endif
 
 	.p2align 4
-L(first_vec_x0):
+L(last_vec_x1):
+	/* essentially duplicates of first_vec_x1 but use 64 bit
+	   instructions.  */
 	tzcntl	%eax, %eax
+	subq	%rdx, %rdi
+	incl	%eax
 	addq	%rdi, %rax
-	subq	%rdx, %rax
-# ifdef USE_AS_WCSLEN
+#  ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
-# endif
+#  endif
 	VZEROUPPER_RETURN
 
 	.p2align 4
-L(first_vec_x1):
+L(last_vec_x2):
+	/* essentially duplicates of first_vec_x1 but use 64 bit
+	   instructions.  */
 	tzcntl	%eax, %eax
-	addq	$VEC_SIZE, %rax
+	subq	%rdx, %rdi
+	addl	$(VEC_SIZE + 1), %eax
 	addq	%rdi, %rax
-	subq	%rdx, %rax
-# ifdef USE_AS_WCSLEN
+#  ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
-# endif
+#  endif
 	VZEROUPPER_RETURN
 
 	.p2align 4
-L(first_vec_x2):
+L(last_vec_x3):
 	tzcntl	%eax, %eax
-	addq	$(VEC_SIZE * 2), %rax
+	subl	$(VEC_SIZE * 2), %esi
+	/* Check the end of data.  */
+	cmpl	%eax, %esi
+	jb	L(max_end)
+	subq	%rdx, %rdi
+	addl	$(VEC_SIZE * 2 + 1), %eax
 	addq	%rdi, %rax
-	subq	%rdx, %rax
-# ifdef USE_AS_WCSLEN
+#  ifdef USE_AS_WCSLEN
 	shrq	$2, %rax
-# endif
+#  endif
+	VZEROUPPER_RETURN
+L(max_end):
+	movq	%r8, %rax
 	VZEROUPPER_RETURN
+# endif
 
+	/* Cold case for crossing page with first load.	 */
 	.p2align 4
-L(4x_vec_end):
-	VPCMPEQ	%ymm1, %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x0)
-	VPCMPEQ %ymm2, %ymm0, %ymm2
-	vpmovmskb %ymm2, %eax
+L(cross_page_boundary):
+	/* Align data to VEC_SIZE - 1.  */
+	orq	$(VEC_SIZE - 1), %rdi
+	VPCMPEQ	-(VEC_SIZE - 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb	%ymm1, %eax
+	/* Remove the leading bytes. sarxl only uses bits [5:0] of COUNT
+	   so no need to manually mod rdx.  */
+	sarxl	%edx, %eax, %eax
+# ifdef USE_AS_STRNLEN
 	testl	%eax, %eax
-	jnz	L(first_vec_x1)
-	VPCMPEQ %ymm3, %ymm0, %ymm3
-	vpmovmskb %ymm3, %eax
+	jnz	L(cross_page_less_vec)
+	leaq	1(%rdi), %rcx
+	subq	%rdx, %rcx
+	/* Check length.  */
+	cmpq	%rsi, %rcx
+	jb	L(cross_page_continue)
+	movq	%r8, %rax
+# else
 	testl	%eax, %eax
-	jnz	L(first_vec_x2)
-	VPCMPEQ %ymm4, %ymm0, %ymm4
-	vpmovmskb %ymm4, %eax
-L(first_vec_x3):
+	jz	L(cross_page_continue)
 	tzcntl	%eax, %eax
-	addq	$(VEC_SIZE * 3), %rax
-	addq	%rdi, %rax
-	subq	%rdx, %rax
-# ifdef USE_AS_WCSLEN
-	shrq	$2, %rax
+#  ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+#  endif
 # endif
+L(return_vzeroupper):
+	ZERO_UPPER_VEC_REGISTERS_RETURN
+
+# ifdef USE_AS_STRNLEN
+	.p2align 4
+L(cross_page_less_vec):
+	tzcntl	%eax, %eax
+	cmpq	%rax, %rsi
+	cmovb	%esi, %eax
+#  ifdef USE_AS_WCSLEN
+	shrl	$2, %eax
+#  endif
 	VZEROUPPER_RETURN
+# endif
 
 END (STRLEN)
 #endif
-- 
GitLab