From acd575144cc6340edfbf0a0e0580e38344ab623a Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n@gmail.com>
Date: Mon, 3 May 2021 03:01:58 -0400
Subject: [PATCH] x86: Optimize memchr-avx2.S

No bug. This commit optimizes memchr-avx2.S. The optimizations include
replacing some branches with cmovcc, avoiding some branches entirely
in the less_4x_vec case, making the page cross logic less strict, and
saving a few instructions in the loop return path. test-memchr,
test-rawmemchr, and test-wmemchr are all passing.
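
As a rough illustration of the entry-path changes (a sketch, not part
of the patch itself), the C below mirrors the new logic under two
assumptions: vec_cmp_mask is a hypothetical stand-in for the
VPCMPEQ/vpmovmskb pair, and __builtin_ctz (GCC/Clang) stands in for
tzcnt. The slow path is now taken only when the first 32-byte load
would actually cross a page, and the in-bounds check on a first-vector
match is a select that compilers typically lower to cmovcc rather than
a branch.

    #include <stddef.h>
    #include <stdint.h>

    #define VEC_SIZE  32
    #define PAGE_SIZE 4096

    /* Hypothetical stand-in for VPCMPEQ + vpmovmskb: bit i is set
       iff p[i] == c within one 32-byte vector.  */
    static uint32_t
    vec_cmp_mask (const unsigned char *p, unsigned char c)
    {
      uint32_t m = 0;
      for (int i = 0; i < VEC_SIZE; i++)
        m |= (uint32_t) (p[i] == c) << i;
      return m;
    }

    /* Entry path only: find c in the first vector of s, or return
       NULL.  The cross-page and aligned-loop paths are omitted.  */
    static const void *
    first_vec (const unsigned char *s, unsigned char c, size_t len)
    {
      /* Old code took the slow path whenever s % (2 * VEC_SIZE) was
         above VEC_SIZE; the new check only does so when the 32-byte
         load would really cross a page boundary.  */
      if (((uintptr_t) s & (PAGE_SIZE - 1)) > PAGE_SIZE - VEC_SIZE)
        return NULL; /* L(cross_page_boundary), omitted.  */

      uint32_t mask = vec_cmp_mask (s, c);
      if (mask == 0)
        return NULL; /* L(aligned_more), omitted.  */

      /* As in L(first_vec_x0): compute the match pointer, then select
         NULL without a branch when the match is at or past len.  */
      unsigned int idx = (unsigned int) __builtin_ctz (mask);
      return idx < len ? s + idx : NULL;
    }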

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
Reviewed-by: H.J. Lu <hjl.tools@gmail.com>
(cherry picked from commit acfd088a1963ba51cd83c78f95c0ab25ead79e04)
---
 sysdeps/x86_64/multiarch/memchr-avx2.S | 425 ++++++++++++++-----------
 1 file changed, 247 insertions(+), 178 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/memchr-avx2.S b/sysdeps/x86_64/multiarch/memchr-avx2.S
index cf893e77..b377f22e 100644
--- a/sysdeps/x86_64/multiarch/memchr-avx2.S
+++ b/sysdeps/x86_64/multiarch/memchr-avx2.S
@@ -26,8 +26,22 @@
 
 # ifdef USE_AS_WMEMCHR
 #  define VPCMPEQ	vpcmpeqd
+#  define VPBROADCAST	vpbroadcastd
+#  define CHAR_SIZE	4
 # else
 #  define VPCMPEQ	vpcmpeqb
+#  define VPBROADCAST	vpbroadcastb
+#  define CHAR_SIZE	1
+# endif
+
+# ifdef USE_AS_RAWMEMCHR
+#  define ERAW_PTR_REG	ecx
+#  define RRAW_PTR_REG	rcx
+#  define ALGN_PTR_REG	rdi
+# else
+#  define ERAW_PTR_REG	edi
+#  define RRAW_PTR_REG	rdi
+#  define ALGN_PTR_REG	rcx
 # endif
 
 # ifndef VZEROUPPER
@@ -39,6 +53,7 @@
 # endif
 
 # define VEC_SIZE 32
+# define PAGE_SIZE 4096
 
 	.section SECTION(.text),"ax",@progbits
 ENTRY (MEMCHR)
@@ -47,295 +62,349 @@ ENTRY (MEMCHR)
 	test	%RDX_LP, %RDX_LP
 	jz	L(null)
 # endif
-	movl	%edi, %ecx
-	/* Broadcast CHAR to YMM0.  */
-	vmovd	%esi, %xmm0
 # ifdef USE_AS_WMEMCHR
 	shl	$2, %RDX_LP
-	vpbroadcastd %xmm0, %ymm0
 # else
 #  ifdef __ILP32__
 	/* Clear the upper 32 bits.  */
 	movl	%edx, %edx
 #  endif
-	vpbroadcastb %xmm0, %ymm0
 # endif
+	/* Broadcast CHAR to YMM0.  */
+	vmovd	%esi, %xmm0
+	VPBROADCAST %xmm0, %ymm0
 	/* Check if we may cross page boundary with one vector load.  */
-	andl	$(2 * VEC_SIZE - 1), %ecx
-	cmpl	$VEC_SIZE, %ecx
-	ja	L(cros_page_boundary)
+	movl	%edi, %eax
+	andl	$(PAGE_SIZE - 1), %eax
+	cmpl	$(PAGE_SIZE - VEC_SIZE), %eax
+	ja	L(cross_page_boundary)
 
 	/* Check the first VEC_SIZE bytes.  */
-	VPCMPEQ (%rdi), %ymm0, %ymm1
+	VPCMPEQ	(%rdi), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-
 # ifndef USE_AS_RAWMEMCHR
-	jnz	L(first_vec_x0_check)
-	/* Adjust length and check the end of data.  */
-	subq	$VEC_SIZE, %rdx
-	jbe	L(zero)
-# else
-	jnz	L(first_vec_x0)
+	/* If length < VEC_SIZE handle specially.  */
+	cmpq	$VEC_SIZE, %rdx
+	jbe	L(first_vec_x0)
 # endif
-
-	/* Align data for aligned loads in the loop.  */
-	addq	$VEC_SIZE, %rdi
-	andl	$(VEC_SIZE - 1), %ecx
-	andq	$-VEC_SIZE, %rdi
+	testl	%eax, %eax
+	jz	L(aligned_more)
+	tzcntl	%eax, %eax
+	addq	%rdi, %rax
+	VZEROUPPER_RETURN
 
 # ifndef USE_AS_RAWMEMCHR
-	/* Adjust length.  */
-	addq	%rcx, %rdx
+	.p2align 5
+L(first_vec_x0):
+	/* Check if first match was before length.  */
+	tzcntl	%eax, %eax
+	xorl	%ecx, %ecx
+	cmpl	%eax, %edx
+	leaq	(%rdi, %rax), %rax
+	cmovle	%rcx, %rax
+	VZEROUPPER_RETURN
 
-	subq	$(VEC_SIZE * 4), %rdx
-	jbe	L(last_4x_vec_or_less)
+L(null):
+	xorl	%eax, %eax
+	ret
 # endif
-	jmp	L(more_4x_vec)
-
 	.p2align 4
-L(cros_page_boundary):
-	andl	$(VEC_SIZE - 1), %ecx
-	andq	$-VEC_SIZE, %rdi
-	VPCMPEQ (%rdi), %ymm0, %ymm1
+L(cross_page_boundary):
+	/* Save pointer before aligning as its original value is necessary
+	   for computing the return address if a byte is found or adjusting
+	   length if it is not and this is memchr.  */
+	movq	%rdi, %rcx
+	/* Align data to VEC_SIZE - 1. ALGN_PTR_REG is rcx for memchr and
+	   rdi for rawmemchr.  */
+	orq	$(VEC_SIZE - 1), %ALGN_PTR_REG
+	VPCMPEQ	-(VEC_SIZE - 1)(%ALGN_PTR_REG), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
+# ifndef USE_AS_RAWMEMCHR
+	/* Calculate length until end of page (length checked for a
+	   match).  */
+	leaq	1(%ALGN_PTR_REG), %rsi
+	subq	%RRAW_PTR_REG, %rsi
+# endif
 	/* Remove the leading bytes.  */
-	sarl	%cl, %eax
-	testl	%eax, %eax
-	jz	L(aligned_more)
-	tzcntl	%eax, %eax
+	sarxl	%ERAW_PTR_REG, %eax, %eax
 # ifndef USE_AS_RAWMEMCHR
 	/* Check the end of data.  */
-	cmpq	%rax, %rdx
-	jbe	L(zero)
+	cmpq	%rsi, %rdx
+	jbe	L(first_vec_x0)
 # endif
-	addq	%rdi, %rax
-	addq	%rcx, %rax
+	testl	%eax, %eax
+	jz	L(cross_page_continue)
+	tzcntl	%eax, %eax
+	addq	%RRAW_PTR_REG, %rax
 L(return_vzeroupper):
 	ZERO_UPPER_VEC_REGISTERS_RETURN
 
 	.p2align 4
-L(aligned_more):
-# ifndef USE_AS_RAWMEMCHR
-        /* Calculate "rdx + rcx - VEC_SIZE" with "rdx - (VEC_SIZE - rcx)"
-	   instead of "(rdx + rcx) - VEC_SIZE" to void possible addition
-	   overflow.  */
-	negq	%rcx
-	addq	$VEC_SIZE, %rcx
+L(first_vec_x1):
+	tzcntl	%eax, %eax
+	incq	%rdi
+	addq	%rdi, %rax
+	VZEROUPPER_RETURN
 
-	/* Check the end of data.  */
-	subq	%rcx, %rdx
-	jbe	L(zero)
-# endif
+	.p2align 4
+L(first_vec_x2):
+	tzcntl	%eax, %eax
+	addq	$(VEC_SIZE + 1), %rdi
+	addq	%rdi, %rax
+	VZEROUPPER_RETURN
+
+	.p2align 4
+L(first_vec_x3):
+	tzcntl	%eax, %eax
+	addq	$(VEC_SIZE * 2 + 1), %rdi
+	addq	%rdi, %rax
+	VZEROUPPER_RETURN
 
-	addq	$VEC_SIZE, %rdi
 
-# ifndef USE_AS_RAWMEMCHR
-	subq	$(VEC_SIZE * 4), %rdx
-	jbe	L(last_4x_vec_or_less)
-# endif
+	.p2align 4
+L(first_vec_x4):
+	tzcntl	%eax, %eax
+	addq	$(VEC_SIZE * 3 + 1), %rdi
+	addq	%rdi, %rax
+	VZEROUPPER_RETURN
 
-L(more_4x_vec):
+	.p2align 4
+L(aligned_more):
 	/* Check the first 4 * VEC_SIZE.  Only one VEC_SIZE at a time
 	   since data is only aligned to VEC_SIZE.  */
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x0)
 
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+# ifndef USE_AS_RAWMEMCHR
+L(cross_page_continue):
+	/* Align data to VEC_SIZE - 1.  */
+	xorl	%ecx, %ecx
+	subl	%edi, %ecx
+	orq	$(VEC_SIZE - 1), %rdi
+	/* esi is for adjusting length to see if near the end.  */
+	leal	(VEC_SIZE * 4 + 1)(%rdi, %rcx), %esi
+# else
+	orq	$(VEC_SIZE - 1), %rdi
+L(cross_page_continue):
+# endif
+	/* Load first VEC regardless.  */
+	VPCMPEQ	1(%rdi), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
+# ifndef USE_AS_RAWMEMCHR
+	/* Adjust length. If near end handle specially.  */
+	subq	%rsi, %rdx
+	jbe	L(last_4x_vec_or_less)
+# endif
 	testl	%eax, %eax
 	jnz	L(first_vec_x1)
 
-	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
 	testl	%eax, %eax
 	jnz	L(first_vec_x2)
 
-	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
+	VPCMPEQ	(VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
 	testl	%eax, %eax
 	jnz	L(first_vec_x3)
 
-	addq	$(VEC_SIZE * 4), %rdi
+	VPCMPEQ	(VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb %ymm1, %eax
+	testl	%eax, %eax
+	jnz	L(first_vec_x4)
 
 # ifndef USE_AS_RAWMEMCHR
+	/* Check if at last VEC_SIZE * 4 length.  */
 	subq	$(VEC_SIZE * 4), %rdx
-	jbe	L(last_4x_vec_or_less)
-# endif
-
-	/* Align data to 4 * VEC_SIZE.  */
-	movq	%rdi, %rcx
-	andl	$(4 * VEC_SIZE - 1), %ecx
-	andq	$-(4 * VEC_SIZE), %rdi
-
-# ifndef USE_AS_RAWMEMCHR
-	/* Adjust length.  */
+	jbe	L(last_4x_vec_or_less_cmpeq)
+	/* Align data to VEC_SIZE * 4 - 1 for the loop and readjust
+	   length.  */
+	incq	%rdi
+	movl	%edi, %ecx
+	orq	$(VEC_SIZE * 4 - 1), %rdi
+	andl	$(VEC_SIZE * 4 - 1), %ecx
 	addq	%rcx, %rdx
+# else
+	/* Align data to VEC_SIZE * 4 - 1 for loop.  */
+	incq	%rdi
+	orq	$(VEC_SIZE * 4 - 1), %rdi
 # endif
 
+	/* Compare 4 * VEC at a time forward.  */
 	.p2align 4
 L(loop_4x_vec):
-	/* Compare 4 * VEC at a time forward.  */
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm2
-	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm3
-	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm4
-
+	VPCMPEQ	1(%rdi), %ymm0, %ymm1
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm2
+	VPCMPEQ	(VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm3
+	VPCMPEQ	(VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm4
 	vpor	%ymm1, %ymm2, %ymm5
 	vpor	%ymm3, %ymm4, %ymm6
 	vpor	%ymm5, %ymm6, %ymm5
 
-	vpmovmskb %ymm5, %eax
-	testl	%eax, %eax
-	jnz	L(4x_vec_end)
-
-	addq	$(VEC_SIZE * 4), %rdi
-
+	vpmovmskb %ymm5, %ecx
 # ifdef USE_AS_RAWMEMCHR
-	jmp	L(loop_4x_vec)
+	subq	$-(VEC_SIZE * 4), %rdi
+	testl	%ecx, %ecx
+	jz	L(loop_4x_vec)
 # else
-	subq	$(VEC_SIZE * 4), %rdx
-	ja	L(loop_4x_vec)
+	testl	%ecx, %ecx
+	jnz	L(loop_4x_vec_end)
 
-L(last_4x_vec_or_less):
-	/* Less than 4 * VEC and aligned to VEC_SIZE.  */
-	addl	$(VEC_SIZE * 2), %edx
-	jle	L(last_2x_vec)
+	subq	$-(VEC_SIZE * 4), %rdi
 
-	VPCMPEQ (%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x0)
+	subq	$(VEC_SIZE * 4), %rdx
+	ja	L(loop_4x_vec)
 
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+	/* Fall through into the less than 4 remaining vectors of length
+	   case.  */
+	VPCMPEQ	(VEC_SIZE * 0 + 1)(%rdi), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
+	.p2align 4
+L(last_4x_vec_or_less):
+	/* Check if first VEC contained match.  */
 	testl	%eax, %eax
-	jnz	L(first_vec_x1)
+	jnz	L(first_vec_x1_check)
 
-	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
+	/* If remaining length > VEC_SIZE * 2.  */
+	addl	$(VEC_SIZE * 2), %edx
+	jg	L(last_4x_vec)
 
-	jnz	L(first_vec_x2_check)
-	subl	$VEC_SIZE, %edx
-	jle	L(zero)
+L(last_2x_vec):
+	/* If remaining length < VEC_SIZE.  */
+	addl	$VEC_SIZE, %edx
+	jle	L(zero_end)
 
-	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
+	/* Check VEC2 and compare any match with remaining length.  */
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm1
 	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-
-	jnz	L(first_vec_x3_check)
-	xorl	%eax, %eax
+	tzcntl	%eax, %eax
+	cmpl	%eax, %edx
+	jbe	L(set_zero_end)
+	addq	$(VEC_SIZE + 1), %rdi
+	addq	%rdi, %rax
+L(zero_end):
 	VZEROUPPER_RETURN
 
 	.p2align 4
-L(last_2x_vec):
-	addl	$(VEC_SIZE * 2), %edx
-	VPCMPEQ (%rdi), %ymm0, %ymm1
+L(loop_4x_vec_end):
+# endif
+	/* rawmemchr will fall through into this if match was found in
+	   loop.  */
+
 	vpmovmskb %ymm1, %eax
 	testl	%eax, %eax
+	jnz	L(last_vec_x1_return)
 
-	jnz	L(first_vec_x0_check)
-	subl	$VEC_SIZE, %edx
-	jle	L(zero)
-
-	VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
-	vpmovmskb %ymm1, %eax
+	vpmovmskb %ymm2, %eax
 	testl	%eax, %eax
-	jnz	L(first_vec_x1_check)
-	xorl	%eax, %eax
-	VZEROUPPER_RETURN
+	jnz	L(last_vec_x2_return)
 
-	.p2align 4
-L(first_vec_x0_check):
-	tzcntl	%eax, %eax
-	/* Check the end of data.  */
-	cmpq	%rax, %rdx
-	jbe	L(zero)
+	vpmovmskb %ymm3, %eax
+	/* Combine VEC3 matches (eax) with VEC4 matches (ecx).  */
+	salq	$32, %rcx
+	orq	%rcx, %rax
+	tzcntq	%rax, %rax
+# ifdef USE_AS_RAWMEMCHR
+	subq	$(VEC_SIZE * 2 - 1), %rdi
+# else
+	subq	$-(VEC_SIZE * 2 + 1), %rdi
+# endif
 	addq	%rdi, %rax
 	VZEROUPPER_RETURN
+# ifndef USE_AS_RAWMEMCHR
 
 	.p2align 4
 L(first_vec_x1_check):
 	tzcntl	%eax, %eax
-	/* Check the end of data.  */
-	cmpq	%rax, %rdx
-	jbe	L(zero)
-	addq	$VEC_SIZE, %rax
+	/* Adjust length.  */
+	subl	$-(VEC_SIZE * 4), %edx
+	/* Check if match within remaining length.  */
+	cmpl	%eax, %edx
+	jbe	L(set_zero_end)
+	incq	%rdi
 	addq	%rdi, %rax
 	VZEROUPPER_RETURN
+	.p2align 4
+L(set_zero_end):
+	xorl	%eax, %eax
+	VZEROUPPER_RETURN
+# endif
 
 	.p2align 4
-L(first_vec_x2_check):
+L(last_vec_x1_return):
 	tzcntl	%eax, %eax
-	/* Check the end of data.  */
-	cmpq	%rax, %rdx
-	jbe	L(zero)
-	addq	$(VEC_SIZE * 2), %rax
+# ifdef USE_AS_RAWMEMCHR
+	subq	$(VEC_SIZE * 4 - 1), %rdi
+# else
+	incq	%rdi
+# endif
 	addq	%rdi, %rax
 	VZEROUPPER_RETURN
 
 	.p2align 4
-L(first_vec_x3_check):
+L(last_vec_x2_return):
 	tzcntl	%eax, %eax
-	/* Check the end of data.  */
-	cmpq	%rax, %rdx
-	jbe	L(zero)
-	addq	$(VEC_SIZE * 3), %rax
+# ifdef USE_AS_RAWMEMCHR
+	subq	$(VEC_SIZE * 3 - 1), %rdi
+# else
+	subq	$-(VEC_SIZE + 1), %rdi
+# endif
 	addq	%rdi, %rax
 	VZEROUPPER_RETURN
 
+# ifndef USE_AS_RAWMEMCHR
 	.p2align 4
-L(zero):
-	xorl	%eax, %eax
-	jmp     L(return_vzeroupper)
+L(last_4x_vec_or_less_cmpeq):
+	VPCMPEQ	(VEC_SIZE * 4 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb %ymm1, %eax
+	subq	$-(VEC_SIZE * 4), %rdi
+	/* Check first VEC regardless.  */
+	testl	%eax, %eax
+	jnz	L(first_vec_x1_check)
 
+	/* If remaining length <= VEC_SIZE * 2.  */
+	addl	$(VEC_SIZE * 2), %edx
+	jle	L(last_2x_vec)
 	.p2align 4
-L(null):
-	xorl	%eax, %eax
-	ret
-# endif
+L(last_4x_vec):
+	VPCMPEQ	(VEC_SIZE + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb %ymm1, %eax
+	testl	%eax, %eax
+	jnz	L(last_vec_x2_return)
 
-	.p2align 4
-L(first_vec_x0):
-	tzcntl	%eax, %eax
-	addq	%rdi, %rax
-	VZEROUPPER_RETURN
+	VPCMPEQ	(VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb %ymm1, %eax
 
-	.p2align 4
-L(first_vec_x1):
-	tzcntl	%eax, %eax
-	addq	$VEC_SIZE, %rax
-	addq	%rdi, %rax
-	VZEROUPPER_RETURN
+	/* Create mask for possible matches within remaining length.  */
+	movq	$-1, %rcx
+	bzhiq	%rdx, %rcx, %rcx
 
-	.p2align 4
-L(first_vec_x2):
+	/* Test matches in data against length mask.  */
+	andl	%ecx, %eax
+	jnz	L(last_vec_x3)
+
+	/* If remaining length <= VEC_SIZE * 3 (note this is after
+	   remaining length was found to be > VEC_SIZE * 2).  */
+	subl	$VEC_SIZE, %edx
+	jbe	L(zero_end2)
+
+	VPCMPEQ	(VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1
+	vpmovmskb %ymm1, %eax
+	/* Shift remaining length mask for last VEC.  */
+	shrq	$32, %rcx
+	andl	%ecx, %eax
+	jz	L(zero_end2)
 	tzcntl	%eax, %eax
-	addq	$(VEC_SIZE * 2), %rax
+	addq	$(VEC_SIZE * 3 + 1), %rdi
 	addq	%rdi, %rax
+L(zero_end2):
 	VZEROUPPER_RETURN
 
 	.p2align 4
-L(4x_vec_end):
-	vpmovmskb %ymm1, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x0)
-	vpmovmskb %ymm2, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x1)
-	vpmovmskb %ymm3, %eax
-	testl	%eax, %eax
-	jnz	L(first_vec_x2)
-	vpmovmskb %ymm4, %eax
-	testl	%eax, %eax
-L(first_vec_x3):
+L(last_vec_x3):
 	tzcntl	%eax, %eax
-	addq	$(VEC_SIZE * 3), %rax
+	subq	$-(VEC_SIZE * 2 + 1), %rdi
 	addq	%rdi, %rax
 	VZEROUPPER_RETURN
+# endif
 
 END (MEMCHR)
 #endif
-- 
GitLab