commit f35ad30da4880a1574996df0674986ecf82fa7ae
Author: H.J. Lu <hjl.tools@gmail.com>
Date:   Fri Oct 29 12:40:20 2021 -0700

    x86-64: Improve EVEX strcmp with masked load

    In strcmp-evex.S, to compare two 32-byte chunks of the strings, replace

            VMOVU   (%rdi, %rdx), %YMM0
            VMOVU   (%rsi, %rdx), %YMM1
            /* Each bit in K0 represents a mismatch in YMM0 and YMM1.  */
            VPCMP   $4, %YMM0, %YMM1, %k0
            VPCMP   $0, %YMMZERO, %YMM0, %k1
            VPCMP   $0, %YMMZERO, %YMM1, %k2
            /* Each bit in K1 represents a NULL in YMM0 or YMM1.  */
            kord    %k1, %k2, %k1
            /* Each bit in K1 represents a NULL or a mismatch.  */
            kord    %k0, %k1, %k1
            kmovd   %k1, %ecx
            testl   %ecx, %ecx
            jne     L(last_vector)

    with

            VMOVU   (%rdi, %rdx), %YMM0
            VPTESTM %YMM0, %YMM0, %k2
            /* Each bit cleared in K1 represents a mismatch or a null CHAR
               in YMM0 and 32 bytes at (%rsi, %rdx).  */
            VPCMP   $0, (%rsi, %rdx), %YMM0, %k1{%k2}
            kmovd   %k1, %ecx
            incl    %ecx
            jne     L(last_vector)

    It makes EVEX strcmp faster than AVX2 strcmp by up to 40% on Tiger Lake
    and Ice Lake.
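
    As a rough illustration (not part of the patch), the new sequence
    corresponds to the following C sketch using AVX512VL/BW intrinsics;
    the helper name is made up for this example, and it assumes both
    32-byte blocks are readable (the real code handles page crossing
    separately):

            #include <immintrin.h>
            #include <stdint.h>

            /* Return nonzero if the 32-byte block at s1 contains a null
               CHAR or differs from the 32-byte block at s2.  */
            static inline int
            block_has_null_or_mismatch (const char *s1, const char *s2)
            {
              __m256i v0 = _mm256_loadu_si256 ((const __m256i *) s1);
              /* VPTESTM: one mask bit per non-null byte in v0.  */
              __mmask32 k2 = _mm256_test_epi8_mask (v0, v0);
              /* Masked VPCMP: a bit is set only for a byte that is
                 non-null in s1 and equal to the byte in s2, so a cleared
                 bit marks a null CHAR or a mismatch.  */
              __mmask32 k1 = _mm256_mask_cmpeq_epi8_mask
                (k2, v0, _mm256_loadu_si256 ((const __m256i *) s2));
              /* All 32 bits set means no null CHAR and no mismatch;
                 this is what the incl %ecx / jne test checks.  */
              return k1 != UINT32_MAX;
            }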
    
    Co-Authored-By: Noah Goldstein <goldstein.w.n@gmail.com>
    (cherry picked from commit c46e9afb2df5fc9e39ff4d13777e4b4c26e04e55)
diff --git a/sysdeps/x86_64/multiarch/strcmp-evex.S b/sysdeps/x86_64/multiarch/strcmp-evex.S
index d5aa6daa46c7ed25..82f12ac89bcae20b 100644
--- a/sysdeps/x86_64/multiarch/strcmp-evex.S
+++ b/sysdeps/x86_64/multiarch/strcmp-evex.S
@@ -41,6 +41,8 @@
 # ifdef USE_AS_WCSCMP
 /* Compare packed dwords.  */
 #  define VPCMP		vpcmpd
+#  define VPMINU	vpminud
+#  define VPTESTM	vptestmd
 #  define SHIFT_REG32	r8d
 #  define SHIFT_REG64	r8
 /* 1 dword char == 4 bytes.  */
@@ -48,6 +50,8 @@
 # else
 /* Compare packed bytes.  */
 #  define VPCMP		vpcmpb
+#  define VPMINU	vpminub
+#  define VPTESTM	vptestmb
 #  define SHIFT_REG32	ecx
 #  define SHIFT_REG64	rcx
 /* 1 byte char == 1 byte.  */
@@ -67,6 +71,9 @@
 # define YMM5		ymm22
 # define YMM6		ymm23
 # define YMM7		ymm24
+# define YMM8		ymm25
+# define YMM9		ymm26
+# define YMM10		ymm27
 
 /* Warning!
            wcscmp/wcsncmp have to use SIGNED comparison for elements.
@@ -76,7 +83,7 @@
 /* The main idea of the string comparison (byte or dword) using 256-bit
    EVEX instructions consists of comparing (VPCMP) two ymm vectors. The
    latter can be on either packed bytes or dwords depending on
-   USE_AS_WCSCMP. In order to check the null char, algorithm keeps the
+   USE_AS_WCSCMP. In order to check the null CHAR, algorithm keeps the
    matched bytes/dwords, requiring 5 EVEX instructions (3 VPCMP and 2
    KORD). In general, the costs of comparing VEC_SIZE bytes (32-bytes)
    are 3 VPCMP and 2 KORD instructions, together with VMOVU and ktestd
@@ -123,27 +130,21 @@ ENTRY (STRCMP)
 	jg	L(cross_page)
 	/* Start comparing 4 vectors.  */
 	VMOVU	(%rdi), %YMM0
-	VMOVU	(%rsi), %YMM1
 
-	/* Each bit in K0 represents a mismatch in YMM0 and YMM1.  */
-	VPCMP	$4, %YMM0, %YMM1, %k0
+	/* Each bit set in K2 represents a non-null CHAR in YMM0.  */
+	VPTESTM	%YMM0, %YMM0, %k2
 
-	/* Check for NULL in YMM0.  */
-	VPCMP	$0, %YMMZERO, %YMM0, %k1
-	/* Check for NULL in YMM1.  */
-	VPCMP	$0, %YMMZERO, %YMM1, %k2
-	/* Each bit in K1 represents a NULL in YMM0 or YMM1.  */
-	kord	%k1, %k2, %k1
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM0 and 32 bytes at (%rsi).  */
+	VPCMP	$0, (%rsi), %YMM0, %k1{%k2}
 
-	/* Each bit in K1 represents:
-	   1. A mismatch in YMM0 and YMM1.  Or
-	   2. A NULL in YMM0 or YMM1.
-	 */
-	kord	%k0, %k1, %k1
-
-	ktestd	%k1, %k1
-	je	L(next_3_vectors)
 	kmovd	%k1, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
+	je	L(next_3_vectors)
 	tzcntl	%ecx, %edx
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
@@ -172,9 +173,7 @@ L(return):
 # endif
 	ret
 
-	.p2align 4
 L(return_vec_size):
-	kmovd	%k1, %ecx
 	tzcntl	%ecx, %edx
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
@@ -210,9 +209,7 @@ L(return_vec_size):
 # endif
 	ret
 
-	.p2align 4
 L(return_2_vec_size):
-	kmovd	%k1, %ecx
 	tzcntl	%ecx, %edx
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
@@ -248,9 +245,7 @@ L(return_2_vec_size):
 # endif
 	ret
 
-	.p2align 4
 L(return_3_vec_size):
-	kmovd	%k1, %ecx
 	tzcntl	%ecx, %edx
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
@@ -289,43 +284,45 @@ L(return_3_vec_size):
 	.p2align 4
 L(next_3_vectors):
 	VMOVU	VEC_SIZE(%rdi), %YMM0
-	VMOVU	VEC_SIZE(%rsi), %YMM1
-	/* Each bit in K0 represents a mismatch in YMM0 and YMM1.  */
-	VPCMP	$4, %YMM0, %YMM1, %k0
-	VPCMP	$0, %YMMZERO, %YMM0, %k1
-	VPCMP	$0, %YMMZERO, %YMM1, %k2
-	/* Each bit in K1 represents a NULL in YMM0 or YMM1.  */
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	kord	%k0, %k1, %k1
-	ktestd	%k1, %k1
+	/* Each bit set in K2 represents a non-null CHAR in YMM0.  */
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM0 and 32 bytes at VEC_SIZE(%rsi).  */
+	VPCMP	$0, VEC_SIZE(%rsi), %YMM0, %k1{%k2}
+	kmovd	%k1, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	jne	L(return_vec_size)
 
-	VMOVU	(VEC_SIZE * 2)(%rdi), %YMM2
-	VMOVU	(VEC_SIZE * 3)(%rdi), %YMM3
-	VMOVU	(VEC_SIZE * 2)(%rsi), %YMM4
-	VMOVU	(VEC_SIZE * 3)(%rsi), %YMM5
-
-	/* Each bit in K0 represents a mismatch in YMM2 and YMM4.  */
-	VPCMP	$4, %YMM2, %YMM4, %k0
-	VPCMP	$0, %YMMZERO, %YMM2, %k1
-	VPCMP	$0, %YMMZERO, %YMM4, %k2
-	/* Each bit in K1 represents a NULL in YMM2 or YMM4.  */
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	kord	%k0, %k1, %k1
-	ktestd	%k1, %k1
+	VMOVU	(VEC_SIZE * 2)(%rdi), %YMM0
+	/* Each bit set in K2 represents a non-null CHAR in YMM0.  */
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM0 and 32 bytes at (VEC_SIZE * 2)(%rsi).  */
+	VPCMP	$0, (VEC_SIZE * 2)(%rsi), %YMM0, %k1{%k2}
+	kmovd	%k1, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	jne	L(return_2_vec_size)
 
-	/* Each bit in K0 represents a mismatch in YMM3 and YMM5.  */
-	VPCMP	$4, %YMM3, %YMM5, %k0
-	VPCMP	$0, %YMMZERO, %YMM3, %k1
-	VPCMP	$0, %YMMZERO, %YMM5, %k2
-	/* Each bit in K1 represents a NULL in YMM3 or YMM5.  */
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	kord	%k0, %k1, %k1
-	ktestd	%k1, %k1
+	VMOVU	(VEC_SIZE * 3)(%rdi), %YMM0
+	/* Each bit set in K2 represents a non-null CHAR in YMM0.  */
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM0 and 32 bytes at (VEC_SIZE * 3)(%rsi).  */
+	VPCMP	$0, (VEC_SIZE * 3)(%rsi), %YMM0, %k1{%k2}
+	kmovd	%k1, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	jne	L(return_3_vec_size)
 L(main_loop_header):
 	leaq	(VEC_SIZE * 4)(%rdi), %rdx
@@ -375,56 +372,51 @@ L(back_to_loop):
 	VMOVA	VEC_SIZE(%rax), %YMM2
 	VMOVA	(VEC_SIZE * 2)(%rax), %YMM4
 	VMOVA	(VEC_SIZE * 3)(%rax), %YMM6
-	VMOVU	(%rdx), %YMM1
-	VMOVU	VEC_SIZE(%rdx), %YMM3
-	VMOVU	(VEC_SIZE * 2)(%rdx), %YMM5
-	VMOVU	(VEC_SIZE * 3)(%rdx), %YMM7
-
-	VPCMP	$4, %YMM0, %YMM1, %k0
-	VPCMP	$0, %YMMZERO, %YMM0, %k1
-	VPCMP	$0, %YMMZERO, %YMM1, %k2
-	kord	%k1, %k2, %k1
-	/* Each bit in K4 represents a NULL or a mismatch in YMM0 and
-	   YMM1.  */
-	kord	%k0, %k1, %k4
-
-	VPCMP	$4, %YMM2, %YMM3, %k0
-	VPCMP	$0, %YMMZERO, %YMM2, %k1
-	VPCMP	$0, %YMMZERO, %YMM3, %k2
-	kord	%k1, %k2, %k1
-	/* Each bit in K5 represents a NULL or a mismatch in YMM2 and
-	   YMM3.  */
-	kord	%k0, %k1, %k5
-
-	VPCMP	$4, %YMM4, %YMM5, %k0
-	VPCMP	$0, %YMMZERO, %YMM4, %k1
-	VPCMP	$0, %YMMZERO, %YMM5, %k2
-	kord	%k1, %k2, %k1
-	/* Each bit in K6 represents a NULL or a mismatch in YMM4 and
-	   YMM5.  */
-	kord	%k0, %k1, %k6
-
-	VPCMP	$4, %YMM6, %YMM7, %k0
-	VPCMP	$0, %YMMZERO, %YMM6, %k1
-	VPCMP	$0, %YMMZERO, %YMM7, %k2
-	kord	%k1, %k2, %k1
-	/* Each bit in K7 represents a NULL or a mismatch in YMM6 and
-	   YMM7.  */
-	kord	%k0, %k1, %k7
-
-	kord	%k4, %k5, %k0
-	kord	%k6, %k7, %k1
-
-	/* Test each mask (32 bits) individually because for VEC_SIZE
-	   == 32 is not possible to OR the four masks and keep all bits
-	   in a 64-bit integer register, differing from SSE2 strcmp
-	   where ORing is possible.  */
-	kortestd %k0, %k1
-	je	L(loop)
-	ktestd	%k4, %k4
+
+	VPMINU	%YMM0, %YMM2, %YMM8
+	VPMINU	%YMM4, %YMM6, %YMM9
+
+	/* A zero CHAR in YMM8 means that there is a null CHAR.  */
+	VPMINU	%YMM8, %YMM9, %YMM8
+
+	/* Each bit set in K1 represents a non-null CHAR in YMM8.  */
+	VPTESTM	%YMM8, %YMM8, %k1
+
+	/* (YMM ^ YMM): A non-zero CHAR represents a mismatch.  */
+	vpxorq	(%rdx), %YMM0, %YMM1
+	vpxorq	VEC_SIZE(%rdx), %YMM2, %YMM3
+	vpxorq	(VEC_SIZE * 2)(%rdx), %YMM4, %YMM5
+	vpxorq	(VEC_SIZE * 3)(%rdx), %YMM6, %YMM7
+
+	vporq	%YMM1, %YMM3, %YMM9
+	vporq	%YMM5, %YMM7, %YMM10
+
+	/* A non-zero CHAR in YMM9 represents a mismatch.  */
+	vporq	%YMM9, %YMM10, %YMM9
+
+	/* Each bit cleared in K0 represents a mismatch or a null CHAR.  */
+	VPCMP	$0, %YMMZERO, %YMM9, %k0{%k1}
+	kmovd   %k0, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
+	je	 L(loop)
+
+	/* Each bit set in K1 represents a non-null CHAR in YMM0.  */
+	VPTESTM	%YMM0, %YMM0, %k1
+	/* Each bit cleared in K0 represents a mismatch or a null CHAR
+	   in YMM0 and (%rdx).  */
+	VPCMP	$0, %YMMZERO, %YMM1, %k0{%k1}
+	kmovd	%k0, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	je	L(test_vec)
-	kmovd	%k4, %edi
-	tzcntl	%edi, %ecx
+	tzcntl	%ecx, %ecx
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
 	sall	$2, %ecx
@@ -466,9 +458,18 @@ L(test_vec):
 	cmpq	$VEC_SIZE, %r11
 	jbe	L(zero)
 # endif
-	ktestd	%k5, %k5
+	/* Each bit set in K1 represents a non-null CHAR in YMM2.  */
+	VPTESTM	%YMM2, %YMM2, %k1
+	/* Each bit cleared in K0 represents a mismatch or a null CHAR
+	   in YMM2 and VEC_SIZE(%rdx).  */
+	VPCMP	$0, %YMMZERO, %YMM3, %k0{%k1}
+	kmovd	%k0, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	je	L(test_2_vec)
-	kmovd	%k5, %ecx
 	tzcntl	%ecx, %edi
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
@@ -512,9 +513,18 @@ L(test_2_vec):
 	cmpq	$(VEC_SIZE * 2), %r11
 	jbe	L(zero)
 # endif
-	ktestd	%k6, %k6
+	/* Each bit set in K1 represents a non-null CHAR in YMM4.  */
+	VPTESTM	%YMM4, %YMM4, %k1
+	/* Each bit cleared in K0 represents a mismatch or a null CHAR
+	   in YMM4 and (VEC_SIZE * 2)(%rdx).  */
+	VPCMP	$0, %YMMZERO, %YMM5, %k0{%k1}
+	kmovd	%k0, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	je	L(test_3_vec)
-	kmovd	%k6, %ecx
 	tzcntl	%ecx, %edi
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
@@ -558,8 +568,18 @@ L(test_3_vec):
 	cmpq	$(VEC_SIZE * 3), %r11
 	jbe	L(zero)
 # endif
-	kmovd	%k7, %esi
-	tzcntl	%esi, %ecx
+	/* Each bit set in K1 represents a non-null CHAR in YMM6.  */
+	VPTESTM	%YMM6, %YMM6, %k1
+	/* Each bit cleared in K0 represents a mismatch or a null CHAR
+	   in YMM6 and (VEC_SIZE * 3)(%rdx).  */
+	VPCMP	$0, %YMMZERO, %YMM7, %k0{%k1}
+	kmovd	%k0, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
+	tzcntl	%ecx, %ecx
 # ifdef USE_AS_WCSCMP
 	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
 	sall	$2, %ecx
@@ -615,39 +635,51 @@ L(loop_cross_page):
 
 	VMOVU	(%rax, %r10), %YMM2
 	VMOVU	VEC_SIZE(%rax, %r10), %YMM3
-	VMOVU	(%rdx, %r10), %YMM4
-	VMOVU	VEC_SIZE(%rdx, %r10), %YMM5
-
-	VPCMP	$4, %YMM4, %YMM2, %k0
-	VPCMP	$0, %YMMZERO, %YMM2, %k1
-	VPCMP	$0, %YMMZERO, %YMM4, %k2
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch in YMM2 and
-	   YMM4.  */
-	kord	%k0, %k1, %k1
-
-	VPCMP	$4, %YMM5, %YMM3, %k3
-	VPCMP	$0, %YMMZERO, %YMM3, %k4
-	VPCMP	$0, %YMMZERO, %YMM5, %k5
-	kord	%k4, %k5, %k4
-	/* Each bit in K3 represents a NULL or a mismatch in YMM3 and
-	   YMM5.  */
-	kord	%k3, %k4, %k3
+
+	/* Each bit set in K2 represents a non-null CHAR in YMM2.  */
+	VPTESTM	%YMM2, %YMM2, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM2 and 32 bytes at (%rdx, %r10).  */
+	VPCMP	$0, (%rdx, %r10), %YMM2, %k1{%k2}
+	kmovd	%k1, %r9d
+	/* Don't use subl since it is the lower 16/32 bits of RDI
+	   below.  */
+	notl	%r9d
+# ifdef USE_AS_WCSCMP
+	/* Only last 8 bits are valid.  */
+	andl	$0xff, %r9d
+# endif
+
+	/* Each bit set in K4 represents a non-null CHAR in YMM3.  */
+	VPTESTM	%YMM3, %YMM3, %k4
+	/* Each bit cleared in K3 represents a mismatch or a null CHAR
+	   in YMM3 and 32 bytes at VEC_SIZE(%rdx, %r10).  */
+	VPCMP	$0, VEC_SIZE(%rdx, %r10), %YMM3, %k3{%k4}
+	kmovd	%k3, %edi
+# ifdef USE_AS_WCSCMP
+	/* Don't use subl since it is the upper 8 bits of EDI below.  */
+	notl	%edi
+	andl	$0xff, %edi
+# else
+	incl	%edi
+# endif
 
 # ifdef USE_AS_WCSCMP
-	/* NB: Each bit in K1/K3 represents 4-byte element.  */
-	kshiftlw $8, %k3, %k2
+	/* NB: Each bit in EDI/R9D represents 4-byte element.  */
+	sall	$8, %edi
 	/* NB: Divide shift count by 4 since each bit in K1 represent 4
 	   bytes.  */
 	movl	%ecx, %SHIFT_REG32
 	sarl	$2, %SHIFT_REG32
+
+	/* Each bit in EDI represents a null CHAR or a mismatch.  */
+	orl	%r9d, %edi
 # else
-	kshiftlq $32, %k3, %k2
-# endif
+	salq	$32, %rdi
 
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	korq	%k1, %k2, %k1
-	kmovq	%k1, %rdi
+	/* Each bit in RDI represents a null CHAR or a mismatch.  */
+	orq	%r9, %rdi
+# endif
 
 	/* Since ECX < VEC_SIZE * 2, simply skip the first ECX bytes.  */
 	shrxq	%SHIFT_REG64, %rdi, %rdi
@@ -692,35 +724,45 @@ L(loop_cross_page_2_vec):
 	/* The first VEC_SIZE * 2 bytes match or are ignored.  */
 	VMOVU	(VEC_SIZE * 2)(%rax, %r10), %YMM0
 	VMOVU	(VEC_SIZE * 3)(%rax, %r10), %YMM1
-	VMOVU	(VEC_SIZE * 2)(%rdx, %r10), %YMM2
-	VMOVU	(VEC_SIZE * 3)(%rdx, %r10), %YMM3
-
-	VPCMP	$4, %YMM0, %YMM2, %k0
-	VPCMP	$0, %YMMZERO, %YMM0, %k1
-	VPCMP	$0, %YMMZERO, %YMM2, %k2
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch in YMM0 and
-	   YMM2.  */
-	kord	%k0, %k1, %k1
-
-	VPCMP	$4, %YMM1, %YMM3, %k3
-	VPCMP	$0, %YMMZERO, %YMM1, %k4
-	VPCMP	$0, %YMMZERO, %YMM3, %k5
-	kord	%k4, %k5, %k4
-	/* Each bit in K3 represents a NULL or a mismatch in YMM1 and
-	   YMM3.  */
-	kord	%k3, %k4, %k3
 
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM0 and 32 bytes at (VEC_SIZE * 2)(%rdx, %r10).  */
+	VPCMP	$0, (VEC_SIZE * 2)(%rdx, %r10), %YMM0, %k1{%k2}
+	kmovd	%k1, %r9d
+	/* Don't use subl since it is the lower 16/32 bits of RDI
+	   below.  */
+	notl	%r9d
 # ifdef USE_AS_WCSCMP
-	/* NB: Each bit in K1/K3 represents 4-byte element.  */
-	kshiftlw $8, %k3, %k2
+	/* Only last 8 bits are valid.  */
+	andl	$0xff, %r9d
+# endif
+
+	VPTESTM	%YMM1, %YMM1, %k4
+	/* Each bit cleared in K3 represents a mismatch or a null CHAR
+	   in YMM1 and 32 bytes at (VEC_SIZE * 3)(%rdx, %r10).  */
+	VPCMP	$0, (VEC_SIZE * 3)(%rdx, %r10), %YMM1, %k3{%k4}
+	kmovd	%k3, %edi
+# ifdef USE_AS_WCSCMP
+	/* Don't use subl since it is the upper 8 bits of EDI below.  */
+	notl	%edi
+	andl	$0xff, %edi
 # else
-	kshiftlq $32, %k3, %k2
+	incl	%edi
 # endif
 
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	korq	%k1, %k2, %k1
-	kmovq	%k1, %rdi
+# ifdef USE_AS_WCSCMP
+	/* NB: Each bit in EDI/R9D represents 4-byte element.  */
+	sall	$8, %edi
+
+	/* Each bit in EDI represents a null CHAR or a mismatch.  */
+	orl	%r9d, %edi
+# else
+	salq	$32, %rdi
+
+	/* Each bit in RDI represents a null CHAR or a mismatch.  */
+	orq	%r9, %rdi
+# endif
 
 	xorl	%r8d, %r8d
 	/* If ECX > VEC_SIZE * 2, skip ECX - (VEC_SIZE * 2) bytes.  */
@@ -729,12 +771,15 @@ L(loop_cross_page_2_vec):
 	/* R8 has number of bytes skipped.  */
 	movl	%ecx, %r8d
 # ifdef USE_AS_WCSCMP
-	/* NB: Divide shift count by 4 since each bit in K1 represent 4
+	/* NB: Divide shift count by 4 since each bit in RDI represents 4
 	   bytes.  */
 	sarl	$2, %ecx
-# endif
+	/* Skip ECX bytes.  */
+	shrl	%cl, %edi
+# else
 	/* Skip ECX bytes.  */
 	shrq	%cl, %rdi
+# endif
 1:
 	/* Before jumping back to the loop, set ESI to the number of
 	   VEC_SIZE * 4 blocks before page crossing.  */
@@ -818,7 +863,7 @@ L(cross_page_loop):
 	movzbl	(%rdi, %rdx), %eax
 	movzbl	(%rsi, %rdx), %ecx
 # endif
-	/* Check null char.  */
+	/* Check null CHAR.  */
 	testl	%eax, %eax
 	jne	L(cross_page_loop)
 	/* Since %eax == 0, subtract is OK for both SIGNED and UNSIGNED
@@ -901,18 +946,17 @@ L(cross_page):
 	jg	L(cross_page_1_vector)
 L(loop_1_vector):
 	VMOVU	(%rdi, %rdx), %YMM0
-	VMOVU	(%rsi, %rdx), %YMM1
-
-	/* Each bit in K0 represents a mismatch in YMM0 and YMM1.  */
-	VPCMP	$4, %YMM0, %YMM1, %k0
-	VPCMP	$0, %YMMZERO, %YMM0, %k1
-	VPCMP	$0, %YMMZERO, %YMM1, %k2
-	/* Each bit in K1 represents a NULL in YMM0 or YMM1.  */
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	kord	%k0, %k1, %k1
+
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in YMM0 and 32 bytes at (%rsi, %rdx).  */
+	VPCMP	$0, (%rsi, %rdx), %YMM0, %k1{%k2}
 	kmovd	%k1, %ecx
-	testl	%ecx, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xff, %ecx
+# else
+	incl	%ecx
+# endif
 	jne	L(last_vector)
 
 	addl	$VEC_SIZE, %edx
@@ -931,18 +975,17 @@ L(cross_page_1_vector):
 	cmpl	$(PAGE_SIZE - 16), %eax
 	jg	L(cross_page_1_xmm)
 	VMOVU	(%rdi, %rdx), %XMM0
-	VMOVU	(%rsi, %rdx), %XMM1
-
-	/* Each bit in K0 represents a mismatch in XMM0 and XMM1.  */
-	VPCMP	$4, %XMM0, %XMM1, %k0
-	VPCMP	$0, %XMMZERO, %XMM0, %k1
-	VPCMP	$0, %XMMZERO, %XMM1, %k2
-	/* Each bit in K1 represents a NULL in XMM0 or XMM1.  */
-	korw	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	korw	%k0, %k1, %k1
-	kmovw	%k1, %ecx
-	testl	%ecx, %ecx
+
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in XMM0 and 16 bytes at (%rsi, %rdx).  */
+	VPCMP	$0, (%rsi, %rdx), %XMM0, %k1{%k2}
+	kmovd	%k1, %ecx
+# ifdef USE_AS_WCSCMP
+	subl	$0xf, %ecx
+# else
+	subl	$0xffff, %ecx
+# endif
 	jne	L(last_vector)
 
 	addl	$16, %edx
@@ -965,25 +1008,16 @@ L(cross_page_1_xmm):
 	vmovq	(%rdi, %rdx), %XMM0
 	vmovq	(%rsi, %rdx), %XMM1
 
-	/* Each bit in K0 represents a mismatch in XMM0 and XMM1.  */
-	VPCMP	$4, %XMM0, %XMM1, %k0
-	VPCMP	$0, %XMMZERO, %XMM0, %k1
-	VPCMP	$0, %XMMZERO, %XMM1, %k2
-	/* Each bit in K1 represents a NULL in XMM0 or XMM1.  */
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	kord	%k0, %k1, %k1
-	kmovd	%k1, %ecx
-
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in XMM0 and XMM1.  */
+	VPCMP	$0, %XMM1, %XMM0, %k1{%k2}
+	kmovb	%k1, %ecx
 # ifdef USE_AS_WCSCMP
-	/* Only last 2 bits are valid.  */
-	andl	$0x3, %ecx
+	subl	$0x3, %ecx
 # else
-	/* Only last 8 bits are valid.  */
-	andl	$0xff, %ecx
+	subl	$0xff, %ecx
 # endif
-
-	testl	%ecx, %ecx
 	jne	L(last_vector)
 
 	addl	$8, %edx
@@ -1002,25 +1036,16 @@ L(cross_page_8bytes):
 	vmovd	(%rdi, %rdx), %XMM0
 	vmovd	(%rsi, %rdx), %XMM1
 
-	/* Each bit in K0 represents a mismatch in XMM0 and XMM1.  */
-	VPCMP	$4, %XMM0, %XMM1, %k0
-	VPCMP	$0, %XMMZERO, %XMM0, %k1
-	VPCMP	$0, %XMMZERO, %XMM1, %k2
-	/* Each bit in K1 represents a NULL in XMM0 or XMM1.  */
-	kord	%k1, %k2, %k1
-	/* Each bit in K1 represents a NULL or a mismatch.  */
-	kord	%k0, %k1, %k1
+	VPTESTM	%YMM0, %YMM0, %k2
+	/* Each bit cleared in K1 represents a mismatch or a null CHAR
+	   in XMM0 and XMM1.  */
+	VPCMP	$0, %XMM1, %XMM0, %k1{%k2}
 	kmovd	%k1, %ecx
-
 # ifdef USE_AS_WCSCMP
-	/* Only the last bit is valid.  */
-	andl	$0x1, %ecx
+	subl	$0x1, %ecx
 # else
-	/* Only last 4 bits are valid.  */
-	andl	$0xf, %ecx
+	subl	$0xf, %ecx
 # endif
-
-	testl	%ecx, %ecx
 	jne	L(last_vector)
 
 	addl	$4, %edx