commit ffe75982cc0bb2d25d55ed566a3731b9c3017e6f
Author: Noah Goldstein <goldstein.w.n@gmail.com>
Date:   Fri Apr 15 12:28:00 2022 -0500

    x86: Remove memcmp-sse4.S

    The code didn't actually use any SSE4 instructions since `ptest` was
    removed in:

    commit 2f9062d7171850451e6044ef78d91ff8c017b9c0
    Author: Noah Goldstein <goldstein.w.n@gmail.com>
    Date:   Wed Nov 10 16:18:56 2021 -0600

        x86: Shrink memcmp-sse4.S code size

    The new memcmp-sse2 implementation is also faster.

    geometric_mean(N=20) of page cross cases SSE2 / SSE4: 0.905
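
    The figure above is the geometric mean of the per-case New/Old time
    ratios; a value below 1.0 means the new SSE2 version is faster on
    average.  A minimal sketch of how such a mean can be computed,
    assuming the ratios have already been collected into an array
    (illustrative helper, not part of the benchmark harness):

        #include <math.h>
        #include <stddef.h>

        /* Geometric mean of n positive New/Old time ratios.  */
        static double
        geometric_mean (const double *ratios, size_t n)
        {
          double log_sum = 0.0;
          for (size_t i = 0; i < n; i++)
            log_sum += log (ratios[i]);
          return exp (log_sum / n);
        }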

    Note there are two regressions from preferring SSE2: Size = 1 and
    Size = 65.

    Size = 1:
    size, align0, align1, ret, New Time/Old Time
       1,      1,      1,   0,               1.2
       1,      1,      1,   1,             1.197
       1,      1,      1,  -1,               1.2

    This is intentional. Based on profiles of GCC11 and Python3,
    Size == 1 is significantly less hot than sizes [4, 8] (which are
    made hotter).

    Python3 Size = 1        -> 13.64%
    Python3 Size = [4, 8]   -> 60.92%

    GCC11   Size = 1        ->  1.29%
    GCC11   Size = [4, 8]   -> 33.86%

    size, align0, align1, ret, New Time/Old Time
       4,      4,      4,   0,             0.622
       4,      4,      4,   1,             0.797
       4,      4,      4,  -1,             0.805
       5,      5,      5,   0,             0.623
       5,      5,      5,   1,             0.777
       5,      5,      5,  -1,             0.802
       6,      6,      6,   0,             0.625
       6,      6,      6,   1,             0.813
       6,      6,      6,  -1,             0.788
       7,      7,      7,   0,             0.625
       7,      7,      7,   1,             0.799
       7,      7,      7,  -1,             0.795
       8,      8,      8,   0,             0.625
       8,      8,      8,   1,             0.848
       8,      8,      8,  -1,             0.914
       9,      9,      9,   0,             0.625
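
    As a rough illustration only (taking ~1.2 for Size = 1 from the
    table above and assuming ~0.75 as a representative ratio for sizes
    [4, 8]), the profile-weighted change in memcmp time is roughly:

        Python3: 0.1364 * (1.20 - 1.0) + 0.6092 * (0.75 - 1.0) ~= -0.13
        GCC11:   0.0129 * (1.20 - 1.0) + 0.3386 * (0.75 - 1.0) ~= -0.08

    so the [4, 8] improvement outweighs the Size = 1 regression in both
    profiles.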

    Size = 65:
    size, align0, align1, ret, New Time/Old Time
      65,      0,      0,   0,             1.103
      65,      0,      0,   1,             1.216
      65,      0,      0,  -1,             1.227
      65,     65,      0,   0,             1.091
      65,      0,     65,   1,              1.19
      65,     65,     65,  -1,             1.215

    This is because A) the checks in the range [65, 96] are now unrolled
    2x and B) smaller values <= 16 are now given a hotter path. By
    contrast, the SSE4 version has a branch for Size = 80. The unrolled
    version gets better performance for returns which need both
    comparisons.
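
    To illustrate the unrolled pattern (a minimal sketch, not the actual
    memcmp-sse2 code): for 16 < n <= 32 both 16-byte checks are issued
    unconditionally, the second one overlapping the end of the buffer,
    so there is no branch on the exact size; the [65, 96] range applies
    the same idea unrolled 2x.

        #include <emmintrin.h>
        #include <stddef.h>

        /* Nonzero if s1[0..n) and s2[0..n) differ, for 16 < n <= 32.
           Both loads are unconditional; they overlap when n < 32.  */
        static int
        differs_16_to_32 (const void *s1, const void *s2, size_t n)
        {
          const char *p1 = s1, *p2 = s2;
          __m128i a = _mm_loadu_si128 ((const __m128i *) p1);
          __m128i b = _mm_loadu_si128 ((const __m128i *) p2);
          __m128i c = _mm_loadu_si128 ((const __m128i *) (p1 + n - 16));
          __m128i d = _mm_loadu_si128 ((const __m128i *) (p2 + n - 16));
          unsigned int m0 = _mm_movemask_epi8 (_mm_cmpeq_epi8 (a, b));
          unsigned int m1 = _mm_movemask_epi8 (_mm_cmpeq_epi8 (c, d));
          /* 0xffff means all 16 bytes matched in that check.  */
          return (m0 & m1) != 0xffff;
        }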

    size, align0, align1, ret, New Time/Old Time
     128,      4,      8,   0,             0.858
     128,      4,      8,   1,             0.879
     128,      4,      8,  -1,             0.888

    As well, outside of fully predictable microbenchmark environments
    the branch will have a real cost.
    Reviewed-by: H.J. Lu <hjl.tools@gmail.com>

    (cherry picked from commit 7cbc03d03091d5664060924789afe46d30a5477e)
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index bca82e38d86cc440..b503e4b81e92a11c 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -11,7 +11,6 @@ sysdep_routines += \
   memcmp-avx2-movbe-rtm \
   memcmp-evex-movbe \
   memcmp-sse2 \
-  memcmp-sse4 \
   memcmp-ssse3 \
   memcpy-ssse3 \
   memcpy-ssse3-back \
@@ -174,7 +173,6 @@ sysdep_routines += \
   wmemcmp-avx2-movbe-rtm \
   wmemcmp-c \
   wmemcmp-evex-movbe \
-  wmemcmp-sse4 \
   wmemcmp-ssse3 \
 # sysdep_routines
 endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 4c7834dd0b951fa4..e5e48b36c3175e68 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -78,8 +78,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			       && CPU_FEATURE_USABLE (BMI2)
 			       && CPU_FEATURE_USABLE (MOVBE)),
 			      __memcmp_evex_movbe)
-	      IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSE4_1),
-			      __memcmp_sse4_1)
 	      IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSSE3),
 			      __memcmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_sse2))
@@ -824,8 +822,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			       && CPU_FEATURE_USABLE (BMI2)
 			       && CPU_FEATURE_USABLE (MOVBE)),
 			      __wmemcmp_evex_movbe)
-	      IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSE4_1),
-			      __wmemcmp_sse4_1)
 	      IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSSE3),
 			      __wmemcmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, wmemcmp, 1, __wmemcmp_sse2))
diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
index 89e2129968e1e49c..5b92594093c1e0bb 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
@@ -21,7 +21,6 @@
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse4_1) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe_rtm) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_movbe) attribute_hidden;
@@ -47,9 +46,6 @@ IFUNC_SELECTOR (void)
 	return OPTIMIZE (avx2_movbe);
     }
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
-    return OPTIMIZE (sse4_1);
-
   if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
diff --git a/sysdeps/x86_64/multiarch/memcmp-sse4.S b/sysdeps/x86_64/multiarch/memcmp-sse4.S
deleted file mode 100644
index 97c102a9c5ab2b91..0000000000000000
--- a/sysdeps/x86_64/multiarch/memcmp-sse4.S
+++ /dev/null
@@ -1,804 +0,0 @@
-/* memcmp with SSE4.1, wmemcmp with SSE4.1
-   Copyright (C) 2010-2021 Free Software Foundation, Inc.
-   Contributed by Intel Corporation.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <https://www.gnu.org/licenses/>.  */
-
-#if IS_IN (libc)
-
-# include <sysdep.h>
-
-# ifndef MEMCMP
-#  define MEMCMP	__memcmp_sse4_1
-# endif
-
-#ifdef USE_AS_WMEMCMP
-# define CMPEQ	pcmpeqd
-# define CHAR_SIZE	4
-#else
-# define CMPEQ	pcmpeqb
-# define CHAR_SIZE	1
-#endif
-
-
-/* Warning!
-           wmemcmp has to use SIGNED comparison for elements.
-           memcmp has to use UNSIGNED comparison for elemnts.
-*/
-
-	.section .text.sse4.1,"ax",@progbits
-ENTRY (MEMCMP)
-# ifdef USE_AS_WMEMCMP
-	shl	$2, %RDX_LP
-# elif defined __ILP32__
-	/* Clear the upper 32 bits.  */
-	mov	%edx, %edx
-# endif
-	cmp	$79, %RDX_LP
-	ja	L(79bytesormore)
-
-	cmp	$CHAR_SIZE, %RDX_LP
-	jbe	L(firstbyte)
-
-	/* N in (CHAR_SIZE, 79) bytes.  */
-	cmpl	$32, %edx
-	ja	L(more_32_bytes)
-
-	cmpl	$16, %edx
-	jae	L(16_to_32_bytes)
-
-# ifndef USE_AS_WMEMCMP
-	cmpl	$8, %edx
-	jae	L(8_to_16_bytes)
-
-	cmpl	$4, %edx
-	jb	L(2_to_3_bytes)
-
-	movl	(%rdi), %eax
-	movl	(%rsi), %ecx
-
-	bswap	%eax
-	bswap	%ecx
-
-	shlq	$32, %rax
-	shlq	$32, %rcx
-
-	movl	-4(%rdi, %rdx), %edi
-	movl	-4(%rsi, %rdx), %esi
-
-	bswap	%edi
-	bswap	%esi
-
-	orq	%rdi, %rax
-	orq	%rsi, %rcx
-	subq	%rcx, %rax
-	cmovne	%edx, %eax
-	sbbl	%ecx, %ecx
-	orl	%ecx, %eax
-	ret
-
-	.p2align 4,, 8
-L(2_to_3_bytes):
-	movzwl	(%rdi), %eax
-	movzwl	(%rsi), %ecx
-	shll	$8, %eax
-	shll	$8, %ecx
-	bswap	%eax
-	bswap	%ecx
-	movzbl	-1(%rdi, %rdx), %edi
-	movzbl	-1(%rsi, %rdx), %esi
-	orl	%edi, %eax
-	orl	%esi, %ecx
-	subl	%ecx, %eax
-	ret
-
-	.p2align 4,, 8
-L(8_to_16_bytes):
-	movq	(%rdi), %rax
-	movq	(%rsi), %rcx
-
-	bswap	%rax
-	bswap	%rcx
-
-	subq	%rcx, %rax
-	jne	L(8_to_16_bytes_done)
-
-	movq	-8(%rdi, %rdx), %rax
-	movq	-8(%rsi, %rdx), %rcx
-
-	bswap	%rax
-	bswap	%rcx
-
-	subq	%rcx, %rax
-
-L(8_to_16_bytes_done):
-	cmovne	%edx, %eax
-	sbbl	%ecx, %ecx
-	orl	%ecx, %eax
-	ret
-# else
-	xorl	%eax, %eax
-	movl	(%rdi), %ecx
-	cmpl	(%rsi), %ecx
-	jne	L(8_to_16_bytes_done)
-	movl	4(%rdi), %ecx
-	cmpl	4(%rsi), %ecx
-	jne	L(8_to_16_bytes_done)
-	movl	-4(%rdi, %rdx), %ecx
-	cmpl	-4(%rsi, %rdx), %ecx
-	jne	L(8_to_16_bytes_done)
-	ret
-# endif
-
-	.p2align 4,, 3
-L(ret_zero):
-	xorl	%eax, %eax
-L(zero):
-	ret
-
-	.p2align 4,, 8
-L(firstbyte):
-	jb	L(ret_zero)
-# ifdef USE_AS_WMEMCMP
-	xorl	%eax, %eax
-	movl	(%rdi), %ecx
-	cmpl	(%rsi), %ecx
-	je	L(zero)
-L(8_to_16_bytes_done):
-	setg	%al
-	leal	-1(%rax, %rax), %eax
-# else
-	movzbl	(%rdi), %eax
-	movzbl	(%rsi), %ecx
-	sub	%ecx, %eax
-# endif
-	ret
-
-	.p2align 4
-L(vec_return_begin_48):
-	addq	$16, %rdi
-	addq	$16, %rsi
-L(vec_return_begin_32):
-	bsfl	%eax, %eax
-# ifdef USE_AS_WMEMCMP
-	movl	32(%rdi, %rax), %ecx
-	xorl	%edx, %edx
-	cmpl	32(%rsi, %rax), %ecx
-	setg	%dl
-	leal	-1(%rdx, %rdx), %eax
-# else
-	movzbl	32(%rsi, %rax), %ecx
-	movzbl	32(%rdi, %rax), %eax
-	subl	%ecx, %eax
-# endif
-	ret
-
-	.p2align 4
-L(vec_return_begin_16):
-	addq	$16, %rdi
-	addq	$16, %rsi
-L(vec_return_begin):
-	bsfl	%eax, %eax
-# ifdef USE_AS_WMEMCMP
-	movl	(%rdi, %rax), %ecx
-	xorl	%edx, %edx
-	cmpl	(%rsi, %rax), %ecx
-	setg	%dl
-	leal	-1(%rdx, %rdx), %eax
-# else
-	movzbl	(%rsi, %rax), %ecx
-	movzbl	(%rdi, %rax), %eax
-	subl	%ecx, %eax
-# endif
-	ret
-
-	.p2align 4
-L(vec_return_end_16):
-	subl	$16, %edx
-L(vec_return_end):
-	bsfl	%eax, %eax
-	addl	%edx, %eax
-# ifdef USE_AS_WMEMCMP
-	movl	-16(%rdi, %rax), %ecx
-	xorl	%edx, %edx
-	cmpl	-16(%rsi, %rax), %ecx
-	setg	%dl
-	leal	-1(%rdx, %rdx), %eax
-# else
-	movzbl	-16(%rsi, %rax), %ecx
-	movzbl	-16(%rdi, %rax), %eax
-	subl	%ecx, %eax
-# endif
-	ret
-
-	.p2align 4,, 8
-L(more_32_bytes):
-	movdqu	(%rdi), %xmm0
-	movdqu	(%rsi), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqu	16(%rdi), %xmm0
-	movdqu	16(%rsi), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	cmpl	$64, %edx
-	jbe	L(32_to_64_bytes)
-	movdqu	32(%rdi), %xmm0
-	movdqu	32(%rsi), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	.p2align 4,, 6
-L(32_to_64_bytes):
-	movdqu	-32(%rdi, %rdx), %xmm0
-	movdqu	-32(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end_16)
-
-	movdqu	-16(%rdi, %rdx), %xmm0
-	movdqu	-16(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end)
-	ret
-
-	.p2align 4
-L(16_to_32_bytes):
-	movdqu	(%rdi), %xmm0
-	movdqu	(%rsi), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqu	-16(%rdi, %rdx), %xmm0
-	movdqu	-16(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end)
-	ret
-
-
-	.p2align 4
-L(79bytesormore):
-	movdqu	(%rdi), %xmm0
-	movdqu	(%rsi), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-
-	mov	%rsi, %rcx
-	and	$-16, %rsi
-	add	$16, %rsi
-	sub	%rsi, %rcx
-
-	sub	%rcx, %rdi
-	add	%rcx, %rdx
-	test	$0xf, %rdi
-	jz	L(2aligned)
-
-	cmp	$128, %rdx
-	ja	L(128bytesormore)
-
-	.p2align 4,, 6
-L(less128bytes):
-	movdqu	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqu	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqu	32(%rdi), %xmm1
-	CMPEQ	32(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	movdqu	48(%rdi), %xmm1
-	CMPEQ	48(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_48)
-
-	cmp	$96, %rdx
-	jb	L(32_to_64_bytes)
-
-	addq	$64, %rdi
-	addq	$64, %rsi
-	subq	$64, %rdx
-
-	.p2align 4,, 6
-L(last_64_bytes):
-	movdqu	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqu	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqu	-32(%rdi, %rdx), %xmm0
-	movdqu	-32(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end_16)
-
-	movdqu	-16(%rdi, %rdx), %xmm0
-	movdqu	-16(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end)
-	ret
-
-	.p2align 4
-L(128bytesormore):
-	cmp	$256, %rdx
-	ja	L(unaligned_loop)
-L(less256bytes):
-	movdqu	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqu	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqu	32(%rdi), %xmm1
-	CMPEQ	32(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	movdqu	48(%rdi), %xmm1
-	CMPEQ	48(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_48)
-
-	addq	$64, %rdi
-	addq	$64, %rsi
-
-	movdqu	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqu	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqu	32(%rdi), %xmm1
-	CMPEQ	32(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	movdqu	48(%rdi), %xmm1
-	CMPEQ	48(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_48)
-
-	addq	$-128, %rdx
-	subq	$-64, %rsi
-	subq	$-64, %rdi
-
-	cmp	$64, %rdx
-	ja	L(less128bytes)
-
-	cmp	$32, %rdx
-	ja	L(last_64_bytes)
-
-	movdqu	-32(%rdi, %rdx), %xmm0
-	movdqu	-32(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end_16)
-
-	movdqu	-16(%rdi, %rdx), %xmm0
-	movdqu	-16(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end)
-	ret
-
-	.p2align 4
-L(unaligned_loop):
-# ifdef DATA_CACHE_SIZE_HALF
-	mov	$DATA_CACHE_SIZE_HALF, %R8_LP
-# else
-	mov	__x86_data_cache_size_half(%rip), %R8_LP
-# endif
-	movq	%r8, %r9
-	addq	%r8, %r8
-	addq	%r9, %r8
-	cmpq	%r8, %rdx
-	ja	L(L2_L3_cache_unaligned)
-	sub	$64, %rdx
-	.p2align 4
-L(64bytesormore_loop):
-	movdqu	(%rdi), %xmm0
-	movdqu	16(%rdi), %xmm1
-	movdqu	32(%rdi), %xmm2
-	movdqu	48(%rdi), %xmm3
-
-	CMPEQ	(%rsi), %xmm0
-	CMPEQ	16(%rsi), %xmm1
-	CMPEQ	32(%rsi), %xmm2
-	CMPEQ	48(%rsi), %xmm3
-
-	pand	%xmm0, %xmm1
-	pand	%xmm2, %xmm3
-	pand	%xmm1, %xmm3
-
-	pmovmskb %xmm3, %eax
-	incw	%ax
-	jnz	L(64bytesormore_loop_end)
-
-	add	$64, %rsi
-	add	$64, %rdi
-	sub	$64, %rdx
-	ja	L(64bytesormore_loop)
-
-	.p2align 4,, 6
-L(loop_tail):
-	addq	%rdx, %rdi
-	movdqu	(%rdi), %xmm0
-	movdqu	16(%rdi), %xmm1
-	movdqu	32(%rdi), %xmm2
-	movdqu	48(%rdi), %xmm3
-
-	addq	%rdx, %rsi
-	movdqu	(%rsi), %xmm4
-	movdqu	16(%rsi), %xmm5
-	movdqu	32(%rsi), %xmm6
-	movdqu	48(%rsi), %xmm7
-
-	CMPEQ	%xmm4, %xmm0
-	CMPEQ	%xmm5, %xmm1
-	CMPEQ	%xmm6, %xmm2
-	CMPEQ	%xmm7, %xmm3
-
-	pand	%xmm0, %xmm1
-	pand	%xmm2, %xmm3
-	pand	%xmm1, %xmm3
-
-	pmovmskb %xmm3, %eax
-	incw	%ax
-	jnz	L(64bytesormore_loop_end)
-	ret
-
-L(L2_L3_cache_unaligned):
-	subq	$64, %rdx
-	.p2align 4
-L(L2_L3_unaligned_128bytes_loop):
-	prefetchnta 0x1c0(%rdi)
-	prefetchnta 0x1c0(%rsi)
-
-	movdqu	(%rdi), %xmm0
-	movdqu	16(%rdi), %xmm1
-	movdqu	32(%rdi), %xmm2
-	movdqu	48(%rdi), %xmm3
-
-	CMPEQ	(%rsi), %xmm0
-	CMPEQ	16(%rsi), %xmm1
-	CMPEQ	32(%rsi), %xmm2
-	CMPEQ	48(%rsi), %xmm3
-
-	pand	%xmm0, %xmm1
-	pand	%xmm2, %xmm3
-	pand	%xmm1, %xmm3
-
-	pmovmskb %xmm3, %eax
-	incw	%ax
-	jnz	L(64bytesormore_loop_end)
-
-	add	$64, %rsi
-	add	$64, %rdi
-	sub	$64, %rdx
-	ja	L(L2_L3_unaligned_128bytes_loop)
-	jmp	L(loop_tail)
-
-
-	/* This case is for machines which are sensitive for unaligned
-	 * instructions.  */
-	.p2align 4
-L(2aligned):
-	cmp	$128, %rdx
-	ja	L(128bytesormorein2aligned)
-L(less128bytesin2aligned):
-	movdqa	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqa	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqa	32(%rdi), %xmm1
-	CMPEQ	32(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	movdqa	48(%rdi), %xmm1
-	CMPEQ	48(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_48)
-
-	cmp	$96, %rdx
-	jb	L(32_to_64_bytes)
-
-	addq	$64, %rdi
-	addq	$64, %rsi
-	subq	$64, %rdx
-
-	.p2align 4,, 6
-L(aligned_last_64_bytes):
-	movdqa	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqa	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqu	-32(%rdi, %rdx), %xmm0
-	movdqu	-32(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end_16)
-
-	movdqu	-16(%rdi, %rdx), %xmm0
-	movdqu	-16(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end)
-	ret
-
-	.p2align 4
-L(128bytesormorein2aligned):
-	cmp	$256, %rdx
-	ja	L(aligned_loop)
-L(less256bytesin2alinged):
-	movdqa	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqa	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqa	32(%rdi), %xmm1
-	CMPEQ	32(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	movdqa	48(%rdi), %xmm1
-	CMPEQ	48(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_48)
-
-	addq	$64, %rdi
-	addq	$64, %rsi
-
-	movdqa	(%rdi), %xmm1
-	CMPEQ	(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin)
-
-	movdqa	16(%rdi), %xmm1
-	CMPEQ	16(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_16)
-
-	movdqa	32(%rdi), %xmm1
-	CMPEQ	32(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_32)
-
-	movdqa	48(%rdi), %xmm1
-	CMPEQ	48(%rsi), %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_begin_48)
-
-	addq	$-128, %rdx
-	subq	$-64, %rsi
-	subq	$-64, %rdi
-
-	cmp	$64, %rdx
-	ja	L(less128bytesin2aligned)
-
-	cmp	$32, %rdx
-	ja	L(aligned_last_64_bytes)
-
-	movdqu	-32(%rdi, %rdx), %xmm0
-	movdqu	-32(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end_16)
-
-	movdqu	-16(%rdi, %rdx), %xmm0
-	movdqu	-16(%rsi, %rdx), %xmm1
-	CMPEQ	%xmm0, %xmm1
-	pmovmskb %xmm1, %eax
-	incw	%ax
-	jnz	L(vec_return_end)
-	ret
-
-	.p2align 4
-L(aligned_loop):
-# ifdef DATA_CACHE_SIZE_HALF
-	mov	$DATA_CACHE_SIZE_HALF, %R8_LP
-# else
-	mov	__x86_data_cache_size_half(%rip), %R8_LP
-# endif
-	movq	%r8, %r9
-	addq	%r8, %r8
-	addq	%r9, %r8
-	cmpq	%r8, %rdx
-	ja	L(L2_L3_cache_aligned)
-
-	sub	$64, %rdx
-	.p2align 4
-L(64bytesormore_loopin2aligned):
-	movdqa	(%rdi), %xmm0
-	movdqa	16(%rdi), %xmm1
-	movdqa	32(%rdi), %xmm2
-	movdqa	48(%rdi), %xmm3
-
-	CMPEQ	(%rsi), %xmm0
-	CMPEQ	16(%rsi), %xmm1
-	CMPEQ	32(%rsi), %xmm2
-	CMPEQ	48(%rsi), %xmm3
-
-	pand	%xmm0, %xmm1
-	pand	%xmm2, %xmm3
-	pand	%xmm1, %xmm3
-
-	pmovmskb %xmm3, %eax
-	incw	%ax
-	jnz	L(64bytesormore_loop_end)
-	add	$64, %rsi
-	add	$64, %rdi
-	sub	$64, %rdx
-	ja	L(64bytesormore_loopin2aligned)
-	jmp	L(loop_tail)
-
-L(L2_L3_cache_aligned):
-	subq	$64, %rdx
-	.p2align 4
-L(L2_L3_aligned_128bytes_loop):
-	prefetchnta 0x1c0(%rdi)
-	prefetchnta 0x1c0(%rsi)
-	movdqa	(%rdi), %xmm0
-	movdqa	16(%rdi), %xmm1
-	movdqa	32(%rdi), %xmm2
-	movdqa	48(%rdi), %xmm3
-
-	CMPEQ	(%rsi), %xmm0
-	CMPEQ	16(%rsi), %xmm1
-	CMPEQ	32(%rsi), %xmm2
-	CMPEQ	48(%rsi), %xmm3
-
-	pand	%xmm0, %xmm1
-	pand	%xmm2, %xmm3
-	pand	%xmm1, %xmm3
-
-	pmovmskb %xmm3, %eax
-	incw	%ax
-	jnz	L(64bytesormore_loop_end)
-
-	addq	$64, %rsi
-	addq	$64, %rdi
-	subq	$64, %rdx
-	ja	L(L2_L3_aligned_128bytes_loop)
-	jmp	L(loop_tail)
-
-	.p2align 4
-L(64bytesormore_loop_end):
-	pmovmskb %xmm0, %ecx
-	incw	%cx
-	jnz	L(loop_end_ret)
-
-	pmovmskb %xmm1, %ecx
-	notw	%cx
-	sall	$16, %ecx
-	jnz	L(loop_end_ret)
-
-	pmovmskb %xmm2, %ecx
-	notw	%cx
-	shlq	$32, %rcx
-	jnz	L(loop_end_ret)
-
-	addq	$48, %rdi
-	addq	$48, %rsi
-	movq	%rax, %rcx
-
-	.p2align 4,, 6
-L(loop_end_ret):
-	bsfq	%rcx, %rcx
-# ifdef USE_AS_WMEMCMP
-	movl	(%rdi, %rcx), %eax
-	xorl	%edx, %edx
-	cmpl	(%rsi, %rcx), %eax
-	setg	%dl
-	leal	-1(%rdx, %rdx), %eax
-# else
-	movzbl	(%rdi, %rcx), %eax
-	movzbl	(%rsi, %rcx), %ecx
-	subl	%ecx, %eax
-# endif
-	ret
-END (MEMCMP)
-#endif