From f5078f5cabb6e330506c2cad6ad89476438aafcb Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n@gmail.com>
Date: Fri, 15 Apr 2022 12:28:00 -0500
Subject: [PATCH] x86: Remove memcmp-sse4.S

The code didn't actually use any SSE4.1 instructions, since `ptest` was
removed in:

commit 2f9062d7171850451e6044ef78d91ff8c017b9c0
Author: Noah Goldstein <goldstein.w.n@gmail.com>
Date:   Wed Nov 10 16:18:56 2021 -0600

    x86: Shrink memcmp-sse4.S code size

The new memcmp-sse2 implementation is also faster.

geometric_mean(N=20) of page cross cases SSE2 / SSE4: 0.905
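
Here the New Time/Old Time ratios below 1.0 favor the new SSE2 version.
As a minimal, illustrative sketch (not part of glibc's benchtests; the
sample ratios are made up), a geometric mean of such ratios can be
computed like so:

    #include <math.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Geometric mean of n timing ratios; averaging in log space keeps
       the result well behaved.  The sample values are hypothetical.  */
    static double
    geometric_mean (const double *ratios, size_t n)
    {
      double sum = 0.0;
      for (size_t i = 0; i < n; i++)
        sum += log (ratios[i]);
      return exp (sum / n);
    }

    int
    main (void)
    {
      double ratios[] = { 0.88, 0.91, 0.93, 0.90 };
      printf ("%f\n", geometric_mean (ratios, 4));
      return 0;
    }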

Note there are two regressions preferring SSE2, for Size = 1 and
Size = 65.

Size = 1:
size, align0, align1, ret, New Time/Old Time
   1,      1,      1,   0,               1.2
   1,      1,      1,   1,             1.197
   1,      1,      1,  -1,               1.2

This is intentional. Size == 1 is significantly less hot, based on
profiles of GCC11 and Python3, than sizes [4, 8] (which are made
hotter). A rough back-of-the-envelope check of this tradeoff follows
the figures below.

Python3 Size = 1 -> 13.64%
Python3 Size = [4, 8] -> 60.92%

GCC11 Size = 1 -> 1.29%
GCC11 Size = [4, 8] -> 33.86%
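
Illustrative only (the weights are the Python3 figures above; the
ratios are approximations taken from the tables in this message):

    #include <stdio.h>

    int
    main (void)
    {
      /* ~20% loss on 13.64% of calls (Size = 1) against a ~25% win on
         60.92% of calls (Size = [4, 8]); both ratios are approximate.  */
      double delta = 0.1364 * (1.20 - 1.0) + 0.6092 * (0.75 - 1.0);
      printf ("net relative time change: %+.1f%%\n", delta * 100.0);
      /* Prints about -12.5%, i.e. a net win under these assumptions.  */
      return 0;
    }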

size, align0, align1, ret, New Time/Old Time
   4,      4,      4,   0,             0.622
   4,      4,      4,   1,             0.797
   4,      4,      4,  -1,             0.805
   5,      5,      5,   0,             0.623
   5,      5,      5,   1,             0.777
   5,      5,      5,  -1,             0.802
   6,      6,      6,   0,             0.625
   6,      6,      6,   1,             0.813
   6,      6,      6,  -1,             0.788
   7,      7,      7,   0,             0.625
   7,      7,      7,   1,             0.799
   7,      7,      7,  -1,             0.795
   8,      8,      8,   0,             0.625
   8,      8,      8,   1,             0.848
   8,      8,      8,  -1,             0.914
   9,      9,      9,   0,             0.625

Size = 65:
size, align0, align1, ret, New Time/Old Time
  65,      0,      0,   0,             1.103
  65,      0,      0,   1,             1.216
  65,      0,      0,  -1,             1.227
  65,     65,      0,   0,             1.091
  65,      0,     65,   1,              1.19
  65,     65,     65,  -1,             1.215

This is because A) the checks in the range [65, 96] are now unrolled
2x and B) smaller values <= 16 are now given a hotter path. By
contrast, the SSE4 version has a branch for Size = 80. The unrolled
version gets better performance for returns which need both
comparisons (see the sketch below).
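
As an illustration only (this is not glibc code), the 2x-unrolled check
amounts to testing two 16-byte vectors with a single branch, e.g. with
SSE2 intrinsics:

    #include <emmintrin.h>

    /* Nonzero if two 32-byte blocks differ.  One branch covers both
       16-byte comparisons, so a return that needs the second compare
       pays no extra branch.  */
    static int
    blocks_differ_32 (const unsigned char *a, const unsigned char *b)
    {
      __m128i eq0 = _mm_cmpeq_epi8 (_mm_loadu_si128 ((const __m128i *) a),
                                    _mm_loadu_si128 ((const __m128i *) b));
      __m128i eq1 = _mm_cmpeq_epi8 (_mm_loadu_si128 ((const __m128i *) (a + 16)),
                                    _mm_loadu_si128 ((const __m128i *) (b + 16)));
      return _mm_movemask_epi8 (_mm_and_si128 (eq0, eq1)) != 0xffff;
    }

The removed assembly below expresses the same test as `pmovmskb`
followed by `incw %ax`: an all-equal mask of 0xffff wraps to zero, so a
nonzero result flags a difference.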

size, align0, align1, ret, New Time/Old Time
 128,      4,      8,   0,             0.858
 128,      4,      8,   1,             0.879
 128,      4,      8,  -1,             0.888

As well, outside of microbenchmark environments, where branches are not
fully predictable, the branch will have a real cost.
Reviewed-by: H.J. Lu <hjl.tools@gmail.com>

(cherry picked from commit 7cbc03d03091d5664060924789afe46d30a5477e)
---
 sysdeps/x86_64/multiarch/Makefile          |   2 -
 sysdeps/x86_64/multiarch/ifunc-impl-list.c |   4 -
 sysdeps/x86_64/multiarch/ifunc-memcmp.h    |   4 -
 sysdeps/x86_64/multiarch/memcmp-sse4.S     | 804 ---------------------
 4 files changed, 814 deletions(-)
 delete mode 100644 sysdeps/x86_64/multiarch/memcmp-sse4.S

diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index bca82e38..b503e4b8 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -11,7 +11,6 @@ sysdep_routines += \
   memcmp-avx2-movbe-rtm \
   memcmp-evex-movbe \
   memcmp-sse2 \
-  memcmp-sse4 \
   memcmp-ssse3 \
   memcpy-ssse3 \
   memcpy-ssse3-back \
@@ -174,7 +173,6 @@ sysdep_routines += \
   wmemcmp-avx2-movbe-rtm \
   wmemcmp-c \
   wmemcmp-evex-movbe \
-  wmemcmp-sse4 \
   wmemcmp-ssse3 \
 # sysdep_routines
 endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 14314367..450a2917 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -78,8 +78,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
                && CPU_FEATURE_USABLE (BMI2)
                && CPU_FEATURE_USABLE (MOVBE)),
               __memcmp_evex_movbe)
-      IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSE4_1),
-              __memcmp_sse4_1)
       IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSSE3),
               __memcmp_ssse3)
       IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_sse2))
@@ -824,8 +822,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
                && CPU_FEATURE_USABLE (BMI2)
                && CPU_FEATURE_USABLE (MOVBE)),
               __wmemcmp_evex_movbe)
-      IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSE4_1),
-              __wmemcmp_sse4_1)
       IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSSE3),
               __wmemcmp_ssse3)
       IFUNC_IMPL_ADD (array, i, wmemcmp, 1, __wmemcmp_sse2))
diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
index 690dffe8..0bc47a7f 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
@@ -21,7 +21,6 @@

 extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse4_1) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe_rtm) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_movbe) attribute_hidden;
@@ -47,9 +46,6 @@ IFUNC_SELECTOR (void)
       return OPTIMIZE (avx2_movbe);
     }

-  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
-    return OPTIMIZE (sse4_1);
-
   if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);

diff --git a/sysdeps/x86_64/multiarch/memcmp-sse4.S b/sysdeps/x86_64/multiarch/memcmp-sse4.S
deleted file mode 100644
index 50060006..00000000
--- a/sysdeps/x86_64/multiarch/memcmp-sse4.S
+++ /dev/null
@@ -1,804 +0,0 @@
-/* memcmp with SSE4.1, wmemcmp with SSE4.1
- Copyright (C) 2010-2018 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#if IS_IN (libc)
-
-# include <sysdep.h>
-
-# ifndef MEMCMP
-# define MEMCMP __memcmp_sse4_1
-# endif
-
-#ifdef USE_AS_WMEMCMP
-# define CMPEQ pcmpeqd
-# define CHAR_SIZE 4
-#else
-# define CMPEQ pcmpeqb
-# define CHAR_SIZE 1
-#endif
-
-
-/* Warning!
- wmemcmp has to use SIGNED comparison for elements.
- memcmp has to use UNSIGNED comparison for elemnts.
-*/
-
- .section .text.sse4.1,"ax",@progbits
-ENTRY (MEMCMP)
-# ifdef USE_AS_WMEMCMP
- shl $2, %RDX_LP
-# elif defined __ILP32__
- /* Clear the upper 32 bits. */
- mov %edx, %edx
-# endif
- cmp $79, %RDX_LP
- ja L(79bytesormore)
-
- cmp $CHAR_SIZE, %RDX_LP
- jbe L(firstbyte)
-
- /* N in (CHAR_SIZE, 79) bytes. */
- cmpl $32, %edx
- ja L(more_32_bytes)
-
- cmpl $16, %edx
- jae L(16_to_32_bytes)
-
-# ifndef USE_AS_WMEMCMP
- cmpl $8, %edx
- jae L(8_to_16_bytes)
-
- cmpl $4, %edx
- jb L(2_to_3_bytes)
-
- movl (%rdi), %eax
- movl (%rsi), %ecx
-
- bswap %eax
- bswap %ecx
-
- shlq $32, %rax
- shlq $32, %rcx
-
- movl -4(%rdi, %rdx), %edi
- movl -4(%rsi, %rdx), %esi
-
- bswap %edi
- bswap %esi
-
- orq %rdi, %rax
- orq %rsi, %rcx
- subq %rcx, %rax
- cmovne %edx, %eax
- sbbl %ecx, %ecx
- orl %ecx, %eax
- ret
-
- .p2align 4,, 8
-L(2_to_3_bytes):
- movzwl (%rdi), %eax
- movzwl (%rsi), %ecx
- shll $8, %eax
- shll $8, %ecx
- bswap %eax
- bswap %ecx
- movzbl -1(%rdi, %rdx), %edi
- movzbl -1(%rsi, %rdx), %esi
- orl %edi, %eax
- orl %esi, %ecx
- subl %ecx, %eax
- ret
-
- .p2align 4,, 8
-L(8_to_16_bytes):
- movq (%rdi), %rax
- movq (%rsi), %rcx
-
- bswap %rax
- bswap %rcx
-
- subq %rcx, %rax
- jne L(8_to_16_bytes_done)
-
- movq -8(%rdi, %rdx), %rax
- movq -8(%rsi, %rdx), %rcx
-
- bswap %rax
- bswap %rcx
-
- subq %rcx, %rax
-
-L(8_to_16_bytes_done):
- cmovne %edx, %eax
- sbbl %ecx, %ecx
- orl %ecx, %eax
- ret
-# else
- xorl %eax, %eax
- movl (%rdi), %ecx
- cmpl (%rsi), %ecx
- jne L(8_to_16_bytes_done)
- movl 4(%rdi), %ecx
- cmpl 4(%rsi), %ecx
- jne L(8_to_16_bytes_done)
- movl -4(%rdi, %rdx), %ecx
- cmpl -4(%rsi, %rdx), %ecx
- jne L(8_to_16_bytes_done)
- ret
-# endif
-
- .p2align 4,, 3
-L(ret_zero):
- xorl %eax, %eax
-L(zero):
- ret
-
- .p2align 4,, 8
-L(firstbyte):
- jb L(ret_zero)
-# ifdef USE_AS_WMEMCMP
- xorl %eax, %eax
- movl (%rdi), %ecx
- cmpl (%rsi), %ecx
- je L(zero)
-L(8_to_16_bytes_done):
- setg %al
- leal -1(%rax, %rax), %eax
-# else
- movzbl (%rdi), %eax
- movzbl (%rsi), %ecx
- sub %ecx, %eax
-# endif
- ret
-
- .p2align 4
-L(vec_return_begin_48):
- addq $16, %rdi
- addq $16, %rsi
-L(vec_return_begin_32):
- bsfl %eax, %eax
-# ifdef USE_AS_WMEMCMP
- movl 32(%rdi, %rax), %ecx
- xorl %edx, %edx
- cmpl 32(%rsi, %rax), %ecx
- setg %dl
- leal -1(%rdx, %rdx), %eax
-# else
- movzbl 32(%rsi, %rax), %ecx
- movzbl 32(%rdi, %rax), %eax
- subl %ecx, %eax
-# endif
- ret
-
- .p2align 4
-L(vec_return_begin_16):
- addq $16, %rdi
- addq $16, %rsi
-L(vec_return_begin):
- bsfl %eax, %eax
-# ifdef USE_AS_WMEMCMP
- movl (%rdi, %rax), %ecx
- xorl %edx, %edx
- cmpl (%rsi, %rax), %ecx
- setg %dl
- leal -1(%rdx, %rdx), %eax
-# else
- movzbl (%rsi, %rax), %ecx
- movzbl (%rdi, %rax), %eax
- subl %ecx, %eax
-# endif
- ret
-
- .p2align 4
-L(vec_return_end_16):
- subl $16, %edx
-L(vec_return_end):
- bsfl %eax, %eax
- addl %edx, %eax
-# ifdef USE_AS_WMEMCMP
- movl -16(%rdi, %rax), %ecx
- xorl %edx, %edx
- cmpl -16(%rsi, %rax), %ecx
- setg %dl
- leal -1(%rdx, %rdx), %eax
-# else
- movzbl -16(%rsi, %rax), %ecx
- movzbl -16(%rdi, %rax), %eax
- subl %ecx, %eax
-# endif
- ret
-
- .p2align 4,, 8
-L(more_32_bytes):
- movdqu (%rdi), %xmm0
- movdqu (%rsi), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqu 16(%rdi), %xmm0
- movdqu 16(%rsi), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- cmpl $64, %edx
- jbe L(32_to_64_bytes)
- movdqu 32(%rdi), %xmm0
- movdqu 32(%rsi), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- .p2align 4,, 6
-L(32_to_64_bytes):
- movdqu -32(%rdi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end_16)
-
- movdqu -16(%rdi, %rdx), %xmm0
- movdqu -16(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end)
- ret
-
- .p2align 4
-L(16_to_32_bytes):
- movdqu (%rdi), %xmm0
- movdqu (%rsi), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqu -16(%rdi, %rdx), %xmm0
- movdqu -16(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end)
- ret
-
-
- .p2align 4
-L(79bytesormore):
- movdqu (%rdi), %xmm0
- movdqu (%rsi), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
-
- mov %rsi, %rcx
- and $-16, %rsi
- add $16, %rsi
- sub %rsi, %rcx
-
- sub %rcx, %rdi
- add %rcx, %rdx
- test $0xf, %rdi
- jz L(2aligned)
-
- cmp $128, %rdx
- ja L(128bytesormore)
-
- .p2align 4,, 6
-L(less128bytes):
- movdqu (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqu 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqu 32(%rdi), %xmm1
- CMPEQ 32(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- movdqu 48(%rdi), %xmm1
- CMPEQ 48(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_48)
-
- cmp $96, %rdx
- jb L(32_to_64_bytes)
-
- addq $64, %rdi
- addq $64, %rsi
- subq $64, %rdx
-
- .p2align 4,, 6
-L(last_64_bytes):
- movdqu (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqu 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqu -32(%rdi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end_16)
-
- movdqu -16(%rdi, %rdx), %xmm0
- movdqu -16(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end)
- ret
-
- .p2align 4
-L(128bytesormore):
- cmp $256, %rdx
- ja L(unaligned_loop)
-L(less256bytes):
- movdqu (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqu 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqu 32(%rdi), %xmm1
- CMPEQ 32(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- movdqu 48(%rdi), %xmm1
- CMPEQ 48(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_48)
-
- addq $64, %rdi
- addq $64, %rsi
-
- movdqu (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqu 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqu 32(%rdi), %xmm1
- CMPEQ 32(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- movdqu 48(%rdi), %xmm1
- CMPEQ 48(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_48)
-
- addq $-128, %rdx
- subq $-64, %rsi
- subq $-64, %rdi
-
- cmp $64, %rdx
- ja L(less128bytes)
-
- cmp $32, %rdx
- ja L(last_64_bytes)
-
- movdqu -32(%rdi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end_16)
-
- movdqu -16(%rdi, %rdx), %xmm0
- movdqu -16(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end)
- ret
-
- .p2align 4
-L(unaligned_loop):
-# ifdef DATA_CACHE_SIZE_HALF
- mov $DATA_CACHE_SIZE_HALF, %R8_LP
-# else
- mov __x86_data_cache_size_half(%rip), %R8_LP
-# endif
- movq %r8, %r9
- addq %r8, %r8
- addq %r9, %r8
- cmpq %r8, %rdx
- ja L(L2_L3_cache_unaligned)
- sub $64, %rdx
- .p2align 4
-L(64bytesormore_loop):
- movdqu (%rdi), %xmm0
- movdqu 16(%rdi), %xmm1
- movdqu 32(%rdi), %xmm2
- movdqu 48(%rdi), %xmm3
-
- CMPEQ (%rsi), %xmm0
- CMPEQ 16(%rsi), %xmm1
- CMPEQ 32(%rsi), %xmm2
- CMPEQ 48(%rsi), %xmm3
-
- pand %xmm0, %xmm1
- pand %xmm2, %xmm3
- pand %xmm1, %xmm3
-
- pmovmskb %xmm3, %eax
- incw %ax
- jnz L(64bytesormore_loop_end)
-
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- ja L(64bytesormore_loop)
-
- .p2align 4,, 6
-L(loop_tail):
- addq %rdx, %rdi
- movdqu (%rdi), %xmm0
- movdqu 16(%rdi), %xmm1
- movdqu 32(%rdi), %xmm2
- movdqu 48(%rdi), %xmm3
-
- addq %rdx, %rsi
- movdqu (%rsi), %xmm4
- movdqu 16(%rsi), %xmm5
- movdqu 32(%rsi), %xmm6
- movdqu 48(%rsi), %xmm7
-
- CMPEQ %xmm4, %xmm0
- CMPEQ %xmm5, %xmm1
- CMPEQ %xmm6, %xmm2
- CMPEQ %xmm7, %xmm3
-
- pand %xmm0, %xmm1
- pand %xmm2, %xmm3
- pand %xmm1, %xmm3
-
- pmovmskb %xmm3, %eax
- incw %ax
- jnz L(64bytesormore_loop_end)
- ret
-
-L(L2_L3_cache_unaligned):
- subq $64, %rdx
- .p2align 4
-L(L2_L3_unaligned_128bytes_loop):
- prefetchnta 0x1c0(%rdi)
- prefetchnta 0x1c0(%rsi)
-
- movdqu (%rdi), %xmm0
- movdqu 16(%rdi), %xmm1
- movdqu 32(%rdi), %xmm2
- movdqu 48(%rdi), %xmm3
-
- CMPEQ (%rsi), %xmm0
- CMPEQ 16(%rsi), %xmm1
- CMPEQ 32(%rsi), %xmm2
- CMPEQ 48(%rsi), %xmm3
-
- pand %xmm0, %xmm1
- pand %xmm2, %xmm3
- pand %xmm1, %xmm3
-
- pmovmskb %xmm3, %eax
- incw %ax
- jnz L(64bytesormore_loop_end)
-
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- ja L(L2_L3_unaligned_128bytes_loop)
- jmp L(loop_tail)
-
-
- /* This case is for machines which are sensitive for unaligned
- * instructions. */
- .p2align 4
-L(2aligned):
- cmp $128, %rdx
- ja L(128bytesormorein2aligned)
-L(less128bytesin2aligned):
- movdqa (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqa 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqa 32(%rdi), %xmm1
- CMPEQ 32(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- movdqa 48(%rdi), %xmm1
- CMPEQ 48(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_48)
-
- cmp $96, %rdx
- jb L(32_to_64_bytes)
-
- addq $64, %rdi
- addq $64, %rsi
- subq $64, %rdx
-
- .p2align 4,, 6
-L(aligned_last_64_bytes):
- movdqa (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqa 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqu -32(%rdi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end_16)
-
- movdqu -16(%rdi, %rdx), %xmm0
- movdqu -16(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end)
- ret
-
- .p2align 4
-L(128bytesormorein2aligned):
- cmp $256, %rdx
- ja L(aligned_loop)
-L(less256bytesin2alinged):
- movdqa (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqa 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqa 32(%rdi), %xmm1
- CMPEQ 32(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- movdqa 48(%rdi), %xmm1
- CMPEQ 48(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_48)
-
- addq $64, %rdi
- addq $64, %rsi
-
- movdqa (%rdi), %xmm1
- CMPEQ (%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin)
-
- movdqa 16(%rdi), %xmm1
- CMPEQ 16(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_16)
-
- movdqa 32(%rdi), %xmm1
- CMPEQ 32(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_32)
-
- movdqa 48(%rdi), %xmm1
- CMPEQ 48(%rsi), %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_begin_48)
-
- addq $-128, %rdx
- subq $-64, %rsi
- subq $-64, %rdi
-
- cmp $64, %rdx
- ja L(less128bytesin2aligned)
-
- cmp $32, %rdx
- ja L(aligned_last_64_bytes)
-
- movdqu -32(%rdi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end_16)
-
- movdqu -16(%rdi, %rdx), %xmm0
- movdqu -16(%rsi, %rdx), %xmm1
- CMPEQ %xmm0, %xmm1
- pmovmskb %xmm1, %eax
- incw %ax
- jnz L(vec_return_end)
- ret
-
- .p2align 4
-L(aligned_loop):
-# ifdef DATA_CACHE_SIZE_HALF
- mov $DATA_CACHE_SIZE_HALF, %R8_LP
-# else
- mov __x86_data_cache_size_half(%rip), %R8_LP
-# endif
- movq %r8, %r9
- addq %r8, %r8
- addq %r9, %r8
- cmpq %r8, %rdx
- ja L(L2_L3_cache_aligned)
-
- sub $64, %rdx
- .p2align 4
-L(64bytesormore_loopin2aligned):
- movdqa (%rdi), %xmm0
- movdqa 16(%rdi), %xmm1
- movdqa 32(%rdi), %xmm2
- movdqa 48(%rdi), %xmm3
-
- CMPEQ (%rsi), %xmm0
- CMPEQ 16(%rsi), %xmm1
- CMPEQ 32(%rsi), %xmm2
- CMPEQ 48(%rsi), %xmm3
-
- pand %xmm0, %xmm1
- pand %xmm2, %xmm3
- pand %xmm1, %xmm3
-
- pmovmskb %xmm3, %eax
- incw %ax
- jnz L(64bytesormore_loop_end)
- add $64, %rsi
- add $64, %rdi
- sub $64, %rdx
- ja L(64bytesormore_loopin2aligned)
- jmp L(loop_tail)
-
-L(L2_L3_cache_aligned):
- subq $64, %rdx
- .p2align 4
-L(L2_L3_aligned_128bytes_loop):
- prefetchnta 0x1c0(%rdi)
- prefetchnta 0x1c0(%rsi)
- movdqa (%rdi), %xmm0
- movdqa 16(%rdi), %xmm1
- movdqa 32(%rdi), %xmm2
- movdqa 48(%rdi), %xmm3
-
- CMPEQ (%rsi), %xmm0
- CMPEQ 16(%rsi), %xmm1
- CMPEQ 32(%rsi), %xmm2
- CMPEQ 48(%rsi), %xmm3
-
- pand %xmm0, %xmm1
- pand %xmm2, %xmm3
- pand %xmm1, %xmm3
-
- pmovmskb %xmm3, %eax
- incw %ax
- jnz L(64bytesormore_loop_end)
-
- addq $64, %rsi
- addq $64, %rdi
- subq $64, %rdx
- ja L(L2_L3_aligned_128bytes_loop)
- jmp L(loop_tail)
-
- .p2align 4
-L(64bytesormore_loop_end):
- pmovmskb %xmm0, %ecx
- incw %cx
- jnz L(loop_end_ret)
-
- pmovmskb %xmm1, %ecx
- notw %cx
- sall $16, %ecx
- jnz L(loop_end_ret)
-
- pmovmskb %xmm2, %ecx
- notw %cx
- shlq $32, %rcx
- jnz L(loop_end_ret)
-
- addq $48, %rdi
- addq $48, %rsi
- movq %rax, %rcx
-
- .p2align 4,, 6
-L(loop_end_ret):
- bsfq %rcx, %rcx
-# ifdef USE_AS_WMEMCMP
- movl (%rdi, %rcx), %eax
- xorl %edx, %edx
- cmpl (%rsi, %rcx), %eax
- setg %dl
- leal -1(%rdx, %rdx), %eax
-# else
- movzbl (%rdi, %rcx), %eax
- movzbl (%rsi, %rcx), %ecx
- subl %ecx, %eax
-# endif
- ret
-END (MEMCMP)
-#endif
--
GitLab