commit 8ab861d295b90177b89288a2bc95c5de5e4e5bc6
Author: Sunil K Pandey <skpgkp2@gmail.com>
Date:   Sun Feb 27 16:39:47 2022 -0800

    x86_64: Implement evex512 version of strlen, strnlen, wcslen and wcsnlen

    This patch implements the following evex512 versions of string
    functions.  Perf gain for the evex512 version is up to 50% compared
    to evex, depending on length and alignment.

    Placeholder functions, not used by any processor at the moment.

    - String length function using 512 bit vectors.
    - String N length using 512 bit vectors.
    - Wide string length using 512 bit vectors.
    - Wide string N length using 512 bit vectors.

    Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
    (cherry picked from commit 9c66efb86fe384f77435f7e326333fb2e4e10676)
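
As background for the ifunc-impl-list.c hunks below: glibc picks among these
implementations at load time via IFUNC resolvers gated on CPU features.  The
following is only a minimal C sketch of that selection shape; all demo_*
names are hypothetical stand-ins, and the real gate is the
CPU_FEATURE_USABLE checks added by the patch.

/* Illustrative sketch, not glibc's actual resolver.  */
#include <stddef.h>
#include <string.h>

typedef size_t (*strlen_fn) (const char *);

/* Stand-in for __strlen_evex512; a real build resolves to the
   assembly implementation added by this patch.  */
static size_t demo_strlen_evex512 (const char *s) { return strlen (s); }

/* Stub probes; real probes would read CPUID feature bits.  */
static int demo_has_avx512vl (void) { return 0; }
static int demo_has_avx512bw (void) { return 0; }
static int demo_has_bmi2 (void) { return 0; }

static strlen_fn
demo_select_strlen (void)
{
  /* Same condition as the new IFUNC_IMPL_ADD entries:
     AVX512VL && AVX512BW && BMI2 selects the evex512 variant.  */
  if (demo_has_avx512vl () && demo_has_avx512bw () && demo_has_bmi2 ())
    return demo_strlen_evex512;
  return strlen;
}
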
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 67401162d526f664..4d4ad2a3686b5bc3 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -87,6 +87,7 @@ sysdep_routines += \
   strlen-avx2 \
   strlen-avx2-rtm \
   strlen-evex \
+  strlen-evex512 \
   strlen-sse2 \
   strncase_l-avx2 \
   strncase_l-avx2-rtm \
@@ -115,6 +116,7 @@ sysdep_routines += \
   strnlen-avx2 \
   strnlen-avx2-rtm \
   strnlen-evex \
+  strnlen-evex512 \
   strnlen-sse2 \
   strpbrk-c \
   strpbrk-sse2 \
@@ -148,6 +150,7 @@ sysdep_routines += \
   wcslen-avx2 \
   wcslen-avx2-rtm \
   wcslen-evex \
+  wcslen-evex512 \
   wcslen-sse2 \
   wcslen-sse4_1 \
   wcsncmp-avx2 \
@@ -158,6 +161,7 @@ sysdep_routines += \
   wcsnlen-avx2-rtm \
   wcsnlen-c \
   wcsnlen-evex \
+  wcsnlen-evex512 \
   wcsnlen-sse4_1 \
   wcsrchr-avx2 \
   wcsrchr-avx2-rtm \
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index d990a7149489efd9..6b75a7106e174bce 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -317,6 +317,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __strlen_evex)
+	      IFUNC_IMPL_ADD (array, i, strlen,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)
+			       && CPU_FEATURE_USABLE (BMI2)),
+			      __strlen_evex512)
 	      IFUNC_IMPL_ADD (array, i, strlen, 1, __strlen_sse2))

   /* Support sysdeps/x86_64/multiarch/strnlen.c.  */
@@ -335,6 +340,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __strnlen_evex)
+	      IFUNC_IMPL_ADD (array, i, strnlen,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)
+			       && CPU_FEATURE_USABLE (BMI2)),
+			      __strnlen_evex512)
 	      IFUNC_IMPL_ADD (array, i, strnlen, 1, __strnlen_sse2))

   /* Support sysdeps/x86_64/multiarch/stpncpy.c.  */
@@ -714,6 +724,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __wcslen_evex)
+	      IFUNC_IMPL_ADD (array, i, wcslen,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)
+			       && CPU_FEATURE_USABLE (BMI2)),
+			      __wcslen_evex512)
 	      IFUNC_IMPL_ADD (array, i, wcslen,
 			      CPU_FEATURE_USABLE (SSE4_1),
 			      __wcslen_sse4_1)
@@ -735,6 +750,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __wcsnlen_evex)
+	      IFUNC_IMPL_ADD (array, i, wcsnlen,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)
+			       && CPU_FEATURE_USABLE (BMI2)),
+			      __wcsnlen_evex512)
 	      IFUNC_IMPL_ADD (array, i, wcsnlen,
 			      CPU_FEATURE_USABLE (SSE4_1),
 			      __wcsnlen_sse4_1)
diff --git a/sysdeps/x86_64/multiarch/strlen-evex-base.S b/sysdeps/x86_64/multiarch/strlen-evex-base.S
new file mode 100644
index 0000000000000000..278c899691d89ba7
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strlen-evex-base.S
@@ -0,0 +1,302 @@
+/* Placeholder function, not used by any processor at the moment.
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+
+# include <sysdep.h>
+
+# ifdef USE_AS_WCSLEN
+#  define VPCMP		vpcmpd
+#  define VPTESTN	vptestnmd
+#  define VPMINU	vpminud
+#  define CHAR_SIZE	4
+# else
+#  define VPCMP		vpcmpb
+#  define VPTESTN	vptestnmb
+#  define VPMINU	vpminub
+#  define CHAR_SIZE	1
+# endif
+
+# define XMM0		xmm16
+# define PAGE_SIZE	4096
+# define CHAR_PER_VEC	(VEC_SIZE / CHAR_SIZE)
+
+# if VEC_SIZE == 64
+#  define KMOV		kmovq
+#  define KORTEST	kortestq
+#  define RAX		rax
+#  define RCX		rcx
+#  define RDX		rdx
+#  define SHR		shrq
+#  define TEXTSUFFIX	evex512
+#  define VMM0		zmm16
+#  define VMM1		zmm17
+#  define VMM2		zmm18
+#  define VMM3		zmm19
+#  define VMM4		zmm20
+#  define VMOVA		vmovdqa64
+# elif VEC_SIZE == 32
+/* Currently Unused.  */
+#  define KMOV		kmovd
+#  define KORTEST	kortestd
+#  define RAX		eax
+#  define RCX		ecx
+#  define RDX		edx
+#  define SHR		shrl
+#  define TEXTSUFFIX	evex256
+#  define VMM0		ymm16
+#  define VMM1		ymm17
+#  define VMM2		ymm18
+#  define VMM3		ymm19
+#  define VMM4		ymm20
+#  define VMOVA		vmovdqa32
+# endif
+
+	.section .text.TEXTSUFFIX, "ax", @progbits
+/* Aligning entry point to 64 byte, provides better performance for
+   one vector length string.  */
+ENTRY_P2ALIGN (STRLEN, 6)
+# ifdef USE_AS_STRNLEN
+	/* Check zero length.  */
+	test	%RSI_LP, %RSI_LP
+	jz	L(ret_max)
+#  ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	movl	%esi, %esi
+#  endif
+# endif
+
+	movl	%edi, %eax
+	vpxorq	%XMM0, %XMM0, %XMM0
+	andl	$(PAGE_SIZE - 1), %eax
+	cmpl	$(PAGE_SIZE - VEC_SIZE), %eax
+	ja	L(page_cross)
+
+	/* Compare [w]char for null, mask bit will be set for match.  */
+	VPCMP	$0, (%rdi), %VMM0, %k0
+	KMOV	%k0, %RAX
+	test	%RAX, %RAX
+	jz	L(align_more)
+
+	bsf	%RAX, %RAX
+# ifdef USE_AS_STRNLEN
+	cmpq	%rsi, %rax
+	cmovnb	%rsi, %rax
+# endif
+	ret
+
+	/* At this point vector max length reached.  */
+# ifdef USE_AS_STRNLEN
+	.p2align 4,,3
+L(ret_max):
+	movq	%rsi, %rax
+	ret
+# endif
+
+L(align_more):
+	leaq	VEC_SIZE(%rdi), %rax
+	/* Align rax to VEC_SIZE.  */
+	andq	$-VEC_SIZE, %rax
+# ifdef USE_AS_STRNLEN
+	movq	%rax, %rdx
+	subq	%rdi, %rdx
+#  ifdef USE_AS_WCSLEN
+	SHR	$2, %RDX
+#  endif
+	/* At this point rdx contains [w]chars already compared.  */
+	subq	%rsi, %rdx
+	jae	L(ret_max)
+	negq	%rdx
+	/* At this point rdx contains number of w[char] needs to go.
+	   Now onwards rdx will keep decrementing with each compare.  */
+# endif
+
+	/* Loop unroll 4 times for 4 vector loop.  */
+	VPCMP	$0, (%rax), %VMM0, %k0
+	KMOV	%k0, %RCX
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x1)
+
+# ifdef USE_AS_STRNLEN
+	subq	$CHAR_PER_VEC, %rdx
+	jbe	L(ret_max)
+# endif
+
+	VPCMP	$0, VEC_SIZE(%rax), %VMM0, %k0
+	KMOV	%k0, %RCX
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x2)
+
+# ifdef USE_AS_STRNLEN
+	subq	$CHAR_PER_VEC, %rdx
+	jbe	L(ret_max)
+# endif
+
+	VPCMP	$0, (VEC_SIZE * 2)(%rax), %VMM0, %k0
+	KMOV	%k0, %RCX
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x3)
+
+# ifdef USE_AS_STRNLEN
+	subq	$CHAR_PER_VEC, %rdx
+	jbe	L(ret_max)
+# endif
+
+	VPCMP	$0, (VEC_SIZE * 3)(%rax), %VMM0, %k0
+	KMOV	%k0, %RCX
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x4)
+
+# ifdef USE_AS_STRNLEN
+	subq	$CHAR_PER_VEC, %rdx
+	jbe	L(ret_max)
+	/* Save pointer before 4 x VEC_SIZE alignment.  */
+	movq	%rax, %rcx
+# endif
+
+	/* Align address to VEC_SIZE * 4 for loop.  */
+	andq	$-(VEC_SIZE * 4), %rax
+
+# ifdef USE_AS_STRNLEN
+	subq	%rax, %rcx
+#  ifdef USE_AS_WCSLEN
+	SHR	$2, %RCX
+#  endif
+	/* rcx contains number of [w]char will be recompared due to
+	   alignment fixes.  rdx must be incremented by rcx to offset
+	   alignment adjustment.  */
+	addq	%rcx, %rdx
+	/* Need jump as we don't want to add/subtract rdx for first
+	   iteration of 4 x VEC_SIZE aligned loop.  */
+	jmp	L(loop_entry)
+# endif
+
+	.p2align 4,,11
+L(loop):
+# ifdef USE_AS_STRNLEN
+	subq	$(CHAR_PER_VEC * 4), %rdx
+	jbe	L(ret_max)
+L(loop_entry):
+# endif
+	/* VPMINU and VPCMP combination provide better performance as
+	   compared to alternative combinations.  */
+	VMOVA	(VEC_SIZE * 4)(%rax), %VMM1
+	VPMINU	(VEC_SIZE * 5)(%rax), %VMM1, %VMM2
+	VMOVA	(VEC_SIZE * 6)(%rax), %VMM3
+	VPMINU	(VEC_SIZE * 7)(%rax), %VMM3, %VMM4
+
+	VPTESTN	%VMM2, %VMM2, %k0
+	VPTESTN	%VMM4, %VMM4, %k1
+
+	subq	$-(VEC_SIZE * 4), %rax
+	KORTEST	%k0, %k1
+	jz	L(loop)
+
+	VPTESTN	%VMM1, %VMM1, %k2
+	KMOV	%k2, %RCX
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x1)
+
+	KMOV	%k0, %RCX
+	/* At this point, if k0 is non zero, null char must be in the
+	   second vector.  */
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x2)
+
+	VPTESTN	%VMM3, %VMM3, %k3
+	KMOV	%k3, %RCX
+	test	%RCX, %RCX
+	jnz	L(ret_vec_x3)
+	/* At this point null [w]char must be in the fourth vector so no
+	   need to check.  */
+	KMOV	%k1, %RCX
+
+	/* Fourth, third, second vector terminating are pretty much
+	   same, implemented this way to avoid branching and reuse code
+	   from pre loop exit condition.  */
+L(ret_vec_x4):
+	bsf	%RCX, %RCX
+	subq	%rdi, %rax
+# ifdef USE_AS_WCSLEN
+	subq	$-(VEC_SIZE * 3), %rax
+	shrq	$2, %rax
+	addq	%rcx, %rax
+# else
+	leaq	(VEC_SIZE * 3)(%rcx, %rax), %rax
+# endif
+# ifdef USE_AS_STRNLEN
+	cmpq	%rsi, %rax
+	cmovnb	%rsi, %rax
+# endif
+	ret
+
+L(ret_vec_x3):
+	bsf	%RCX, %RCX
+	subq	%rdi, %rax
+# ifdef USE_AS_WCSLEN
+	subq	$-(VEC_SIZE * 2), %rax
+	shrq	$2, %rax
+	addq	%rcx, %rax
+# else
+	leaq	(VEC_SIZE * 2)(%rcx, %rax), %rax
+# endif
+# ifdef USE_AS_STRNLEN
+	cmpq	%rsi, %rax
+	cmovnb	%rsi, %rax
+# endif
+	ret
+
+L(ret_vec_x2):
+	subq	$-VEC_SIZE, %rax
+L(ret_vec_x1):
+	bsf	%RCX, %RCX
+	subq	%rdi, %rax
+# ifdef USE_AS_WCSLEN
+	shrq	$2, %rax
+# endif
+	addq	%rcx, %rax
+# ifdef USE_AS_STRNLEN
+	cmpq	%rsi, %rax
+	cmovnb	%rsi, %rax
+# endif
+	ret
+
+L(page_cross):
+	movl	%eax, %ecx
+# ifdef USE_AS_WCSLEN
+	andl	$(VEC_SIZE - 1), %ecx
+	sarl	$2, %ecx
+# endif
+	/* ecx contains number of w[char] to be skipped as a result
+	   of address alignment.  */
+	xorq	%rdi, %rax
+	VPCMP	$0, (PAGE_SIZE - VEC_SIZE)(%rax), %VMM0, %k0
+	KMOV	%k0, %RAX
+	/* Ignore number of character for alignment adjustment.  */
+	SHR	%cl, %RAX
+	jz	L(align_more)
+
+	bsf	%RAX, %RAX
+# ifdef USE_AS_STRNLEN
+	cmpq	%rsi, %rax
+	cmovnb	%rsi, %rax
+# endif
+	ret
+
+END (STRLEN)
+#endif
diff --git a/sysdeps/x86_64/multiarch/strlen-evex512.S b/sysdeps/x86_64/multiarch/strlen-evex512.S
new file mode 100644
index 0000000000000000..116f8981c8954e2e
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strlen-evex512.S
@@ -0,0 +1,7 @@
+#ifndef STRLEN
+# define STRLEN		__strlen_evex512
+#endif
+
+#define VEC_SIZE	64
+
+#include "strlen-evex-base.S"
diff --git a/sysdeps/x86_64/multiarch/strnlen-evex512.S b/sysdeps/x86_64/multiarch/strnlen-evex512.S
new file mode 100644
index 0000000000000000..0b7f220214a7c33c
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strnlen-evex512.S
@@ -0,0 +1,4 @@
+#define STRLEN __strnlen_evex512
+#define USE_AS_STRNLEN 1
+
+#include "strlen-evex512.S"
diff --git a/sysdeps/x86_64/multiarch/wcslen-evex512.S b/sysdeps/x86_64/multiarch/wcslen-evex512.S
new file mode 100644
index 0000000000000000..f59c372b78b4fb8c
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcslen-evex512.S
@@ -0,0 +1,4 @@
+#define STRLEN __wcslen_evex512
+#define USE_AS_WCSLEN 1
+
+#include "strlen-evex512.S"
diff --git a/sysdeps/x86_64/multiarch/wcsnlen-evex512.S b/sysdeps/x86_64/multiarch/wcsnlen-evex512.S
new file mode 100644
index 0000000000000000..73dcf2f210a85aac
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcsnlen-evex512.S
@@ -0,0 +1,5 @@
+#define STRLEN __wcsnlen_evex512
+#define USE_AS_WCSLEN 1
+#define USE_AS_STRNLEN 1
+
+#include "strlen-evex512.S"
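
A note on the L(page_cross) path in strlen-evex-base.S above: the first
unaligned full-vector load is only taken when it cannot run into the next,
possibly unmapped, page.  Below is a small C model of that entry check; it
is a sketch using the patch's PAGE_SIZE/VEC_SIZE constants, not code from
the patch, and demo_page_cross is a hypothetical name.

#include <stdint.h>

#define PAGE_SIZE 4096
#define VEC_SIZE  64   /* evex512: one 64-byte vector */

/* Mirrors the entry test: andl $(PAGE_SIZE - 1), %eax;
   cmpl $(PAGE_SIZE - VEC_SIZE), %eax; ja L(page_cross).
   Returns nonzero when a VEC_SIZE-byte load at p would cross into
   the next page and must take the aligned page-cross path.  */
static int
demo_page_cross (const void *p)
{
  uintptr_t off = (uintptr_t) p & (PAGE_SIZE - 1);
  return off > PAGE_SIZE - VEC_SIZE;
}

When the check trips, the assembly instead loads the last VEC_SIZE bytes of
the current page (the (PAGE_SIZE - VEC_SIZE)(%rax) load after computing the
page base) and shifts the match mask right by the character offset of the
string start within that vector, so only bytes belonging to the string are
considered.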