commit 4f26956d5ba394eb3ade6c1c20b5c16864a00766
Author: Naohiro Tamura <naohirot@jp.fujitsu.com>
Date:   Thu May 27 07:44:12 2021 +0000

    aarch64: Added optimized memset for A64FX

    This patch optimizes the performance of memset for A64FX [1], which
    implements ARMv8-A SVE and has a 64KB L1 cache per core and an 8MB
    L2 cache per NUMA node.

    The performance optimization makes use of the SVE scalable vector
    registers, together with techniques such as loop unrolling, memory
    access alignment, cache zero fill and prefetch.

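    The "cache zero fill" above refers to the AArch64 'DC ZVA'
    instruction, which zeroes a whole block of memory without first
    reading it from DRAM; the L(L2) path in the new assembly advances by
    CACHE_LINE_SIZE (256 bytes) per 'dc zva', i.e. it relies on the
    A64FX ZVA block matching the 256-byte cache line.  A minimal C
    sketch of the underlying operation, assuming GCC-style inline
    assembly and hypothetical helper names (not part of this patch):

        #include <stdint.h>

        /* Bits [3:0] of DCZID_EL0 hold log2 of the block size in words
           (4 bytes each), so the size in bytes is 4 << BS; BS = 6 gives
           the 256-byte blocks used on A64FX.  */
        static inline uint64_t
        dc_zva_block_size (void)
        {
          uint64_t dczid;
          __asm__ volatile ("mrs %0, dczid_el0" : "=r" (dczid));
          return 4u << (dczid & 0xf);
        }

        /* Zero one naturally aligned block starting at P.  */
        static inline void
        dc_zva_block (void *p)
        {
          __asm__ volatile ("dc zva, %0" : : "r" (p) : "memory");
        }
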
    The SVE assembler code for memset is implemented as Vector Length
    Agnostic code, so in principle it can run on any SoC that supports
    the ARMv8-A SVE standard.

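    The Vector Length Agnostic property comes from querying the vector
    length at run time (cntb) and letting 'whilelo' predicates mask off
    the tail lanes, so the same binary works for any SVE vector width.
    A minimal C sketch of the same idea using ACLE SVE intrinsics (the
    name vla_memset_sketch is hypothetical and not part of this patch;
    build with -march=armv8.2-a+sve):

        #include <stddef.h>
        #include <stdint.h>
        #include <arm_sve.h>

        static void *
        vla_memset_sketch (void *dstin, int c, size_t n)
        {
          uint8_t *dst = dstin;
          svuint8_t v = svdup_n_u8 ((uint8_t) c);
          uint64_t vl = svcntb ();  /* bytes per SVE vector, known only at run time */

          for (uint64_t i = 0; i < n; i += vl)
            {
              /* Predicate is all-true for full vectors and masks the
                 tail lanes, so no scalar cleanup loop is needed.  */
              svbool_t pg = svwhilelt_b8_u64 (i, (uint64_t) n);
              svst1_u8 (pg, dst + i, v);
            }
          return dstin;
        }
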
    We confirmed that all test cases pass under 'make check' and
    'make xcheck', not only on A64FX but also on ThunderX2.

    We also confirmed with 'make bench' that the SVE 512-bit vector
    register implementation performs roughly 4 times better than the
    Advanced SIMD 128-bit registers and 8 times better than the scalar
    64-bit registers.

    [1] https://github.com/fujitsu/A64FX

    Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
    Reviewed-by: Szabolcs Nagy <Szabolcs.Nagy@arm.com>

Conflicts:
	sysdeps/aarch64/multiarch/Makefile
	sysdeps/aarch64/multiarch/ifunc-impl-list.c
	sysdeps/aarch64/multiarch/memset.c
	  (all conflicts due to missing other CPU implementations downstream)

diff --git a/sysdeps/aarch64/multiarch/Makefile b/sysdeps/aarch64/multiarch/Makefile
index 5a19ba0308e80983..5ff883a8ad8e3067 100644
--- a/sysdeps/aarch64/multiarch/Makefile
+++ b/sysdeps/aarch64/multiarch/Makefile
@@ -1,5 +1,6 @@
 ifeq ($(subdir),string)
 sysdep_routines += memcpy_generic memcpy_thunderx memcpy_thunderx2 \
 		   memcpy_falkor memcpy_a64fx \
-		   memmove_falkor memset_generic memset_falkor
+		   memmove_falkor memset_generic memset_falkor \
+		   memset_a64fx
 endif
diff --git a/sysdeps/aarch64/multiarch/ifunc-impl-list.c b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
index f53db12acce37877..53e3e162a1025e40 100644
--- a/sysdeps/aarch64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
@@ -37,7 +37,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   INIT_ARCH ();
 
-  /* Support sysdeps/aarch64/multiarch/memcpy.c and memmove.c.  */
+  /* Support sysdeps/aarch64/multiarch/memcpy.c, memmove.c and memset.c.  */
   IFUNC_IMPL (i, name, memcpy,
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_thunderx)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_thunderx2)
@@ -57,6 +57,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      /* Enable this on non-falkor processors too so that other cores
 		 can do a comparative analysis with __memset_generic.  */
 	      IFUNC_IMPL_ADD (array, i, memset, (zva_size == 64), __memset_falkor)
+#if HAVE_AARCH64_SVE_ASM
+	      IFUNC_IMPL_ADD (array, i, memset, sve, __memset_a64fx)
+#endif
 	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_generic))
 
   return i;
diff --git a/sysdeps/aarch64/multiarch/memset.c b/sysdeps/aarch64/multiarch/memset.c
index d74ed3a549a54b10..2c8cc72bb0b18474 100644
--- a/sysdeps/aarch64/multiarch/memset.c
+++ b/sysdeps/aarch64/multiarch/memset.c
@@ -29,12 +29,21 @@
 extern __typeof (__redirect_memset) __libc_memset;
 
 extern __typeof (__redirect_memset) __memset_falkor attribute_hidden;
+# if HAVE_AARCH64_SVE_ASM
+extern __typeof (__redirect_memset) __memset_a64fx attribute_hidden;
+# endif
 extern __typeof (__redirect_memset) __memset_generic attribute_hidden;
 
 libc_ifunc (__libc_memset,
 	    ((IS_FALKOR (midr) || IS_PHECDA (midr)) && zva_size == 64
 	     ? __memset_falkor
+# if HAVE_AARCH64_SVE_ASM
+	     : (IS_A64FX (midr)
+		? __memset_a64fx
+		: __memset_generic)));
+# else
 	     : __memset_generic));
+# endif
 
 # undef memset
 strong_alias (__libc_memset, memset);
diff --git a/sysdeps/aarch64/multiarch/memset_a64fx.S b/sysdeps/aarch64/multiarch/memset_a64fx.S
new file mode 100644
index 0000000000000000..ce54e5418b08c8bc
--- /dev/null
+++ b/sysdeps/aarch64/multiarch/memset_a64fx.S
@@ -0,0 +1,268 @@
+/* Optimized memset for Fujitsu A64FX processor.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <sysdeps/aarch64/memset-reg.h>
+
+/* Assumptions:
+ *
+ * ARMv8.2-a, AArch64, unaligned accesses, sve
+ *
+ */
+
+#define L1_SIZE		(64*1024)	// L1 64KB
+#define L2_SIZE         (8*1024*1024)	// L2 8MB - 1MB
+#define CACHE_LINE_SIZE	256
+#define PF_DIST_L1	(CACHE_LINE_SIZE * 16)	// Prefetch distance L1
+#define ZF_DIST		(CACHE_LINE_SIZE * 21)	// Zerofill distance
+#define rest		x8
+#define vector_length	x9
+#define vl_remainder	x10	// vector_length remainder
+#define cl_remainder	x11	// CACHE_LINE_SIZE remainder
+
+#if HAVE_AARCH64_SVE_ASM
+# if IS_IN (libc)
+#  define MEMSET __memset_a64fx
+
+	.arch armv8.2-a+sve
+
+	.macro dc_zva times
+	dc	zva, tmp1
+	add	tmp1, tmp1, CACHE_LINE_SIZE
+	.if \times-1
+	dc_zva "(\times-1)"
+	.endif
+	.endm
+
+	.macro st1b_unroll first=0, last=7
+	st1b	z0.b, p0, [dst, #\first, mul vl]
+	.if \last-\first
+	st1b_unroll "(\first+1)", \last
+	.endif
+	.endm
+
+	.macro shortcut_for_small_size exit
+	// if rest <= vector_length * 2
+	whilelo	p0.b, xzr, count
+	whilelo	p1.b, vector_length, count
+	b.last	1f
+	st1b	z0.b, p0, [dstin, #0, mul vl]
+	st1b	z0.b, p1, [dstin, #1, mul vl]
+	ret
+1:	// if rest > vector_length * 8
+	cmp	count, vector_length, lsl 3	// vector_length * 8
+	b.hi	\exit
+	// if rest <= vector_length * 4
+	lsl	tmp1, vector_length, 1	// vector_length * 2
+	whilelo	p2.b, tmp1, count
+	incb	tmp1
+	whilelo	p3.b, tmp1, count
+	b.last	1f
+	st1b	z0.b, p0, [dstin, #0, mul vl]
+	st1b	z0.b, p1, [dstin, #1, mul vl]
+	st1b	z0.b, p2, [dstin, #2, mul vl]
+	st1b	z0.b, p3, [dstin, #3, mul vl]
+	ret
+1:	// if rest <= vector_length * 8
+	lsl	tmp1, vector_length, 2	// vector_length * 4
+	whilelo	p4.b, tmp1, count
+	incb	tmp1
+	whilelo	p5.b, tmp1, count
+	b.last	1f
+	st1b	z0.b, p0, [dstin, #0, mul vl]
+	st1b	z0.b, p1, [dstin, #1, mul vl]
+	st1b	z0.b, p2, [dstin, #2, mul vl]
+	st1b	z0.b, p3, [dstin, #3, mul vl]
+	st1b	z0.b, p4, [dstin, #4, mul vl]
+	st1b	z0.b, p5, [dstin, #5, mul vl]
+	ret
+1:	lsl	tmp1, vector_length, 2	// vector_length * 4
+	incb	tmp1			// vector_length * 5
+	incb	tmp1			// vector_length * 6
+	whilelo	p6.b, tmp1, count
+	incb	tmp1
+	whilelo	p7.b, tmp1, count
+	st1b	z0.b, p0, [dstin, #0, mul vl]
+	st1b	z0.b, p1, [dstin, #1, mul vl]
+	st1b	z0.b, p2, [dstin, #2, mul vl]
+	st1b	z0.b, p3, [dstin, #3, mul vl]
+	st1b	z0.b, p4, [dstin, #4, mul vl]
+	st1b	z0.b, p5, [dstin, #5, mul vl]
+	st1b	z0.b, p6, [dstin, #6, mul vl]
+	st1b	z0.b, p7, [dstin, #7, mul vl]
+	ret
+	.endm
+
+ENTRY (MEMSET)
+
+	PTR_ARG (0)
+	SIZE_ARG (2)
+
+	cbnz	count, 1f
+	ret
+1:	dup	z0.b, valw
+	cntb	vector_length
+	// shortcut for less than vector_length * 8
+	// gives a free ptrue to p0.b for n >= vector_length
+	shortcut_for_small_size L(vl_agnostic)
+	// end of shortcut
+
+L(vl_agnostic): // VL Agnostic
+	mov	rest, count
+	mov	dst, dstin
+	add	dstend, dstin, count
+	// if rest >= L2_SIZE && vector_length == 64 then L(L2)
+	mov	tmp1, 64
+	cmp	rest, L2_SIZE
+	ccmp	vector_length, tmp1, 0, cs
+	b.eq	L(L2)
+	// if rest >= L1_SIZE && vector_length == 64 then L(L1_prefetch)
+	cmp	rest, L1_SIZE
+	ccmp	vector_length, tmp1, 0, cs
+	b.eq	L(L1_prefetch)
+
+L(unroll32):
+	lsl	tmp1, vector_length, 3	// vector_length * 8
+	lsl	tmp2, vector_length, 5	// vector_length * 32
+	.p2align 3
+1:	cmp	rest, tmp2
+	b.cc	L(unroll8)
+	st1b_unroll
+	add	dst, dst, tmp1
+	st1b_unroll
+	add	dst, dst, tmp1
+	st1b_unroll
+	add	dst, dst, tmp1
+	st1b_unroll
+	add	dst, dst, tmp1
+	sub	rest, rest, tmp2
+	b	1b
+
+L(unroll8):
+	lsl	tmp1, vector_length, 3
+	.p2align 3
+1:	cmp	rest, tmp1
+	b.cc	L(last)
+	st1b_unroll
+	add	dst, dst, tmp1
+	sub	rest, rest, tmp1
+	b	1b
+
+L(last):
+	whilelo	p0.b, xzr, rest
+	whilelo	p1.b, vector_length, rest
+	b.last	1f
+	st1b	z0.b, p0, [dst, #0, mul vl]
+	st1b	z0.b, p1, [dst, #1, mul vl]
+	ret
+1:	lsl	tmp1, vector_length, 1	// vector_length * 2
+	whilelo	p2.b, tmp1, rest
+	incb	tmp1
+	whilelo	p3.b, tmp1, rest
+	b.last	1f
+	st1b	z0.b, p0, [dst, #0, mul vl]
+	st1b	z0.b, p1, [dst, #1, mul vl]
+	st1b	z0.b, p2, [dst, #2, mul vl]
+	st1b	z0.b, p3, [dst, #3, mul vl]
+	ret
+1:	lsl	tmp1, vector_length, 2	// vector_length * 4
+	whilelo	p4.b, tmp1, rest
+	incb	tmp1
+	whilelo	p5.b, tmp1, rest
+	incb	tmp1
+	whilelo	p6.b, tmp1, rest
+	incb	tmp1
+	whilelo	p7.b, tmp1, rest
+	st1b	z0.b, p0, [dst, #0, mul vl]
+	st1b	z0.b, p1, [dst, #1, mul vl]
+	st1b	z0.b, p2, [dst, #2, mul vl]
+	st1b	z0.b, p3, [dst, #3, mul vl]
+	st1b	z0.b, p4, [dst, #4, mul vl]
+	st1b	z0.b, p5, [dst, #5, mul vl]
+	st1b	z0.b, p6, [dst, #6, mul vl]
+	st1b	z0.b, p7, [dst, #7, mul vl]
+	ret
+
+L(L1_prefetch): // if rest >= L1_SIZE
+	.p2align 3
+1:	st1b_unroll 0, 3
+	prfm	pstl1keep, [dst, PF_DIST_L1]
+	st1b_unroll 4, 7
+	prfm	pstl1keep, [dst, PF_DIST_L1 + CACHE_LINE_SIZE]
+	add	dst, dst, CACHE_LINE_SIZE * 2
+	sub	rest, rest, CACHE_LINE_SIZE * 2
+	cmp	rest, L1_SIZE
+	b.ge	1b
+	cbnz	rest, L(unroll32)
+	ret
+
+L(L2):
+	// align dst address at vector_length byte boundary
+	sub	tmp1, vector_length, 1
+	ands	tmp2, dst, tmp1
+	// if vl_remainder == 0
+	b.eq	1f
+	sub	vl_remainder, vector_length, tmp2
+	// process remainder until the first vector_length boundary
+	whilelt	p2.b, xzr, vl_remainder
+	st1b	z0.b, p2, [dst]
+	add	dst, dst, vl_remainder
+	sub	rest, rest, vl_remainder
+	// align dstin address at CACHE_LINE_SIZE byte boundary
+1:	mov	tmp1, CACHE_LINE_SIZE
+	ands	tmp2, dst, CACHE_LINE_SIZE - 1
+	// if cl_remainder == 0
+	b.eq	L(L2_dc_zva)
+	sub	cl_remainder, tmp1, tmp2
+	// process remainder until the first CACHE_LINE_SIZE boundary
+	mov	tmp1, xzr       // index
+2:	whilelt	p2.b, tmp1, cl_remainder
+	st1b	z0.b, p2, [dst, tmp1]
+	incb	tmp1
+	cmp	tmp1, cl_remainder
+	b.lo	2b
+	add	dst, dst, cl_remainder
+	sub	rest, rest, cl_remainder
+
+L(L2_dc_zva):
+	// zero fill
+	mov	tmp1, dst
+	dc_zva	(ZF_DIST / CACHE_LINE_SIZE) - 1
+	mov	zva_len, ZF_DIST
+	add	tmp1, zva_len, CACHE_LINE_SIZE * 2
+	// unroll
+	.p2align 3
+1:	st1b_unroll 0, 3
+	add	tmp2, dst, zva_len
+	dc	 zva, tmp2
+	st1b_unroll 4, 7
+	add	tmp2, tmp2, CACHE_LINE_SIZE
+	dc	zva, tmp2
+	add	dst, dst, CACHE_LINE_SIZE * 2
+	sub	rest, rest, CACHE_LINE_SIZE * 2
+	cmp	rest, tmp1	// ZF_DIST + CACHE_LINE_SIZE * 2
+	b.ge	1b
+	cbnz	rest, L(unroll8)
+	ret
+
+END (MEMSET)
+libc_hidden_builtin_def (MEMSET)
+
+#endif /* IS_IN (libc) */
+#endif /* HAVE_AARCH64_SVE_ASM */