From 07b427296b8d59f439144029d9a948f6c1ce0a31 Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Tue, 10 Aug 2021 13:30:27 +0100
Subject: [PATCH] [1/5] AArch64: Improve A64FX memset for small sizes

Improve performance of small memsets by reducing instruction counts and
improving code alignment. Bench-memset shows 35-45% performance gain for
small sizes.

Reviewed-by: Naohiro Tamura <naohirot@fujitsu.com>
---
 sysdeps/aarch64/multiarch/memset_a64fx.S | 96 +++++++++---------------
 1 file changed, 36 insertions(+), 60 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memset_a64fx.S b/sysdeps/aarch64/multiarch/memset_a64fx.S
index ce54e5418b..cf3d402ef6 100644
--- a/sysdeps/aarch64/multiarch/memset_a64fx.S
+++ b/sysdeps/aarch64/multiarch/memset_a64fx.S
@@ -51,78 +51,54 @@
 	.endm
 
 	.macro st1b_unroll first=0, last=7
-	st1b	z0.b, p0, [dst, #\first, mul vl]
+	st1b	z0.b, p0, [dst, \first, mul vl]
 	.if \last-\first
 	st1b_unroll "(\first+1)", \last
 	.endif
 	.endm
 
-	.macro shortcut_for_small_size exit
-	// if rest <= vector_length * 2
-	whilelo	p0.b, xzr, count
-	whilelo	p1.b, vector_length, count
-	b.last	1f
-	st1b	z0.b, p0, [dstin, #0, mul vl]
-	st1b	z0.b, p1, [dstin, #1, mul vl]
-	ret
-1:	// if rest > vector_length * 8
-	cmp	count, vector_length, lsl 3	// vector_length * 8
-	b.hi	\exit
-	// if rest <= vector_length * 4
-	lsl	tmp1, vector_length, 1	// vector_length * 2
-	whilelo	p2.b, tmp1, count
-	incb	tmp1
-	whilelo	p3.b, tmp1, count
-	b.last	1f
-	st1b	z0.b, p0, [dstin, #0, mul vl]
-	st1b	z0.b, p1, [dstin, #1, mul vl]
-	st1b	z0.b, p2, [dstin, #2, mul vl]
-	st1b	z0.b, p3, [dstin, #3, mul vl]
-	ret
-1:	// if rest <= vector_length * 8
-	lsl	tmp1, vector_length, 2	// vector_length * 4
-	whilelo	p4.b, tmp1, count
-	incb	tmp1
-	whilelo	p5.b, tmp1, count
-	b.last	1f
-	st1b	z0.b, p0, [dstin, #0, mul vl]
-	st1b	z0.b, p1, [dstin, #1, mul vl]
-	st1b	z0.b, p2, [dstin, #2, mul vl]
-	st1b	z0.b, p3, [dstin, #3, mul vl]
-	st1b	z0.b, p4, [dstin, #4, mul vl]
-	st1b	z0.b, p5, [dstin, #5, mul vl]
-	ret
-1:	lsl	tmp1, vector_length, 2	// vector_length * 4
-	incb	tmp1			// vector_length * 5
-	incb	tmp1			// vector_length * 6
-	whilelo	p6.b, tmp1, count
-	incb	tmp1
-	whilelo	p7.b, tmp1, count
-	st1b	z0.b, p0, [dstin, #0, mul vl]
-	st1b	z0.b, p1, [dstin, #1, mul vl]
-	st1b	z0.b, p2, [dstin, #2, mul vl]
-	st1b	z0.b, p3, [dstin, #3, mul vl]
-	st1b	z0.b, p4, [dstin, #4, mul vl]
-	st1b	z0.b, p5, [dstin, #5, mul vl]
-	st1b	z0.b, p6, [dstin, #6, mul vl]
-	st1b	z0.b, p7, [dstin, #7, mul vl]
-	ret
-	.endm
 
-ENTRY (MEMSET)
+#undef BTI_C
+#define BTI_C
 
+ENTRY (MEMSET)
 	PTR_ARG (0)
 	SIZE_ARG (2)
 
-	cbnz	count, 1f
-	ret
-1:	dup	z0.b, valw
 	cntb	vector_length
-	// shortcut for less than vector_length * 8
-	// gives a free ptrue to p0.b for n >= vector_length
-	shortcut_for_small_size L(vl_agnostic)
-	// end of shortcut
+	dup	z0.b, valw
+	whilelo	p0.b, vector_length, count
+	b.last	1f
+	whilelo	p1.b, xzr, count
+	st1b	z0.b, p1, [dstin, 0, mul vl]
+	st1b	z0.b, p0, [dstin, 1, mul vl]
+	ret
+
+	// count >= vector_length * 2
+1:	cmp	count, vector_length, lsl 2
+	add	dstend, dstin, count
+	b.hi	1f
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z0.b, p0, [dstin, 1, mul vl]
+	st1b	z0.b, p0, [dstend, -2, mul vl]
+	st1b	z0.b, p0, [dstend, -1, mul vl]
+	ret
+
+	// count > vector_length * 4
+1:	lsl	tmp1, vector_length, 3
+	cmp	count, tmp1
+	b.hi	L(vl_agnostic)
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z0.b, p0, [dstin, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 2, mul vl]
+	st1b	z0.b, p0, [dstin, 3, mul vl]
+	st1b	z0.b, p0, [dstend, -4, mul vl]
+	st1b	z0.b, p0, [dstend, -3, mul vl]
+	st1b	z0.b, p0, [dstend, -2, mul vl]
+	st1b	z0.b, p0, [dstend, -1, mul vl]
+	ret
 
+	.p2align 4
 L(vl_agnostic): // VL Agnostic
 	mov	rest, count
 	mov	dst, dstin
-- 
2.31.1