From a5db6a5cae6a92d1675c013e5c8d972768721576 Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Tue, 10 Aug 2021 13:46:20 +0100
Subject: [PATCH] [5/5] AArch64: Improve A64FX memset medium loops

Simplify the code for memsets smaller than L1. Improve the unroll8 and
L1_prefetch loops.
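The medium-size dispatch is now a single compare and branch on
L1_SIZE, with the L2_SIZE and vector-length checks moved into
L(L1_prefetch), and the unroll8 loop biases count by one iteration so
that a single subs/b.hi both decrements and tests.  As an illustrative
C sketch of the resulting control flow (not part of the patch;
store_bytes, prefetch and memset_l2 are hypothetical stand-ins for the
st1b_unroll macro, prfm and the unchanged L(L2) path):

#include <stddef.h>
#include <string.h>

#define L1_SIZE		(64 * 1024)		/* L1 64KB */
#define L2_SIZE		(8 * 1024 * 1024)	/* L2 8MB */
#define CACHE_LINE_SIZE	256
#define PF_DIST_L1	(CACHE_LINE_SIZE * 16)

static void store_bytes (unsigned char *d, size_t n, int c)
{ memset (d, c, n); }
static void prefetch (unsigned char *p) { (void) p; }
static void memset_l2 (unsigned char *d, size_t n, int c)
{ memset (d, c, n); }

/* Control flow from L(vl_agnostic) onward; vl is the SVE vector
   length in bytes, so tmp1 = 8 * vl as in the assembly.  Returns the
   tail length left for L(last), which is elided here.  */
static size_t
vl_agnostic (unsigned char *dst, size_t count, int c, size_t vl)
{
  size_t tmp1 = 8 * vl;

  if (count > L1_SIZE)
    {
      if (count >= L2_SIZE)
	{
	  memset_l2 (dst, count, c);	/* L(L2) */
	  return 0;
	}
      if (vl == 64)			/* loop assumes 256B lines */
	do				/* L(L1_prefetch) */
	  {
	    store_bytes (dst, CACHE_LINE_SIZE, c);	/* st1b_unroll 0, 3 */
	    prefetch (dst + PF_DIST_L1);
	    store_bytes (dst + CACHE_LINE_SIZE, CACHE_LINE_SIZE, c);
	    prefetch (dst + PF_DIST_L1 + CACHE_LINE_SIZE);
	    dst += 2 * CACHE_LINE_SIZE;
	    count -= 2 * CACHE_LINE_SIZE;
	  }
	while (count >= PF_DIST_L1);
    }

  /* L(unroll8): count >= tmp1 on entry.  The bias means one subs/b.hi
     per iteration replaces the old cmp/b.cc plus sub/b pair.  */
  count -= tmp1;
  for (;;)
    {
      store_bytes (dst, tmp1, c);	/* st1b_unroll 0, 7 */
      dst += tmp1;
      if (count <= tmp1)		/* b.hi falls through */
	break;
      count -= tmp1;
    }
  return count;				/* 0 <= tail <= tmp1 */
}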
Reviewed-by: Naohiro Tamura <naohirot@fujitsu.com>
---
 sysdeps/aarch64/multiarch/memset_a64fx.S | 45 ++++++++++--------------
 1 file changed, 19 insertions(+), 26 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memset_a64fx.S b/sysdeps/aarch64/multiarch/memset_a64fx.S
index ef0315658a..7bf759b6a7 100644
--- a/sysdeps/aarch64/multiarch/memset_a64fx.S
+++ b/sysdeps/aarch64/multiarch/memset_a64fx.S
@@ -30,7 +30,6 @@
 #define L2_SIZE         (8*1024*1024)	// L2 8MB
 #define CACHE_LINE_SIZE	256
 #define PF_DIST_L1	(CACHE_LINE_SIZE * 16)	// Prefetch distance L1
-#define rest		x2
 #define vector_length	x9
 
 #if HAVE_AARCH64_SVE_ASM
@@ -89,29 +88,19 @@ ENTRY (MEMSET)
 
 	.p2align 4
 L(vl_agnostic): // VL Agnostic
-	mov	rest, count
 	mov	dst, dstin
-	add	dstend, dstin, count
-	// if rest >= L2_SIZE && vector_length == 64 then L(L2)
-	mov	tmp1, 64
-	cmp	rest, L2_SIZE
-	ccmp	vector_length, tmp1, 0, cs
-	b.eq	L(L2)
-	// if rest >= L1_SIZE && vector_length == 64 then L(L1_prefetch)
-	cmp	rest, L1_SIZE
-	ccmp	vector_length, tmp1, 0, cs
-	b.eq	L(L1_prefetch)
-
+	cmp	count, L1_SIZE
+	b.hi	L(L1_prefetch)
 
+	// count >= 8 * vector_length
 L(unroll8):
-	lsl	tmp1, vector_length, 3
-	.p2align 3
-1:	cmp	rest, tmp1
-	b.cc	L(last)
-	st1b_unroll
+	sub	count, count, tmp1
+	.p2align 4
+1:	st1b_unroll 0, 7
 	add	dst, dst, tmp1
-	sub	rest, rest, tmp1
-	b	1b
+	subs	count, count, tmp1
+	b.hi	1b
+	add	count, count, tmp1
 
 L(last):
 	cmp	count, vector_length, lsl 1
@@ -129,18 +118,22 @@ L(last):
 	st1b	z0.b, p0, [dstend, -1, mul vl]
 	ret
 
-L(L1_prefetch): // if rest >= L1_SIZE
+	// count >= L1_SIZE
 	.p2align 3
+L(L1_prefetch):
+	cmp	count, L2_SIZE
+	b.hs	L(L2)
+	cmp	vector_length, 64
+	b.ne	L(unroll8)
 1:	st1b_unroll 0, 3
 	prfm	pstl1keep, [dst, PF_DIST_L1]
 	st1b_unroll 4, 7
 	prfm	pstl1keep, [dst, PF_DIST_L1 + CACHE_LINE_SIZE]
 	add	dst, dst, CACHE_LINE_SIZE * 2
-	sub	rest, rest, CACHE_LINE_SIZE * 2
-	cmp	rest, L1_SIZE
-	b.ge	1b
-	cbnz	rest, L(unroll8)
-	ret
+	sub	count, count, CACHE_LINE_SIZE * 2
+	cmp	count, PF_DIST_L1
+	b.hs	1b
+	b	L(unroll8)
 
 	// count >= L2_SIZE
 	.p2align 3
-- 
2.31.1