From 9bc2ed8f46d80859a5596789cc9e8cc2de84b0e7 Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Tue, 10 Aug 2021 13:39:37 +0100
Subject: [PATCH] [2/5] AArch64: Improve A64FX memset for large sizes

Improve performance of large memsets. Simplify alignment code. For zero memset
use DC ZVA, which almost doubles performance. For non-zero memsets use the
unroll8 loop, which is about 10% faster.

Reviewed-by: Naohiro Tamura <naohirot@fujitsu.com>
---
 sysdeps/aarch64/multiarch/memset_a64fx.S | 85 +++++++-----------------
 1 file changed, 25 insertions(+), 60 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memset_a64fx.S b/sysdeps/aarch64/multiarch/memset_a64fx.S
index cf3d402ef6..75cf43ae79 100644
--- a/sysdeps/aarch64/multiarch/memset_a64fx.S
+++ b/sysdeps/aarch64/multiarch/memset_a64fx.S
@@ -27,14 +27,11 @@
  */
 
 #define L1_SIZE		(64*1024)	// L1 64KB
-#define L2_SIZE		(8*1024*1024)	// L2 8MB - 1MB
+#define L2_SIZE		(8*1024*1024)	// L2 8MB
 #define CACHE_LINE_SIZE	256
 #define PF_DIST_L1	(CACHE_LINE_SIZE * 16)	// Prefetch distance L1
-#define ZF_DIST		(CACHE_LINE_SIZE * 21)	// Zerofill distance
-#define rest		x8
+#define rest		x2
 #define vector_length	x9
-#define vl_remainder	x10	// vector_length remainder
-#define cl_remainder	x11	// CACHE_LINE_SIZE remainder
 
 #if HAVE_AARCH64_SVE_ASM
 # if IS_IN (libc)
@@ -42,14 +39,6 @@
 
 	.arch armv8.2-a+sve
 
-	.macro dc_zva times
-	dc	zva, tmp1
-	add	tmp1, tmp1, CACHE_LINE_SIZE
-	.if \times-1
-	dc_zva "(\times-1)"
-	.endif
-	.endm
-
 	.macro st1b_unroll first=0, last=7
 	st1b	z0.b, p0, [dst, \first, mul vl]
 	.if \last-\first
@@ -188,54 +177,30 @@ L(L1_prefetch): // if rest >= L1_SIZE
 	cbnz	rest, L(unroll32)
 	ret
 
-L(L2):
-	// align dst address at vector_length byte boundary
-	sub	tmp1, vector_length, 1
-	ands	tmp2, dst, tmp1
-	// if vl_remainder == 0
-	b.eq	1f
-	sub	vl_remainder, vector_length, tmp2
-	// process remainder until the first vector_length boundary
-	whilelt	p2.b, xzr, vl_remainder
-	st1b	z0.b, p2, [dst]
-	add	dst, dst, vl_remainder
-	sub	rest, rest, vl_remainder
-	// align dstin address at CACHE_LINE_SIZE byte boundary
-1:	mov	tmp1, CACHE_LINE_SIZE
-	ands	tmp2, dst, CACHE_LINE_SIZE - 1
-	// if cl_remainder == 0
-	b.eq	L(L2_dc_zva)
-	sub	cl_remainder, tmp1, tmp2
-	// process remainder until the first CACHE_LINE_SIZE boundary
-	mov	tmp1, xzr	// index
-2:	whilelt	p2.b, tmp1, cl_remainder
-	st1b	z0.b, p2, [dst, tmp1]
-	incb	tmp1
-	cmp	tmp1, cl_remainder
-	b.lo	2b
-	add	dst, dst, cl_remainder
-	sub	rest, rest, cl_remainder
-
-L(L2_dc_zva):
-	// zero fill
-	mov	tmp1, dst
-	dc_zva	(ZF_DIST / CACHE_LINE_SIZE) - 1
-	mov	zva_len, ZF_DIST
-	add	tmp1, zva_len, CACHE_LINE_SIZE * 2
-	// unroll
+	// count >= L2_SIZE
 	.p2align 3
-1:	st1b_unroll 0, 3
-	add	tmp2, dst, zva_len
-	dc	zva, tmp2
-	st1b_unroll 4, 7
-	add	tmp2, tmp2, CACHE_LINE_SIZE
-	dc	zva, tmp2
-	add	dst, dst, CACHE_LINE_SIZE * 2
-	sub	rest, rest, CACHE_LINE_SIZE * 2
-	cmp	rest, tmp1	// ZF_DIST + CACHE_LINE_SIZE * 2
-	b.ge	1b
-	cbnz	rest, L(unroll8)
-	ret
+L(L2):
+	tst	valw, 255
+	b.ne	L(unroll8)
+	// align dst to CACHE_LINE_SIZE byte boundary
+	and	tmp2, dst, CACHE_LINE_SIZE - 1
+	st1b	z0.b, p0, [dst, 0, mul vl]
+	st1b	z0.b, p0, [dst, 1, mul vl]
+	st1b	z0.b, p0, [dst, 2, mul vl]
+	st1b	z0.b, p0, [dst, 3, mul vl]
+	sub	dst, dst, tmp2
+	add	count, count, tmp2
+
+	// clear cachelines using DC ZVA
+	sub	count, count, CACHE_LINE_SIZE * 2
+	.p2align 4
+1:	add	dst, dst, CACHE_LINE_SIZE
+	dc	zva, dst
+	subs	count, count, CACHE_LINE_SIZE
+	b.hi	1b
+	add	count, count, CACHE_LINE_SIZE
+	add	dst, dst, CACHE_LINE_SIZE
+	b	L(last)
 
 END (MEMSET)
 libc_hidden_builtin_def (MEMSET)
-- 
2.31.1
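
Background for the DC ZVA path above: DC ZVA zeroes one cache-line-sized block
per instruction; the block size is advertised by DCZID_EL0 (bits [3:0] hold
log2 of the size in 4-byte words) and is 256 bytes on A64FX, matching
CACHE_LINE_SIZE here. The C sketch below illustrates the same zero-fill
technique with inline assembly. It is a minimal illustration, not glibc code:
it assumes an AArch64 target with DCZID_EL0.DZP clear (DC ZVA usable at EL0,
as Linux normally configures it), and the helper names are made up.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Block size DC ZVA operates on: DCZID_EL0[3:0] is log2 of the size
       in 4-byte words, so bytes = 4 << BS (256 on A64FX).  */
    static inline size_t
    dczva_block_size (void)
    {
      uint64_t dczid;
      __asm__ ("mrs %0, dczid_el0" : "=r" (dczid));
      return (size_t) 4 << (dczid & 0xf);
    }

    /* Zero [p, p+n) using DC ZVA for whole aligned blocks and plain
       stores for the unaligned head and tail.  */
    static void
    zero_with_dc_zva (char *p, size_t n)
    {
      size_t bs = dczva_block_size ();
      /* Head: bytes until the next block boundary.  */
      size_t head = (size_t) (-(uintptr_t) p & (bs - 1));
      if (head > n)
        head = n;
      memset (p, 0, head);
      p += head;
      n -= head;
      /* Body: one DC ZVA per aligned block.  */
      while (n >= bs)
        {
          __asm__ volatile ("dc zva, %0" : : "r" (p) : "memory");
          p += bs;
          n -= bs;
        }
      /* Tail: whatever is left after the last full block.  */
      memset (p, 0, n);
    }

Unlike this sketch, the patch covers the partial first cache line with four
overlapping SVE stores (4 x 64-byte vectors = 256 bytes from the unaligned
dst) and then rounds dst down to a line boundary, so no separate head loop
is needed.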