From a5db6a5cae6a92d1675c013e5c8d972768721576 Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Tue, 10 Aug 2021 13:46:20 +0100
Subject: [PATCH] [5/5] AArch64: Improve A64FX memset medium loops

Simplify the code for memsets smaller than L1. Improve the unroll8 and
L1_prefetch loops.

Reviewed-by: Naohiro Tamura <naohirot@fujitsu.com>
---
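Note: a rough C analogue of the new L(unroll8) structure, for readers
tracing the diff below. This is an illustrative sketch, not the glibc
code: the name unroll8_sketch is invented here, plain memset stands in
for the eight st1b vector stores, and tmp1 is assumed to hold
8 * vector_length on entry, as set up by the earlier dispatch code.

    #include <stddef.h>
    #include <string.h>

    /* Precondition: count >= 8 * vl (the L(unroll8) entry condition).  */
    static size_t
    unroll8_sketch (unsigned char *dst, int c, size_t count, size_t vl)
    {
      size_t step = vl * 8;            /* tmp1 = vector_length << 3     */

      count -= step;                   /* sub  count, count, tmp1       */
      for (;;)
        {
          memset (dst, c, step);       /* 1:   st1b_unroll 0, 7         */
          dst += step;                 /*      add  dst, dst, tmp1      */
          int more = count > step;     /*      subs count, count, tmp1  */
          count -= step;
          if (!more)                   /*      b.hi 1b                  */
            break;
        }
      return count + step;             /* add count, count, tmp1:
                                          remainder left for L(last)    */
    }

Biasing count down by one step up front turns the old two-branch loop
body (cmp/b.cc to exit plus an unconditional b to repeat) into a single
subs/b.hi, and the final add restores the sub-8*VL remainder for the
L(last) tail code. L(L1_prefetch) gets the same shape: after subtracting
two cache lines it loops while count >= PF_DIST_L1, then falls into
L(unroll8) for the remainder instead of the old cmp/b.ge/cbnz/ret
sequence.
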
 sysdeps/aarch64/multiarch/memset_a64fx.S | 45 ++++++++++--------------
 1 file changed, 19 insertions(+), 26 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memset_a64fx.S b/sysdeps/aarch64/multiarch/memset_a64fx.S
index ef0315658a..7bf759b6a7 100644
--- a/sysdeps/aarch64/multiarch/memset_a64fx.S
+++ b/sysdeps/aarch64/multiarch/memset_a64fx.S
@@ -30,7 +30,6 @@
 #define L2_SIZE		(8*1024*1024)	// L2 8MB
 #define CACHE_LINE_SIZE	256
 #define PF_DIST_L1	(CACHE_LINE_SIZE * 16)	// Prefetch distance L1
-#define rest		x2
 #define vector_length	x9
 
 #if HAVE_AARCH64_SVE_ASM
@@ -89,29 +88,19 @@ ENTRY (MEMSET)
 
 	.p2align 4
 L(vl_agnostic): // VL Agnostic
-	mov	rest, count
 	mov	dst, dstin
-	add	dstend, dstin, count
-	// if rest >= L2_SIZE && vector_length == 64 then L(L2)
-	mov	tmp1, 64
-	cmp	rest, L2_SIZE
-	ccmp	vector_length, tmp1, 0, cs
-	b.eq	L(L2)
-	// if rest >= L1_SIZE && vector_length == 64 then L(L1_prefetch)
-	cmp	rest, L1_SIZE
-	ccmp	vector_length, tmp1, 0, cs
-	b.eq	L(L1_prefetch)
-
+	cmp	count, L1_SIZE
+	b.hi	L(L1_prefetch)
 
+	// count >= 8 * vector_length
 L(unroll8):
-	lsl	tmp1, vector_length, 3
-	.p2align 3
-1:	cmp	rest, tmp1
-	b.cc	L(last)
-	st1b_unroll
+	sub	count, count, tmp1
+	.p2align 4
+1:	st1b_unroll 0, 7
 	add	dst, dst, tmp1
-	sub	rest, rest, tmp1
-	b	1b
+	subs	count, count, tmp1
+	b.hi	1b
+	add	count, count, tmp1
 
 L(last):
 	cmp	count, vector_length, lsl 1
@@ -129,18 +118,22 @@ L(last):
 	st1b	z0.b, p0, [dstend, -1, mul vl]
 	ret
 
-L(L1_prefetch): // if rest >= L1_SIZE
+	// count >= L1_SIZE
 	.p2align 3
+L(L1_prefetch):
+	cmp	count, L2_SIZE
+	b.hs	L(L2)
+	cmp	vector_length, 64
+	b.ne	L(unroll8)
 1:	st1b_unroll 0, 3
 	prfm	pstl1keep, [dst, PF_DIST_L1]
 	st1b_unroll 4, 7
 	prfm	pstl1keep, [dst, PF_DIST_L1 + CACHE_LINE_SIZE]
 	add	dst, dst, CACHE_LINE_SIZE * 2
-	sub	rest, rest, CACHE_LINE_SIZE * 2
-	cmp	rest, L1_SIZE
-	b.ge	1b
-	cbnz	rest, L(unroll8)
-	ret
+	sub	count, count, CACHE_LINE_SIZE * 2
+	cmp	count, PF_DIST_L1
+	b.hs	1b
+	b	L(unroll8)
 
 	// count >= L2_SIZE
 	.p2align 3
-- 
2.31.1