From b31bd11454fade731e5158b1aea40b133ae19926 Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Thu, 2 Dec 2021 18:33:26 +0000
Subject: [PATCH] AArch64: Improve A64FX memcpy

v2 is a complete rewrite of the A64FX memcpy. Performance is improved
by streamlining the code, aligning all large copies and using a single
unrolled loop for all sizes. The code size for memcpy and memmove goes
down from 1796 bytes to 868 bytes. Performance is better in all cases:
bench-memcpy-random is 2.3% faster overall, bench-memcpy-large is ~33%
faster for large sizes, bench-memcpy-walk is 25% faster for small sizes
and 20% for the largest sizes. The geomean of all tests in bench-memcpy
is 5.1% faster, and total time is reduced by 4%.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
---
sysdeps/aarch64/multiarch/memcpy_a64fx.S | 546 ++++++++++-------------
1 file changed, 225 insertions(+), 321 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memcpy_a64fx.S b/sysdeps/aarch64/multiarch/memcpy_a64fx.S
index ae7464e09f..0b306925e6 100644
--- a/sysdeps/aarch64/multiarch/memcpy_a64fx.S
+++ b/sysdeps/aarch64/multiarch/memcpy_a64fx.S
@@ -28,20 +28,15 @@
*
*/

-#define L2_SIZE (8*1024*1024)/2 // L2 8MB/2
-#define CACHE_LINE_SIZE 256
-#define ZF_DIST (CACHE_LINE_SIZE * 21) // Zerofill distance
-#define dest x0
-#define src x1
-#define n x2 // size
-#define tmp1 x3
-#define tmp2 x4
-#define tmp3 x5
-#define rest x6
-#define dest_ptr x7
-#define src_ptr x8
-#define vector_length x9
-#define cl_remainder x10 // CACHE_LINE_SIZE remainder
+#define dstin x0
+#define src x1
+#define n x2
+#define dst x3
+#define dstend x4
+#define srcend x5
+#define tmp x6
+#define vlen x7
+#define vlen8 x8

#if HAVE_AARCH64_SVE_ASM
# if IS_IN (libc)
@@ -50,45 +45,37 @@

.arch armv8.2-a+sve

- .macro dc_zva times
- dc zva, tmp1
- add tmp1, tmp1, CACHE_LINE_SIZE
- .if \times-1
- dc_zva "(\times-1)"
- .endif
- .endm
-
.macro ld1b_unroll8
- ld1b z0.b, p0/z, [src_ptr, #0, mul vl]
- ld1b z1.b, p0/z, [src_ptr, #1, mul vl]
- ld1b z2.b, p0/z, [src_ptr, #2, mul vl]
- ld1b z3.b, p0/z, [src_ptr, #3, mul vl]
- ld1b z4.b, p0/z, [src_ptr, #4, mul vl]
- ld1b z5.b, p0/z, [src_ptr, #5, mul vl]
- ld1b z6.b, p0/z, [src_ptr, #6, mul vl]
- ld1b z7.b, p0/z, [src_ptr, #7, mul vl]
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p0/z, [src, 1, mul vl]
+ ld1b z2.b, p0/z, [src, 2, mul vl]
+ ld1b z3.b, p0/z, [src, 3, mul vl]
+ ld1b z4.b, p0/z, [src, 4, mul vl]
+ ld1b z5.b, p0/z, [src, 5, mul vl]
+ ld1b z6.b, p0/z, [src, 6, mul vl]
+ ld1b z7.b, p0/z, [src, 7, mul vl]
.endm

.macro stld1b_unroll4a
- st1b z0.b, p0, [dest_ptr, #0, mul vl]
- st1b z1.b, p0, [dest_ptr, #1, mul vl]
- ld1b z0.b, p0/z, [src_ptr, #0, mul vl]
- ld1b z1.b, p0/z, [src_ptr, #1, mul vl]
- st1b z2.b, p0, [dest_ptr, #2, mul vl]
- st1b z3.b, p0, [dest_ptr, #3, mul vl]
- ld1b z2.b, p0/z, [src_ptr, #2, mul vl]
- ld1b z3.b, p0/z, [src_ptr, #3, mul vl]
+ st1b z0.b, p0, [dst, 0, mul vl]
+ st1b z1.b, p0, [dst, 1, mul vl]
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p0/z, [src, 1, mul vl]
+ st1b z2.b, p0, [dst, 2, mul vl]
+ st1b z3.b, p0, [dst, 3, mul vl]
+ ld1b z2.b, p0/z, [src, 2, mul vl]
+ ld1b z3.b, p0/z, [src, 3, mul vl]
.endm

.macro stld1b_unroll4b
- st1b z4.b, p0, [dest_ptr, #4, mul vl]
- st1b z5.b, p0, [dest_ptr, #5, mul vl]
- ld1b z4.b, p0/z, [src_ptr, #4, mul vl]
- ld1b z5.b, p0/z, [src_ptr, #5, mul vl]
- st1b z6.b, p0, [dest_ptr, #6, mul vl]
- st1b z7.b, p0, [dest_ptr, #7, mul vl]
- ld1b z6.b, p0/z, [src_ptr, #6, mul vl]
- ld1b z7.b, p0/z, [src_ptr, #7, mul vl]
+ st1b z4.b, p0, [dst, 4, mul vl]
+ st1b z5.b, p0, [dst, 5, mul vl]
+ ld1b z4.b, p0/z, [src, 4, mul vl]
+ ld1b z5.b, p0/z, [src, 5, mul vl]
+ st1b z6.b, p0, [dst, 6, mul vl]
+ st1b z7.b, p0, [dst, 7, mul vl]
+ ld1b z6.b, p0/z, [src, 6, mul vl]
+ ld1b z7.b, p0/z, [src, 7, mul vl]
.endm

.macro stld1b_unroll8
@@ -97,87 +84,18 @@
.endm

.macro st1b_unroll8
- st1b z0.b, p0, [dest_ptr, #0, mul vl]
- st1b z1.b, p0, [dest_ptr, #1, mul vl]
- st1b z2.b, p0, [dest_ptr, #2, mul vl]
- st1b z3.b, p0, [dest_ptr, #3, mul vl]
- st1b z4.b, p0, [dest_ptr, #4, mul vl]
- st1b z5.b, p0, [dest_ptr, #5, mul vl]
- st1b z6.b, p0, [dest_ptr, #6, mul vl]
- st1b z7.b, p0, [dest_ptr, #7, mul vl]
+ st1b z0.b, p0, [dst, 0, mul vl]
+ st1b z1.b, p0, [dst, 1, mul vl]
+ st1b z2.b, p0, [dst, 2, mul vl]
+ st1b z3.b, p0, [dst, 3, mul vl]
+ st1b z4.b, p0, [dst, 4, mul vl]
+ st1b z5.b, p0, [dst, 5, mul vl]
+ st1b z6.b, p0, [dst, 6, mul vl]
+ st1b z7.b, p0, [dst, 7, mul vl]
.endm

- .macro shortcut_for_small_size exit
- // if rest <= vector_length * 2
- whilelo p0.b, xzr, n
- whilelo p1.b, vector_length, n
- b.last 1f
- ld1b z0.b, p0/z, [src, #0, mul vl]
- ld1b z1.b, p1/z, [src, #1, mul vl]
- st1b z0.b, p0, [dest, #0, mul vl]
- st1b z1.b, p1, [dest, #1, mul vl]
- ret
-1: // if rest > vector_length * 8
- cmp n, vector_length, lsl 3 // vector_length * 8
- b.hi \exit
- // if rest <= vector_length * 4
- lsl tmp1, vector_length, 1 // vector_length * 2
- whilelo p2.b, tmp1, n
- incb tmp1
- whilelo p3.b, tmp1, n
- b.last 1f
- ld1b z0.b, p0/z, [src, #0, mul vl]
- ld1b z1.b, p1/z, [src, #1, mul vl]
- ld1b z2.b, p2/z, [src, #2, mul vl]
- ld1b z3.b, p3/z, [src, #3, mul vl]
- st1b z0.b, p0, [dest, #0, mul vl]
- st1b z1.b, p1, [dest, #1, mul vl]
- st1b z2.b, p2, [dest, #2, mul vl]
- st1b z3.b, p3, [dest, #3, mul vl]
- ret
-1: // if rest <= vector_length * 8
- lsl tmp1, vector_length, 2 // vector_length * 4
- whilelo p4.b, tmp1, n
- incb tmp1
- whilelo p5.b, tmp1, n
- b.last 1f
- ld1b z0.b, p0/z, [src, #0, mul vl]
- ld1b z1.b, p1/z, [src, #1, mul vl]
- ld1b z2.b, p2/z, [src, #2, mul vl]
- ld1b z3.b, p3/z, [src, #3, mul vl]
- ld1b z4.b, p4/z, [src, #4, mul vl]
- ld1b z5.b, p5/z, [src, #5, mul vl]
- st1b z0.b, p0, [dest, #0, mul vl]
- st1b z1.b, p1, [dest, #1, mul vl]
- st1b z2.b, p2, [dest, #2, mul vl]
- st1b z3.b, p3, [dest, #3, mul vl]
- st1b z4.b, p4, [dest, #4, mul vl]
- st1b z5.b, p5, [dest, #5, mul vl]
- ret
-1: lsl tmp1, vector_length, 2 // vector_length * 4
- incb tmp1 // vector_length * 5
- incb tmp1 // vector_length * 6
- whilelo p6.b, tmp1, n
- incb tmp1
- whilelo p7.b, tmp1, n
- ld1b z0.b, p0/z, [src, #0, mul vl]
- ld1b z1.b, p1/z, [src, #1, mul vl]
- ld1b z2.b, p2/z, [src, #2, mul vl]
- ld1b z3.b, p3/z, [src, #3, mul vl]
- ld1b z4.b, p4/z, [src, #4, mul vl]
- ld1b z5.b, p5/z, [src, #5, mul vl]
- ld1b z6.b, p6/z, [src, #6, mul vl]
- ld1b z7.b, p7/z, [src, #7, mul vl]
- st1b z0.b, p0, [dest, #0, mul vl]
- st1b z1.b, p1, [dest, #1, mul vl]
- st1b z2.b, p2, [dest, #2, mul vl]
- st1b z3.b, p3, [dest, #3, mul vl]
- st1b z4.b, p4, [dest, #4, mul vl]
- st1b z5.b, p5, [dest, #5, mul vl]
- st1b z6.b, p6, [dest, #6, mul vl]
- st1b z7.b, p7, [dest, #7, mul vl]
- ret
- .endm
+#undef BTI_C
+#define BTI_C

ENTRY (MEMCPY)

@@ -185,223 +103,209 @@ ENTRY (MEMCPY)
PTR_ARG (1)
SIZE_ARG (2)

-L(memcpy):
- cntb vector_length
- // shortcut for less than vector_length * 8
- // gives a free ptrue to p0.b for n >= vector_length
- shortcut_for_small_size L(vl_agnostic)
- // end of shortcut
-
-L(vl_agnostic): // VL Agnostic
- mov rest, n
- mov dest_ptr, dest
- mov src_ptr, src
- // if rest >= L2_SIZE && vector_length == 64 then L(L2)
- mov tmp1, 64
- cmp rest, L2_SIZE
- ccmp vector_length, tmp1, 0, cs
- b.eq L(L2)
-
-L(unroll8): // unrolling and software pipeline
- lsl tmp1, vector_length, 3 // vector_length * 8
- .p2align 3
- cmp rest, tmp1
- b.cc L(last)
+ cntb vlen
+ cmp n, vlen, lsl 1
+ b.hi L(copy_small)
+ whilelo p1.b, vlen, n
+ whilelo p0.b, xzr, n
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p1/z, [src, 1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p1, [dstin, 1, mul vl]
+ ret
+
+ .p2align 4
+
+L(copy_small):
+ cmp n, vlen, lsl 3
+ b.hi L(copy_large)
+ add dstend, dstin, n
+ add srcend, src, n
+ cmp n, vlen, lsl 2
+ b.hi 1f
+
+ /* Copy 2-4 vectors. */
+ ptrue p0.b
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p0/z, [src, 1, mul vl]
+ ld1b z2.b, p0/z, [srcend, -2, mul vl]
+ ld1b z3.b, p0/z, [srcend, -1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p0, [dstin, 1, mul vl]
+ st1b z2.b, p0, [dstend, -2, mul vl]
+ st1b z3.b, p0, [dstend, -1, mul vl]
+ ret
+
+ .p2align 4
+ /* Copy 4-8 vectors. */
+1: ptrue p0.b
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p0/z, [src, 1, mul vl]
+ ld1b z2.b, p0/z, [src, 2, mul vl]
+ ld1b z3.b, p0/z, [src, 3, mul vl]
+ ld1b z4.b, p0/z, [srcend, -4, mul vl]
+ ld1b z5.b, p0/z, [srcend, -3, mul vl]
+ ld1b z6.b, p0/z, [srcend, -2, mul vl]
+ ld1b z7.b, p0/z, [srcend, -1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p0, [dstin, 1, mul vl]
+ st1b z2.b, p0, [dstin, 2, mul vl]
+ st1b z3.b, p0, [dstin, 3, mul vl]
+ st1b z4.b, p0, [dstend, -4, mul vl]
+ st1b z5.b, p0, [dstend, -3, mul vl]
+ st1b z6.b, p0, [dstend, -2, mul vl]
+ st1b z7.b, p0, [dstend, -1, mul vl]
+ ret
+
+ .p2align 4
+ /* At least 8 vectors - always align to vector length for
+ higher and consistent write performance. */
+L(copy_large):
+ sub tmp, vlen, 1
+ and tmp, dstin, tmp
+ sub tmp, vlen, tmp
+ whilelo p1.b, xzr, tmp
+ ld1b z1.b, p1/z, [src]
+ st1b z1.b, p1, [dstin]
+ add dst, dstin, tmp
+ add src, src, tmp
+ sub n, n, tmp
+ ptrue p0.b
+
+ lsl vlen8, vlen, 3
+ subs n, n, vlen8
+ b.ls 3f
ld1b_unroll8
- add src_ptr, src_ptr, tmp1
- sub rest, rest, tmp1
- cmp rest, tmp1
- b.cc 2f
- .p2align 3
+ add src, src, vlen8
+ subs n, n, vlen8
+ b.ls 2f
+
+ .p2align 4
+ /* 8x unrolled and software pipelined loop. */
1: stld1b_unroll8
- add dest_ptr, dest_ptr, tmp1
- add src_ptr, src_ptr, tmp1
- sub rest, rest, tmp1
- cmp rest, tmp1
- b.ge 1b
+ add dst, dst, vlen8
+ add src, src, vlen8
+ subs n, n, vlen8
+ b.hi 1b
2: st1b_unroll8
- add dest_ptr, dest_ptr, tmp1
-
- .p2align 3
-L(last):
- whilelo p0.b, xzr, rest
- whilelo p1.b, vector_length, rest
- b.last 1f
- ld1b z0.b, p0/z, [src_ptr, #0, mul vl]
- ld1b z1.b, p1/z, [src_ptr, #1, mul vl]
- st1b z0.b, p0, [dest_ptr, #0, mul vl]
- st1b z1.b, p1, [dest_ptr, #1, mul vl]
- ret
-1: lsl tmp1, vector_length, 1 // vector_length * 2
- whilelo p2.b, tmp1, rest
- incb tmp1
- whilelo p3.b, tmp1, rest
- b.last 1f
- ld1b z0.b, p0/z, [src_ptr, #0, mul vl]
- ld1b z1.b, p1/z, [src_ptr, #1, mul vl]
- ld1b z2.b, p2/z, [src_ptr, #2, mul vl]
- ld1b z3.b, p3/z, [src_ptr, #3, mul vl]
- st1b z0.b, p0, [dest_ptr, #0, mul vl]
- st1b z1.b, p1, [dest_ptr, #1, mul vl]
- st1b z2.b, p2, [dest_ptr, #2, mul vl]
- st1b z3.b, p3, [dest_ptr, #3, mul vl]
+ add dst, dst, vlen8
+3: add n, n, vlen8
+
+ /* Move last 0-8 vectors. */
+L(last_bytes):
+ cmp n, vlen, lsl 1
+ b.hi 1f
+ whilelo p0.b, xzr, n
+ whilelo p1.b, vlen, n
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p1/z, [src, 1, mul vl]
+ st1b z0.b, p0, [dst, 0, mul vl]
+ st1b z1.b, p1, [dst, 1, mul vl]
ret
-1: lsl tmp1, vector_length, 2 // vector_length * 4
- whilelo p4.b, tmp1, rest
- incb tmp1
- whilelo p5.b, tmp1, rest
- incb tmp1
- whilelo p6.b, tmp1, rest
- incb tmp1
- whilelo p7.b, tmp1, rest
- ld1b z0.b, p0/z, [src_ptr, #0, mul vl]
- ld1b z1.b, p1/z, [src_ptr, #1, mul vl]
- ld1b z2.b, p2/z, [src_ptr, #2, mul vl]
- ld1b z3.b, p3/z, [src_ptr, #3, mul vl]
- ld1b z4.b, p4/z, [src_ptr, #4, mul vl]
- ld1b z5.b, p5/z, [src_ptr, #5, mul vl]
- ld1b z6.b, p6/z, [src_ptr, #6, mul vl]
- ld1b z7.b, p7/z, [src_ptr, #7, mul vl]
- st1b z0.b, p0, [dest_ptr, #0, mul vl]
- st1b z1.b, p1, [dest_ptr, #1, mul vl]
- st1b z2.b, p2, [dest_ptr, #2, mul vl]
- st1b z3.b, p3, [dest_ptr, #3, mul vl]
- st1b z4.b, p4, [dest_ptr, #4, mul vl]
- st1b z5.b, p5, [dest_ptr, #5, mul vl]
- st1b z6.b, p6, [dest_ptr, #6, mul vl]
- st1b z7.b, p7, [dest_ptr, #7, mul vl]
+
+ .p2align 4
+
+1: add srcend, src, n
+ add dstend, dst, n
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p0/z, [src, 1, mul vl]
+ ld1b z2.b, p0/z, [srcend, -2, mul vl]
+ ld1b z3.b, p0/z, [srcend, -1, mul vl]
+ cmp n, vlen, lsl 2
+ b.hi 1f
+
+ st1b z0.b, p0, [dst, 0, mul vl]
+ st1b z1.b, p0, [dst, 1, mul vl]
+ st1b z2.b, p0, [dstend, -2, mul vl]
+ st1b z3.b, p0, [dstend, -1, mul vl]
ret

-L(L2):
- // align dest address at CACHE_LINE_SIZE byte boundary
- mov tmp1, CACHE_LINE_SIZE
- ands tmp2, dest_ptr, CACHE_LINE_SIZE - 1
- // if cl_remainder == 0
- b.eq L(L2_dc_zva)
- sub cl_remainder, tmp1, tmp2
- // process remainder until the first CACHE_LINE_SIZE boundary
- whilelo p1.b, xzr, cl_remainder // keep p0.b all true
- whilelo p2.b, vector_length, cl_remainder
- b.last 1f
- ld1b z1.b, p1/z, [src_ptr, #0, mul vl]
- ld1b z2.b, p2/z, [src_ptr, #1, mul vl]
- st1b z1.b, p1, [dest_ptr, #0, mul vl]
- st1b z2.b, p2, [dest_ptr, #1, mul vl]
- b 2f
-1: lsl tmp1, vector_length, 1 // vector_length * 2
- whilelo p3.b, tmp1, cl_remainder
- incb tmp1
- whilelo p4.b, tmp1, cl_remainder
- ld1b z1.b, p1/z, [src_ptr, #0, mul vl]
- ld1b z2.b, p2/z, [src_ptr, #1, mul vl]
- ld1b z3.b, p3/z, [src_ptr, #2, mul vl]
- ld1b z4.b, p4/z, [src_ptr, #3, mul vl]
- st1b z1.b, p1, [dest_ptr, #0, mul vl]
- st1b z2.b, p2, [dest_ptr, #1, mul vl]
- st1b z3.b, p3, [dest_ptr, #2, mul vl]
- st1b z4.b, p4, [dest_ptr, #3, mul vl]
-2: add dest_ptr, dest_ptr, cl_remainder
- add src_ptr, src_ptr, cl_remainder
- sub rest, rest, cl_remainder
-
-L(L2_dc_zva):
- // zero fill
- and tmp1, dest, 0xffffffffffffff
- and tmp2, src, 0xffffffffffffff
- subs tmp1, tmp1, tmp2 // diff
- b.ge 1f
- neg tmp1, tmp1
-1: mov tmp3, ZF_DIST + CACHE_LINE_SIZE * 2
- cmp tmp1, tmp3
- b.lo L(unroll8)
- mov tmp1, dest_ptr
- dc_zva (ZF_DIST / CACHE_LINE_SIZE) - 1
- // unroll
- ld1b_unroll8 // this line has to be after "b.lo L(unroll8)"
- add src_ptr, src_ptr, CACHE_LINE_SIZE * 2
- sub rest, rest, CACHE_LINE_SIZE * 2
- mov tmp1, ZF_DIST
- .p2align 3
-1: stld1b_unroll4a
- add tmp2, dest_ptr, tmp1 // dest_ptr + ZF_DIST
- dc zva, tmp2
- stld1b_unroll4b
- add tmp2, tmp2, CACHE_LINE_SIZE
- dc zva, tmp2
- add dest_ptr, dest_ptr, CACHE_LINE_SIZE * 2
- add src_ptr, src_ptr, CACHE_LINE_SIZE * 2
- sub rest, rest, CACHE_LINE_SIZE * 2
- cmp rest, tmp3 // ZF_DIST + CACHE_LINE_SIZE * 2
- b.ge 1b
- st1b_unroll8
- add dest_ptr, dest_ptr, CACHE_LINE_SIZE * 2
- b L(unroll8)
+1: ld1b z4.b, p0/z, [src, 2, mul vl]
+ ld1b z5.b, p0/z, [src, 3, mul vl]
+ ld1b z6.b, p0/z, [srcend, -4, mul vl]
+ ld1b z7.b, p0/z, [srcend, -3, mul vl]
+ st1b z0.b, p0, [dst, 0, mul vl]
+ st1b z1.b, p0, [dst, 1, mul vl]
+ st1b z4.b, p0, [dst, 2, mul vl]
+ st1b z5.b, p0, [dst, 3, mul vl]
+ st1b z6.b, p0, [dstend, -4, mul vl]
+ st1b z7.b, p0, [dstend, -3, mul vl]
+ st1b z2.b, p0, [dstend, -2, mul vl]
+ st1b z3.b, p0, [dstend, -1, mul vl]
+ ret

END (MEMCPY)
libc_hidden_builtin_def (MEMCPY)


-ENTRY (MEMMOVE)
+ENTRY_ALIGN (MEMMOVE, 4)

PTR_ARG (0)
PTR_ARG (1)
SIZE_ARG (2)

- // remove tag address
- // dest has to be immutable because it is the return value
- // src has to be immutable because it is used in L(bwd_last)
- and tmp2, dest, 0xffffffffffffff // save dest_notag into tmp2
- and tmp3, src, 0xffffffffffffff // save src_notag intp tmp3
- cmp n, 0
- ccmp tmp2, tmp3, 4, ne
- b.ne 1f
+ /* Fast case for up to 2 vectors. */
+ cntb vlen
+ cmp n, vlen, lsl 1
+ b.hi 1f
+ whilelo p0.b, xzr, n
+ whilelo p1.b, vlen, n
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p1/z, [src, 1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p1, [dstin, 1, mul vl]
+L(full_overlap):
ret
-1: cntb vector_length
- // shortcut for less than vector_length * 8
- // gives a free ptrue to p0.b for n >= vector_length
- // tmp2 and tmp3 should not be used in this macro to keep
- // notag addresses
- shortcut_for_small_size L(dispatch)
- // end of shortcut
-
-L(dispatch):
- // tmp2 = dest_notag, tmp3 = src_notag
- // diff = dest_notag - src_notag
- sub tmp1, tmp2, tmp3
- // if diff <= 0 || diff >= n then memcpy
- cmp tmp1, 0
- ccmp tmp1, n, 2, gt
- b.cs L(vl_agnostic)
-
-L(bwd_start):
- mov rest, n
- add dest_ptr, dest, n // dest_end
- add src_ptr, src, n // src_end
-
-L(bwd_unroll8): // unrolling and software pipeline
- lsl tmp1, vector_length, 3 // vector_length * 8
- .p2align 3
- cmp rest, tmp1
- b.cc L(bwd_last)
- sub src_ptr, src_ptr, tmp1
+
+ .p2align 4
+ /* Check for overlapping moves. Return if there is a full overlap.
+ Small moves up to 8 vectors use the overlap-safe copy_small code.
+ Non-overlapping or overlapping moves with dst < src use memcpy.
+ Overlapping moves with dst > src use a backward copy loop. */
+1: sub tmp, dstin, src
+ ands tmp, tmp, 0xffffffffffffff /* Clear special tag bits. */
+ b.eq L(full_overlap)
+ cmp n, vlen, lsl 3
+ b.ls L(copy_small)
+ cmp tmp, n
+ b.hs L(copy_large)
+
+ /* Align to vector length. */
+ add dst, dstin, n
+ sub tmp, vlen, 1
+ ands tmp, dst, tmp
+ csel tmp, tmp, vlen, ne
+ whilelo p1.b, xzr, tmp
+ sub n, n, tmp
+ ld1b z1.b, p1/z, [src, n]
+ st1b z1.b, p1, [dstin, n]
+ add src, src, n
+ add dst, dstin, n
+
+ ptrue p0.b
+ lsl vlen8, vlen, 3
+ subs n, n, vlen8
+ b.ls 3f
+ sub src, src, vlen8
ld1b_unroll8
- sub rest, rest, tmp1
- cmp rest, tmp1
- b.cc 2f
- .p2align 3
-1: sub src_ptr, src_ptr, tmp1
- sub dest_ptr, dest_ptr, tmp1
+ subs n, n, vlen8
+ b.ls 2f
+
+ .p2align 4
+ /* 8x unrolled and software pipelined backward copy loop. */
+1: sub src, src, vlen8
+ sub dst, dst, vlen8
stld1b_unroll8
- sub rest, rest, tmp1
- cmp rest, tmp1
- b.ge 1b
-2: sub dest_ptr, dest_ptr, tmp1
+ subs n, n, vlen8
+ b.hi 1b
+2: sub dst, dst, vlen8
st1b_unroll8
+3: add n, n, vlen8

-L(bwd_last):
- mov dest_ptr, dest
- mov src_ptr, src
- b L(last)
+ /* Adjust src/dst for last 0-8 vectors. */
+ sub src, src, n
+ mov dst, dstin
+ b L(last_bytes)

END (MEMMOVE)
libc_hidden_builtin_def (MEMMOVE)
--
2.31.1