From 8f4f10bfb4793f47accee8ea86438879a889b595 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Wed, 2 Mar 2022 15:20:14 -0800
Subject: [PATCH] x86: Update large memcpy case in memmove-vec-unaligned-erms.S

No Bug. This commit updates the large memcpy case (no overlap). The
update is to perform memcpy on either 2 or 4 contiguous pages at
once. This 1) helps to alleviate the effects of false memory aliasing
when destination and source have a close 4k alignment and 2) is, in
most cases and for most DRAM units, a modestly more efficient access
pattern. These changes are a clear performance improvement for
VEC_SIZE=16/32, though more ambiguous for VEC_SIZE=64. test-memcpy,
test-memccpy, test-mempcpy, test-memmove, and tst-memmove-overflow all
pass.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
(cherry picked from commit 1a8605b6cd257e8a74e29b5b71c057211f5fb847)
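
As a rough illustration of the dispatch logic this patch introduces, the
following C sketch shows how the overlap check, the page-aliasing test, and
the 2-page/4-page split fit together. It is not part of the patch: all
function names, the stub bodies, and the threshold value are made up for
illustration, it assumes an AVX2 build (VEC_SIZE == 32), and glibc does all
of this in assembly while reading the real threshold from the
__x86_shared_non_temporal_threshold tunable.

/* Illustrative sketch only -- not part of the patch.  Assumes VEC_SIZE == 32
   (AVX2); all names and the threshold value below are made up.  */
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096
#define VEC_SIZE  32

/* Stand-in for the runtime tunable (sized from the shared cache in glibc).  */
static const size_t non_temporal_threshold = 1024 * 1024;

/* Stubs standing in for the temporal 4x-VEC loop and the 2-page / 4-page
   non-temporal loops of the real code.  */
static void copy_forward_temporal (void *d, const void *s, size_t n)
{ memmove (d, s, n); }
static void large_memcpy_2x (void *d, const void *s, size_t n)
{ memcpy (d, s, n); }
static void large_memcpy_4x (void *d, const void *s, size_t n)
{ memcpy (d, s, n); }

void
large_copy_dispatch (void *dst, const void *src, size_t n)
{
  uintptr_t d = (uintptr_t) dst, s = (uintptr_t) src;

  /* Non-temporal stores are only used when the regions cannot overlap,
     i.e. n does not exceed |dst - src|.  */
  uintptr_t diff = d > s ? d - s : s - d;
  if (n > diff)
    {
      copy_forward_temporal (dst, src, n);
      return;
    }

  /* Page aliasing: dst and src sit at nearly the same offset within a
     4 KiB page.  The assembly tests (dst - src - 1) against the mask
     PAGE_SIZE - 8 * VEC_SIZE.  */
  int page_alias = ((d - s - 1) & (PAGE_SIZE - VEC_SIZE * 8)) == 0;

  /* Page-aliasing or very large copies (size >= 16 * threshold) interleave
     4 pages at once; everything else interleaves 2 pages.  */
  if (page_alias || (n >> 4) >= non_temporal_threshold)
    large_memcpy_4x (dst, src, n);
  else
    large_memcpy_2x (dst, src, n);
}
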
---
 .../multiarch/memmove-vec-unaligned-erms.S    | 338 ++++++++++++++----
 1 file changed, 265 insertions(+), 73 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index c475fed4..3e2dd6bc 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -32,7 +32,16 @@
       overlapping addresses.
    6. If size >= __x86_shared_non_temporal_threshold and there is no
      overlap between destination and source, use non-temporal store
-      instead of aligned store.  */
+      instead of aligned store copying from either 2 or 4 pages at
+      once.
+   7. For point 6), if size < 16 * __x86_shared_non_temporal_threshold
+      and source and destination do not page alias, copy from 2 pages
+      at once using non-temporal stores. Page aliasing in this case is
+      considered true if destination's page alignment - source's page
+      alignment is less than 8 * VEC_SIZE.
+   8. If size >= 16 * __x86_shared_non_temporal_threshold or source
+      and destination do page alias, copy from 4 pages at once using
+      non-temporal stores.  */
 
 #include <sysdep.h>
 
@@ -64,6 +73,34 @@
 # endif
 #endif
 
+#ifndef PAGE_SIZE
+# define PAGE_SIZE 4096
+#endif
+
+#if PAGE_SIZE != 4096
+# error Unsupported PAGE_SIZE
+#endif
+
+#ifndef LOG_PAGE_SIZE
+# define LOG_PAGE_SIZE 12
+#endif
+
+#if PAGE_SIZE != (1 << LOG_PAGE_SIZE)
+# error Invalid LOG_PAGE_SIZE
+#endif
+
+/* Bytes per page for the large_memcpy inner loop.  */
+#if VEC_SIZE == 64
+# define LARGE_LOAD_SIZE (VEC_SIZE * 2)
+#else
+# define LARGE_LOAD_SIZE (VEC_SIZE * 4)
+#endif
+
+/* Amount to shift rdx by to compare for memcpy_large_4x.  */
+#ifndef LOG_4X_MEMCPY_THRESH
+# define LOG_4X_MEMCPY_THRESH 4
+#endif
+
 /* Avoid short distance rep movsb only with non-SSE vector.  */
 #ifndef AVOID_SHORT_DISTANCE_REP_MOVSB
 # define AVOID_SHORT_DISTANCE_REP_MOVSB (VEC_SIZE > 16)
@@ -103,6 +140,28 @@
 # error Unsupported PREFETCH_SIZE!
 #endif
 
+#if LARGE_LOAD_SIZE == (VEC_SIZE * 2)
+# define LOAD_ONE_SET(base, offset, vec0, vec1, ...) \
+	VMOVU	(offset)base, vec0; \
+	VMOVU	((offset) + VEC_SIZE)base, vec1;
+# define STORE_ONE_SET(base, offset, vec0, vec1, ...) \
+	VMOVNT  vec0, (offset)base; \
+	VMOVNT  vec1, ((offset) + VEC_SIZE)base;
+#elif LARGE_LOAD_SIZE == (VEC_SIZE * 4)
+# define LOAD_ONE_SET(base, offset, vec0, vec1, vec2, vec3) \
+	VMOVU	(offset)base, vec0; \
+	VMOVU	((offset) + VEC_SIZE)base, vec1; \
+	VMOVU	((offset) + VEC_SIZE * 2)base, vec2; \
+	VMOVU	((offset) + VEC_SIZE * 3)base, vec3;
+# define STORE_ONE_SET(base, offset, vec0, vec1, vec2, vec3) \
+	VMOVNT	vec0, (offset)base; \
+	VMOVNT	vec1, ((offset) + VEC_SIZE)base; \
+	VMOVNT	vec2, ((offset) + VEC_SIZE * 2)base; \
+	VMOVNT	vec3, ((offset) + VEC_SIZE * 3)base;
+#else
+# error Invalid LARGE_LOAD_SIZE
+#endif
+
 #ifndef SECTION
 # error SECTION is not defined!
 #endif
@@ -390,6 +449,15 @@ L(last_4x_vec):
 	VZEROUPPER_RETURN
 
 L(more_8x_vec):
+	/* Check if non-temporal move candidate.  */
+#if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
+	/* Check non-temporal store threshold.  */
+	cmp __x86_shared_non_temporal_threshold(%rip), %RDX_LP
+	ja	L(large_memcpy_2x)
+#endif
+	/* Entry if rdx is greater than non-temporal threshold but there
+       is overlap.  */
+L(more_8x_vec_check):
 	cmpq	%rsi, %rdi
 	ja	L(more_8x_vec_backward)
 	/* Source == destination is less common.  */
@@ -416,24 +484,21 @@ L(more_8x_vec):
 	subq	%r8, %rdi
 	/* Adjust length.  */
 	addq	%r8, %rdx
-#if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
-	/* Check non-temporal store threshold.  */
-	cmp	__x86_shared_non_temporal_threshold(%rip), %RDX_LP
-	ja	L(large_forward)
-#endif
+
+	.p2align 4
 L(loop_4x_vec_forward):
 	/* Copy 4 * VEC a time forward.  */
 	VMOVU	(%rsi), %VEC(0)
 	VMOVU	VEC_SIZE(%rsi), %VEC(1)
 	VMOVU	(VEC_SIZE * 2)(%rsi), %VEC(2)
 	VMOVU	(VEC_SIZE * 3)(%rsi), %VEC(3)
-	addq	$(VEC_SIZE * 4), %rsi
-	subq	$(VEC_SIZE * 4), %rdx
+	subq	$-(VEC_SIZE * 4), %rsi
+	addq	$-(VEC_SIZE * 4), %rdx
 	VMOVA	%VEC(0), (%rdi)
 	VMOVA	%VEC(1), VEC_SIZE(%rdi)
 	VMOVA	%VEC(2), (VEC_SIZE * 2)(%rdi)
 	VMOVA	%VEC(3), (VEC_SIZE * 3)(%rdi)
-	addq	$(VEC_SIZE * 4), %rdi
+	subq	$-(VEC_SIZE * 4), %rdi
 	cmpq	$(VEC_SIZE * 4), %rdx
 	ja	L(loop_4x_vec_forward)
 	/* Store the last 4 * VEC.  */
@@ -467,24 +532,21 @@ L(more_8x_vec_backward):
 	subq	%r8, %r9
 	/* Adjust length.  */
 	subq	%r8, %rdx
-#if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
-	/* Check non-temporal store threshold.  */
-	cmp	__x86_shared_non_temporal_threshold(%rip), %RDX_LP
-	ja	L(large_backward)
-#endif
+
+	.p2align 4
 L(loop_4x_vec_backward):
 	/* Copy 4 * VEC a time backward.  */
 	VMOVU	(%rcx), %VEC(0)
 	VMOVU	-VEC_SIZE(%rcx), %VEC(1)
 	VMOVU	-(VEC_SIZE * 2)(%rcx), %VEC(2)
 	VMOVU	-(VEC_SIZE * 3)(%rcx), %VEC(3)
-	subq	$(VEC_SIZE * 4), %rcx
-	subq	$(VEC_SIZE * 4), %rdx
+	addq	$-(VEC_SIZE * 4), %rcx
+	addq	$-(VEC_SIZE * 4), %rdx
 	VMOVA	%VEC(0), (%r9)
 	VMOVA	%VEC(1), -VEC_SIZE(%r9)
 	VMOVA	%VEC(2), -(VEC_SIZE * 2)(%r9)
 	VMOVA	%VEC(3), -(VEC_SIZE * 3)(%r9)
-	subq	$(VEC_SIZE * 4), %r9
+	addq	$-(VEC_SIZE * 4), %r9
 	cmpq	$(VEC_SIZE * 4), %rdx
 	ja	L(loop_4x_vec_backward)
 	/* Store the first 4 * VEC.  */
@@ -497,72 +559,202 @@ L(loop_4x_vec_backward):
 	VZEROUPPER_RETURN
 
 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
-L(large_forward):
+	.p2align 4
+L(large_memcpy_2x):
+	/* Compute absolute value of difference between source and
+	   destination.  */
+	movq	%rdi, %r9
+	subq	%rsi, %r9
+	movq	%r9, %r8
+	leaq	-1(%r9), %rcx
+	sarq	$63, %r8
+	xorq	%r8, %r9
+	subq	%r8, %r9
 	/* Don't use non-temporal store if there is overlap between
-	   destination and source since destination may be in cache
-	   when source is loaded.  */
-	leaq    (%rdi, %rdx), %r10
-	cmpq    %r10, %rsi
-	jb	L(loop_4x_vec_forward)
-L(loop_large_forward):
+	   destination and source since destination may be in cache when
+	   source is loaded.  */
+	cmpq	%r9, %rdx
+	ja	L(more_8x_vec_check)
+
+	/* Cache align destination. First store the first 64 bytes then
+	   adjust alignments.  */
+	VMOVU	(%rsi), %VEC(8)
+#if VEC_SIZE < 64
+	VMOVU	VEC_SIZE(%rsi), %VEC(9)
+#if VEC_SIZE < 32
+	VMOVU	(VEC_SIZE * 2)(%rsi), %VEC(10)
+	VMOVU	(VEC_SIZE * 3)(%rsi), %VEC(11)
+#endif
+#endif
+	VMOVU	%VEC(8), (%rdi)
+#if VEC_SIZE < 64
+	VMOVU	%VEC(9), VEC_SIZE(%rdi)
+#if VEC_SIZE < 32
+	VMOVU	%VEC(10), (VEC_SIZE * 2)(%rdi)
+	VMOVU	%VEC(11), (VEC_SIZE * 3)(%rdi)
+#endif
+#endif
+	/* Adjust source, destination, and size.  */
+	movq	%rdi, %r8
+	andq	$63, %r8
+	/* Get the negative of offset for alignment.  */
+	subq	$64, %r8
+	/* Adjust source.  */
+	subq	%r8, %rsi
+	/* Adjust destination which should be aligned now.  */
+	subq	%r8, %rdi
+	/* Adjust length.  */
+	addq	%r8, %rdx
+
+	/* Test if source and destination addresses will alias. If they do,
+	   the larger pipeline in large_memcpy_4x alleviates the
+	   performance drop.  */
+	testl	$(PAGE_SIZE - VEC_SIZE * 8), %ecx
+	jz	L(large_memcpy_4x)
+
+	movq	%rdx, %r10
+	shrq	$LOG_4X_MEMCPY_THRESH, %r10
+	cmp	__x86_shared_non_temporal_threshold(%rip), %r10
+	jae	L(large_memcpy_4x)
+
+	/* edx will store remainder size for copying tail.  */
+	andl	$(PAGE_SIZE * 2 - 1), %edx
+	/* r10 stores outer loop counter.  */
+	shrq	$((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
+	/* Copy 4x VEC at a time from 2 pages.  */
+	.p2align 4
+L(loop_large_memcpy_2x_outer):
+	/* ecx stores inner loop counter.  */
+	movl	$(PAGE_SIZE / LARGE_LOAD_SIZE), %ecx
+L(loop_large_memcpy_2x_inner):
+	PREFETCH_ONE_SET(1, (%rsi), PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET(1, (%rsi), PREFETCHED_LOAD_SIZE * 2)
+	PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE * 2)
+	/* Load vectors from rsi.  */
+	LOAD_ONE_SET((%rsi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
+	LOAD_ONE_SET((%rsi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
+	subq	$-LARGE_LOAD_SIZE, %rsi
+	/* Non-temporal store vectors to rdi.  */
+	STORE_ONE_SET((%rdi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
+	STORE_ONE_SET((%rdi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
+	subq	$-LARGE_LOAD_SIZE, %rdi
+	decl	%ecx
+	jnz	L(loop_large_memcpy_2x_inner)
+	addq	$PAGE_SIZE, %rdi
+	addq	$PAGE_SIZE, %rsi
+	decq	%r10
+	jne	L(loop_large_memcpy_2x_outer)
+	sfence
+
+	/* Check if only last 4 loads are needed.  */
+	cmpl	$(VEC_SIZE * 4), %edx
+	jbe	L(large_memcpy_2x_end)
+
+	/* Handle the last 2 * PAGE_SIZE bytes.  */
+L(loop_large_memcpy_2x_tail):
 	/* Copy 4 * VEC a time forward with non-temporal stores.  */
-	PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 2)
-	PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 3)
+	PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET (1, (%rdi), PREFETCHED_LOAD_SIZE)
 	VMOVU	(%rsi), %VEC(0)
 	VMOVU	VEC_SIZE(%rsi), %VEC(1)
 	VMOVU	(VEC_SIZE * 2)(%rsi), %VEC(2)
 	VMOVU	(VEC_SIZE * 3)(%rsi), %VEC(3)
-	addq	$PREFETCHED_LOAD_SIZE, %rsi
-	subq	$PREFETCHED_LOAD_SIZE, %rdx
-	VMOVNT	%VEC(0), (%rdi)
-	VMOVNT	%VEC(1), VEC_SIZE(%rdi)
-	VMOVNT	%VEC(2), (VEC_SIZE * 2)(%rdi)
-	VMOVNT	%VEC(3), (VEC_SIZE * 3)(%rdi)
-	addq	$PREFETCHED_LOAD_SIZE, %rdi
-	cmpq	$PREFETCHED_LOAD_SIZE, %rdx
-	ja	L(loop_large_forward)
-	sfence
+	subq	$-(VEC_SIZE * 4), %rsi
+	addl	$-(VEC_SIZE * 4), %edx
+	VMOVA	%VEC(0), (%rdi)
+	VMOVA	%VEC(1), VEC_SIZE(%rdi)
+	VMOVA	%VEC(2), (VEC_SIZE * 2)(%rdi)
+	VMOVA	%VEC(3), (VEC_SIZE * 3)(%rdi)
+	subq	$-(VEC_SIZE * 4), %rdi
+	cmpl	$(VEC_SIZE * 4), %edx
+	ja	L(loop_large_memcpy_2x_tail)
+
+L(large_memcpy_2x_end):
 	/* Store the last 4 * VEC.  */
-	VMOVU	%VEC(5), (%rcx)
-	VMOVU	%VEC(6), -VEC_SIZE(%rcx)
-	VMOVU	%VEC(7), -(VEC_SIZE * 2)(%rcx)
-	VMOVU	%VEC(8), -(VEC_SIZE * 3)(%rcx)
-	/* Store the first VEC.  */
-	VMOVU	%VEC(4), (%r11)
+	VMOVU	-(VEC_SIZE * 4)(%rsi, %rdx), %VEC(0)
+	VMOVU	-(VEC_SIZE * 3)(%rsi, %rdx), %VEC(1)
+	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %VEC(2)
+	VMOVU	-VEC_SIZE(%rsi, %rdx), %VEC(3)
+
+	VMOVU	%VEC(0), -(VEC_SIZE * 4)(%rdi, %rdx)
+	VMOVU	%VEC(1), -(VEC_SIZE * 3)(%rdi, %rdx)
+	VMOVU	%VEC(2), -(VEC_SIZE * 2)(%rdi, %rdx)
+	VMOVU	%VEC(3), -VEC_SIZE(%rdi, %rdx)
 	VZEROUPPER_RETURN
 
-L(large_backward):
-	/* Don't use non-temporal store if there is overlap between
-	   destination and source since destination may be in cache
-	   when source is loaded.  */
-	leaq    (%rcx, %rdx), %r10
-	cmpq    %r10, %r9
-	jb	L(loop_4x_vec_backward)
-L(loop_large_backward):
-	/* Copy 4 * VEC a time backward with non-temporal stores.  */
-	PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 2)
-	PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 3)
-	VMOVU	(%rcx), %VEC(0)
-	VMOVU	-VEC_SIZE(%rcx), %VEC(1)
-	VMOVU	-(VEC_SIZE * 2)(%rcx), %VEC(2)
-	VMOVU	-(VEC_SIZE * 3)(%rcx), %VEC(3)
-	subq	$PREFETCHED_LOAD_SIZE, %rcx
-	subq	$PREFETCHED_LOAD_SIZE, %rdx
-	VMOVNT	%VEC(0), (%r9)
-	VMOVNT	%VEC(1), -VEC_SIZE(%r9)
-	VMOVNT	%VEC(2), -(VEC_SIZE * 2)(%r9)
-	VMOVNT	%VEC(3), -(VEC_SIZE * 3)(%r9)
-	subq	$PREFETCHED_LOAD_SIZE, %r9
-	cmpq	$PREFETCHED_LOAD_SIZE, %rdx
-	ja	L(loop_large_backward)
+	.p2align 4
+L(large_memcpy_4x):
+	movq	%rdx, %r10
+	/* edx will store remainder size for copying tail.  */
+	andl	$(PAGE_SIZE * 4 - 1), %edx
+	/* r10 stores outer loop counter.  */
+	shrq	$(LOG_PAGE_SIZE + 2), %r10
+	/* Copy 4x VEC at a time from 4 pages.  */
+	.p2align 4
+L(loop_large_memcpy_4x_outer):
+	/* ecx stores inner loop counter.  */
+	movl	$(PAGE_SIZE / LARGE_LOAD_SIZE), %ecx
+L(loop_large_memcpy_4x_inner):
+	/* Only one prefetch set per page as doing 4 pages gives more time
+	   for the prefetcher to keep up.  */
+	PREFETCH_ONE_SET(1, (%rsi), PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE * 2 + PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE * 3 + PREFETCHED_LOAD_SIZE)
+	/* Load vectors from rsi.  */
+	LOAD_ONE_SET((%rsi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
+	LOAD_ONE_SET((%rsi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
+	LOAD_ONE_SET((%rsi), PAGE_SIZE * 2, %VEC(8), %VEC(9), %VEC(10), %VEC(11))
+	LOAD_ONE_SET((%rsi), PAGE_SIZE * 3, %VEC(12), %VEC(13), %VEC(14), %VEC(15))
+	subq	$-LARGE_LOAD_SIZE, %rsi
+	/* Non-temporal store vectors to rdi.  */
+	STORE_ONE_SET((%rdi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
+	STORE_ONE_SET((%rdi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
+	STORE_ONE_SET((%rdi), PAGE_SIZE * 2, %VEC(8), %VEC(9), %VEC(10), %VEC(11))
+	STORE_ONE_SET((%rdi), PAGE_SIZE * 3, %VEC(12), %VEC(13), %VEC(14), %VEC(15))
+	subq	$-LARGE_LOAD_SIZE, %rdi
+	decl	%ecx
+	jnz	L(loop_large_memcpy_4x_inner)
+	addq	$(PAGE_SIZE * 3), %rdi
+	addq	$(PAGE_SIZE * 3), %rsi
+	decq	%r10
+	jne	L(loop_large_memcpy_4x_outer)
 	sfence
-	/* Store the first 4 * VEC.  */
-	VMOVU	%VEC(4), (%rdi)
-	VMOVU	%VEC(5), VEC_SIZE(%rdi)
-	VMOVU	%VEC(6), (VEC_SIZE * 2)(%rdi)
-	VMOVU	%VEC(7), (VEC_SIZE * 3)(%rdi)
-	/* Store the last VEC.  */
-	VMOVU	%VEC(8), (%r11)
+	/* Check if only last 4 loads are needed.  */
+	cmpl	$(VEC_SIZE * 4), %edx
+	jbe	L(large_memcpy_4x_end)
+
+	/* Handle the last 4 * PAGE_SIZE bytes.  */
+L(loop_large_memcpy_4x_tail):
+	/* Copy 4 * VEC a time forward with non-temporal stores.  */
+	PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
+	PREFETCH_ONE_SET (1, (%rdi), PREFETCHED_LOAD_SIZE)
+	VMOVU	(%rsi), %VEC(0)
+	VMOVU	VEC_SIZE(%rsi), %VEC(1)
+	VMOVU	(VEC_SIZE * 2)(%rsi), %VEC(2)
+	VMOVU	(VEC_SIZE * 3)(%rsi), %VEC(3)
+	subq	$-(VEC_SIZE * 4), %rsi
+	addl	$-(VEC_SIZE * 4), %edx
+	VMOVA	%VEC(0), (%rdi)
+	VMOVA	%VEC(1), VEC_SIZE(%rdi)
+	VMOVA	%VEC(2), (VEC_SIZE * 2)(%rdi)
+	VMOVA	%VEC(3), (VEC_SIZE * 3)(%rdi)
+	subq	$-(VEC_SIZE * 4), %rdi
+	cmpl	$(VEC_SIZE * 4), %edx
+	ja	L(loop_large_memcpy_4x_tail)
+
+L(large_memcpy_4x_end):
+	/* Store the last 4 * VEC.  */
+	VMOVU	-(VEC_SIZE * 4)(%rsi, %rdx), %VEC(0)
+	VMOVU	-(VEC_SIZE * 3)(%rsi, %rdx), %VEC(1)
+	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %VEC(2)
+	VMOVU	-VEC_SIZE(%rsi, %rdx), %VEC(3)
+
+	VMOVU	%VEC(0), -(VEC_SIZE * 4)(%rdi, %rdx)
+	VMOVU	%VEC(1), -(VEC_SIZE * 3)(%rdi, %rdx)
+	VMOVU	%VEC(2), -(VEC_SIZE * 2)(%rdi, %rdx)
+	VMOVU	%VEC(3), -VEC_SIZE(%rdi, %rdx)
 	VZEROUPPER_RETURN
 #endif
 END (MEMMOVE_SYMBOL (__memmove, unaligned_erms))
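
For reference, the interleaved 2-page loop added above
(L(loop_large_memcpy_2x_outer)/L(loop_large_memcpy_2x_inner)) can be
approximated in C with AVX2 intrinsics as below. This is an illustrative
sketch only, not part of the patch: the function name is made up, it assumes
VEC_SIZE == 32, a destination that is already 64-byte aligned, and a size
that is a multiple of 2 * PAGE_SIZE, and it omits the software prefetches
and the tail handling of the real code.

/* Illustrative sketch only -- not part of the patch.  */
#include <immintrin.h>
#include <stddef.h>

#define PAGE_SIZE        4096
#define VEC_SIZE         32               /* AVX2 vector width.  */
#define LARGE_LOAD_SIZE  (VEC_SIZE * 4)   /* Bytes taken from each page per step.  */

void
large_memcpy_2x_sketch (char *dst, const char *src, size_t n)
{
  /* Outer loop: one iteration per pair of 4 KiB pages.  */
  for (size_t chunk = n / (2 * PAGE_SIZE); chunk != 0; chunk--)
    {
      /* Inner loop: walk the first page while touching the same offset in
         the following page, so two streams of non-temporal stores advance
         together.  */
      for (int i = PAGE_SIZE / LARGE_LOAD_SIZE; i != 0; i--)
        {
          __m256i v0 = _mm256_loadu_si256 ((const __m256i *) (src + 0 * VEC_SIZE));
          __m256i v1 = _mm256_loadu_si256 ((const __m256i *) (src + 1 * VEC_SIZE));
          __m256i v2 = _mm256_loadu_si256 ((const __m256i *) (src + 2 * VEC_SIZE));
          __m256i v3 = _mm256_loadu_si256 ((const __m256i *) (src + 3 * VEC_SIZE));
          __m256i v4 = _mm256_loadu_si256 ((const __m256i *) (src + PAGE_SIZE + 0 * VEC_SIZE));
          __m256i v5 = _mm256_loadu_si256 ((const __m256i *) (src + PAGE_SIZE + 1 * VEC_SIZE));
          __m256i v6 = _mm256_loadu_si256 ((const __m256i *) (src + PAGE_SIZE + 2 * VEC_SIZE));
          __m256i v7 = _mm256_loadu_si256 ((const __m256i *) (src + PAGE_SIZE + 3 * VEC_SIZE));

          _mm256_stream_si256 ((__m256i *) (dst + 0 * VEC_SIZE), v0);
          _mm256_stream_si256 ((__m256i *) (dst + 1 * VEC_SIZE), v1);
          _mm256_stream_si256 ((__m256i *) (dst + 2 * VEC_SIZE), v2);
          _mm256_stream_si256 ((__m256i *) (dst + 3 * VEC_SIZE), v3);
          _mm256_stream_si256 ((__m256i *) (dst + PAGE_SIZE + 0 * VEC_SIZE), v4);
          _mm256_stream_si256 ((__m256i *) (dst + PAGE_SIZE + 1 * VEC_SIZE), v5);
          _mm256_stream_si256 ((__m256i *) (dst + PAGE_SIZE + 2 * VEC_SIZE), v6);
          _mm256_stream_si256 ((__m256i *) (dst + PAGE_SIZE + 3 * VEC_SIZE), v7);

          src += LARGE_LOAD_SIZE;
          dst += LARGE_LOAD_SIZE;
        }
      /* The inner loop walked the first page; skip the second page, which
         was filled through the +PAGE_SIZE offsets.  */
      src += PAGE_SIZE;
      dst += PAGE_SIZE;
    }
  /* Order the non-temporal stores before returning.  */
  _mm_sfence ();
}

Interleaving the stores across two (or, in the 4x case, four) page-distant
streams is what the commit message above credits for reducing 4k
false-aliasing stalls and for producing a more DRAM-friendly access pattern.
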
-- 
GitLab