commit ba1c3f23d9ba63c38333116eec6043c471c378c4
Author: Noah Goldstein <goldstein.w.n@gmail.com>
Date:   Wed Jun 15 10:41:28 2022 -0700

    x86: Cleanup bounds checking in large memcpy case

    1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
       Previously was using `__x86_rep_movsb_threshold` and should
       have been using `__x86_shared_non_temporal_threshold`.

    2. Avoid reloading __x86_shared_non_temporal_threshold before
       the L(large_memcpy_4x) bounds check.

    3. Document the second bounds check for L(large_memcpy_4x)
       more clearly.

    (cherry picked from commit 89a25c6f64746732b87eaf433af0964b564d4a92)
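For readers who don't follow the assembly, a minimal C sketch of the corrected dispatch logic is below. The function name classify_large_copy and the placeholder threshold value are hypothetical (glibc computes __x86_shared_non_temporal_threshold at startup from the cache hierarchy), and the sketch covers only the two bounds checks; the real code also routes page-aliasing copies to the 4x loop.

    #include <stddef.h>

    /* Hypothetical placeholder value; glibc derives this tunable from
       the shared-cache size at startup.  */
    static size_t x86_shared_non_temporal_threshold = 1 << 20;

    #define LOG_4X_MEMCPY_THRESH 4

    /* Returns 0 for L(more_8x_vec_check), 1 for L(large_memcpy_2x),
       2 for L(large_memcpy_4x).  */
    static int
    classify_large_copy (size_t len)
    {
      /* One load of the threshold, reused for both checks, mirroring
         how the patch keeps it in %r11 instead of reloading it
         (item 2).  */
      size_t threshold = x86_shared_non_temporal_threshold;

      /* Item 1: the lower bound is the non-temporal threshold, not
         __x86_rep_movsb_threshold.  */
      if (len < threshold)
        return 0;

      /* len >= (threshold << LOG) is equivalent to the old
         (len >> LOG) >= threshold, but leaves len intact for the
         tail computation.  */
      if (len >= (threshold << LOG_4X_MEMCPY_THRESH))
        return 2;

      return 1;
    }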
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 7b27cbdda5fb99f7..618d46d8ce28828c 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -118,7 +118,13 @@
 # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
 #endif
 
-/* Amount to shift rdx by to compare for memcpy_large_4x.  */
+/* Amount to shift __x86_shared_non_temporal_threshold by to form
+   the bound for memcpy_large_4x.  This is essentially used to
+   indicate that the copy is far beyond the scope of L3
+   (assuming no user-configured x86_non_temporal_threshold) and
+   that a more aggressively unrolled loop should be used.  NB:
+   before increasing the value, also update the initialization of
+   x86_non_temporal_threshold.  */
 #ifndef LOG_4X_MEMCPY_THRESH
 # define LOG_4X_MEMCPY_THRESH 4
 #endif
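With the default LOG_4X_MEMCPY_THRESH of 4, the L(large_memcpy_4x) bound therefore works out to __x86_shared_non_temporal_threshold << 4, i.e. sixteen times the non-temporal threshold.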
@@ -724,9 +730,14 @@ L(skip_short_movsb_check):
 	.p2align 4,, 10
 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
 L(large_memcpy_2x_check):
-	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
-	jb	L(more_8x_vec_check)
+	/* Entry from L(large_memcpy_2x) has a redundant load of
+	   __x86_shared_non_temporal_threshold(%rip).  L(large_memcpy_2x)
+	   is only used for the non-erms memmove, which is generally
+	   less common.  */
 L(large_memcpy_2x):
+	mov	__x86_shared_non_temporal_threshold(%rip), %R11_LP
+	cmp	%R11_LP, %RDX_LP
+	jb	L(more_8x_vec_check)
 	/* To reach this point it is impossible for dst > src and
 	   overlap. Remaining to check is src > dst and overlap. rcx
 	   already contains dst - src. Negate rcx to get src - dst. If
@@ -774,18 +785,21 @@ L(large_memcpy_2x):
 	/* ecx contains -(dst - src). not ecx will return dst - src - 1
 	   which works for testing aliasing.  */
 	notl	%ecx
+	movq	%rdx, %r10
 	testl	$(PAGE_SIZE - VEC_SIZE * 8), %ecx
 	jz	L(large_memcpy_4x)
 
-	movq	%rdx, %r10
-	shrq	$LOG_4X_MEMCPY_THRESH, %r10
-	cmp	__x86_shared_non_temporal_threshold(%rip), %r10
+	/* r11 has __x86_shared_non_temporal_threshold.  Shift it left
+	   by LOG_4X_MEMCPY_THRESH to get the L(large_memcpy_4x)
+	   threshold.  */
+	shlq	$LOG_4X_MEMCPY_THRESH, %r11
+	cmp	%r11, %rdx
 	jae	L(large_memcpy_4x)
 
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 2 - 1), %edx
 	/* r10 stores outer loop counter.  */
-	shrq	$((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
+	shrq	$(LOG_PAGE_SIZE + 1), %r10
 	/* Copy 4x VEC at a time from 2 pages.  */
 	.p2align 4
 L(loop_large_memcpy_2x_outer):
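Note that the outer-loop counter is unchanged by this rearrangement: previously %r10 held len >> LOG_4X_MEMCPY_THRESH left over from the threshold compare, so the follow-up shift by ((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH) produced len >> (LOG_PAGE_SIZE + 1); now %r10 holds len directly (copied before the aliasing test) and a single shift by (LOG_PAGE_SIZE + 1) yields the same value.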
@@ -850,7 +864,6 @@ L(large_memcpy_2x_end):
 
 	.p2align 4
 L(large_memcpy_4x):
-	movq	%rdx, %r10
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 4 - 1), %edx
 	/* r10 stores outer loop counter.  */
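The movq removed here is the counterpart of the one added before the aliasing test above: %r10 is now loaded with the length on the shared path, so L(large_memcpy_4x) no longer needs its own copy.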