commit 3f4b61a0b8de67ef9f20737919c713ddfc4bd620
Author: H.J. Lu <hjl.tools@gmail.com>
Date:   Mon Jul 6 11:48:09 2020 -0700

    x86: Add thresholds for "rep movsb/stosb" to tunables
    
    Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
    to update thresholds for "rep movsb" and "rep stosb" at run-time.
    
    Note that the user specified threshold for "rep movsb" smaller than
    the minimum threshold will be ignored.
    
    Reviewed-by: Carlos O'Donell <carlos@redhat.com>

Conflicts:
	sysdeps/x86/cacheinfo.c
	  (Previous backport of the shared cache computation fix.)

diff --git a/manual/tunables.texi b/manual/tunables.texi
index ef10d2872cfc244e..55d5dfb14db4dfb8 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -373,6 +373,22 @@ like memmove and memcpy.
 This tunable is specific to i386 and x86-64.
 @end deftp
 
+@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
+The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user to
+set threshold in bytes to start using "rep movsb".  The value must be
+greater than zero, and currently defaults to 2048 bytes.
+
+This tunable is specific to i386 and x86-64.
+@end deftp
+
+@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
+The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user to
+set threshold in bytes to start using "rep stosb".  The value must be
+greater than zero, and currently defaults to 2048 bytes.
+
+This tunable is specific to i386 and x86-64.
+@end deftp
+
 @deftp Tunable glibc.cpu.x86_ibt
 The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
 indirect branch tracking (IBT) should be enabled.  Accepted values are
diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
index aa7cb705d546bcd0..c741a69fb19a1e95 100644
--- a/sysdeps/x86/cacheinfo.c
+++ b/sysdeps/x86/cacheinfo.c
@@ -530,6 +530,12 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
 /* Threshold to use non temporal store.  */
 long int __x86_shared_non_temporal_threshold attribute_hidden;
 
+/* Threshold to use Enhanced REP MOVSB.  */
+long int __x86_rep_movsb_threshold attribute_hidden = 2048;
+
+/* Threshold to use Enhanced REP STOSB.  */
+long int __x86_rep_stosb_threshold attribute_hidden = 2048;
+
 #ifndef DISABLE_PREFETCHW
 /* PREFETCHW support flag for use in memory and string routines.  */
 int __x86_prefetchw attribute_hidden;
@@ -892,6 +898,36 @@ init_cacheinfo (void)
     = (cpu_features->non_temporal_threshold != 0
        ? cpu_features->non_temporal_threshold
       : __x86_shared_cache_size * 3 / 4);
+
+  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
+  unsigned int minimum_rep_movsb_threshold;
+  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
+  unsigned int rep_movsb_threshold;
+  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
+      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
+    {
+      rep_movsb_threshold = 2048 * (64 / 16);
+      minimum_rep_movsb_threshold = 64 * 8;
+    }
+  else if (CPU_FEATURES_ARCH_P (cpu_features,
+				AVX_Fast_Unaligned_Load))
+    {
+      rep_movsb_threshold = 2048 * (32 / 16);
+      minimum_rep_movsb_threshold = 32 * 8;
+    }
+  else
+    {
+      rep_movsb_threshold = 2048 * (16 / 16);
+      minimum_rep_movsb_threshold = 16 * 8;
+    }
+  if (cpu_features->rep_movsb_threshold > minimum_rep_movsb_threshold)
+    __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
+  else
+    __x86_rep_movsb_threshold = rep_movsb_threshold;
+
+# if HAVE_TUNABLES
+  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
+# endif
 }
 
 #endif
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 21565474839efffc..ad470f79ef7769fc 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -605,6 +605,10 @@ no_cpuid:
   TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
   cpu_features->non_temporal_threshold
     = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
+  cpu_features->rep_movsb_threshold
+    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
+  cpu_features->rep_stosb_threshold
+    = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
   cpu_features->data_cache_size
     = TUNABLE_GET (x86_data_cache_size, long int, NULL);
   cpu_features->shared_cache_size
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index e7ea9e8ece3e8211..0f19c64352c4d7f1 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -102,6 +102,10 @@ struct cpu_features
   unsigned long int shared_cache_size;
   /* Threshold to use non temporal store.  */
   unsigned long int non_temporal_threshold;
+  /* Threshold to use "rep movsb".  */
+  unsigned long int rep_movsb_threshold;
+  /* Threshold to use "rep stosb".  */
+  unsigned long int rep_stosb_threshold;
 };
 
 /* Used from outside of glibc to get access to the CPU features
diff --git a/sysdeps/x86/dl-tunables.list b/sysdeps/x86/dl-tunables.list
index 2a457d0eec9c3122..e066313a1d1dd009 100644
--- a/sysdeps/x86/dl-tunables.list
+++ b/sysdeps/x86/dl-tunables.list
@@ -30,6 +30,30 @@ glibc {
     x86_non_temporal_threshold {
       type: SIZE_T
     }
+    x86_rep_movsb_threshold {
+      type: SIZE_T
+      # Since there is overhead to set up REP MOVSB operation, REP MOVSB
+      # isn't faster on short data.  The memcpy micro benchmark in glibc
+      # shows that 2KB is the approximate value above which REP MOVSB
+      # becomes faster than SSE2 optimization on processors with Enhanced
+      # REP MOVSB.  Since larger register size can move more data with a
+      # single load and store, the threshold is higher with larger register
+      # size.  Note: Since the REP MOVSB threshold must be greater than 8
+      # times of vector size, the minium value must be updated at run-time.
+      minval: 1
+      default: 2048
+    }
+    x86_rep_stosb_threshold {
+      type: SIZE_T
+      # Since there is overhead to set up REP STOSB operation, REP STOSB
+      # isn't faster on short data.  The memset micro benchmark in glibc
+      # shows that 2KB is the approximate value above which REP STOSB
+      # becomes faster on processors with Enhanced REP STOSB.  Since the
+      # stored value is fixed, larger register size has minimal impact
+      # on threshold.
+      minval: 1
+      default: 2048
+    }
     x86_data_cache_size {
       type: SIZE_T
     }
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index e2ede45e9f68791b..c952576cfdf6e3e6 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -56,17 +56,6 @@
 # endif
 #endif
 
-/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
-   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
-   memcpy micro benchmark in glibc shows that 2KB is the approximate
-   value above which REP MOVSB becomes faster than SSE2 optimization
-   on processors with Enhanced REP MOVSB.  Since larger register size
-   can move more data with a single load and store, the threshold is
-   higher with larger register size.  */
-#ifndef REP_MOVSB_THRESHOLD
-# define REP_MOVSB_THRESHOLD	(2048 * (VEC_SIZE / 16))
-#endif
-
 #ifndef PREFETCH
 # define PREFETCH(addr) prefetcht0 addr
 #endif
@@ -245,9 +234,6 @@ L(movsb):
 	leaq	(%rsi,%rdx), %r9
 	cmpq	%r9, %rdi
 	/* Avoid slow backward REP MOVSB.  */
-# if REP_MOVSB_THRESHOLD <= (VEC_SIZE * 8)
-#  error Unsupported REP_MOVSB_THRESHOLD and VEC_SIZE!
-# endif
 	jb	L(more_8x_vec_backward)
 1:
 	movq	%rdx, %rcx
@@ -323,7 +309,7 @@ L(between_2_3):
 
 #if defined USE_MULTIARCH && IS_IN (libc)
 L(movsb_more_2x_vec):
-	cmpq	$REP_MOVSB_THRESHOLD, %rdx
+	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
 	ja	L(movsb)
 #endif
 L(more_2x_vec):
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index dc9cb88b37a5477a..270a1d49b34be9f5 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -58,16 +58,6 @@
 # endif
 #endif
 
-/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
-   up REP STOSB operation, REP STOSB isn't faster on short data.  The
-   memset micro benchmark in glibc shows that 2KB is the approximate
-   value above which REP STOSB becomes faster on processors with
-   Enhanced REP STOSB.  Since the stored value is fixed, larger register
-   size has minimal impact on threshold.  */
-#ifndef REP_STOSB_THRESHOLD
-# define REP_STOSB_THRESHOLD		2048
-#endif
-
 #ifndef SECTION
 # error SECTION is not defined!
 #endif
@@ -173,7 +163,7 @@ ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms))
 	ret
 
 L(stosb_more_2x_vec):
-	cmpq	$REP_STOSB_THRESHOLD, %rdx
+	cmp	__x86_rep_stosb_threshold(%rip), %RDX_LP
 	ja	L(stosb)
 #endif
 L(more_2x_vec):