076f82
commit d201c59177b98946d7f80145e7b4d02991d04805
076f82
Author: Noah Goldstein <goldstein.w.n@gmail.com>
076f82
Date:   Fri Jun 24 09:42:12 2022 -0700
076f82
076f82
    x86: Align entry for memrchr to 64-bytes.
076f82
    
076f82
    The function was tuned around 64-byte entry alignment and performs
076f82
    better for all sizes with it.
076f82
    
076f82
    As well, different code paths were explicitly written to touch the
076f82
    minimum number of cache lines, i.e. sizes <= 32 touch only the entry
076f82
    cache line.
076f82
    
076f82
    (cherry picked from commit 227afaa67213efcdce6a870ef5086200f1076438)
076f82
076f82
diff --git a/sysdeps/x86_64/multiarch/memrchr-avx2.S b/sysdeps/x86_64/multiarch/memrchr-avx2.S
076f82
index 5f8e0be18cfe4fad..edd8180ba1ede9a5 100644
076f82
--- a/sysdeps/x86_64/multiarch/memrchr-avx2.S
076f82
+++ b/sysdeps/x86_64/multiarch/memrchr-avx2.S
076f82
@@ -35,7 +35,7 @@
076f82
 # define VEC_SIZE			32
076f82
 # define PAGE_SIZE			4096
076f82
 	.section SECTION(.text), "ax", @progbits
076f82
-ENTRY(MEMRCHR)
076f82
+ENTRY_P2ALIGN(MEMRCHR, 6)
076f82
 # ifdef __ILP32__
076f82
 	/* Clear upper bits.  */
076f82
 	and	%RDX_LP, %RDX_LP