ad1f37
From 2dfdc5b7d6943c0ac60eef63e361e2a50f9da610 Mon Sep 17 00:00:00 2001
ad1f37
From: Ilya Leoshkevich <iii@linux.ibm.com>
ad1f37
Date: Thu, 19 Mar 2020 11:52:03 +0100
ad1f37
Subject: [PATCH] s390x: vectorize crc32
ad1f37
ad1f37
Use vector extensions when compiling for s390x and binutils knows
ad1f37
about them. At runtime, check whether kernel supports vector
ad1f37
extensions (it has to be not just the CPU, but also the kernel) and
ad1f37
choose between the regular and the vectorized implementations.
ad1f37
---
ad1f37
 Makefile.in             |   9 ++
ad1f37
 configure               |  28 ++++++
ad1f37
 contrib/s390/crc32-vx.c | 195 ++++++++++++++++++++++++++++++++++++++++
ad1f37
 crc32.c                 |  55 +++++++++++-
ad1f37
 4 files changed, 285 insertions(+), 2 deletions(-)
ad1f37
 create mode 100644 contrib/s390/crc32-vx.c
ad1f37
ad1f37
diff --git a/Makefile.in b/Makefile.in
ad1f37
index 6070dcc..9e9743b 100644
ad1f37
--- a/Makefile.in
ad1f37
+++ b/Makefile.in
ad1f37
@@ -29,6 +29,7 @@ LDFLAGS=
ad1f37
 TEST_LDFLAGS=-L. libz.a
ad1f37
 LDSHARED=$(CC)
ad1f37
 CPP=$(CC) -E
ad1f37
+VGFMAFLAG=
ad1f37
 
ad1f37
 STATICLIB=libz.a
ad1f37
 SHAREDLIB=libz.so
ad1f37
@@ -179,6 +180,9 @@ crc32_power8.o: $(SRCDIR)contrib/power8-crc/vec_crc32.c
ad1f37
 crc32.o: $(SRCDIR)crc32.c
ad1f37
 	$(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)crc32.c
ad1f37
 
ad1f37
+crc32-vx.o: $(SRCDIR)contrib/s390/crc32-vx.c
ad1f37
+	$(CC) $(CFLAGS) $(VGFMAFLAG) $(ZINC) -c -o $@ $(SRCDIR)contrib/s390/crc32-vx.c
ad1f37
+
ad1f37
 deflate.o: $(SRCDIR)deflate.c
ad1f37
 	$(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)deflate.c
ad1f37
 
ad1f37
@@ -234,6 +238,11 @@ crc32.lo: $(SRCDIR)crc32.c
ad1f37
 	$(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/crc32.o $(SRCDIR)crc32.c
ad1f37
 	-@mv objs/crc32.o $@
ad1f37
 
ad1f37
+crc32-vx.lo: $(SRCDIR)contrib/s390/crc32-vx.c
ad1f37
+	-@mkdir objs 2>/dev/null || test -d objs
ad1f37
+	$(CC) $(SFLAGS) $(VGFMAFLAG) $(ZINC) -DPIC -c -o objs/crc32-vx.o $(SRCDIR)contrib/s390/crc32-vx.c
ad1f37
+	-@mv objs/crc32-vx.o $@
ad1f37
+
ad1f37
 deflate.lo: $(SRCDIR)deflate.c
ad1f37
 	-@mkdir objs 2>/dev/null || test -d objs
ad1f37
 	$(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/deflate.o $(SRCDIR)deflate.c
ad1f37
diff --git a/configure b/configure
ad1f37
index 70ed86b..7941f75 100755
ad1f37
--- a/configure
ad1f37
+++ b/configure
ad1f37
@@ -923,6 +923,32 @@ EOF
ad1f37
   fi
ad1f37
 fi
ad1f37
 
ad1f37
+# check if we are compiling for s390 and binutils support vector extensions
ad1f37
+VGFMAFLAG=-march=z13
ad1f37
cat > $test.c <<EOF
ad1f37
+#ifndef __s390__
ad1f37
+#error
ad1f37
+#endif
ad1f37
+EOF
ad1f37
+if try $CC -c $CFLAGS $VGFMAFLAG $test.c; then
ad1f37
+  CFLAGS="$CFLAGS -DHAVE_S390X_VX"
ad1f37
+  SFLAGS="$SFLAGS -DHAVE_S390X_VX"
ad1f37
+  OBJC="$OBJC crc32-vx.o"
ad1f37
+  PIC_OBJC="$PIC_OBJC crc32-vx.lo"
ad1f37
+  echo "Checking for s390 vector extensions... Yes." | tee -a configure.log
ad1f37
+
ad1f37
+  for flag in -mzarch -fzvector; do
ad1f37
+    if try $CC -c $CFLAGS $VGFMAFLAG $flag $test.c; then
ad1f37
+      VGFMAFLAG="$VGFMAFLAG $flag"
ad1f37
+      echo "Checking for $flag... Yes." | tee -a configure.log
ad1f37
+    else
ad1f37
+      echo "Checking for $flag... No." | tee -a configure.log
ad1f37
+    fi
ad1f37
+  done
ad1f37
+else
ad1f37
+  echo "Checking for s390 vector extensions... No." | tee -a configure.log
ad1f37
+fi
ad1f37
+
ad1f37
 # show the results in the log
ad1f37
 echo >> configure.log
ad1f37
 echo ALL = $ALL >> configure.log
ad1f37
@@ -955,6 +981,7 @@ echo mandir = $mandir >> configure.log
ad1f37
 echo prefix = $prefix >> configure.log
ad1f37
 echo sharedlibdir = $sharedlibdir >> configure.log
ad1f37
 echo uname = $uname >> configure.log
ad1f37
+echo VGFMAFLAG = $VGFMAFLAG >> configure.log
ad1f37
 
ad1f37
 # udpate Makefile with the configure results
ad1f37
 sed < ${SRCDIR}Makefile.in "
ad1f37
@@ -964,6 +991,7 @@ sed < ${SRCDIR}Makefile.in "
ad1f37
 /^LDFLAGS *=/s#=.*#=$LDFLAGS#
ad1f37
 /^LDSHARED *=/s#=.*#=$LDSHARED#
ad1f37
 /^CPP *=/s#=.*#=$CPP#
ad1f37
+/^VGFMAFLAG *=/s#=.*#=$VGFMAFLAG#
ad1f37
 /^STATICLIB *=/s#=.*#=$STATICLIB#
ad1f37
 /^SHAREDLIB *=/s#=.*#=$SHAREDLIB#
ad1f37
 /^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV#
ad1f37
diff --git a/contrib/s390/crc32-vx.c b/contrib/s390/crc32-vx.c
ad1f37
new file mode 100644
ad1f37
index 0000000..fa5387c
ad1f37
--- /dev/null
ad1f37
+++ b/contrib/s390/crc32-vx.c
ad1f37
@@ -0,0 +1,195 @@
ad1f37
+/*
ad1f37
+ * Hardware-accelerated CRC-32 variants for Linux on z Systems
ad1f37
+ *
ad1f37
+ * Use the z/Architecture Vector Extension Facility to accelerate the
ad1f37
+ * computing of bitreflected CRC-32 checksums.
ad1f37
+ *
ad1f37
+ * This CRC-32 implementation algorithm is bitreflected and processes
ad1f37
+ * the least-significant bit first (Little-Endian).
ad1f37
+ *
ad1f37
+ * This code was originally written by Hendrik Brueckner
ad1f37
+ * <brueckner@linux.vnet.ibm.com> for use in the Linux kernel and has been
ad1f37
+ * relicensed under the zlib license.
ad1f37
+ */
ad1f37
+
ad1f37
+#include "../../zutil.h"
ad1f37
+
ad1f37
+#include <stdint.h>
ad1f37
+#include <vecintrin.h>
ad1f37
+
ad1f37
+typedef unsigned char uv16qi __attribute__((vector_size(16)));
ad1f37
+typedef unsigned int uv4si __attribute__((vector_size(16)));
ad1f37
+typedef unsigned long long uv2di __attribute__((vector_size(16)));
ad1f37
+
ad1f37
+uint32_t crc32_le_vgfm_16(uint32_t crc, const unsigned char *buf, size_t len) {
ad1f37
+    /*
ad1f37
+     * The CRC-32 constant block contains reduction constants to fold and
ad1f37
+     * process particular chunks of the input data stream in parallel.
ad1f37
+     *
ad1f37
+     * For the CRC-32 variants, the constants are precomputed according to
ad1f37
+     * these definitions:
ad1f37
+     *
ad1f37
+     *      R1 = [(x4*128+32 mod P'(x) << 32)]' << 1
ad1f37
+     *      R2 = [(x4*128-32 mod P'(x) << 32)]' << 1
ad1f37
+     *      R3 = [(x128+32 mod P'(x) << 32)]'   << 1
ad1f37
+     *      R4 = [(x128-32 mod P'(x) << 32)]'   << 1
ad1f37
+     *      R5 = [(x64 mod P'(x) << 32)]'       << 1
ad1f37
+     *      R6 = [(x32 mod P'(x) << 32)]'       << 1
ad1f37
+     *
ad1f37
+     *      The bitreflected Barret reduction constant, u', is defined as
ad1f37
+     *      the bit reversal of floor(x**64 / P(x)).
ad1f37
+     *
ad1f37
+     *      where P(x) is the polynomial in the normal domain and the P'(x) is the
ad1f37
+     *      polynomial in the reversed (bitreflected) domain.
ad1f37
+     *
ad1f37
+     * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
ad1f37
+     *
ad1f37
+     *      P(x)  = 0x04C11DB7
ad1f37
+     *      P'(x) = 0xEDB88320
ad1f37
+     */
ad1f37
+    const uv16qi perm_le2be = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};  /* BE->LE mask */
ad1f37
+    const uv2di r2r1 = {0x1C6E41596, 0x154442BD4};                                     /* R2, R1 */
ad1f37
+    const uv2di r4r3 = {0x0CCAA009E, 0x1751997D0};                                     /* R4, R3 */
ad1f37
+    const uv2di r5 = {0, 0x163CD6124};                                                 /* R5 */
ad1f37
+    const uv2di ru_poly = {0, 0x1F7011641};                                            /* u' */
ad1f37
+    const uv2di crc_poly = {0, 0x1DB710641};                                           /* P'(x) << 1 */
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Load the initial CRC value.
ad1f37
+     *
ad1f37
+     * The CRC value is loaded into the rightmost word of the
ad1f37
+     * vector register and is later XORed with the LSB portion
ad1f37
+     * of the loaded input data.
ad1f37
+     */
ad1f37
+    uv2di v0 = {0, 0};
ad1f37
+    v0 = (uv2di)vec_insert(crc, (uv4si)v0, 3);
ad1f37
+
ad1f37
+    /* Load a 64-byte data chunk and XOR with CRC */
ad1f37
+    uv2di v1 = vec_perm(((uv2di *)buf)[0], ((uv2di *)buf)[0], perm_le2be);
ad1f37
+    uv2di v2 = vec_perm(((uv2di *)buf)[1], ((uv2di *)buf)[1], perm_le2be);
ad1f37
+    uv2di v3 = vec_perm(((uv2di *)buf)[2], ((uv2di *)buf)[2], perm_le2be);
ad1f37
+    uv2di v4 = vec_perm(((uv2di *)buf)[3], ((uv2di *)buf)[3], perm_le2be);
ad1f37
+
ad1f37
+    v1 ^= v0;
ad1f37
+    buf += 64;
ad1f37
+    len -= 64;
ad1f37
+
ad1f37
+    while (len >= 64) {
ad1f37
+        /* Load the next 64-byte data chunk */
ad1f37
+        uv16qi part1 = vec_perm(((uv16qi *)buf)[0], ((uv16qi *)buf)[0], perm_le2be);
ad1f37
+        uv16qi part2 = vec_perm(((uv16qi *)buf)[1], ((uv16qi *)buf)[1], perm_le2be);
ad1f37
+        uv16qi part3 = vec_perm(((uv16qi *)buf)[2], ((uv16qi *)buf)[2], perm_le2be);
ad1f37
+        uv16qi part4 = vec_perm(((uv16qi *)buf)[3], ((uv16qi *)buf)[3], perm_le2be);
ad1f37
+
ad1f37
+        /*
ad1f37
+         * Perform a GF(2) multiplication of the doublewords in V1 with
ad1f37
+         * the R1 and R2 reduction constants in V0.  The intermediate result
ad1f37
+         * is then folded (accumulated) with the next data chunk in PART1 and
ad1f37
+         * stored in V1. Repeat this step for the register contents
ad1f37
+         * in V2, V3, and V4 respectively.
ad1f37
+         */
ad1f37
+        v1 = (uv2di)vec_gfmsum_accum_128(r2r1, v1, part1);
ad1f37
+        v2 = (uv2di)vec_gfmsum_accum_128(r2r1, v2, part2);
ad1f37
+        v3 = (uv2di)vec_gfmsum_accum_128(r2r1, v3, part3);
ad1f37
+        v4 = (uv2di)vec_gfmsum_accum_128(r2r1, v4, part4);
ad1f37
+
ad1f37
+        buf += 64;
ad1f37
+        len -= 64;
ad1f37
+    }
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Fold V1 to V4 into a single 128-bit value in V1.  Multiply V1 with R3
ad1f37
+     * and R4 and accumulating the next 128-bit chunk until a single 128-bit
ad1f37
+     * value remains.
ad1f37
+     */
ad1f37
+    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
ad1f37
+    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v3);
ad1f37
+    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v4);
ad1f37
+
ad1f37
+    while (len >= 16) {
ad1f37
+        /* Load next data chunk */
ad1f37
+        v2 = vec_perm(*(uv2di *)buf, *(uv2di *)buf, perm_le2be);
ad1f37
+
ad1f37
+        /* Fold next data chunk */
ad1f37
+        v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
ad1f37
+
ad1f37
+        buf += 16;
ad1f37
+        len -= 16;
ad1f37
+    }
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Set up a vector register for byte shifts.  The shift value must
ad1f37
+     * be loaded in bits 1-4 in byte element 7 of a vector register.
ad1f37
+     * Shift by 8 bytes: 0x40
ad1f37
+     * Shift by 4 bytes: 0x20
ad1f37
+     */
ad1f37
+    uv16qi v9 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
ad1f37
+    v9 = vec_insert((unsigned char)0x40, v9, 7);
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
ad1f37
+     * to move R4 into the rightmost doubleword and set the leftmost
ad1f37
+     * doubleword to 0x1.
ad1f37
+     */
ad1f37
+    v0 = vec_srb(r4r3, (uv2di)v9);
ad1f37
+    v0[0] = 1;
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Compute GF(2) product of V1 and V0.  The rightmost doubleword
ad1f37
+     * of V1 is multiplied with R4.  The leftmost doubleword of V1 is
ad1f37
+     * multiplied by 0x1 and is then XORed with rightmost product.
ad1f37
+     * Implicitly, the intermediate leftmost product becomes padded
ad1f37
+     */
ad1f37
+    v1 = (uv2di)vec_gfmsum_128(v0, v1);
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Now do the final 32-bit fold by multiplying the rightmost word
ad1f37
+     * in V1 with R5 and XOR the result with the remaining bits in V1.
ad1f37
+     *
ad1f37
+     * To achieve this by a single VGFMAG, right shift V1 by a word
ad1f37
+     * and store the result in V2 which is then accumulated.  Use the
ad1f37
+     * vector unpack instruction to load the rightmost half of the
ad1f37
+     * doubleword into the rightmost doubleword element of V1; the other
ad1f37
+     * half is loaded in the leftmost doubleword.
ad1f37
+     * The vector register with CONST_R5 contains the R5 constant in the
ad1f37
+     * rightmost doubleword and the leftmost doubleword is zero to ignore
ad1f37
+     * the leftmost product of V1.
ad1f37
+     */
ad1f37
+    v9 = vec_insert((unsigned char)0x20, v9, 7);
ad1f37
+    v2 = vec_srb(v1, (uv2di)v9);
ad1f37
+    v1 = vec_unpackl((uv4si)v1);  /* Split rightmost doubleword */
ad1f37
+    v1 = (uv2di)vec_gfmsum_accum_128(r5, v1, (uv16qi)v2);
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Apply a Barret reduction to compute the final 32-bit CRC value.
ad1f37
+     *
ad1f37
+     * The input values to the Barret reduction are the degree-63 polynomial
ad1f37
+     * in V1 (R(x)), degree-32 generator polynomial, and the reduction
ad1f37
+     * constant u.  The Barret reduction result is the CRC value of R(x) mod
ad1f37
+     * P(x).
ad1f37
+     *
ad1f37
+     * The Barret reduction algorithm is defined as:
ad1f37
+     *
ad1f37
+     *    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
ad1f37
+     *    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
ad1f37
+     *    3. C(x)  = R(x) XOR T2(x) mod x^32
ad1f37
+     *
ad1f37
+     *  Note: The leftmost doubleword of vector register containing
ad1f37
+     *  CONST_RU_POLY is zero and, thus, the intermediate GF(2) product
ad1f37
+     *  is zero and does not contribute to the final result.
ad1f37
+     */
ad1f37
+
ad1f37
+    /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
ad1f37
+    v2 = vec_unpackl((uv4si)v1);
ad1f37
+    v2 = (uv2di)vec_gfmsum_128(ru_poly, v2);
ad1f37
+
ad1f37
+    /*
ad1f37
+     * Compute the GF(2) product of the CRC polynomial with T1(x) in
ad1f37
+     * V2 and XOR the intermediate result, T2(x), with the value in V1.
ad1f37
+     * The final result is stored in word element 2 of V2.
ad1f37
+     */
ad1f37
+    v2 = vec_unpackl((uv4si)v2);
ad1f37
+    v2 = (uv2di)vec_gfmsum_accum_128(crc_poly, v2, (uv16qi)v1);
ad1f37
+
ad1f37
+    return ((uv4si)v2)[2];
ad1f37
+}
ad1f37
diff --git a/crc32.c b/crc32.c
ad1f37
index 34132ea..dfa33ef 100644
ad1f37
--- a/crc32.c
ad1f37
+++ b/crc32.c
ad1f37
@@ -252,12 +252,54 @@ unsigned long crc32_vpmsum(unsigned long, const unsigned char FAR *, z_size_t);
ad1f37
 #endif
ad1f37
 #endif
ad1f37
 
ad1f37
+#ifdef HAVE_S390X_VX
ad1f37
+#include <sys/auxv.h>
ad1f37
+
ad1f37
+#define VX_MIN_LEN 64
ad1f37
+#define VX_ALIGNMENT 16L
ad1f37
+#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
ad1f37
+
ad1f37
+unsigned int crc32_le_vgfm_16(unsigned int crc, const unsigned char FAR *buf, z_size_t len);
ad1f37
+
ad1f37
+local unsigned long s390_crc32_vx(unsigned long crc, const unsigned char FAR *buf, z_size_t len)
ad1f37
+{
ad1f37
+    uint64_t prealign, aligned, remaining;
ad1f37
+
ad1f37
+    if (buf == Z_NULL) return 0UL;
ad1f37
+
ad1f37
+    if (len < VX_MIN_LEN + VX_ALIGN_MASK)
ad1f37
+        return crc32_big(crc, buf, len);
ad1f37
+
ad1f37
+    if ((uintptr_t)buf & VX_ALIGN_MASK) {
ad1f37
+        prealign = VX_ALIGNMENT - ((uintptr_t)buf & VX_ALIGN_MASK);
ad1f37
+        len -= prealign;
ad1f37
+        crc = crc32_big(crc, buf, prealign);
ad1f37
+        buf += prealign;
ad1f37
+    }
ad1f37
+    aligned = len & ~VX_ALIGN_MASK;
ad1f37
+    remaining = len & VX_ALIGN_MASK;
ad1f37
+
ad1f37
+    crc = crc32_le_vgfm_16(crc ^ 0xffffffff, buf, (size_t)aligned) ^ 0xffffffff;
ad1f37
+
ad1f37
+    if (remaining)
ad1f37
+        crc = crc32_big(crc, buf + aligned, remaining);
ad1f37
+
ad1f37
+    return crc;
ad1f37
+}
ad1f37
+#endif
ad1f37
+
ad1f37
 /* due to a quirk of gnu_indirect_function - "local" (aka static) is applied to
ad1f37
  * crc32_z which is not desired. crc32_z_ifunc is implictly "local" */
ad1f37
 #ifndef Z_IFUNC_ASM
ad1f37
 local
ad1f37
 #endif
ad1f37
-unsigned long (*(crc32_z_ifunc(void)))(unsigned long, const unsigned char FAR *, z_size_t)
ad1f37
+unsigned long (*(crc32_z_ifunc(
ad1f37
+#ifdef __s390__
ad1f37
+unsigned long hwcap
ad1f37
+#else
ad1f37
+void
ad1f37
+#endif
ad1f37
+)))(unsigned long, const unsigned char FAR *, z_size_t)
ad1f37
 {
ad1f37
 #if _ARCH_PWR8==1
ad1f37
 #if defined(__BUILTIN_CPU_SUPPORTS__)
ad1f37
@@ -269,6 +311,11 @@ unsigned long (*(crc32_z_ifunc(void)))(unsigned long, const unsigned char FAR *,
ad1f37
 #endif
ad1f37
 #endif /* _ARCH_PWR8 */
ad1f37
 
ad1f37
+#ifdef HAVE_S390X_VX
ad1f37
+    if (hwcap & HWCAP_S390_VX)
ad1f37
+        return s390_crc32_vx;
ad1f37
+#endif
ad1f37
+
ad1f37
 /* return a function pointer for optimized arches here */
ad1f37
 
ad1f37
 #ifdef DYNAMIC_CRC_TABLE
ad1f37
@@ -301,7 +348,11 @@ unsigned long ZEXPORT crc32_z(crc, buf, len)
ad1f37
     static unsigned long ZEXPORT (*crc32_func)(unsigned long, const unsigned char FAR *, z_size_t) = NULL;
ad1f37
 
ad1f37
     if (!crc32_func)
ad1f37
-        crc32_func = crc32_z_ifunc();
ad1f37
+        crc32_func = crc32_z_ifunc(
ad1f37
+#ifdef __s390__
ad1f37
+            getauxval(AT_HWCAP)
ad1f37
+#endif
ad1f37
+        );
ad1f37
     return (*crc32_func)(crc, buf, len);
ad1f37
 }
ad1f37
 
ad1f37
-- 
ad1f37
2.25.1
ad1f37