Patch: add an SSE2/AVX2-optimized slide_hash to zlib (new slide_sse.c and slide_avx2.c, configure --simd-slide-hash option, Makefile.in rules, deflate.c dispatch).
--- a/configure
+++ b/configure
@@ -115,6 +115,7 @@
       echo '    [--static] [--64] [--libdir=LIBDIR] [--sharedlibdir=LIBDIR]' | tee -a configure.log
       echo '    [--includedir=INCLUDEDIR] [--archs="-arch i386 -arch x86_64"]' | tee -a configure.log
       echo '    [--dfltcc]' | tee -a configure.log
+      echo '    [--simd-slide-hash]' | tee -a configure.log
         exit 0 ;;
     -p*=* | --prefix=*) prefix=`echo $1 | sed 's/.*=//'`; shift ;;
     -e*=* | --eprefix=*) exec_prefix=`echo $1 | sed 's/.*=//'`; shift ;;
@@ -144,6 +145,13 @@
 	    PIC_OBJC="$PIC_OBJC dfltcc.lo"
       shift
       ;; 
+    --simd-slide-hash)
+	    OBJC="$OBJC slide_avx2.o slide_sse.o"
+	    PIC_OBJC="$PIC_OBJC slide_avx2.lo slide_sse.lo"
+	    CFLAGS="$CFLAGS -DSSE_SLIDE"
+	    SFLAGS="$SFLAGS -DSSE_SLIDE"
+      shift
+      ;;
     *)
       echo "unknown option: $1" | tee -a configure.log
       echo "$0 --help for help" | tee -a configure.log
--- a/Makefile.in
+++ b/Makefile.in
@@ -152,6 +152,22 @@
 	$(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/dfltcc.o $(SRCDIR)contrib/s390/dfltcc.c
 	-@mv objs/dfltcc.o $@
 
+slide_sse.o: $(SRCDIR)slide_sse.c $(SRCDIR)deflate.h
+	$(CC) $(CFLAGS) $(ZINC) -msse2 -c -o $@ $(SRCDIR)slide_sse.c
+
+slide_sse.lo: $(SRCDIR)slide_sse.c $(SRCDIR)deflate.h
+	-@mkdir objs 2>/dev/null || test -d objs
+	$(CC) $(SFLAGS) $(ZINC) -DPIC -msse2 -c -o objs/slide_sse.o $(SRCDIR)slide_sse.c
+	-@mv objs/slide_sse.o $@
+
+slide_avx2.o: $(SRCDIR)slide_avx2.c $(SRCDIR)deflate.h
+	$(CC) $(CFLAGS) $(ZINC) -mavx2 -c -o $@ $(SRCDIR)slide_avx2.c
+
+slide_avx2.lo: $(SRCDIR)slide_avx2.c $(SRCDIR)deflate.h
+	-@mkdir objs 2>/dev/null || test -d objs
+	$(CC) $(SFLAGS) $(ZINC) -DPIC -mavx2 -c -o objs/slide_avx2.o $(SRCDIR)slide_avx2.c
+	-@mv objs/slide_avx2.o $@
+
 crc32_test.o: $(SRCDIR)test/crc32_test.c $(SRCDIR)zlib.h zconf.h
 	$(CC) $(CFLAGS) $(ZINCOUT) -c -o $@ $(SRCDIR)test/crc32_test.c
 
--- /dev/null
+++ b/slide_sse.c
@@ -0,0 +1,53 @@
+/*
+ * SSE2 optimized hash slide
+ *
+ * Copyright (C) 2017 Intel Corporation
+ * Authors:
+ *   Arjan van de Ven	<arjan@linux.intel.com>
+ *   Jim Kukunas	<james.t.kukunas@linux.intel.com>
+ *
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+#include "deflate.h"
+#include <immintrin.h>
+
+/* Slide the head (and, unless FASTEST, the prev) hash tables down by
+ * w_size positions, 8 16-bit entries per iteration.  Saturating
+ * subtraction clamps entries <= w_size to zero, matching slide_hash_c.
+ * Assumes hash_size and w_size are multiples of 8 -- both are powers
+ * of two >= 256 in zlib, so this always holds.
+ */
+void slide_hash_sse(deflate_state *s)
+{
+    unsigned n;
+    Posf *p;
+    uInt wsize = s->w_size;
+    /* (short) cast: w_size <= 32768, so the value always fits */
+    const __m128i xmm_wsize = _mm_set1_epi16((short)wsize);
+
+    n = s->hash_size;
+    p = &s->head[n] - 8;
+    do {
+        __m128i value, result;
+
+        value = _mm_loadu_si128((__m128i *)p);
+        result = _mm_subs_epu16(value, xmm_wsize);
+        _mm_storeu_si128((__m128i *)p, result);
+        p -= 8;
+        n -= 8;
+    } while (n > 0);
+
+#ifndef FASTEST
+    n = wsize;
+    p = &s->prev[n] - 8;
+    do {
+        __m128i value, result;
+
+        value = _mm_loadu_si128((__m128i *)p);
+        result = _mm_subs_epu16(value, xmm_wsize);
+        _mm_storeu_si128((__m128i *)p, result);
+        p -= 8;
+        n -= 8;
+    } while (n > 0);
+#endif
+}
--- /dev/null
+++ b/slide_avx2.c
@@ -0,0 +1,50 @@
+/*
+ * AVX2 optimized hash slide
+ *
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+#include "deflate.h"
+#include <immintrin.h>
+
+/* Slide the head (and, unless FASTEST, the prev) hash tables down by
+ * w_size positions, 16 16-bit entries per iteration.  Saturating
+ * subtraction clamps entries <= w_size to zero, matching slide_hash_c.
+ * Assumes hash_size and w_size are multiples of 16 -- both are powers
+ * of two >= 256 in zlib, so this always holds.
+ */
+void slide_hash_avx2(deflate_state *s)
+{
+    unsigned n;
+    Posf *p;
+    uInt wsize = s->w_size;
+    /* (short) cast: w_size <= 32768, so the value always fits */
+    const __m256i ymm_wsize = _mm256_set1_epi16((short)wsize);
+
+    n = s->hash_size;
+    p = &s->head[n] - 16;
+    do {
+        __m256i value, result;
+
+        value = _mm256_loadu_si256((__m256i *)p);
+        result = _mm256_subs_epu16(value, ymm_wsize);
+        _mm256_storeu_si256((__m256i *)p, result);
+        p -= 16;
+        n -= 16;
+    } while (n > 0);
+
+#ifndef FASTEST
+    n = wsize;
+    p = &s->prev[n] - 16;
+    do {
+        __m256i value, result;
+
+        value = _mm256_loadu_si256((__m256i *)p);
+        result = _mm256_subs_epu16(value, ymm_wsize);
+        _mm256_storeu_si256((__m256i *)p, result);
+        p -= 16;
+        n -= 16;
+    } while (n > 0);
+#endif
+}
--- a/deflate.c
+++ b/deflate.c
@@ -90,6 +90,9 @@
 
 local int deflateStateCheck      OF((z_streamp strm));
 local void slide_hash     OF((deflate_state *s));
+local void slide_hash_c   OF((deflate_state *s));
+extern void slide_hash_sse  OF((deflate_state *s));
+extern void slide_hash_avx2 OF((deflate_state *s));
 local void fill_window    OF((deflate_state *s));
 local block_state deflate_stored OF((deflate_state *s, int flush));
 local block_state deflate_fast   OF((deflate_state *s, int flush));
@@ -212,7 +215,7 @@
  * bit values at the expense of memory usage). We slide even when level == 0 to
  * keep the hash table consistent if we switch back to level > 0 later.
  */
-local void slide_hash(s)
+local void slide_hash_c(s)
     deflate_state *s;
 {
     unsigned n, m;
@@ -238,6 +241,22 @@
 #endif
 }
 
+/* Dispatch to the best slide_hash implementation selected at build
+ * time (configure --simd-slide-hash defines SSE_SLIDE; AVX2_SLIDE may
+ * additionally be defined for AVX2-capable targets).  Exactly one
+ * implementation runs per call: the previous version fell through
+ * after the AVX2 call and slid the tables a second time, subtracting
+ * w_size twice and corrupting the hash chains.
+ */
+local void slide_hash(deflate_state *s) {
+#if defined(AVX2_SLIDE)
+    slide_hash_avx2(s);
+#elif defined(SSE_SLIDE)
+    slide_hash_sse(s);
+#else
+    slide_hash_c(s);
+#endif
+}
+
 /* ========================================================================= */
 int ZEXPORT deflateInit_(strm, level, version, stream_size)
     z_streamp strm;