--- a/configure
+++ b/configure
@@ -115,6 +115,7 @@
     echo ' [--static] [--64] [--libdir=LIBDIR] [--sharedlibdir=LIBDIR]' | tee -a configure.log
     echo ' [--includedir=INCLUDEDIR] [--archs="-arch i386 -arch x86_64"]' | tee -a configure.log
     echo ' [--dfltcc]' | tee -a configure.log
+    echo ' [--simd-slide-hash]' | tee -a configure.log
         exit 0 ;;
     -p*=* | --prefix=*) prefix=`echo $1 | sed 's/.*=//'`; shift ;;
     -e*=* | --eprefix=*) exec_prefix=`echo $1 | sed 's/.*=//'`; shift ;;
@@ -144,6 +145,11 @@
             PIC_OBJC="$PIC_OBJC dfltcc.lo"
             shift
             ;;
+    --simd-slide-hash)
+            OBJC="$OBJC slide_avx2.o slide_sse.o"
+            PIC_OBJC="$PIC_OBJC slide_avx2.lo slide_sse.lo"
+            shift
+            ;;
     *)
             echo "unknown option: $1" | tee -a configure.log
             echo "$0 --help for help" | tee -a configure.log
--- a/Makefile.in
+++ b/Makefile.in
@@ -152,6 +152,22 @@
 	$(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/dfltcc.o $(SRCDIR)contrib/s390/dfltcc.c
 	-@mv objs/dfltcc.o $@
 
+slide_sse.o: $(SRCDIR)slide_sse.c
+	$(CC) $(CFLAGS) $(ZINC) -msse2 -c -o $@ $(SRCDIR)slide_sse.c
+
+slide_sse.lo: $(SRCDIR)slide_sse.c
+	-@mkdir objs 2>/dev/null || test -d objs
+	$(CC) $(SFLAGS) $(ZINC) -DPIC -msse2 -c -o objs/slide_sse.o $(SRCDIR)slide_sse.c
+	-@mv objs/slide_sse.o $@
+
+slide_avx2.o: $(SRCDIR)slide_avx2.c
+	$(CC) $(CFLAGS) $(ZINC) -mavx2 -c -o $@ $(SRCDIR)slide_avx2.c
+
+slide_avx2.lo: $(SRCDIR)slide_avx2.c
+	-@mkdir objs 2>/dev/null || test -d objs
+	$(CC) $(SFLAGS) $(ZINC) -DPIC -mavx2 -c -o objs/slide_avx2.o $(SRCDIR)slide_avx2.c
+	-@mv objs/slide_avx2.o $@
+
 crc32_test.o: $(SRCDIR)test/crc32_test.c $(SRCDIR)zlib.h zconf.h
 	$(CC) $(CFLAGS) $(ZINCOUT) -c -o $@ $(SRCDIR)test/crc32_test.c
--- /dev/null
+++ b/slide_sse.c
@@ -0,0 +1,47 @@
+/*
+ * SSE optimized hash slide
+ *
+ * Copyright (C) 2017 Intel Corporation
+ * Authors:
+ *   Arjan van de Ven <arjan@linux.intel.com>
+ *   Jim Kukunas <james.t.kukunas@linux.intel.com>
+ *
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+#include "deflate.h"
+#include <immintrin.h>
+
+void slide_hash_sse(deflate_state *s)
+{
+    unsigned n;
+    Posf *p;
+    uInt wsize = s->w_size;
+    z_const __m128i xmm_wsize = _mm_set1_epi16(s->w_size);
+
+    n = s->hash_size;
+    p = &s->head[n] - 8;
+    do {
+        __m128i value, result;
+
+        value = _mm_loadu_si128((__m128i *)p);
+        result= _mm_subs_epu16(value, xmm_wsize);
+        _mm_storeu_si128((__m128i *)p, result);
+        p -= 8;
+        n -= 8;
+    } while (n > 0);
+
+#ifndef FASTEST
+    n = wsize;
+    p = &s->prev[n] - 8;
+    do {
+        __m128i value, result;
+
+        value = _mm_loadu_si128((__m128i *)p);
+        result= _mm_subs_epu16(value, xmm_wsize);
+        _mm_storeu_si128((__m128i *)p, result);
+
+        p -= 8;
+        n -= 8;
+    } while (n > 0);
+#endif
+}
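
For reference, each _mm_subs_epu16() call above is an unsigned saturating subtraction: eight 16-bit table entries are lowered by wsize in one step, and any entry smaller than wsize clamps to zero instead of wrapping, which matches the "m >= wsize ? m - wsize : 0" rule of zlib's scalar slide. A scalar sketch of what a single SSE iteration computes (slide_block_scalar is a hypothetical name used only for illustration; Pos, Posf and uInt are the zlib types pulled in by the same #include "deflate.h"):

/* Illustrative scalar equivalent of one _mm_subs_epu16() step above:
 * lower eight consecutive 16-bit entries by wsize, saturating at zero. */
static void slide_block_scalar(Posf *p, uInt wsize)
{
    int i;
    for (i = 0; i < 8; i++)
        p[i] = (Pos)(p[i] >= wsize ? p[i] - wsize : 0);
}
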
--- /dev/null
+++ b/slide_avx2.c
@@ -0,0 +1,44 @@
+/*
+ * AVX2 optimized hash slide
+ *
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+#include "deflate.h"
+#include <immintrin.h>
+
+void slide_hash_avx2(deflate_state *s)
+{
+    unsigned n;
+    Posf *p;
+    uInt wsize = s->w_size;
+    z_const __m256i ymm_wsize = _mm256_set1_epi16(s->w_size);
+
+    n = s->hash_size;
+    p = &s->head[n] - 16;
+    do {
+        __m256i value, result;
+
+        value = _mm256_loadu_si256((__m256i *)p);
+        result= _mm256_subs_epu16(value, ymm_wsize);
+        _mm256_storeu_si256((__m256i *)p, result);
+        p -= 16;
+        n -= 16;
+    } while (n > 0);
+
+#ifndef FASTEST
+    n = wsize;
+    p = &s->prev[n] - 16;
+    do {
+        __m256i value, result;
+
+        value = _mm256_loadu_si256((__m256i *)p);
+        result= _mm256_subs_epu16(value, ymm_wsize);
+        _mm256_storeu_si256((__m256i *)p, result);
+
+        p -= 16;
+        n -= 16;
+    } while (n > 0);
+#endif
+}
--- a/deflate.c
+++ b/deflate.c
@@ -90,6 +90,9 @@
 
 local int deflateStateCheck OF((z_streamp strm));
 local void slide_hash OF((deflate_state *s));
+local void slide_hash_c OF((deflate_state *s));
+extern void slide_hash_sse (deflate_state *s);
+extern void slide_hash_avx2 (deflate_state *s);
 local void fill_window OF((deflate_state *s));
 local block_state deflate_stored OF((deflate_state *s, int flush));
 local block_state deflate_fast OF((deflate_state *s, int flush));
@@ -212,7 +215,7 @@
  * bit values at the expense of memory usage). We slide even when level == 0 to
  * keep the hash table consistent if we switch back to level > 0 later.
  */
-local void slide_hash(s)
+local void slide_hash_c(s)
     deflate_state *s;
 {
     unsigned n, m;
@@ -238,6 +241,13 @@
 #endif
 }
 
+local void slide_hash(deflate_state *s) {
+    #ifdef AVX2_SLIDE
+    slide_hash_avx2(s);
+    #endif
+    slide_hash_sse(s);
+}
+
 /* ========================================================================= */
 int ZEXPORT deflateInit_(strm, level, version, stream_size)
     z_streamp strm;
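
As written, slide_hash() chooses its SIMD path at compile time: it always calls slide_hash_sse(), and when AVX2_SLIDE is defined it calls slide_hash_avx2() first as well, so the scalar slide_hash_c() is never reached through this dispatcher. An alternative, not part of this diff, is to pick the implementation at run time from CPU features. A minimal sketch, assuming an x86 build with GCC or Clang and their __builtin_cpu_supports() builtin, reusing the functions introduced above:

/* Hypothetical runtime dispatcher -- a sketch only, not the patch's code.
 * Uses the widest vector path the CPU reports, falling back to the
 * scalar slide_hash_c() otherwise. */
local void slide_hash(deflate_state *s)
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__x86_64__) || defined(__i386__))
    if (__builtin_cpu_supports("avx2")) {
        slide_hash_avx2(s);
        return;
    }
    if (__builtin_cpu_supports("sse2")) {
        slide_hash_sse(s);
        return;
    }
#endif
    slide_hash_c(s);
}

This sketch assumes both SIMD objects were built in (the --simd-slide-hash configure path above); otherwise the calls to slide_hash_sse()/slide_hash_avx2() would need equivalent conditional compilation.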