diff --git a/valgrind-3.18.1-amd64-more-spec-rules.patch b/valgrind-3.18.1-amd64-more-spec-rules.patch
new file mode 100644
index 0000000..87794ee
--- /dev/null
+++ b/valgrind-3.18.1-amd64-more-spec-rules.patch
@@ -0,0 +1,105 @@
+commit 595341b150312d2407bd43304449bf39ec3e1fa8
+Author: Julian Seward <jseward@acm.org>
+Date:   Sat Nov 13 19:59:07 2021 +0100
+
+    amd64 front end: add more spec rules:
+    
+       S  after SHRQ
+       Z  after SHLQ
+       NZ after SHLQ
+       Z  after SHLL
+       S  after SHLL
+    
+    The lack of at least one of these was observed to cause occasional false
+    positives in Memcheck.
+    
+    Plus add commented-out cases so as to complete the set of 12 rules
+    {Z,NZ,S,NS} after {SHRQ,SHLQ,SHLL}.  Those remain commented out
+    because I have so far not found any use cases for them.
+
+diff --git a/VEX/priv/guest_amd64_helpers.c b/VEX/priv/guest_amd64_helpers.c
+index 9d61e7a0f..ba71c1b62 100644
+--- a/VEX/priv/guest_amd64_helpers.c
++++ b/VEX/priv/guest_amd64_helpers.c
+@@ -1823,16 +1823,26 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
+       /*---------------- SHRQ ----------------*/
+ 
+       if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondZ)) {
+-         /* SHRQ, then Z --> test dep1 == 0 */
++         /* SHRQ, then Z --> test result[63:0] == 0 */
+          return unop(Iop_1Uto64,
+                      binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+       }
+       if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNZ)) {
+-         /* SHRQ, then NZ --> test dep1 != 0 */
++         /* SHRQ, then NZ --> test result[63:0] != 0 */
+          return unop(Iop_1Uto64,
+                      binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+       }
+ 
++      if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondS)) {
++         /* SHRQ, then S --> (ULong)result[63] (result is in dep1) */
++         return binop(Iop_Shr64, cc_dep1, mkU8(63));
++      }
++      // No known test case for this, hence disabled:
++      //if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNS)) {
++      //   /* SHRQ, then NS --> (ULong) ~ result[63] */
++      //   vassert(0);
++      //}
++
+       /*---------------- SHRL ----------------*/
+ 
+       if (isU64(cc_op, AMD64G_CC_OP_SHRL) && isU64(cond, AMD64CondZ)) {
+@@ -1881,6 +1891,52 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
+       //                     mkU32(0)));
+       //}
+ 
++      /*---------------- SHLQ ----------------*/
++
++      if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondZ)) {
++         /* SHLQ, then Z --> test result[63:0] == 0 */
++         return unop(Iop_1Uto64,
++                     binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
++      }
++      if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondNZ)) {
++         /* SHLQ, then NZ --> test result[63:0] != 0 */
++         return unop(Iop_1Uto64,
++                     binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
++      }
++
++      //if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondS)) {
++      //   /* SHLQ, then S --> (ULong)result[63] */
++      //   vassert(0);
++      //}
++      //if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondNS)) {
++      //   /* SHLQ, then NS --> (ULong) ~ result[63] */
++      //   vassert(0);
++      //}
++
++      /*---------------- SHLL ----------------*/
++
++      if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondZ)) {
++         /* SHLL, then Z --> test result[31:0] == 0 */
++         return unop(Iop_1Uto64,
++                     binop(Iop_CmpEQ32, unop(Iop_64to32, cc_dep1),
++                           mkU32(0)));
++      }
++      //if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondNZ)) {
++      //   /* SHLL, then NZ --> test result[31:0] != 0 */
++      //   vassert(0);
++      //}
++
++      if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondS)) {
++         /* SHLL, then S --> (ULong)result[31] */
++         return binop(Iop_And64,
++                      binop(Iop_Shr64, cc_dep1, mkU8(31)),
++                      mkU64(1));
++      }
++      //if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondNS)) {
++      //   /* SHLL, then NS --> (ULong) ~ result[31] */
++      //   vassert(0);
++      //}
++
+       /*---------------- COPY ----------------*/
+       /* This can happen, as a result of amd64 FP compares: "comisd ... ;
+          jbe" for example. */
diff --git a/valgrind.spec b/valgrind.spec
index e2a97c2..42b55fd 100644
--- a/valgrind.spec
+++ b/valgrind.spec
@@ -117,6 +117,10 @@ Patch14: valgrind-3.18.1-arm64-ldaxp-stlxp.patch
 # KDE#445415 arm64 front end: alignment checks missing for atomic instructions.
 Patch15: valgrind-3.18.1-arm64-atomic-align.patch
 
+# commit 595341b150312d2407bd43304449bf39ec3e1fa8
+# amd64 front end: add more spec rules
+Patch16: valgrind-3.18.1-amd64-more-spec-rules.patch
+
 BuildRequires: make
 BuildRequires: glibc-devel
 
@@ -264,6 +268,7 @@ Valgrind User Manual for details.
 %patch13 -p1
 %patch14 -p1
 %patch15 -p1
+%patch16 -p1
 
 %build
 # LTO triggers undefined symbols in valgrind.  Valgrind has a --enable-lto
@@ -501,6 +506,7 @@ fi
 - Add valgrind-3.18.1-arm64-doubleword-cas.patch
 - Add valgrind-3.18.1-arm64-ldaxp-stlxp.patch
 - Add valgrind-3.18.1-arm64-atomic-align.patch
+- Add valgrind-3.18.1-amd64-more-spec-rules.patch
 
 * Mon Nov  1 2021 Mark Wielaard <mjw@fedoraproject.org> - 3.18.1-2
 - Add valgrind-3.18.1-dhat-tests-copy.patch
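
Reviewer note: the flag identities these spec rules encode can be sanity-checked
outside Valgrind.  The standalone C sketch below (our own helper names, not VEX
code) transcribes each newly enabled specialised expression from IR to C and
compares it against a naive computation of SF/ZF from the shift result, assuming
only the documented AMD64 semantics: after a shift, ZF is set iff the result is
zero and SF is the top bit of the result, both judged at the operation's width
(cc_dep1 holds the result, as in guest_amd64_helpers.c).

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Naive flag computations from the shift result (AMD64 semantics):
       ZF <=> result == 0 at the operation width, SF = top bit. */
    static uint64_t naive_Z64(uint64_t res) { return res == 0; }
    static uint64_t naive_S64(uint64_t res) { return (res >> 63) & 1; }
    static uint64_t naive_Z32(uint64_t res) { return (uint32_t)res == 0; }
    static uint64_t naive_S32(uint64_t res) { return ((uint32_t)res >> 31) & 1; }

    /* The specialised expressions the patch emits, transcribed to C. */
    static uint64_t spec_shrq_S(uint64_t cc_dep1)          /* Shr64 by 63 */
       { return cc_dep1 >> 63; }
    static uint64_t spec_shlq_Z(uint64_t cc_dep1)          /* CmpEQ64 vs 0 */
       { return cc_dep1 == 0; }
    static uint64_t spec_shlq_NZ(uint64_t cc_dep1)         /* CmpNE64 vs 0 */
       { return cc_dep1 != 0; }
    static uint64_t spec_shll_Z(uint64_t cc_dep1)          /* 64to32; CmpEQ32 */
       { return (uint32_t)cc_dep1 == 0; }
    static uint64_t spec_shll_S(uint64_t cc_dep1)          /* Shr64 31; And64 1 */
       { return (cc_dep1 >> 31) & 1; }

    int main(void)
    {
       /* Samples chosen to exercise the zero case, the sign bits at both
          widths, and nonzero upper halves with a zero low half. */
       static const uint64_t samples[] = {
          0, 1, 0x80000000ULL, 0xffffffffULL,
          0x8000000000000000ULL, 0xdeadbeef00000000ULL, UINT64_MAX
       };
       for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++) {
          uint64_t r = samples[i];
          assert(spec_shrq_S(r)  == naive_S64(r));
          assert(spec_shlq_Z(r)  == naive_Z64(r));
          assert(spec_shlq_NZ(r) == !naive_Z64(r));
          assert(spec_shll_Z(r)  == naive_Z32(r));
          assert(spec_shll_S(r)  == naive_S32(r));
       }
       printf("all spec-rule identities hold on the samples\n");
       return 0;
    }

This only models the arithmetic identities, not VEX IR evaluation; in
particular it shows why the SHLL S rule may read bit 31 of the full 64-bit
cc_dep1: bit 31 of the 64-bit value and bit 31 of its low 32 bits are the
same bit, whatever the upper half contains.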