diff --git a/SOURCES/libgcrypt-1.8.5-fips-hwfeatures.patch b/SOURCES/libgcrypt-1.8.5-fips-hwfeatures.patch
new file mode 100644
index 0000000..bbf758a
--- /dev/null
+++ b/SOURCES/libgcrypt-1.8.5-fips-hwfeatures.patch
@@ -0,0 +1,13 @@
+diff -up libgcrypt-1.8.5/src/hwfeatures.c.hw-fips libgcrypt-1.8.5/src/hwfeatures.c
+--- libgcrypt-1.8.5/src/hwfeatures.c.hw-fips	2021-06-25 11:55:55.843819137 +0200
++++ libgcrypt-1.8.5/src/hwfeatures.c	2021-06-25 11:56:00.925895390 +0200
+@@ -205,9 +205,6 @@ _gcry_detect_hw_features (void)
+ {
+   hw_features = 0;
+ 
+-  if (fips_mode ())
+-    return; /* Hardware support is not to be evaluated.  */
+-
+   parse_hwf_deny_file ();
+ 
+ #if defined (HAVE_CPU_ARCH_X86)
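With the early return removed, _gcry_detect_hw_features() now evaluates hardware capabilities even when libgcrypt runs in FIPS mode, and /etc/gcrypt/hwf.deny can still be used to mask individual features. As a quick way to see what was detected, the following standalone sketch (not part of the patch) uses the documented GCRYCTL_PRINT_CONFIG control command; the exact lines printed vary by libgcrypt version, and "ppc-vcrypto" is only an example feature name. Build with: gcc check-hwf.c -o check-hwf $(libgcrypt-config --cflags --libs).

#include <stdio.h>
#include <gcrypt.h>

int
main (void)
{
  /* Standard libgcrypt application initialization.  */
  if (!gcry_check_version (GCRYPT_VERSION))
    {
      fputs ("libgcrypt version mismatch\n", stderr);
      return 1;
    }
  gcry_control (GCRYCTL_DISABLE_SECMEM, 0);
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  /* Dump the build/runtime configuration; the detected hardware-feature
     list (e.g. "ppc-vcrypto" on POWER8 and later) is part of the output.  */
  gcry_control (GCRYCTL_PRINT_CONFIG, stderr);
  return 0;
}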
diff --git a/SOURCES/libgcrypt-1.8.5-ppc-aes-gcm.patch b/SOURCES/libgcrypt-1.8.5-ppc-aes-gcm.patch
new file mode 100644
index 0000000..02e120b
--- /dev/null
+++ b/SOURCES/libgcrypt-1.8.5-ppc-aes-gcm.patch
@@ -0,0 +1,1333 @@
+diff --git a/AUTHORS b/AUTHORS
+index ee336b2e..77055c25 100644
+--- a/AUTHORS
++++ b/AUTHORS
+@@ -29,6 +29,7 @@ List of Copyright holders
+   Copyright (C) 1996-1999 Peter Gutmann, Paul Kendall, and Chris Wedgwood
+   Copyright (C) 1996-2006 Peter Gutmann, Matt Thomlinson and Blake Coverett
+   Copyright (C) 2003 Nikos Mavroyanopoulos
++  Copyright (c) 2006 CRYPTOGAMS
+   Copyright (C) 2006-2007 NTT (Nippon Telegraph and Telephone Corporation)
+   Copyright (C) 2012-2019 g10 Code GmbH
+   Copyright (C) 2012 Simon Josefsson, Niels Möller
+diff --git a/LICENSES b/LICENSES
+index f6733a69..c19284e2 100644
+--- a/LICENSES
++++ b/LICENSES
+@@ -54,7 +54,6 @@ with any binary distributions derived from the GNU C Library.
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ #+end_quote
+ 
+-
+   For files:
+   - random/jitterentropy-base.c
+   - random/jitterentropy.h
+@@ -99,6 +98,48 @@ with any binary distributions derived from the GNU C Library.
+  * DAMAGE.
+ #+end_quote
+ 
++  For files:
++  - cipher/cipher-gcm-ppc.c
++
++#+begin_quote
++ Copyright (c) 2006, CRYPTOGAMS by <appro@openssl.org>
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++       * Redistributions of source code must retain copyright notices,
++         this list of conditions and the following disclaimer.
++
++       * Redistributions in binary form must reproduce the above
++         copyright notice, this list of conditions and the following
++         disclaimer in the documentation and/or other materials
++         provided with the distribution.
++
++       * Neither the name of the CRYPTOGAMS nor the names of its
++         copyright holder and contributors may be used to endorse or
++         promote products derived from this software without specific
++         prior written permission.
++
++ ALTERNATIVELY, provided that this notice is retained in full, this
++ product may be distributed under the terms of the GNU General Public
++ License (GPL), in which case the provisions of the GPL apply INSTEAD OF
++ those given above.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++#+end_quote
++
+ * X License
+ 
+   For files:
+diff --git a/cipher/Makefile.am b/cipher/Makefile.am
+index 1728e9f9..ab5d2a38 100644
+--- a/cipher/Makefile.am
++++ b/cipher/Makefile.am
+@@ -66,6 +66,7 @@ blowfish.c blowfish-amd64.S blowfish-arm.S \
+ cast5.c cast5-amd64.S cast5-arm.S \
+ chacha20.c chacha20-sse2-amd64.S chacha20-ssse3-amd64.S chacha20-avx2-amd64.S \
+   chacha20-armv7-neon.S \
++cipher-gcm-ppc.c \
+ crc.c \
+   crc-intel-pclmul.c crc-ppc.c \
+ des.c des-amd64.S \
+@@ -165,3 +166,9 @@ crc-ppc.o: $(srcdir)/crc-ppc.c Makefile
+ 
+ crc-ppc.lo: $(srcdir)/crc-ppc.c Makefile
+ 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++cipher-gcm-ppc.o: $(srcdir)/cipher-gcm-ppc.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++cipher-gcm-ppc.lo: $(srcdir)/cipher-gcm-ppc.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
+diff --git a/cipher/cipher-gcm-ppc.c b/cipher/cipher-gcm-ppc.c
+new file mode 100644
+index 00000000..ed27ef15
+--- /dev/null
++++ b/cipher/cipher-gcm-ppc.c
+@@ -0,0 +1,510 @@
++/* cipher-gcm-ppc.c  -  Power 8 vpmsum accelerated Galois Counter Mode
++ *                      implementation
++ * Copyright (C) 2019 Shawn Landden <shawn@git.icu>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ *
++ * Based on the GHASH implementation by Andy Polyakov from the CRYPTOGAMS
++ * distribution (ppc/ghashp8-ppc.pl). Specifically, it uses his register
++ * allocation (which then defers to your compiler's register allocation),
++ * instead of re-implementing Gerald Estrin's scheme of parallelized
++ * multiplication of polynomials, as I did not understand this algorithm at
++ * the time.
++ *
++ * Original copyright license follows:
++ *
++ *  Copyright (c) 2006, CRYPTOGAMS by <appro@openssl.org>
++ *  All rights reserved.
++ *
++ *  Redistribution and use in source and binary forms, with or without
++ *  modification, are permitted provided that the following conditions
++ *  are met:
++ *
++ *        * Redistributions of source code must retain copyright notices,
++ *          this list of conditions and the following disclaimer.
++ *
++ *        * Redistributions in binary form must reproduce the above
++ *          copyright notice, this list of conditions and the following
++ *          disclaimer in the documentation and/or other materials
++ *          provided with the distribution.
++ *
++ *        * Neither the name of the CRYPTOGAMS nor the names of its
++ *          copyright holder and contributors may be used to endorse or
++ *          promote products derived from this software without specific
++ *          prior written permission.
++ *
++ *  ALTERNATIVELY, provided that this notice is retained in full, this
++ *  product may be distributed under the terms of the GNU General Public
++ *  License (GPL), in which case the provisions of the GPL apply INSTEAD OF
++ *  those given above.
++ *
++ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
++ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
++ */
++
++#include <config.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <errno.h>
++#include <stdint.h>
++
++#include "g10lib.h"
++#include "cipher.h"
++#include "bufhelp.h"
++#include "./cipher-internal.h"
++
++#ifdef GCM_USE_PPC_VPMSUM
++
++#include <altivec.h>
++
++#define ALWAYS_INLINE inline __attribute__((always_inline))
++#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))
++
++#define ASM_FUNC_ATTR        NO_INSTRUMENT_FUNCTION
++#define ASM_FUNC_ATTR_INLINE ASM_FUNC_ATTR ALWAYS_INLINE
++
++typedef vector unsigned char vector16x_u8;
++typedef vector signed char vector16x_s8;
++typedef vector unsigned long long vector2x_u64;
++typedef vector unsigned long long block;
++
++static ASM_FUNC_ATTR_INLINE block
++asm_vpmsumd(block a, block b)
++{
++  block r;
++  __asm__("vpmsumd %0, %1, %2"
++	  : "=v" (r)
++	  : "v" (a), "v" (b));
++  return r;
++}
++
++static ASM_FUNC_ATTR_INLINE block
++asm_swap_u64(block a)
++{
++  __asm__("xxswapd %x0, %x1"
++          : "=wa" (a)
++          : "wa" (a));
++  return a;
++}
++
++static ASM_FUNC_ATTR_INLINE block
++asm_rot_block_left(block a)
++{
++  block zero = {0, 0};
++  block mask = {2, 0};
++  return __builtin_shuffle(a, zero, mask);
++}
++
++static ASM_FUNC_ATTR_INLINE block
++asm_rot_block_right(block a)
++{
++  block zero = {0, 0};
++  block mask = {1, 2};
++  return __builtin_shuffle(a, zero, mask);
++}
++
++/* vsl is a slightly strange function in the way the shift is passed... */
++static ASM_FUNC_ATTR_INLINE block
++asm_ashl_128(block a, vector16x_u8 shift)
++{
++  block r;
++  __asm__("vsl %0, %1, %2"
++          : "=v" (r)
++          : "v" (a), "v" (shift));
++  return r;
++}
++
++#define ALIGNED_LOAD(in_ptr) \
++  (vec_aligned_ld (0, (const unsigned char *)(in_ptr)))
++
++static ASM_FUNC_ATTR_INLINE block
++vec_aligned_ld(unsigned long offset, const unsigned char *ptr)
++{
++#ifndef WORDS_BIGENDIAN
++  block vec;
++  __asm__ ("lvx %0,%1,%2\n\t"
++	   : "=v" (vec)
++	   : "r" (offset), "r" ((uintptr_t)ptr)
++	   : "memory", "r0");
++  return vec;
++#else
++  return vec_vsx_ld (offset, ptr);
++#endif
++}
++
++#define STORE_TABLE(gcm_table, slot, vec) \
++  vec_aligned_st (((block)vec), slot * 16, (unsigned char *)(gcm_table));
++
++
++static ASM_FUNC_ATTR_INLINE void
++vec_aligned_st(block vec, unsigned long offset, unsigned char *ptr)
++{
++#ifndef WORDS_BIGENDIAN
++  __asm__ ("stvx %0,%1,%2\n\t"
++	   :
++	   : "v" (vec), "r" (offset), "r" ((uintptr_t)ptr)
++	   : "memory", "r0");
++#else
++  vec_vsx_st ((vector16x_u8)vec, offset, ptr);
++#endif
++}
++
++#define VEC_LOAD_BE(in_ptr, bswap_const) \
++  (vec_load_be (0, (const unsigned char *)(in_ptr), bswap_const))
++
++static ASM_FUNC_ATTR_INLINE block
++vec_load_be(unsigned long offset, const unsigned char *ptr,
++	    vector unsigned char be_bswap_const)
++{
++#ifndef WORDS_BIGENDIAN
++  block vec;
++  /* GCC vec_vsx_ld is generating two instructions on little-endian. Use
++   * lxvw4x directly instead. */
++  __asm__ ("lxvw4x %x0,%1,%2\n\t"
++	   : "=wa" (vec)
++	   : "r" (offset), "r" ((uintptr_t)ptr)
++	   : "memory", "r0");
++  __asm__ ("vperm %0,%1,%1,%2\n\t"
++	   : "=v" (vec)
++	   : "v" (vec), "v" (be_bswap_const));
++  return vec;
++#else
++  (void)be_bswap_const;
++  return vec_vsx_ld (offset, ptr);
++#endif
++}
++
++/* Power ghash based on papers:
++   "The Galois/Counter Mode of Operation (GCM)"; David A. McGrew, John Viega
++   "Intel® Carry-Less Multiplication Instruction and its Usage for Computing
++    the GCM Mode - Rev 2.01"; Shay Gueron, Michael E. Kounavis.
++
++   After saving the magic c2 constant and pre-formatted version of the key,
++   we pre-process the key for parallel hashing. This takes advantage of the
++   identity of addition over a galois field being identical to XOR, and thus
++   can be parallelized (S 2.2, page 3). We multiply and add (galois field
++   versions) the key over multiple iterations and save the result. This can
++   later be galois added (XORed) with parallel processed input (Estrin's
++   scheme).
++
++   The ghash "key" is a salt. */
++void ASM_FUNC_ATTR
++_gcry_ghash_setup_ppc_vpmsum (uint64_t *gcm_table, void *gcm_key)
++{
++  vector16x_u8 bswap_const =
++    { 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 };
++  vector16x_u8 c2 =
++    { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0b11000010 };
++  block T0, T1, T2;
++  block C2, H, H1, H1l, H1h, H2, H2l, H2h;
++  block H3l, H3, H3h, H4l, H4, H4h, T3, T4;
++  vector16x_s8 most_sig_of_H, t7, carry;
++  vector16x_u8 one = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
++
++  H = VEC_LOAD_BE(gcm_key, bswap_const);
++  most_sig_of_H = vec_splat((vector16x_s8)H, 15);
++  t7 = vec_splat_s8(7);
++  carry = most_sig_of_H >> t7;
++  carry &= c2; /* only interested in certain carries. */
++  H1 = asm_ashl_128(H, one);
++  H1 ^= (block)carry; /* complete the <<< 1 */
++
++  T1 = asm_swap_u64 (H1);
++  H1l = asm_rot_block_right (T1);
++  H1h = asm_rot_block_left (T1);
++  C2 = asm_rot_block_right ((block)c2);
++
++  STORE_TABLE (gcm_table, 0, C2);
++  STORE_TABLE (gcm_table, 1, H1l);
++  STORE_TABLE (gcm_table, 2, T1);
++  STORE_TABLE (gcm_table, 3, H1h);
++
++  /* pre-process coefficients for Gerald Estrin's scheme for parallel
++   * multiplication of polynomials
++   */
++  H2l = asm_vpmsumd (H1l, H1); /* do not need to mask in
++                                  because 0 * anything -> 0 */
++  H2 = asm_vpmsumd (T1, H1);
++  H2h = asm_vpmsumd (H1h, H1);
++
++  /* reduce 1 */
++  T0 = asm_vpmsumd (H2l, C2);
++
++  H2l ^= asm_rot_block_left (H2);;
++  H2h ^= asm_rot_block_right (H2);
++  H2l = asm_swap_u64 (H2l);
++  H2l ^= T0;
++  /* reduce 2 */
++  T0 = asm_swap_u64 (H2l);
++  H2l = asm_vpmsumd (H2l, C2);
++  H2 = H2l ^ H2h ^ T0;
++
++  T2 = asm_swap_u64 (H2);
++  H2l = asm_rot_block_right (T2);
++  H2h = asm_rot_block_left (T2);
++
++  STORE_TABLE (gcm_table, 4, H2l);
++  STORE_TABLE (gcm_table, 5, T2);
++  STORE_TABLE (gcm_table, 6, H2h);
++
++  H3l = asm_vpmsumd (H2l, H1);
++  H4l = asm_vpmsumd (H2l, H2);
++  H3 = asm_vpmsumd (T2, H1);
++  H4 = asm_vpmsumd (T2, H2);
++  H3h = asm_vpmsumd (H2h, H1);
++  H4h = asm_vpmsumd (H2h, H2);
++
++  T3 = asm_vpmsumd (H3l, C2);
++  T4 = asm_vpmsumd (H4l, C2);
++
++  H3l ^= asm_rot_block_left (H3);
++  H3h ^= asm_rot_block_right (H3);
++  H4l ^= asm_rot_block_left (H4);
++  H4h ^= asm_rot_block_right (H4);
++
++  H3 = asm_swap_u64 (H3l);
++  H4 = asm_swap_u64 (H4l);
++
++  H3 ^= T3;
++  H4 ^= T4;
++
++  /* We could have also b64 switched reduce and reduce2, however as we are
++     using the unrotated H and H2 above to vpmsum, this is marginally better. */
++  T3 = asm_swap_u64 (H3);
++  T4 = asm_swap_u64 (H4);
++
++  H3 = asm_vpmsumd (H3, C2);
++  H4 = asm_vpmsumd (H4, C2);
++
++  T3 ^= H3h;
++  T4 ^= H4h;
++  H3 ^= T3;
++  H4 ^= T4;
++  H3 = asm_swap_u64 (H3);
++  H4 = asm_swap_u64 (H4);
++
++  H3l = asm_rot_block_right (H3);
++  H3h = asm_rot_block_left (H3);
++  H4l = asm_rot_block_right (H4);
++  H4h = asm_rot_block_left (H4);
++
++  STORE_TABLE (gcm_table, 7, H3l);
++  STORE_TABLE (gcm_table, 8, H3);
++  STORE_TABLE (gcm_table, 9, H3h);
++  STORE_TABLE (gcm_table, 10, H4l);
++  STORE_TABLE (gcm_table, 11, H4);
++  STORE_TABLE (gcm_table, 12, H4h);
++}
++
++ASM_FUNC_ATTR_INLINE
++block
++vec_perm2(block l, block r, vector16x_u8 perm) {
++  block ret;
++  __asm__ ("vperm %0,%1,%2,%3\n\t"
++	   : "=v" (ret)
++	   : "v" (l), "v" (r), "v" (perm));
++  return ret;
++}
++
++void ASM_FUNC_ATTR
++_gcry_ghash_ppc_vpmsum (const byte *result, const void *const gcm_table,
++			const byte *const buf, const size_t nblocks)
++{
++  /* This const is strange, it is reversing the bytes, and also reversing
++     the u32s that get switched by lxvw4 and it also addresses bytes big-endian,
++     and is here due to lack of proper peep-hole optimization. */
++  vector16x_u8 bswap_const =
++    { 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 };
++  vector16x_u8 bswap_8_const =
++    { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
++  block c2, H0l, H0m, H0h, H4l, H4m, H4h, H2m, H3l, H3m, H3h, Hl;
++  block Hm, Hh, in, in0, in1, in2, in3, Hm_right, Hl_rotate, cur;
++  size_t blocks_remaining = nblocks, off = 0;
++  size_t not_multiple_of_four;
++  block t0;
++
++  cur = vec_load_be (0, result, bswap_const);
++
++  c2 = vec_aligned_ld (0, gcm_table);
++  H0l = vec_aligned_ld (16, gcm_table);
++  H0m = vec_aligned_ld (32, gcm_table);
++  H0h = vec_aligned_ld (48, gcm_table);
++
++  for (not_multiple_of_four = nblocks % 4; not_multiple_of_four;
++       not_multiple_of_four--)
++    {
++      in = vec_load_be (off, buf, bswap_const);
++      off += 16;
++      blocks_remaining--;
++      cur ^= in;
++
++      Hl = asm_vpmsumd (cur, H0l);
++      Hm = asm_vpmsumd (cur, H0m);
++      Hh = asm_vpmsumd (cur, H0h);
++
++      t0 = asm_vpmsumd (Hl, c2);
++
++      Hl ^= asm_rot_block_left (Hm);
++
++      Hm_right = asm_rot_block_right (Hm);
++      Hh ^= Hm_right;
++      Hl_rotate = asm_swap_u64 (Hl);
++      Hl_rotate ^= t0;
++      Hl = asm_swap_u64 (Hl_rotate);
++      Hl_rotate = asm_vpmsumd (Hl_rotate, c2);
++      Hl ^= Hh;
++      Hl ^= Hl_rotate;
++
++      cur = Hl;
++  }
++
++  if (blocks_remaining > 0)
++    {
++      vector16x_u8 hiperm =
++	{
++	  0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
++	  0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0
++	};
++      vector16x_u8 loperm =
++        {
++	  0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
++	  0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8
++	};
++      block Xl, Xm, Xh, Xl1, Xm1, Xh1, Xm2, Xl3, Xm3, Xh3, Xl_rotate;
++      block H21l, H21h, merge_l, merge_h;
++
++      H2m = vec_aligned_ld (48 + 32, gcm_table);
++      H3l = vec_aligned_ld (48 * 2 + 16, gcm_table);
++      H3m = vec_aligned_ld (48 * 2 + 32, gcm_table);
++      H3h = vec_aligned_ld (48 * 2 + 48, gcm_table);
++      H4l = vec_aligned_ld (48 * 3 + 16, gcm_table);
++      H4m = vec_aligned_ld (48 * 3 + 32, gcm_table);
++      H4h = vec_aligned_ld (48 * 3 + 48, gcm_table);
++
++      in0 = vec_load_be (off, buf, bswap_const);
++      in1 = vec_load_be (off + 16, buf, bswap_const);
++      in2 = vec_load_be (off + 32, buf, bswap_const);
++      in3 = vec_load_be (off + 48, buf, bswap_const);
++      blocks_remaining -= 4;
++      off += 64;
++
++      Xh = in0 ^ cur;
++
++      Xl1 = asm_vpmsumd (in1, H3l);
++      Xm1 = asm_vpmsumd (in1, H3m);
++      Xh1 = asm_vpmsumd (in1, H3h);
++
++      H21l = vec_perm2 (H2m, H0m, hiperm);
++      H21h = vec_perm2 (H2m, H0m, loperm);
++      merge_l = vec_perm2 (in2, in3, loperm);
++      merge_h = vec_perm2 (in2, in3, hiperm);
++
++      Xm2 = asm_vpmsumd (in2, H2m);
++      Xl3 = asm_vpmsumd (merge_l, H21l);
++      Xm3 = asm_vpmsumd (in3, H0m);
++      Xh3 = asm_vpmsumd (merge_h, H21h);
++
++      Xm2 ^= Xm1;
++      Xl3 ^= Xl1;
++      Xm3 ^= Xm2;
++      Xh3 ^= Xh1;
++
++      /* Gerald Estrin's scheme for parallel multiplication of polynomials */
++      for (;blocks_remaining > 0; blocks_remaining -= 4, off += 64)
++        {
++	  in0 = vec_load_be (off, buf, bswap_const);
++	  in1 = vec_load_be (off + 16, buf, bswap_const);
++	  in2 = vec_load_be (off + 32, buf, bswap_const);
++	  in3 = vec_load_be (off + 48, buf, bswap_const);
++
++	  Xl = asm_vpmsumd (Xh, H4l);
++	  Xm = asm_vpmsumd (Xh, H4m);
++	  Xh = asm_vpmsumd (Xh, H4h);
++	  Xl1 = asm_vpmsumd (in1, H3l);
++	  Xm1 = asm_vpmsumd (in1, H3m);
++	  Xh1 = asm_vpmsumd (in1, H3h);
++
++	  Xl ^= Xl3;
++	  Xm ^= Xm3;
++	  Xh ^= Xh3;
++	  merge_l = vec_perm2 (in2, in3, loperm);
++	  merge_h = vec_perm2 (in2, in3, hiperm);
++
++	  t0 = asm_vpmsumd (Xl, c2);
++	  Xl3 = asm_vpmsumd (merge_l, H21l);
++	  Xh3 = asm_vpmsumd (merge_h, H21h);
++
++	  Xl ^= asm_rot_block_left (Xm);
++	  Xh ^= asm_rot_block_right (Xm);
++
++	  Xl = asm_swap_u64 (Xl);
++	  Xl ^= t0;
++
++	  Xl_rotate = asm_swap_u64 (Xl);
++	  Xm2 = asm_vpmsumd (in2, H2m);
++	  Xm3 = asm_vpmsumd (in3, H0m);
++	  Xl = asm_vpmsumd (Xl, c2);
++
++	  Xl3 ^= Xl1;
++	  Xh3 ^= Xh1;
++	  Xh ^= in0;
++	  Xm2 ^= Xm1;
++	  Xh ^= Xl_rotate;
++	  Xm3 ^= Xm2;
++	  Xh ^= Xl;
++	}
++
++      Xl = asm_vpmsumd (Xh, H4l);
++      Xm = asm_vpmsumd (Xh, H4m);
++      Xh = asm_vpmsumd (Xh, H4h);
++
++      Xl ^= Xl3;
++      Xm ^= Xm3;
++
++      t0 = asm_vpmsumd (Xl, c2);
++
++      Xh ^= Xh3;
++      Xl ^= asm_rot_block_left (Xm);
++      Xh ^= asm_rot_block_right (Xm);
++
++      Xl = asm_swap_u64 (Xl);
++      Xl ^= t0;
++
++      Xl_rotate = asm_swap_u64 (Xl);
++      Xl = asm_vpmsumd (Xl, c2);
++      Xl_rotate ^= Xh;
++      Xl ^= Xl_rotate;
++
++      cur = Xl;
++    }
++
++  cur = (block)vec_perm ((vector16x_u8)cur, (vector16x_u8)cur, bswap_8_const);
++  STORE_TABLE (result, 0, cur);
++}
++
++#endif /* GCM_USE_PPC_VPMSUM */
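The "Power ghash" comment above boils down to one algebraic fact: because XOR is addition in GF(2^128), four GHASH steps Y <- (Y ^ m)*H can be folded into a single pass over the precomputed powers H, H^2, H^3, H^4, which is what the table slots written by _gcry_ghash_setup_ppc_vpmsum are for. The identity can be checked with a short standalone program; the sketch below is not part of the patch, uses a deliberately slow bit-serial GF(2^128) multiply, and all names (be128, xor128, mul128) are invented for illustration.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t hi, lo; } be128;   /* two big-endian halves */

static be128
xor128 (be128 a, be128 b)
{
  be128 r = { a.hi ^ b.hi, a.lo ^ b.lo };
  return r;
}

/* GHASH field multiplication, right-shift method (NIST SP 800-38D).  */
static be128
mul128 (be128 x, be128 y)
{
  be128 z = { 0, 0 };
  be128 v = y;
  int i;

  for (i = 0; i < 128; i++)
    {
      /* Bit i of x, most significant bit first.  */
      uint64_t xbit = (i < 64) ? (x.hi >> (63 - i)) & 1
                               : (x.lo >> (127 - i)) & 1;
      uint64_t lsb = v.lo & 1;

      if (xbit)
        z = xor128 (z, v);

      v.lo = (v.lo >> 1) | (v.hi << 63);
      v.hi >>= 1;
      if (lsb)
        v.hi ^= 0xe100000000000000ULL;   /* GCM reduction polynomial */
    }
  return z;
}

int
main (void)
{
  be128 H = { 0x66e94bd4ef8a2c3bULL, 0x884cfa59ca342b2eULL };
  be128 m[4] = { { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 } };
  be128 H2 = mul128 (H, H);
  be128 H3 = mul128 (H2, H);
  be128 H4 = mul128 (H3, H);
  be128 seq = { 0, 0 };
  be128 agg;
  int i;

  /* Sequential GHASH: Y <- (Y ^ m[i]) * H, one block at a time.  */
  for (i = 0; i < 4; i++)
    seq = mul128 (xor128 (seq, m[i]), H);

  /* Aggregated form used by the vpmsum code (Y starts at zero here):
     Y <- m0*H^4 ^ m1*H^3 ^ m2*H^2 ^ m3*H.  */
  agg = xor128 (xor128 (mul128 (m[0], H4), mul128 (m[1], H3)),
                xor128 (mul128 (m[2], H2), mul128 (m[3], H)));

  printf ("sequential: %016llx%016llx\n",
          (unsigned long long)seq.hi, (unsigned long long)seq.lo);
  printf ("aggregated: %016llx%016llx\n",
          (unsigned long long)agg.hi, (unsigned long long)agg.lo);
  return (seq.hi == agg.hi && seq.lo == agg.lo) ? 0 : 1;
}

Both outputs coincide, which is exactly what lets _gcry_ghash_ppc_vpmsum consume four input blocks per loop iteration using the table slots stored above.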
+diff --git a/cipher/cipher-gcm.c b/cipher/cipher-gcm.c
+index 32ec9fa0..b84a0698 100644
+--- a/cipher/cipher-gcm.c
++++ b/cipher/cipher-gcm.c
+@@ -61,6 +61,28 @@ ghash_armv8_ce_pmull (gcry_cipher_hd_t c, byte *result, const byte *buf,
+ 
+ #endif
+ 
++#ifdef GCM_USE_PPC_VPMSUM
++extern void _gcry_ghash_setup_ppc_vpmsum (void *gcm_table, void *gcm_key);
++
++/* result is 128-bits */
++extern unsigned int _gcry_ghash_ppc_vpmsum (byte *result, void *gcm_table,
++					    const byte *buf, size_t nblocks);
++
++static void
++ghash_setup_ppc_vpmsum (gcry_cipher_hd_t c)
++{
++  _gcry_ghash_setup_ppc_vpmsum(c->u_mode.gcm.gcm_table, c->u_mode.gcm.u_ghash_key.key);
++}
++
++static unsigned int
++ghash_ppc_vpmsum (gcry_cipher_hd_t c, byte *result, const byte *buf,
++		  size_t nblocks)
++{
++  _gcry_ghash_ppc_vpmsum(result, c->u_mode.gcm.gcm_table, buf,
++			 nblocks);
++  return 0;
++}
++#endif /* GCM_USE_PPC_VPMSUM */
+ 
+ #ifdef GCM_USE_TABLES
+ static const u16 gcmR[256] = {
+@@ -403,7 +425,8 @@ ghash_internal (gcry_cipher_hd_t c, byte *result, const byte *buf,
+ static void
+ setupM (gcry_cipher_hd_t c)
+ {
+-#if defined(GCM_USE_INTEL_PCLMUL) || defined(GCM_USE_ARM_PMULL)
++#if defined(GCM_USE_INTEL_PCLMUL) || defined(GCM_USE_ARM_PMULL) || \
++    defined(GCM_USE_S390X_CRYPTO) || defined(GCM_USE_PPC_VPMSUM)
+   unsigned int features = _gcry_get_hw_features ();
+ #endif
+ 
+@@ -423,7 +446,24 @@ setupM (gcry_cipher_hd_t c)
+       ghash_setup_armv8_ce_pmull (c);
+     }
+ #endif
+-  else
++#ifdef GCM_USE_PPC_VPMSUM
++  else if (features & HWF_PPC_VCRYPTO)
++    {
++      c->u_mode.gcm.ghash_fn = ghash_ppc_vpmsum;
++      ghash_setup_ppc_vpmsum (c);
++    }
++#endif
++#ifdef GCM_USE_S390X_CRYPTO
++  else if (features & HWF_S390X_MSA)
++    {
++      if (kimd_query () & km_function_to_mask (KMID_FUNCTION_GHASH))
++	{
++	  c->u_mode.gcm.ghash_fn = ghash_s390x_kimd;
++	}
++    }
++#endif
++
++  if (c->u_mode.gcm.ghash_fn == NULL)
+     {
+       c->u_mode.gcm.ghash_fn = ghash_internal;
+       fillM (c);
+diff --git a/cipher/cipher-internal.h b/cipher/cipher-internal.h
+index a95e084b..a5fd3097 100644
+--- a/cipher/cipher-internal.h
++++ b/cipher/cipher-internal.h
+@@ -87,6 +87,18 @@
+ #endif /* GCM_USE_ARM_PMULL */
+ 
+ 
++/* GCM_USE_PPC_VPMSUM indicates whether to compile GCM with the PPC Power 8
++ * vpmsum polynomial multiplication instruction. */
++#undef GCM_USE_PPC_VPMSUM
++#if defined(GCM_USE_TABLES)
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && defined(__powerpc64__) && \
++    !defined(WORDS_BIGENDIAN) && defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && __GNUC__ >= 4
++#  define GCM_USE_PPC_VPMSUM 1
++#  define NEED_16BYTE_ALIGNED_CONTEXT 1 /* this also aligns gcm_table */
++#endif
++#endif /* GCM_USE_PPC_VPMSUM */
++
+ typedef unsigned int (*ghash_fn_t) (gcry_cipher_hd_t c, byte *result,
+                                     const byte *buf, size_t nblocks);
+ 
+@@ -277,9 +289,6 @@ struct gcry_cipher_handle
+         unsigned char key[MAX_BLOCKSIZE];
+       } u_ghash_key;
+ 
+-      /* GHASH implementation in use. */
+-      ghash_fn_t ghash_fn;
+-
+       /* Pre-calculated table for GCM. */
+ #ifdef GCM_USE_TABLES
+  #if (SIZEOF_UNSIGNED_LONG == 8 || defined(__x86_64__))
+@@ -290,6 +299,9 @@ struct gcry_cipher_handle
+       u32 gcm_table[4 * 16];
+  #endif
+ #endif
++
++      /* GHASH implementation in use. */
++      ghash_fn_t ghash_fn;
+     } gcm;
+ 
+     /* Mode specific storage for OCB mode. */
+diff --git a/configure.ac b/configure.ac
+index be35ce42..202ac888 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -2752,6 +2752,25 @@ case "${host}" in
+   ;;
+ esac
+ 
++# Arch specific GCM implementations
++case "${host}" in
++  powerpc64le-*-*)
++      GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
++  ;;
++  powerpc64-*-*)
++      GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
++  ;;
++  powerpc-*-*)
++      GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
++  ;;
++esac
++
++LIST_MEMBER(sm3, $enabled_digests)
++if test "$found" = "1" ; then
++   GCRYPT_DIGESTS="$GCRYPT_DIGESTS sm3.lo"
++   AC_DEFINE(USE_SM3, 1, [Defined if this module should be included])
++fi
++
+ LIST_MEMBER(scrypt, $enabled_kdfs)
+ if test "$found" = "1" ; then
+    GCRYPT_KDFS="$GCRYPT_KDFS scrypt.lo"
+diff --git a/tests/basic.c b/tests/basic.c
+index 0bd80201..06808d4a 100644
+--- a/tests/basic.c
++++ b/tests/basic.c
+@@ -1553,6 +1553,22 @@ _check_gcm_cipher (unsigned int step)
+         "\x0f\xc0\xc3\xb7\x80\xf2\x44\x45\x2d\xa3\xeb\xf1\xc5\xd8\x2c\xde"
+         "\xa2\x41\x89\x97\x20\x0e\xf8\x2e\x44\xae\x7e\x3f",
+         "\xa4\x4a\x82\x66\xee\x1c\x8e\xb0\xc8\xb5\xd4\xcf\x5a\xe9\xf1\x9a" },
++      { GCRY_CIPHER_AES256,
++        "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
++        "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08",
++        "\xca\xfe\xba\xbe\xfa\xce\xdb\xad\xde\xca\xf8\x88", 12,
++        "\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce\xde\xad\xbe\xef"
++        "\xab\xad\xda\xd2", 20,
++        "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
++        "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
++        "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
++        "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57\xba\x63\x7b\x39",
++        60,
++        "\x52\x2d\xc1\xf0\x99\x56\x7d\x07\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
++        "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
++        "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d\xa7\xb0\x8b\x10\x56\x82\x88\x38"
++        "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a\xbc\xc9\xf6\x62",
++        "\x76\xfc\x6e\xce\x0f\x4e\x17\x68\xcd\xdf\x88\x53\xbb\x2d\x55\x1b" },
+       /* Test vectors for overflowing CTR. */
+       /* After setiv, ctr_low: 0xffffffff */
+       { GCRY_CIPHER_AES256,
+
+diff --git a/cipher/cipher-gcm-ppc.c b/cipher/cipher-gcm-ppc.c
+index ed27ef15..2f60c09d 100644
+--- a/cipher/cipher-gcm-ppc.c
++++ b/cipher/cipher-gcm-ppc.c
+@@ -93,112 +93,157 @@ typedef vector signed char vector16x_s8;
+ typedef vector unsigned long long vector2x_u64;
+ typedef vector unsigned long long block;
+ 
++static ASM_FUNC_ATTR_INLINE block
++asm_xor(block a, block b)
++{
++  block r;
++  __asm__ volatile ("xxlxor %x0, %x1, %x2"
++		    : "=wa" (r)
++		    : "wa" (a), "wa" (b));
++  return r;
++}
++
+ static ASM_FUNC_ATTR_INLINE block
+ asm_vpmsumd(block a, block b)
+ {
+   block r;
+-  __asm__("vpmsumd %0, %1, %2"
+-	  : "=v" (r)
+-	  : "v" (a), "v" (b));
++  __asm__ volatile ("vpmsumd %0, %1, %2"
++		    : "=v" (r)
++		    : "v" (a), "v" (b));
+   return r;
+ }
+ 
+ static ASM_FUNC_ATTR_INLINE block
+ asm_swap_u64(block a)
+ {
+-  __asm__("xxswapd %x0, %x1"
+-          : "=wa" (a)
+-          : "wa" (a));
+-  return a;
++  block r;
++  __asm__ volatile ("xxswapd %x0, %x1"
++		    : "=wa" (r)
++		    : "wa" (a));
++  return r;
+ }
+ 
+ static ASM_FUNC_ATTR_INLINE block
+-asm_rot_block_left(block a)
++asm_mergelo(block l, block r)
+ {
+-  block zero = {0, 0};
+-  block mask = {2, 0};
+-  return __builtin_shuffle(a, zero, mask);
++  block ret;
++  __asm__ volatile ("xxmrgld %x0, %x1, %x2\n\t"
++		    : "=wa" (ret)
++		    : "wa" (l), "wa" (r));
++  return ret;
+ }
+ 
+ static ASM_FUNC_ATTR_INLINE block
+-asm_rot_block_right(block a)
++asm_mergehi(block l, block r)
+ {
+-  block zero = {0, 0};
+-  block mask = {1, 2};
+-  return __builtin_shuffle(a, zero, mask);
++  block ret;
++  __asm__ volatile ("xxmrghd %x0, %x1, %x2\n\t"
++		    : "=wa" (ret)
++		    : "wa" (l), "wa" (r));
++  return ret;
+ }
+ 
+-/* vsl is a slightly strange function in the way the shift is passed... */
+ static ASM_FUNC_ATTR_INLINE block
+-asm_ashl_128(block a, vector16x_u8 shift)
++asm_rot_block_left(block a)
+ {
+   block r;
+-  __asm__("vsl %0, %1, %2"
+-          : "=v" (r)
+-          : "v" (a), "v" (shift));
++  block zero = { 0, 0 };
++  __asm__ volatile ("xxmrgld %x0, %x1, %x2"
++		    : "=wa" (r)
++		    : "wa" (a), "wa" (zero));
+   return r;
+ }
+ 
+-#define ALIGNED_LOAD(in_ptr) \
+-  (vec_aligned_ld (0, (const unsigned char *)(in_ptr)))
++static ASM_FUNC_ATTR_INLINE block
++asm_rot_block_right(block a)
++{
++  block r;
++  block zero = { 0, 0 };
++  __asm__ volatile ("xxsldwi %x0, %x2, %x1, 2"
++		    : "=wa" (r)
++		    : "wa" (a), "wa" (zero));
++  return r;
++}
+ 
++/* vsl is a slightly strange function in the way the shift is passed... */
+ static ASM_FUNC_ATTR_INLINE block
+-vec_aligned_ld(unsigned long offset, const unsigned char *ptr)
++asm_ashl_128(block a, vector16x_u8 shift)
+ {
+-#ifndef WORDS_BIGENDIAN
+-  block vec;
+-  __asm__ ("lvx %0,%1,%2\n\t"
+-	   : "=v" (vec)
+-	   : "r" (offset), "r" ((uintptr_t)ptr)
+-	   : "memory", "r0");
+-  return vec;
+-#else
+-  return vec_vsx_ld (offset, ptr);
+-#endif
++  block r;
++  __asm__ volatile ("vsl %0, %1, %2"
++		    : "=v" (r)
++		    : "v" (a), "v" (shift));
++  return r;
+ }
+ 
+ #define STORE_TABLE(gcm_table, slot, vec) \
+-  vec_aligned_st (((block)vec), slot * 16, (unsigned char *)(gcm_table));
+-
++  vec_store_he (((block)vec), slot * 16, (unsigned char *)(gcm_table));
+ 
+ static ASM_FUNC_ATTR_INLINE void
+-vec_aligned_st(block vec, unsigned long offset, unsigned char *ptr)
++vec_store_he(block vec, unsigned long offset, unsigned char *ptr)
+ {
+ #ifndef WORDS_BIGENDIAN
+-  __asm__ ("stvx %0,%1,%2\n\t"
+-	   :
+-	   : "v" (vec), "r" (offset), "r" ((uintptr_t)ptr)
+-	   : "memory", "r0");
++  /* GCC vec_vsx_ld is generating two instructions on little-endian. Use
++   * lxvd2x directly instead. */
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ volatile ("stxvd2x %x0, 0, %1\n\t"
++		    :
++		    : "wa" (vec), "r" ((uintptr_t)ptr)
++		    : "memory", "r0");
++  else
++#endif
++    __asm__ volatile ("stxvd2x %x0, %1, %2\n\t"
++		      :
++		      : "wa" (vec), "r" (offset), "r" ((uintptr_t)ptr)
++		      : "memory", "r0");
+ #else
+   vec_vsx_st ((vector16x_u8)vec, offset, ptr);
+ #endif
+ }
+ 
+ #define VEC_LOAD_BE(in_ptr, bswap_const) \
+-  (vec_load_be (0, (const unsigned char *)(in_ptr), bswap_const))
++  vec_be_swap(vec_load_he (0, (const unsigned char *)(in_ptr)), bswap_const)
+ 
+ static ASM_FUNC_ATTR_INLINE block
+-vec_load_be(unsigned long offset, const unsigned char *ptr,
+-	    vector unsigned char be_bswap_const)
++vec_load_he(unsigned long offset, const unsigned char *ptr)
+ {
+ #ifndef WORDS_BIGENDIAN
+   block vec;
+   /* GCC vec_vsx_ld is generating two instructions on little-endian. Use
+-   * lxvw4x directly instead. */
+-  __asm__ ("lxvw4x %x0,%1,%2\n\t"
+-	   : "=wa" (vec)
+-	   : "r" (offset), "r" ((uintptr_t)ptr)
+-	   : "memory", "r0");
+-  __asm__ ("vperm %0,%1,%1,%2\n\t"
+-	   : "=v" (vec)
+-	   : "v" (vec), "v" (be_bswap_const));
++   * lxvd2x directly instead. */
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ volatile ("lxvd2x %x0, 0, %1\n\t"
++		    : "=wa" (vec)
++		    : "r" ((uintptr_t)ptr)
++		    : "memory", "r0");
++  else
++#endif
++    __asm__ volatile ("lxvd2x %x0, %1, %2\n\t"
++		      : "=wa" (vec)
++		      : "r" (offset), "r" ((uintptr_t)ptr)
++		      : "memory", "r0");
+   return vec;
+ #else
+-  (void)be_bswap_const;
+   return vec_vsx_ld (offset, ptr);
+ #endif
+ }
+ 
++static ASM_FUNC_ATTR_INLINE block
++vec_be_swap(block vec, vector16x_u8 be_bswap_const)
++{
++#ifndef WORDS_BIGENDIAN
++  __asm__ volatile ("vperm %0, %1, %1, %2\n\t"
++		    : "=v" (vec)
++		    : "v" (vec), "v" (be_bswap_const));
++#else
++  (void)be_bswap_const;
++#endif
++  return vec;
++}
++
++
+ /* Power ghash based on papers:
+    "The Galois/Counter Mode of Operation (GCM)"; David A. McGrew, John Viega
+    "Intel® Carry-Less Multiplication Instruction and its Usage for Computing
+@@ -216,15 +261,16 @@ vec_load_be(unsigned long offset, const unsigned char *ptr,
+ void ASM_FUNC_ATTR
+ _gcry_ghash_setup_ppc_vpmsum (uint64_t *gcm_table, void *gcm_key)
+ {
+-  vector16x_u8 bswap_const =
+-    { 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 };
+-  vector16x_u8 c2 =
++  static const vector16x_u8 bswap_const =
++    { ~7, ~6, ~5, ~4, ~3, ~2, ~1, ~0, ~15, ~14, ~13, ~12, ~11, ~10, ~9, ~8 };
++  static const vector16x_u8 c2 =
+     { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0b11000010 };
++  static const vector16x_u8 one =
++    { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+   block T0, T1, T2;
+   block C2, H, H1, H1l, H1h, H2, H2l, H2h;
+   block H3l, H3, H3h, H4l, H4, H4h, T3, T4;
+   vector16x_s8 most_sig_of_H, t7, carry;
+-  vector16x_u8 one = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+ 
+   H = VEC_LOAD_BE(gcm_key, bswap_const);
+   most_sig_of_H = vec_splat((vector16x_s8)H, 15);
+@@ -255,7 +301,7 @@ _gcry_ghash_setup_ppc_vpmsum (uint64_t *gcm_table, void *gcm_key)
+   /* reduce 1 */
+   T0 = asm_vpmsumd (H2l, C2);
+ 
+-  H2l ^= asm_rot_block_left (H2);;
++  H2l ^= asm_rot_block_left (H2);
+   H2h ^= asm_rot_block_right (H2);
+   H2l = asm_swap_u64 (H2l);
+   H2l ^= T0;
+@@ -321,45 +367,30 @@ _gcry_ghash_setup_ppc_vpmsum (uint64_t *gcm_table, void *gcm_key)
+   STORE_TABLE (gcm_table, 12, H4h);
+ }
+ 
+-ASM_FUNC_ATTR_INLINE
+-block
+-vec_perm2(block l, block r, vector16x_u8 perm) {
+-  block ret;
+-  __asm__ ("vperm %0,%1,%2,%3\n\t"
+-	   : "=v" (ret)
+-	   : "v" (l), "v" (r), "v" (perm));
+-  return ret;
+-}
+-
+ void ASM_FUNC_ATTR
+-_gcry_ghash_ppc_vpmsum (const byte *result, const void *const gcm_table,
+-			const byte *const buf, const size_t nblocks)
++_gcry_ghash_ppc_vpmsum (byte *result, const void *const gcm_table,
++			const byte *buf, const size_t nblocks)
+ {
+-  /* This const is strange, it is reversing the bytes, and also reversing
+-     the u32s that get switched by lxvw4 and it also addresses bytes big-endian,
+-     and is here due to lack of proper peep-hole optimization. */
+-  vector16x_u8 bswap_const =
+-    { 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 };
+-  vector16x_u8 bswap_8_const =
+-    { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
++  static const vector16x_u8 bswap_const =
++    { ~7, ~6, ~5, ~4, ~3, ~2, ~1, ~0, ~15, ~14, ~13, ~12, ~11, ~10, ~9, ~8 };
+   block c2, H0l, H0m, H0h, H4l, H4m, H4h, H2m, H3l, H3m, H3h, Hl;
+   block Hm, Hh, in, in0, in1, in2, in3, Hm_right, Hl_rotate, cur;
+-  size_t blocks_remaining = nblocks, off = 0;
++  size_t blocks_remaining = nblocks;
+   size_t not_multiple_of_four;
+   block t0;
+ 
+-  cur = vec_load_be (0, result, bswap_const);
++  cur = vec_be_swap (vec_load_he (0, result), bswap_const);
+ 
+-  c2 = vec_aligned_ld (0, gcm_table);
+-  H0l = vec_aligned_ld (16, gcm_table);
+-  H0m = vec_aligned_ld (32, gcm_table);
+-  H0h = vec_aligned_ld (48, gcm_table);
++  c2 = vec_load_he (0, gcm_table);
++  H0l = vec_load_he (16, gcm_table);
++  H0m = vec_load_he (32, gcm_table);
++  H0h = vec_load_he (48, gcm_table);
+ 
+   for (not_multiple_of_four = nblocks % 4; not_multiple_of_four;
+        not_multiple_of_four--)
+     {
+-      in = vec_load_be (off, buf, bswap_const);
+-      off += 16;
++      in = vec_be_swap (vec_load_he (0, buf), bswap_const);
++      buf += 16;
+       blocks_remaining--;
+       cur ^= in;
+ 
+@@ -385,62 +416,64 @@ _gcry_ghash_ppc_vpmsum (const byte *result, const void *const gcm_table,
+ 
+   if (blocks_remaining > 0)
+     {
+-      vector16x_u8 hiperm =
+-	{
+-	  0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
+-	  0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0
+-	};
+-      vector16x_u8 loperm =
+-        {
+-	  0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
+-	  0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8
+-	};
+       block Xl, Xm, Xh, Xl1, Xm1, Xh1, Xm2, Xl3, Xm3, Xh3, Xl_rotate;
+       block H21l, H21h, merge_l, merge_h;
+-
+-      H2m = vec_aligned_ld (48 + 32, gcm_table);
+-      H3l = vec_aligned_ld (48 * 2 + 16, gcm_table);
+-      H3m = vec_aligned_ld (48 * 2 + 32, gcm_table);
+-      H3h = vec_aligned_ld (48 * 2 + 48, gcm_table);
+-      H4l = vec_aligned_ld (48 * 3 + 16, gcm_table);
+-      H4m = vec_aligned_ld (48 * 3 + 32, gcm_table);
+-      H4h = vec_aligned_ld (48 * 3 + 48, gcm_table);
+-
+-      in0 = vec_load_be (off, buf, bswap_const);
+-      in1 = vec_load_be (off + 16, buf, bswap_const);
+-      in2 = vec_load_be (off + 32, buf, bswap_const);
+-      in3 = vec_load_be (off + 48, buf, bswap_const);
+-      blocks_remaining -= 4;
+-      off += 64;
+-
+-      Xh = in0 ^ cur;
++      block t1, t2;
++
++      H2m = vec_load_he (48 + 32, gcm_table);
++      H3l = vec_load_he (48 * 2 + 16, gcm_table);
++      H3m = vec_load_he (48 * 2 + 32, gcm_table);
++      H3h = vec_load_he (48 * 2 + 48, gcm_table);
++      H4l = vec_load_he (48 * 3 + 16, gcm_table);
++      H4m = vec_load_he (48 * 3 + 32, gcm_table);
++      H4h = vec_load_he (48 * 3 + 48, gcm_table);
++
++      in0 = vec_load_he (0, buf);
++      in1 = vec_load_he (16, buf);
++      in2 = vec_load_he (32, buf);
++      in3 = vec_load_he (48, buf);
++      in0 = vec_be_swap(in0, bswap_const);
++      in1 = vec_be_swap(in1, bswap_const);
++      in2 = vec_be_swap(in2, bswap_const);
++      in3 = vec_be_swap(in3, bswap_const);
++
++      Xh = asm_xor (in0, cur);
+ 
+       Xl1 = asm_vpmsumd (in1, H3l);
+       Xm1 = asm_vpmsumd (in1, H3m);
+       Xh1 = asm_vpmsumd (in1, H3h);
+ 
+-      H21l = vec_perm2 (H2m, H0m, hiperm);
+-      H21h = vec_perm2 (H2m, H0m, loperm);
+-      merge_l = vec_perm2 (in2, in3, loperm);
+-      merge_h = vec_perm2 (in2, in3, hiperm);
++      H21l = asm_mergehi (H2m, H0m);
++      H21h = asm_mergelo (H2m, H0m);
++      merge_l = asm_mergelo (in2, in3);
++      merge_h = asm_mergehi (in2, in3);
+ 
+       Xm2 = asm_vpmsumd (in2, H2m);
+       Xl3 = asm_vpmsumd (merge_l, H21l);
+       Xm3 = asm_vpmsumd (in3, H0m);
+       Xh3 = asm_vpmsumd (merge_h, H21h);
+ 
+-      Xm2 ^= Xm1;
+-      Xl3 ^= Xl1;
+-      Xm3 ^= Xm2;
+-      Xh3 ^= Xh1;
++      Xm2 = asm_xor (Xm2, Xm1);
++      Xl3 = asm_xor (Xl3, Xl1);
++      Xm3 = asm_xor (Xm3, Xm2);
++      Xh3 = asm_xor (Xh3, Xh1);
+ 
+       /* Gerald Estrin's scheme for parallel multiplication of polynomials */
+-      for (;blocks_remaining > 0; blocks_remaining -= 4, off += 64)
++      while (1)
+         {
+-	  in0 = vec_load_be (off, buf, bswap_const);
+-	  in1 = vec_load_be (off + 16, buf, bswap_const);
+-	  in2 = vec_load_be (off + 32, buf, bswap_const);
+-	  in3 = vec_load_be (off + 48, buf, bswap_const);
++	  buf += 64;
++	  blocks_remaining -= 4;
++	  if (!blocks_remaining)
++	    break;
++
++	  in0 = vec_load_he (0, buf);
++	  in1 = vec_load_he (16, buf);
++	  in2 = vec_load_he (32, buf);
++	  in3 = vec_load_he (48, buf);
++	  in1 = vec_be_swap(in1, bswap_const);
++	  in2 = vec_be_swap(in2, bswap_const);
++	  in3 = vec_be_swap(in3, bswap_const);
++	  in0 = vec_be_swap(in0, bswap_const);
+ 
+ 	  Xl = asm_vpmsumd (Xh, H4l);
+ 	  Xm = asm_vpmsumd (Xh, H4m);
+@@ -449,62 +482,63 @@ _gcry_ghash_ppc_vpmsum (const byte *result, const void *const gcm_table,
+ 	  Xm1 = asm_vpmsumd (in1, H3m);
+ 	  Xh1 = asm_vpmsumd (in1, H3h);
+ 
+-	  Xl ^= Xl3;
+-	  Xm ^= Xm3;
+-	  Xh ^= Xh3;
+-	  merge_l = vec_perm2 (in2, in3, loperm);
+-	  merge_h = vec_perm2 (in2, in3, hiperm);
++	  Xl = asm_xor (Xl, Xl3);
++	  Xm = asm_xor (Xm, Xm3);
++	  Xh = asm_xor (Xh, Xh3);
++	  merge_l = asm_mergelo (in2, in3);
++	  merge_h = asm_mergehi (in2, in3);
+ 
+ 	  t0 = asm_vpmsumd (Xl, c2);
+ 	  Xl3 = asm_vpmsumd (merge_l, H21l);
+ 	  Xh3 = asm_vpmsumd (merge_h, H21h);
+ 
+-	  Xl ^= asm_rot_block_left (Xm);
+-	  Xh ^= asm_rot_block_right (Xm);
++	  t1 = asm_rot_block_left (Xm);
++	  t2 = asm_rot_block_right (Xm);
++	  Xl = asm_xor(Xl, t1);
++	  Xh = asm_xor(Xh, t2);
+ 
+ 	  Xl = asm_swap_u64 (Xl);
+-	  Xl ^= t0;
++	  Xl = asm_xor(Xl, t0);
+ 
+ 	  Xl_rotate = asm_swap_u64 (Xl);
+ 	  Xm2 = asm_vpmsumd (in2, H2m);
+ 	  Xm3 = asm_vpmsumd (in3, H0m);
+ 	  Xl = asm_vpmsumd (Xl, c2);
+ 
+-	  Xl3 ^= Xl1;
+-	  Xh3 ^= Xh1;
+-	  Xh ^= in0;
+-	  Xm2 ^= Xm1;
+-	  Xh ^= Xl_rotate;
+-	  Xm3 ^= Xm2;
+-	  Xh ^= Xl;
++	  Xl3 = asm_xor (Xl3, Xl1);
++	  Xh3 = asm_xor (Xh3, Xh1);
++	  Xh = asm_xor (Xh, in0);
++	  Xm2 = asm_xor (Xm2, Xm1);
++	  Xh = asm_xor (Xh, Xl_rotate);
++	  Xm3 = asm_xor (Xm3, Xm2);
++	  Xh = asm_xor (Xh, Xl);
+ 	}
+ 
+       Xl = asm_vpmsumd (Xh, H4l);
+       Xm = asm_vpmsumd (Xh, H4m);
+       Xh = asm_vpmsumd (Xh, H4h);
+ 
+-      Xl ^= Xl3;
+-      Xm ^= Xm3;
++      Xl = asm_xor (Xl, Xl3);
++      Xm = asm_xor (Xm, Xm3);
+ 
+       t0 = asm_vpmsumd (Xl, c2);
+ 
+-      Xh ^= Xh3;
+-      Xl ^= asm_rot_block_left (Xm);
+-      Xh ^= asm_rot_block_right (Xm);
++      Xh = asm_xor (Xh, Xh3);
++      t1 = asm_rot_block_left (Xm);
++      t2 = asm_rot_block_right (Xm);
++      Xl = asm_xor (Xl, t1);
++      Xh = asm_xor (Xh, t2);
+ 
+       Xl = asm_swap_u64 (Xl);
+-      Xl ^= t0;
++      Xl = asm_xor (Xl, t0);
+ 
+       Xl_rotate = asm_swap_u64 (Xl);
+       Xl = asm_vpmsumd (Xl, c2);
+-      Xl_rotate ^= Xh;
+-      Xl ^= Xl_rotate;
+-
+-      cur = Xl;
++      Xh = asm_xor (Xh, Xl_rotate);
++      cur = asm_xor (Xh, Xl);
+     }
+ 
+-  cur = (block)vec_perm ((vector16x_u8)cur, (vector16x_u8)cur, bswap_8_const);
+-  STORE_TABLE (result, 0, cur);
++  vec_store_he (vec_be_swap (cur, bswap_const), 0, result);
+ }
+ 
+ #endif /* GCM_USE_PPC_VPMSUM */
+
+diff --git a/cipher/Makefile.am b/cipher/Makefile.am
+index ab5d2a38..7a777ef2 100644
+--- a/cipher/Makefile.am
++++ b/cipher/Makefile.am
+@@ -42,8 +42,7 @@ libcipher_la_LIBADD = $(GCRYPT_MODULES)
+ libcipher_la_SOURCES = \
+ cipher.c cipher-internal.h \
+ cipher-cbc.c cipher-cfb.c cipher-ofb.c cipher-ctr.c cipher-aeswrap.c \
+-cipher-ccm.c cipher-cmac.c cipher-gcm.c cipher-gcm-intel-pclmul.c \
+-  cipher-gcm-armv8-aarch32-ce.S cipher-gcm-armv8-aarch64-ce.S \
++cipher-ccm.c cipher-cmac.c cipher-gcm.c \
+ cipher-poly1305.c cipher-ocb.c cipher-xts.c \
+ cipher-selftest.c cipher-selftest.h \
+ pubkey.c pubkey-internal.h pubkey-util.c \
+@@ -66,7 +65,8 @@ blowfish.c blowfish-amd64.S blowfish-arm.S \
+ cast5.c cast5-amd64.S cast5-arm.S \
+ chacha20.c chacha20-sse2-amd64.S chacha20-ssse3-amd64.S chacha20-avx2-amd64.S \
+   chacha20-armv7-neon.S \
+-cipher-gcm-ppc.c \
++cipher-gcm-ppc.c cipher-gcm-intel-pclmul.c \
++  cipher-gcm-armv8-aarch32-ce.S cipher-gcm-armv8-aarch64-ce.S \
+ crc.c \
+   crc-intel-pclmul.c crc-ppc.c \
+ des.c des-amd64.S \
+diff --git a/configure.ac b/configure.ac
+index fd447906..9bcb1318 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -2754,14 +2754,18 @@ esac
+ 
+ # Arch specific GCM implementations
+ case "${host}" in
+-  powerpc64le-*-*)
+-      GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
++  i?86-*-* | x86_64-*-*)
++    GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-intel-pclmul.lo"
+   ;;
+-  powerpc64-*-*)
+-      GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
++  arm*-*-*)
++    GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-armv7-neon.lo"
++    GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-armv8-aarch32-ce.lo"
++  ;;
++  aarch64-*-*)
++    GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-armv8-aarch64-ce.lo"
+   ;;
+-  powerpc-*-*)
+-      GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
++  powerpc64le-*-* | powerpc64-*-* | powerpc-*-*)
++    GCRYPT_DIGESTS="$GCRYPT_DIGESTS cipher-gcm-ppc.lo"
+   ;;
+ esac
+ 
diff --git a/SOURCES/libgcrypt-1.8.5-ppc-bugfix.patch b/SOURCES/libgcrypt-1.8.5-ppc-bugfix.patch
new file mode 100644
index 0000000..3a7a146
--- /dev/null
+++ b/SOURCES/libgcrypt-1.8.5-ppc-bugfix.patch
@@ -0,0 +1,274 @@
+diff --git a/cipher/crc-ppc.c b/cipher/crc-ppc.c
+index 4d7f0add..b9a40130 100644
+--- a/cipher/crc-ppc.c
++++ b/cipher/crc-ppc.c
+@@ -154,26 +154,63 @@ static const vector16x_u8 bswap_const ALIGNED_64 =
+ #ifdef WORDS_BIGENDIAN
+ # define CRC_VEC_U64_DEF(lo, hi) { (hi), (lo) }
+ # define CRC_VEC_U64_LOAD(offs, ptr) \
+-          asm_swap_u64(vec_vsx_ld((offs), (const unsigned long long *)(ptr)))
++	  asm_swap_u64(asm_vec_u64_load(offs, ptr))
+ # define CRC_VEC_U64_LOAD_LE(offs, ptr) \
+-	  CRC_VEC_SWAP(vec_vsx_ld((offs), (const unsigned long long *)(ptr)))
++	  CRC_VEC_SWAP(asm_vec_u64_load(offs, ptr))
+ # define CRC_VEC_U64_LOAD_BE(offs, ptr) \
+-         vec_vsx_ld((offs), (const unsigned long long *)(ptr))
++	  asm_vec_u64_load(offs, ptr)
+ # define CRC_VEC_SWAP_TO_LE(v) CRC_VEC_SWAP(v)
+ # define CRC_VEC_SWAP_TO_BE(v) (v)
+ # define VEC_U64_LO 1
+ # define VEC_U64_HI 0
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++asm_vec_u64_load(unsigned long offset, const void *ptr)
++{
++  vector2x_u64 vecu64;
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ volatile ("lxvd2x %x0,0,%1\n\t"
++		      : "=wa" (vecu64)
++		      : "r" ((uintptr_t)ptr)
++		      : "memory");
++  else
++#endif
++    __asm__ volatile ("lxvd2x %x0,%1,%2\n\t"
++		      : "=wa" (vecu64)
++		      : "r" (offset), "r" ((uintptr_t)ptr)
++		      : "memory", "r0");
++  return vecu64;
++}
+ #else
+ # define CRC_VEC_U64_DEF(lo, hi) { (lo), (hi) }
+-# define CRC_VEC_U64_LOAD(offs, ptr) \
+-	  vec_vsx_ld((offs), (const unsigned long long *)(ptr))
+-# define CRC_VEC_U64_LOAD_LE(offs, ptr) CRC_VEC_U64_LOAD((offs), (ptr))
++# define CRC_VEC_U64_LOAD(offs, ptr) asm_vec_u64_load_le(offs, ptr)
++# define CRC_VEC_U64_LOAD_LE(offs, ptr) asm_vec_u64_load_le(offs, ptr)
+ # define CRC_VEC_U64_LOAD_BE(offs, ptr) asm_vec_u64_load_be(offs, ptr)
+ # define CRC_VEC_SWAP_TO_LE(v) (v)
+ # define CRC_VEC_SWAP_TO_BE(v) CRC_VEC_SWAP(v)
+ # define VEC_U64_LO 0
+ # define VEC_U64_HI 1
+ 
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++asm_vec_u64_load_le(unsigned long offset, const void *ptr)
++{
++  vector2x_u64 vecu64;
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ volatile ("lxvd2x %x0,0,%1\n\t"
++		      : "=wa" (vecu64)
++		      : "r" ((uintptr_t)ptr)
++		      : "memory");
++  else
++#endif
++    __asm__ volatile ("lxvd2x %x0,%1,%2\n\t"
++		      : "=wa" (vecu64)
++		      : "r" (offset), "r" ((uintptr_t)ptr)
++		      : "memory", "r0");
++  return asm_swap_u64(vecu64);
++}
++
+ static ASM_FUNC_ATTR_INLINE vector2x_u64
+ asm_vec_u64_load_be(unsigned int offset, const void *ptr)
+ {
+diff --git a/cipher/sha512-ppc.c b/cipher/sha512-ppc.c
+index a758e1ea..31ea25bf 100644
+--- a/cipher/sha512-ppc.c
++++ b/cipher/sha512-ppc.c
+@@ -115,14 +115,62 @@ vec_merge_idx0_elems(vector2x_u64 v0, vector2x_u64 v1)
+ static ASM_FUNC_ATTR_INLINE vector2x_u64
+ vec_vshasigma_u64(vector2x_u64 v, unsigned int a, unsigned int b)
+ {
+-  asm ("vshasigmad %0,%1,%2,%3"
+-       : "=v" (v)
+-       : "v" (v), "g" (a), "g" (b)
+-       : "memory");
++  __asm__ ("vshasigmad %0,%1,%2,%3"
++	   : "=v" (v)
++	   : "v" (v), "g" (a), "g" (b)
++	   : "memory");
+   return v;
+ }
+ 
+ 
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++vec_u64_load(unsigned long offset, const void *ptr)
++{
++  vector2x_u64 vecu64;
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ ("lxvd2x %x0,0,%1\n\t"
++	     : "=wa" (vecu64)
++	     : "r" ((uintptr_t)ptr)
++	     : "memory");
++  else
++#endif
++    __asm__ ("lxvd2x %x0,%1,%2\n\t"
++	     : "=wa" (vecu64)
++	     : "r" (offset), "r" ((uintptr_t)ptr)
++	     : "memory", "r0");
++#ifndef WORDS_BIGENDIAN
++  __asm__ ("xxswapd %x0, %x1"
++	   : "=wa" (vecu64)
++	   : "wa" (vecu64));
++#endif
++  return vecu64;
++}
++
++
++static ASM_FUNC_ATTR_INLINE void
++vec_u64_store(vector2x_u64 vecu64, unsigned long offset, void *ptr)
++{
++#ifndef WORDS_BIGENDIAN
++  __asm__ ("xxswapd %x0, %x1"
++	   : "=wa" (vecu64)
++	   : "wa" (vecu64));
++#endif
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ ("stxvd2x %x0,0,%1\n\t"
++	     :
++	     : "wa" (vecu64), "r" ((uintptr_t)ptr)
++	     : "memory");
++  else
++#endif
++    __asm__ ("stxvd2x %x0,%1,%2\n\t"
++	     :
++	     : "wa" (vecu64), "r" (offset), "r" ((uintptr_t)ptr)
++	     : "memory", "r0");
++}
++
++
+ /* SHA2 round in vector registers */
+ #define R(a,b,c,d,e,f,g,h,k,w) do                             \
+     {                                                         \
+@@ -168,13 +216,13 @@ _gcry_sha512_transform_ppc8(u64 state[8],
+   vector2x_u64 a, b, c, d, e, f, g, h, t1, t2;
+   u64 w[16];
+ 
+-  h0 = vec_vsx_ld (8 * 0, (unsigned long long *)state);
++  h0 = vec_u64_load (8 * 0, (unsigned long long *)state);
+   h1 = vec_rol_elems (h0, 1);
+-  h2 = vec_vsx_ld (8 * 2, (unsigned long long *)state);
++  h2 = vec_u64_load (8 * 2, (unsigned long long *)state);
+   h3 = vec_rol_elems (h2, 1);
+-  h4 = vec_vsx_ld (8 * 4, (unsigned long long *)state);
++  h4 = vec_u64_load (8 * 4, (unsigned long long *)state);
+   h5 = vec_rol_elems (h4, 1);
+-  h6 = vec_vsx_ld (8 * 6, (unsigned long long *)state);
++  h6 = vec_u64_load (8 * 6, (unsigned long long *)state);
+   h7 = vec_rol_elems (h6, 1);
+ 
+   while (nblks >= 2)
+@@ -514,10 +562,10 @@ _gcry_sha512_transform_ppc8(u64 state[8],
+   h2 = vec_merge_idx0_elems (h2, h3);
+   h4 = vec_merge_idx0_elems (h4, h5);
+   h6 = vec_merge_idx0_elems (h6, h7);
+-  vec_vsx_st (h0, 8 * 0, (unsigned long long *)state);
+-  vec_vsx_st (h2, 8 * 2, (unsigned long long *)state);
+-  vec_vsx_st (h4, 8 * 4, (unsigned long long *)state);
+-  vec_vsx_st (h6, 8 * 6, (unsigned long long *)state);
++  vec_u64_store (h0, 8 * 0, (unsigned long long *)state);
++  vec_u64_store (h2, 8 * 2, (unsigned long long *)state);
++  vec_u64_store (h4, 8 * 4, (unsigned long long *)state);
++  vec_u64_store (h6, 8 * 6, (unsigned long long *)state);
+ 
+   return sizeof(w);
+ }
+diff --git a/configure.ac b/configure.ac
+index b6b6455a..be35ce42 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1745,10 +1745,12 @@ AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX intrinsics],
+ 	AC_COMPILE_IFELSE([AC_LANG_SOURCE(
+ 	[[#include <altivec.h>
+ 	  typedef vector unsigned char block;
++	  typedef vector unsigned int vecu32;
+ 	  block fn(block in)
+ 	  {
+ 	    block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
+-	    return vec_cipher_be (t, in);
++	    vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
++	    return vec_cipher_be (t, in) ^ (block)y;
+ 	  }
+ 	  ]])],
+ 	[gcry_cv_cc_ppc_altivec=yes])
+@@ -1769,10 +1771,12 @@ if test "$gcry_cv_cc_ppc_altivec" = "no" &&
+     AC_COMPILE_IFELSE([AC_LANG_SOURCE(
+       [[#include <altivec.h>
+ 	typedef vector unsigned char block;
++	typedef vector unsigned int vecu32;
+ 	block fn(block in)
+ 	{
+ 	  block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
+-	  return vec_cipher_be (t, in);
++	  vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
++	  return vec_cipher_be (t, in) ^ (block)y;
+ 	}]])],
+       [gcry_cv_cc_ppc_altivec_cflags=yes])])
+   if test "$gcry_cv_cc_ppc_altivec_cflags" = "yes" ; then
+
+diff --git a/configure.ac b/configure.ac
+index 202ac888..fd447906 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -2562,13 +2562,13 @@ if test "$found" = "1" ; then
+          GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc-intel-pclmul.lo"
+       ;;
+       powerpc64le-*-*)
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS crc-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc-ppc.lo"
+       ;;
+       powerpc64-*-*)
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS crc-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc-ppc.lo"
+       ;;
+       powerpc-*-*)
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS crc-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc-ppc.lo"
+       ;;
+    esac
+ fi
+@@ -2635,17 +2635,17 @@ if test "$found" = "1" ; then
+       ;;
+       powerpc64le-*-*)
+          # Build with the crypto extension implementation
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha256-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-ppc.lo"
+       ;;
+       powerpc64-*-*)
+          # Big-Endian.
+          # Build with the crypto extension implementation
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha256-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-ppc.lo"
+       ;;
+       powerpc-*-*)
+          # Big-Endian.
+          # Build with the crypto extension implementation
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha256-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-ppc.lo"
+    esac
+ fi
+ 
+@@ -2667,17 +2667,17 @@ if test "$found" = "1" ; then
+       ;;
+       powerpc64le-*-*)
+          # Build with the crypto extension implementation
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha512-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-ppc.lo"
+       ;;
+       powerpc64-*-*)
+          # Big-Endian.
+          # Build with the crypto extension implementation
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha512-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-ppc.lo"
+       ;;
+       powerpc-*-*)
+          # Big-Endian.
+          # Build with the crypto extension implementation
+-         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha512-ppc.lo"
++         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-ppc.lo"
+    esac
+ 
+    if test x"$neonsupport" = xyes ; then
diff --git a/SOURCES/libgcrypt-1.8.5-ppc-chacha20-poly1305.patch b/SOURCES/libgcrypt-1.8.5-ppc-chacha20-poly1305.patch
new file mode 100644
index 0000000..2ac32f8
--- /dev/null
+++ b/SOURCES/libgcrypt-1.8.5-ppc-chacha20-poly1305.patch
@@ -0,0 +1,3521 @@
+From 83e50634789cab5071d648f66622cc1b3cf72318 Mon Sep 17 00:00:00 2001
+From: Eric Richter <erichte@linux.vnet.ibm.com>
+Date: Thu, 24 Jun 2021 18:31:51 -0600
+Subject: [PATCH] Improvements for ChaCha20 and Poly1305 on POWER
+
+---
+ cipher/Makefile.am            |   22 +
+ cipher/chacha20-new.c         | 1344 +++++++++++++++++++++++++++++++++
+ cipher/chacha20-ppc.c         |  646 ++++++++++++++++
+ cipher/chacha20.c             |    7 +
+ cipher/cipher-internal.h      |    9 +-
+ cipher/mpi-new/mpi-asm-defs.h |    8 +
+ cipher/mpi-new/mpi-inline.h   |  161 ++++
+ cipher/mpi-new/mpi-internal.h |  305 ++++++++
+ cipher/poly1305-new.c         |  749 ++++++++++++++++++
+ cipher/poly1305.c             |    7 +
+ configure.ac                  |   24 +
+ mpi/longlong.h                |    2 -
+ 12 files changed, 3281 insertions(+), 3 deletions(-)
+ create mode 100644 cipher/chacha20-new.c
+ create mode 100644 cipher/chacha20-ppc.c
+ create mode 100644 cipher/mpi-new/mpi-asm-defs.h
+ create mode 100644 cipher/mpi-new/mpi-inline.h
+ create mode 100644 cipher/mpi-new/mpi-internal.h
+ create mode 100644 cipher/poly1305-new.c
+
+diff --git a/cipher/Makefile.am b/cipher/Makefile.am
+index 7a777ef2..86ae09fa 100644
+--- a/cipher/Makefile.am
++++ b/cipher/Makefile.am
+@@ -65,6 +65,7 @@ blowfish.c blowfish-amd64.S blowfish-arm.S \
+ cast5.c cast5-amd64.S cast5-arm.S \
+ chacha20.c chacha20-sse2-amd64.S chacha20-ssse3-amd64.S chacha20-avx2-amd64.S \
+   chacha20-armv7-neon.S \
++  chacha20-new.c chacha20-ppc.c \
+ cipher-gcm-ppc.c cipher-gcm-intel-pclmul.c \
+   cipher-gcm-armv8-aarch32-ce.S cipher-gcm-armv8-aarch64-ce.S \
+ crc.c \
+@@ -80,6 +81,7 @@ gostr3411-94.c \
+ md4.c \
+ md5.c \
+ poly1305-sse2-amd64.S poly1305-avx2-amd64.S poly1305-armv7-neon.S \
++  poly1305-new.c \
+ rijndael.c rijndael-internal.h rijndael-tables.h rijndael-aesni.c \
+   rijndael-padlock.c rijndael-amd64.S rijndael-arm.S \
+   rijndael-ssse3-amd64.c rijndael-ssse3-amd64-asm.S \
+@@ -172,3 +174,23 @@ cipher-gcm-ppc.o: $(srcdir)/cipher-gcm-ppc.c Makefile
+ 
+ cipher-gcm-ppc.lo: $(srcdir)/cipher-gcm-ppc.c Makefile
+ 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++
++# Build the backported poly1305/chacha20 files with the PPC vector-crypto flags.
++poly1305-new.o: $(srcdir)/poly1305-new.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++poly1305-new.lo: $(srcdir)/poly1305-new.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++chacha20-ppc.o: $(srcdir)/chacha20-ppc.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++chacha20-ppc.lo: $(srcdir)/chacha20-ppc.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++chacha20-new.o: $(srcdir)/chacha20-new.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++chacha20-new.lo: $(srcdir)/chacha20-new.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
+diff --git a/cipher/chacha20-new.c b/cipher/chacha20-new.c
+new file mode 100644
+index 00000000..347d9726
+--- /dev/null
++++ b/cipher/chacha20-new.c
+@@ -0,0 +1,1344 @@
++/* chacha20.c  -  Bernstein's ChaCha20 cipher
++ * Copyright (C) 2014,2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ *
++ * For a description of the algorithm, see:
++ *   http://cr.yp.to/chacha.html
++ */
++
++/*
++ * Based on D. J. Bernstein reference implementation at
++ * http://cr.yp.to/chacha.html:
++ *
++ * chacha-regs.c version 20080118
++ * D. J. Bernstein
++ * Public domain.
++ */
++
++#include <config.h>
++
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    defined(USE_CHACHA20) && \
++    __GNUC__ >= 4
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include "types.h"
++#include "g10lib.h"
++#include "cipher.h"
++#include "cipher-internal.h"
++#include "bufhelp.h"
++
++
++/* A structure with function pointers for bulk operations.  The cipher
++   algorithm setkey function initializes them when bulk operations are
++   available and the actual encryption routines use them if they are
++   not NULL.  */
++/* Copied from cipher-internal.h.  */
++typedef struct cipher_bulk_ops
++{
++  void (*cfb_enc)(void *context, unsigned char *iv, void *outbuf_arg,
++		  const void *inbuf_arg, size_t nblocks);
++  void (*cfb_dec)(void *context, unsigned char *iv, void *outbuf_arg,
++		  const void *inbuf_arg, size_t nblocks);
++  void (*cbc_enc)(void *context, unsigned char *iv, void *outbuf_arg,
++		  const void *inbuf_arg, size_t nblocks, int cbc_mac);
++  void (*cbc_dec)(void *context, unsigned char *iv, void *outbuf_arg,
++		  const void *inbuf_arg, size_t nblocks);
++  void (*ofb_enc)(void *context, unsigned char *iv, void *outbuf_arg,
++		  const void *inbuf_arg, size_t nblocks);
++  void (*ctr_enc)(void *context, unsigned char *iv, void *outbuf_arg,
++		  const void *inbuf_arg, size_t nblocks);
++  size_t (*ocb_crypt)(gcry_cipher_hd_t c, void *outbuf_arg,
++		      const void *inbuf_arg, size_t nblocks, int encrypt);
++  size_t (*ocb_auth)(gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks);
++  void (*xts_crypt)(void *context, unsigned char *tweak, void *outbuf_arg,
++		    const void *inbuf_arg, size_t nblocks, int encrypt);
++  size_t (*gcm_crypt)(gcry_cipher_hd_t c, void *outbuf_arg,
++		      const void *inbuf_arg, size_t nblocks, int encrypt);
++} cipher_bulk_ops_t;
++
++
++#define CHACHA20_MIN_KEY_SIZE 16        /* Bytes.  */
++#define CHACHA20_MAX_KEY_SIZE 32        /* Bytes.  */
++#define CHACHA20_BLOCK_SIZE   64        /* Bytes.  */
++#define CHACHA20_MIN_IV_SIZE   8        /* Bytes.  */
++#define CHACHA20_MAX_IV_SIZE  12        /* Bytes.  */
++#define CHACHA20_CTR_SIZE     16        /* Bytes.  */
++
++
++/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
++#undef USE_SSSE3
++#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
++   (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
++    defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
++# define USE_SSSE3 1
++#endif
++
++/* USE_AVX2 indicates whether to compile with Intel AVX2 code. */
++#undef USE_AVX2
++#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \
++    (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
++     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
++# define USE_AVX2 1
++#endif
++
++/* USE_ARMV7_NEON indicates whether to enable ARMv7 NEON assembly code. */
++#undef USE_ARMV7_NEON
++#ifdef ENABLE_NEON_SUPPORT
++# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
++     && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
++     && defined(HAVE_GCC_INLINE_ASM_NEON)
++#  define USE_ARMV7_NEON 1
++# endif
++#endif
++
++/* USE_AARCH64_SIMD indicates whether to enable ARMv8 SIMD assembly
++ * code. */
++#undef USE_AARCH64_SIMD
++#ifdef ENABLE_NEON_SUPPORT
++# if defined(__AARCH64EL__) \
++       && defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) \
++       && defined(HAVE_GCC_INLINE_ASM_AARCH64_NEON)
++#  define USE_AARCH64_SIMD 1
++# endif
++#endif
++
++/* USE_PPC_VEC indicates whether to enable PowerPC vector
++ * accelerated code. */
++#undef USE_PPC_VEC
++#ifdef ENABLE_PPC_CRYPTO_SUPPORT
++# if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++     defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
++#  if __GNUC__ >= 4
++#   define USE_PPC_VEC 1
++#  endif
++# endif
++#endif
++
++/* USE_S390X_VX indicates whether to enable zSeries code. */
++#undef USE_S390X_VX
++#if defined (__s390x__) && __GNUC__ >= 4 && __ARCH__ >= 9
++# if defined(HAVE_GCC_INLINE_ASM_S390X_VX)
++#  define USE_S390X_VX 1
++# endif /* USE_S390X_VX */
++#endif
++
++/* Assembly implementations use SystemV ABI, ABI conversion and additional
++ * stack to store XMM6-XMM15 needed on Win64. */
++#undef ASM_FUNC_ABI
++#undef ASM_EXTRA_STACK
++#if defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)
++# define ASM_FUNC_ABI __attribute__((sysv_abi))
++#else
++# define ASM_FUNC_ABI
++#endif
++
++
++typedef struct CHACHA20_context_s
++{
++  u32 input[16];
++  unsigned char pad[CHACHA20_BLOCK_SIZE];
++  unsigned int unused; /* bytes in the pad.  */
++  unsigned int use_ssse3:1;
++  unsigned int use_avx2:1;
++  unsigned int use_neon:1;
++  unsigned int use_ppc:1;
++  unsigned int use_s390x:1;
++} CHACHA20_context_t;
++
++
++#ifdef USE_SSSE3
++
++unsigned int _gcry_chacha20_amd64_ssse3_blocks4(u32 *state, byte *dst,
++						const byte *src,
++						size_t nblks) ASM_FUNC_ABI;
++
++unsigned int _gcry_chacha20_amd64_ssse3_blocks1(u32 *state, byte *dst,
++						const byte *src,
++						size_t nblks) ASM_FUNC_ABI;
++
++unsigned int _gcry_chacha20_poly1305_amd64_ssse3_blocks4(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		void *poly1305_state, const byte *poly1305_src) ASM_FUNC_ABI;
++
++unsigned int _gcry_chacha20_poly1305_amd64_ssse3_blocks1(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		void *poly1305_state, const byte *poly1305_src) ASM_FUNC_ABI;
++
++#endif /* USE_SSSE3 */
++
++#ifdef USE_AVX2
++
++unsigned int _gcry_chacha20_amd64_avx2_blocks8(u32 *state, byte *dst,
++					       const byte *src,
++					       size_t nblks) ASM_FUNC_ABI;
++
++unsigned int _gcry_chacha20_poly1305_amd64_avx2_blocks8(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		void *poly1305_state, const byte *poly1305_src) ASM_FUNC_ABI;
++
++#endif /* USE_AVX2 */
++
++#ifdef USE_PPC_VEC
++
++unsigned int _gcry_chacha20_ppc8_blocks4(u32 *state, byte *dst,
++					 const byte *src,
++					 size_t nblks);
++
++unsigned int _gcry_chacha20_ppc8_blocks1(u32 *state, byte *dst,
++					 const byte *src,
++					 size_t nblks);
++
++#undef USE_PPC_VEC_POLY1305
++#if SIZEOF_UNSIGNED_LONG == 8
++#define USE_PPC_VEC_POLY1305 1
++unsigned int _gcry_chacha20_poly1305_ppc8_blocks4(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		POLY1305_STATE *st, const byte *poly1305_src);
++#endif /* SIZEOF_UNSIGNED_LONG == 8 */
++
++#endif /* USE_PPC_VEC */
++
++#ifdef USE_S390X_VX
++
++unsigned int _gcry_chacha20_s390x_vx_blocks8(u32 *state, byte *dst,
++					     const byte *src, size_t nblks);
++
++unsigned int _gcry_chacha20_s390x_vx_blocks4_2_1(u32 *state, byte *dst,
++						 const byte *src, size_t nblks);
++
++#undef USE_S390X_VX_POLY1305
++#if SIZEOF_UNSIGNED_LONG == 8
++#define USE_S390X_VX_POLY1305 1
++unsigned int _gcry_chacha20_poly1305_s390x_vx_blocks8(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		POLY1305_STATE *st, const byte *poly1305_src);
++
++unsigned int _gcry_chacha20_poly1305_s390x_vx_blocks4_2_1(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		POLY1305_STATE *st, const byte *poly1305_src);
++#endif /* SIZEOF_UNSIGNED_LONG == 8 */
++
++#endif /* USE_S390X_VX */
++
++#ifdef USE_ARMV7_NEON
++
++unsigned int _gcry_chacha20_armv7_neon_blocks4(u32 *state, byte *dst,
++					       const byte *src,
++					       size_t nblks);
++
++#endif /* USE_ARMV7_NEON */
++
++#ifdef USE_AARCH64_SIMD
++
++unsigned int _gcry_chacha20_aarch64_blocks4(u32 *state, byte *dst,
++					    const byte *src, size_t nblks);
++
++unsigned int _gcry_chacha20_poly1305_aarch64_blocks4(
++		u32 *state, byte *dst, const byte *src, size_t nblks,
++		void *poly1305_state, const byte *poly1305_src);
++
++#endif /* USE_AARCH64_SIMD */
++
++
++static const char *selftest (void);
++
++
++#define ROTATE(v,c)	(rol(v,c))
++#define XOR(v,w)	((v) ^ (w))
++#define PLUS(v,w)	((u32)((v) + (w)))
++#define PLUSONE(v)	(PLUS((v),1))
++
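++/* One ChaCha20 quarter round: add, xor, rotate by 16, 12, 8 and 7 bits.  */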
++#define QUARTERROUND(a,b,c,d) \
++  a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \
++  c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \
++  a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \
++  c = PLUS(c,d); b = ROTATE(XOR(b,c), 7);
++
++#define BUF_XOR_LE32(dst, src, offset, x) \
++  buf_put_le32((dst) + (offset), buf_get_le32((src) + (offset)) ^ (x))
++
++static unsigned int
++do_chacha20_blocks (u32 *input, byte *dst, const byte *src, size_t nblks)
++{
++  u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
++  unsigned int i;
++
++  while (nblks)
++    {
++      x0 = input[0];
++      x1 = input[1];
++      x2 = input[2];
++      x3 = input[3];
++      x4 = input[4];
++      x5 = input[5];
++      x6 = input[6];
++      x7 = input[7];
++      x8 = input[8];
++      x9 = input[9];
++      x10 = input[10];
++      x11 = input[11];
++      x12 = input[12];
++      x13 = input[13];
++      x14 = input[14];
++      x15 = input[15];
++
++      for (i = 20; i > 0; i -= 2)
++	{
++	  QUARTERROUND(x0, x4,  x8, x12)
++	  QUARTERROUND(x1, x5,  x9, x13)
++	  QUARTERROUND(x2, x6, x10, x14)
++	  QUARTERROUND(x3, x7, x11, x15)
++	  QUARTERROUND(x0, x5, x10, x15)
++	  QUARTERROUND(x1, x6, x11, x12)
++	  QUARTERROUND(x2, x7,  x8, x13)
++	  QUARTERROUND(x3, x4,  x9, x14)
++	}
++
++      x0 = PLUS(x0, input[0]);
++      x1 = PLUS(x1, input[1]);
++      x2 = PLUS(x2, input[2]);
++      x3 = PLUS(x3, input[3]);
++      x4 = PLUS(x4, input[4]);
++      x5 = PLUS(x5, input[5]);
++      x6 = PLUS(x6, input[6]);
++      x7 = PLUS(x7, input[7]);
++      x8 = PLUS(x8, input[8]);
++      x9 = PLUS(x9, input[9]);
++      x10 = PLUS(x10, input[10]);
++      x11 = PLUS(x11, input[11]);
++      x12 = PLUS(x12, input[12]);
++      x13 = PLUS(x13, input[13]);
++      x14 = PLUS(x14, input[14]);
++      x15 = PLUS(x15, input[15]);
++
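++      /* Advance the block counter: increment word 12 and, if it wrapped to
++       * zero, carry into word 13.  */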
++      input[12] = PLUSONE(input[12]);
++      input[13] = PLUS(input[13], !input[12]);
++
++      BUF_XOR_LE32(dst, src, 0, x0);
++      BUF_XOR_LE32(dst, src, 4, x1);
++      BUF_XOR_LE32(dst, src, 8, x2);
++      BUF_XOR_LE32(dst, src, 12, x3);
++      BUF_XOR_LE32(dst, src, 16, x4);
++      BUF_XOR_LE32(dst, src, 20, x5);
++      BUF_XOR_LE32(dst, src, 24, x6);
++      BUF_XOR_LE32(dst, src, 28, x7);
++      BUF_XOR_LE32(dst, src, 32, x8);
++      BUF_XOR_LE32(dst, src, 36, x9);
++      BUF_XOR_LE32(dst, src, 40, x10);
++      BUF_XOR_LE32(dst, src, 44, x11);
++      BUF_XOR_LE32(dst, src, 48, x12);
++      BUF_XOR_LE32(dst, src, 52, x13);
++      BUF_XOR_LE32(dst, src, 56, x14);
++      BUF_XOR_LE32(dst, src, 60, x15);
++
++      src += CHACHA20_BLOCK_SIZE;
++      dst += CHACHA20_BLOCK_SIZE;
++      nblks--;
++    }
++
++  /* burn_stack */
++  return (17 * sizeof(u32) + 6 * sizeof(void *));
++}
++
++
++static unsigned int
++chacha20_blocks (CHACHA20_context_t *ctx, byte *dst, const byte *src,
++		 size_t nblks)
++{
++#ifdef USE_SSSE3
++  if (ctx->use_ssse3)
++    {
++      return _gcry_chacha20_amd64_ssse3_blocks1(ctx->input, dst, src, nblks);
++    }
++#endif
++
++#ifdef USE_PPC_VEC
++  if (ctx->use_ppc)
++    {
++      return _gcry_chacha20_ppc8_blocks1(ctx->input, dst, src, nblks);
++    }
++#endif
++
++#ifdef USE_S390X_VX
++  if (ctx->use_s390x)
++    {
++      return _gcry_chacha20_s390x_vx_blocks4_2_1(ctx->input, dst, src, nblks);
++    }
++#endif
++
++  return do_chacha20_blocks (ctx->input, dst, src, nblks);
++}
++
++
++static void
++chacha20_keysetup (CHACHA20_context_t *ctx, const byte *key,
++                   unsigned int keylen)
++{
++  static const char sigma[16] = "expand 32-byte k";
++  static const char tau[16] = "expand 16-byte k";
++  const char *constants;
++
++  ctx->input[4] = buf_get_le32(key + 0);
++  ctx->input[5] = buf_get_le32(key + 4);
++  ctx->input[6] = buf_get_le32(key + 8);
++  ctx->input[7] = buf_get_le32(key + 12);
++  if (keylen == CHACHA20_MAX_KEY_SIZE) /* 256 bits */
++    {
++      key += 16;
++      constants = sigma;
++    }
++  else /* 128 bits */
++    {
++      constants = tau;
++    }
++  ctx->input[8] = buf_get_le32(key + 0);
++  ctx->input[9] = buf_get_le32(key + 4);
++  ctx->input[10] = buf_get_le32(key + 8);
++  ctx->input[11] = buf_get_le32(key + 12);
++  ctx->input[0] = buf_get_le32(constants + 0);
++  ctx->input[1] = buf_get_le32(constants + 4);
++  ctx->input[2] = buf_get_le32(constants + 8);
++  ctx->input[3] = buf_get_le32(constants + 12);
++}
++
++
++static void
++chacha20_ivsetup (CHACHA20_context_t * ctx, const byte *iv, size_t ivlen)
++{
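++  /* 16-byte IV: caller supplies the initial counter and nonce words;
++   * 12-byte IV: 96-bit nonce with a 32-bit counter starting at zero;
++   * 8-byte IV: 64-bit nonce with a 64-bit counter starting at zero.  */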
++  if (ivlen == CHACHA20_CTR_SIZE)
++    {
++      ctx->input[12] = buf_get_le32 (iv + 0);
++      ctx->input[13] = buf_get_le32 (iv + 4);
++      ctx->input[14] = buf_get_le32 (iv + 8);
++      ctx->input[15] = buf_get_le32 (iv + 12);
++    }
++  else if (ivlen == CHACHA20_MAX_IV_SIZE)
++    {
++      ctx->input[12] = 0;
++      ctx->input[13] = buf_get_le32 (iv + 0);
++      ctx->input[14] = buf_get_le32 (iv + 4);
++      ctx->input[15] = buf_get_le32 (iv + 8);
++    }
++  else if (ivlen == CHACHA20_MIN_IV_SIZE)
++    {
++      ctx->input[12] = 0;
++      ctx->input[13] = 0;
++      ctx->input[14] = buf_get_le32 (iv + 0);
++      ctx->input[15] = buf_get_le32 (iv + 4);
++    }
++  else
++    {
++      ctx->input[12] = 0;
++      ctx->input[13] = 0;
++      ctx->input[14] = 0;
++      ctx->input[15] = 0;
++    }
++}
++
++
++static void
++chacha20_setiv (void *context, const byte *iv, size_t ivlen)
++{
++  CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
++
++  /* draft-nir-cfrg-chacha20-poly1305-02 defines 96-bit and 64-bit nonce. */
++  if (iv && ivlen != CHACHA20_MAX_IV_SIZE && ivlen != CHACHA20_MIN_IV_SIZE
++      && ivlen != CHACHA20_CTR_SIZE)
++    log_info ("WARNING: chacha20_setiv: bad ivlen=%u\n", (u32) ivlen);
++
++  if (iv && (ivlen == CHACHA20_MAX_IV_SIZE || ivlen == CHACHA20_MIN_IV_SIZE
++             || ivlen == CHACHA20_CTR_SIZE))
++    chacha20_ivsetup (ctx, iv, ivlen);
++  else
++    chacha20_ivsetup (ctx, NULL, 0);
++
++  /* Reset the unused pad bytes counter.  */
++  ctx->unused = 0;
++}
++
++
++static gcry_err_code_t
++chacha20_do_setkey (CHACHA20_context_t *ctx,
++                    const byte *key, unsigned int keylen)
++{
++  static int initialized;
++  static const char *selftest_failed;
++  unsigned int features = _gcry_get_hw_features ();
++
++  if (!initialized)
++    {
++      initialized = 1;
++      selftest_failed = selftest ();
++      if (selftest_failed)
++        log_error ("CHACHA20 selftest failed (%s)\n", selftest_failed);
++    }
++  if (selftest_failed)
++    return GPG_ERR_SELFTEST_FAILED;
++
++  if (keylen != CHACHA20_MAX_KEY_SIZE && keylen != CHACHA20_MIN_KEY_SIZE)
++    return GPG_ERR_INV_KEYLEN;
++
++#ifdef USE_SSSE3
++  ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
++#endif
++#ifdef USE_AVX2
++  ctx->use_avx2 = (features & HWF_INTEL_AVX2) != 0;
++#endif
++#ifdef USE_ARMV7_NEON
++  ctx->use_neon = (features & HWF_ARM_NEON) != 0;
++#endif
++#ifdef USE_AARCH64_SIMD
++  ctx->use_neon = (features & HWF_ARM_NEON) != 0;
++#endif
++#ifdef USE_PPC_VEC
++  ctx->use_ppc = (features & HWF_PPC_ARCH_2_07) != 0;
++#endif
++#ifdef USE_S390X_VX
++  ctx->use_s390x = (features & HWF_S390X_VX) != 0;
++#endif
++
++  (void)features;
++
++  chacha20_keysetup (ctx, key, keylen);
++
++  /* We default to a zero nonce.  */
++  chacha20_setiv (ctx, NULL, 0);
++
++  return 0;
++}
++
++
++static gcry_err_code_t
++chacha20_setkey (void *context, const byte *key, unsigned int keylen,
++                 cipher_bulk_ops_t *bulk_ops)
++{
++  CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
++  gcry_err_code_t rc = chacha20_do_setkey (ctx, key, keylen);
++  (void)bulk_ops;
++  _gcry_burn_stack (4 + sizeof (void *) + 4 * sizeof (void *));
++  return rc;
++}
++
++
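++/* Encrypt from a block boundary: run the widest available bulk
++ * implementation first, then whole blocks, and keep the keystream of a
++ * final partial block in ctx->pad for the next call.  */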
++static unsigned int
++do_chacha20_encrypt_stream_tail (CHACHA20_context_t *ctx, byte *outbuf,
++				 const byte *inbuf, size_t length)
++{
++  static const unsigned char zero_pad[CHACHA20_BLOCK_SIZE] = { 0, };
++  unsigned int nburn, burn = 0;
++
++#ifdef USE_AVX2
++  if (ctx->use_avx2 && length >= CHACHA20_BLOCK_SIZE * 8)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 8;
++      nburn = _gcry_chacha20_amd64_avx2_blocks8(ctx->input, outbuf, inbuf,
++						nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_SSSE3
++  if (ctx->use_ssse3 && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 4;
++      nburn = _gcry_chacha20_amd64_ssse3_blocks4(ctx->input, outbuf, inbuf,
++						 nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_ARMV7_NEON
++  if (ctx->use_neon && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 4;
++      nburn = _gcry_chacha20_armv7_neon_blocks4(ctx->input, outbuf, inbuf,
++						nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_AARCH64_SIMD
++  if (ctx->use_neon && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 4;
++      nburn = _gcry_chacha20_aarch64_blocks4(ctx->input, outbuf, inbuf,
++					     nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_PPC_VEC
++  if (ctx->use_ppc && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 4;
++      nburn = _gcry_chacha20_ppc8_blocks4(ctx->input, outbuf, inbuf, nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_S390X_VX
++  if (ctx->use_s390x && length >= CHACHA20_BLOCK_SIZE * 8)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 8;
++      nburn = _gcry_chacha20_s390x_vx_blocks8(ctx->input, outbuf, inbuf,
++					      nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++  if (length >= CHACHA20_BLOCK_SIZE)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nburn = chacha20_blocks(ctx, outbuf, inbuf, nblocks);
++      burn = nburn > burn ? nburn : burn;
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++
++  if (length > 0)
++    {
++      nburn = chacha20_blocks(ctx, ctx->pad, zero_pad, 1);
++      burn = nburn > burn ? nburn : burn;
++
++      buf_xor (outbuf, inbuf, ctx->pad, length);
++      ctx->unused = CHACHA20_BLOCK_SIZE - length;
++    }
++
++  if (burn)
++    burn += 5 * sizeof(void *);
++
++  return burn;
++}
++
++
++static void
++chacha20_encrypt_stream (void *context, byte *outbuf, const byte *inbuf,
++                         size_t length)
++{
++  CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
++  unsigned int nburn, burn = 0;
++
++  if (!length)
++    return;
++
++  if (ctx->unused)
++    {
++      unsigned char *p = ctx->pad;
++      size_t n;
++
++      gcry_assert (ctx->unused < CHACHA20_BLOCK_SIZE);
++
++      n = ctx->unused;
++      if (n > length)
++        n = length;
++
++      buf_xor (outbuf, inbuf, p + CHACHA20_BLOCK_SIZE - ctx->unused, n);
++      length -= n;
++      outbuf += n;
++      inbuf += n;
++      ctx->unused -= n;
++
++      if (!length)
++        return;
++      gcry_assert (!ctx->unused);
++    }
++
++  nburn = do_chacha20_encrypt_stream_tail (ctx, outbuf, inbuf, length);
++  burn = nburn > burn ? nburn : burn;
++
++  if (burn)
++    _gcry_burn_stack (burn);
++}
++
++
++gcry_err_code_t
++_gcry_chacha20_poly1305_encrypt(gcry_cipher_hd_t c, byte *outbuf,
++				const byte *inbuf, size_t length)
++{
++  CHACHA20_context_t *ctx = (void *) &c->context.c;
++  unsigned int nburn, burn = 0;
++  byte *authptr = NULL;
++
++  if (!length)
++    return 0;
++
++  if (ctx->unused)
++    {
++      unsigned char *p = ctx->pad;
++      size_t n;
++
++      gcry_assert (ctx->unused < CHACHA20_BLOCK_SIZE);
++
++      n = ctx->unused;
++      if (n > length)
++        n = length;
++
++      buf_xor (outbuf, inbuf, p + CHACHA20_BLOCK_SIZE - ctx->unused, n);
++      nburn = _gcry_poly1305_update_burn (&c->u_mode.poly1305.ctx, outbuf, n);
++      burn = nburn > burn ? nburn : burn;
++      length -= n;
++      outbuf += n;
++      inbuf += n;
++      ctx->unused -= n;
++
++      if (!length)
++	{
++	  if (burn)
++	    _gcry_burn_stack (burn);
++
++	  return 0;
++	}
++      gcry_assert (!ctx->unused);
++    }
++
++  gcry_assert (c->u_mode.poly1305.ctx.leftover == 0);
++
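++  /* Stitching: first encrypt one chunk without authenticating it, so that
++   * the combined ChaCha20/Poly1305 loops below can authenticate ciphertext
++   * that trails the encryption by that chunk (tracked via authptr).  Any
++   * remaining lag is caught up with _gcry_poly1305_update() afterwards.  */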
++  if (0)
++    { }
++#ifdef USE_AVX2
++  else if (ctx->use_avx2 && length >= CHACHA20_BLOCK_SIZE * 8)
++    {
++      nburn = _gcry_chacha20_amd64_avx2_blocks8(ctx->input, outbuf, inbuf, 8);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 8 * CHACHA20_BLOCK_SIZE;
++      outbuf += 8 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 8 * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++#ifdef USE_SSSE3
++  else if (ctx->use_ssse3 && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      nburn = _gcry_chacha20_amd64_ssse3_blocks4(ctx->input, outbuf, inbuf, 4);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 4 * CHACHA20_BLOCK_SIZE;
++      outbuf += 4 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 4 * CHACHA20_BLOCK_SIZE;
++    }
++  else if (ctx->use_ssse3 && length >= CHACHA20_BLOCK_SIZE * 2)
++    {
++      nburn = _gcry_chacha20_amd64_ssse3_blocks1(ctx->input, outbuf, inbuf, 2);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 2 * CHACHA20_BLOCK_SIZE;
++      outbuf += 2 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 2 * CHACHA20_BLOCK_SIZE;
++    }
++  else if (ctx->use_ssse3 && length >= CHACHA20_BLOCK_SIZE)
++    {
++      nburn = _gcry_chacha20_amd64_ssse3_blocks1(ctx->input, outbuf, inbuf, 1);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 1 * CHACHA20_BLOCK_SIZE;
++      outbuf += 1 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 1 * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++#ifdef USE_AARCH64_SIMD
++  else if (ctx->use_neon && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      nburn = _gcry_chacha20_aarch64_blocks4(ctx->input, outbuf, inbuf, 4);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 4 * CHACHA20_BLOCK_SIZE;
++      outbuf += 4 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 4 * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++#ifdef USE_PPC_VEC_POLY1305
++  else if (ctx->use_ppc && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      nburn = _gcry_chacha20_ppc8_blocks4(ctx->input, outbuf, inbuf, 4);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 4 * CHACHA20_BLOCK_SIZE;
++      outbuf += 4 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 4 * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++#ifdef USE_S390X_VX_POLY1305
++  else if (ctx->use_s390x && length >= 2 * CHACHA20_BLOCK_SIZE * 8)
++    {
++      nburn = _gcry_chacha20_s390x_vx_blocks8(ctx->input, outbuf, inbuf, 8);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 8 * CHACHA20_BLOCK_SIZE;
++      outbuf += 8 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 8 * CHACHA20_BLOCK_SIZE;
++    }
++  else if (ctx->use_s390x && length >= CHACHA20_BLOCK_SIZE * 4)
++    {
++      nburn = _gcry_chacha20_s390x_vx_blocks4_2_1(ctx->input, outbuf, inbuf, 4);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 4 * CHACHA20_BLOCK_SIZE;
++      outbuf += 4 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 4 * CHACHA20_BLOCK_SIZE;
++    }
++  else if (ctx->use_s390x && length >= CHACHA20_BLOCK_SIZE * 2)
++    {
++      nburn = _gcry_chacha20_s390x_vx_blocks4_2_1(ctx->input, outbuf, inbuf, 2);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 2 * CHACHA20_BLOCK_SIZE;
++      outbuf += 2 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 2 * CHACHA20_BLOCK_SIZE;
++    }
++  else if (ctx->use_s390x && length >= CHACHA20_BLOCK_SIZE)
++    {
++      nburn = _gcry_chacha20_s390x_vx_blocks4_2_1(ctx->input, outbuf, inbuf, 1);
++      burn = nburn > burn ? nburn : burn;
++
++      authptr = outbuf;
++      length -= 1 * CHACHA20_BLOCK_SIZE;
++      outbuf += 1 * CHACHA20_BLOCK_SIZE;
++      inbuf  += 1 * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++  if (authptr)
++    {
++      size_t authoffset = outbuf - authptr;
++
++#ifdef USE_AVX2
++      if (ctx->use_avx2 &&
++	  length >= 8 * CHACHA20_BLOCK_SIZE &&
++	  authoffset >= 8 * CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	  nblocks -= nblocks % 8;
++
++	  nburn = _gcry_chacha20_poly1305_amd64_avx2_blocks8(
++		      ctx->input, outbuf, inbuf, nblocks,
++		      &c->u_mode.poly1305.ctx.state, authptr);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	  authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++#endif
++
++#ifdef USE_SSSE3
++      if (ctx->use_ssse3)
++	{
++	  if (length >= 4 * CHACHA20_BLOCK_SIZE &&
++	      authoffset >= 4 * CHACHA20_BLOCK_SIZE)
++	    {
++	      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	      nblocks -= nblocks % 4;
++
++	      nburn = _gcry_chacha20_poly1305_amd64_ssse3_blocks4(
++			  ctx->input, outbuf, inbuf, nblocks,
++			  &c->u_mode.poly1305.ctx.state, authptr);
++	      burn = nburn > burn ? nburn : burn;
++
++	      length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	      outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	      inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	      authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	    }
++
++	  if (length >= CHACHA20_BLOCK_SIZE &&
++	      authoffset >= CHACHA20_BLOCK_SIZE)
++	    {
++	      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++
++	      nburn = _gcry_chacha20_poly1305_amd64_ssse3_blocks1(
++			  ctx->input, outbuf, inbuf, nblocks,
++			  &c->u_mode.poly1305.ctx.state, authptr);
++	      burn = nburn > burn ? nburn : burn;
++
++	      length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	      outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	      inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	      authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	    }
++	}
++#endif
++
++#ifdef USE_AARCH64_SIMD
++      if (ctx->use_neon &&
++	  length >= 4 * CHACHA20_BLOCK_SIZE &&
++	  authoffset >= 4 * CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	  nblocks -= nblocks % 4;
++
++	  nburn = _gcry_chacha20_poly1305_aarch64_blocks4(
++		      ctx->input, outbuf, inbuf, nblocks,
++		      &c->u_mode.poly1305.ctx.state, authptr);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	  authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++#endif
++
++#ifdef USE_PPC_VEC_POLY1305
++      if (ctx->use_ppc &&
++	  length >= 4 * CHACHA20_BLOCK_SIZE &&
++	  authoffset >= 4 * CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	  nblocks -= nblocks % 4;
++
++	  nburn = _gcry_chacha20_poly1305_ppc8_blocks4(
++		      ctx->input, outbuf, inbuf, nblocks,
++		      &c->u_mode.poly1305.ctx.state, authptr);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	  authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++#endif
++
++#ifdef USE_S390X_VX_POLY1305
++      if (ctx->use_s390x)
++	{
++	  if (length >= 8 * CHACHA20_BLOCK_SIZE &&
++	      authoffset >= 8 * CHACHA20_BLOCK_SIZE)
++	    {
++	      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	      nblocks -= nblocks % 8;
++
++	      nburn = _gcry_chacha20_poly1305_s390x_vx_blocks8(
++			  ctx->input, outbuf, inbuf, nblocks,
++			  &c->u_mode.poly1305.ctx.state, authptr);
++	      burn = nburn > burn ? nburn : burn;
++
++	      length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	      outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	      inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	      authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	    }
++
++	  if (length >= CHACHA20_BLOCK_SIZE &&
++	      authoffset >= CHACHA20_BLOCK_SIZE)
++	    {
++	      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++
++	      nburn = _gcry_chacha20_poly1305_s390x_vx_blocks4_2_1(
++			  ctx->input, outbuf, inbuf, nblocks,
++			  &c->u_mode.poly1305.ctx.state, authptr);
++	      burn = nburn > burn ? nburn : burn;
++
++	      length  -= nblocks * CHACHA20_BLOCK_SIZE;
++	      outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	      inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
++	      authptr += nblocks * CHACHA20_BLOCK_SIZE;
++	    }
++	}
++#endif
++
++      if (authoffset > 0)
++	{
++	  _gcry_poly1305_update (&c->u_mode.poly1305.ctx, authptr, authoffset);
++	  authptr += authoffset;
++	  authoffset = 0;
++	}
++
++      gcry_assert(authptr == outbuf);
++    }
++
++  while (length)
++    {
++      size_t currlen = length;
++
++      /* Since checksumming is done after encryption, process input in 24KiB
++       * chunks to keep data loaded in L1 cache for checksumming. */
++      if (currlen > 24 * 1024)
++	currlen = 24 * 1024;
++
++      nburn = do_chacha20_encrypt_stream_tail (ctx, outbuf, inbuf, currlen);
++      burn = nburn > burn ? nburn : burn;
++
++      nburn = _gcry_poly1305_update_burn (&c->u_mode.poly1305.ctx, outbuf,
++					  currlen);
++      burn = nburn > burn ? nburn : burn;
++
++      outbuf += currlen;
++      inbuf += currlen;
++      length -= currlen;
++    }
++
++  if (burn)
++    _gcry_burn_stack (burn);
++
++  return 0;
++}
++
++
++gcry_err_code_t
++_gcry_chacha20_poly1305_decrypt(gcry_cipher_hd_t c, byte *outbuf,
++				const byte *inbuf, size_t length)
++{
++  CHACHA20_context_t *ctx = (void *) &c->context.c;
++  unsigned int nburn, burn = 0;
++
++  if (!length)
++    return 0;
++
++  if (ctx->unused)
++    {
++      unsigned char *p = ctx->pad;
++      size_t n;
++
++      gcry_assert (ctx->unused < CHACHA20_BLOCK_SIZE);
++
++      n = ctx->unused;
++      if (n > length)
++        n = length;
++
++      nburn = _gcry_poly1305_update_burn (&c->u_mode.poly1305.ctx, inbuf, n);
++      burn = nburn > burn ? nburn : burn;
++      buf_xor (outbuf, inbuf, p + CHACHA20_BLOCK_SIZE - ctx->unused, n);
++      length -= n;
++      outbuf += n;
++      inbuf += n;
++      ctx->unused -= n;
++
++      if (!length)
++	{
++	  if (burn)
++	    _gcry_burn_stack (burn);
++
++	  return 0;
++	}
++      gcry_assert (!ctx->unused);
++    }
++
++  gcry_assert (c->u_mode.poly1305.ctx.leftover == 0);
++
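++  /* For decryption the MAC is computed over the ciphertext input, so the
++   * stitched implementations can authenticate and decrypt the same chunk
++   * in one pass.  */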
++#ifdef USE_AVX2
++  if (ctx->use_avx2 && length >= 8 * CHACHA20_BLOCK_SIZE)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 8;
++
++      nburn = _gcry_chacha20_poly1305_amd64_avx2_blocks8(
++			ctx->input, outbuf, inbuf, nblocks,
++			&c->u_mode.poly1305.ctx.state, inbuf);
++      burn = nburn > burn ? nburn : burn;
++
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_SSSE3
++  if (ctx->use_ssse3)
++    {
++      if (length >= 4 * CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	  nblocks -= nblocks % 4;
++
++	  nburn = _gcry_chacha20_poly1305_amd64_ssse3_blocks4(
++			    ctx->input, outbuf, inbuf, nblocks,
++			    &c->u_mode.poly1305.ctx.state, inbuf);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++
++      if (length >= CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++
++	  nburn = _gcry_chacha20_poly1305_amd64_ssse3_blocks1(
++			    ctx->input, outbuf, inbuf, nblocks,
++			    &c->u_mode.poly1305.ctx.state, inbuf);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++    }
++#endif
++
++#ifdef USE_AARCH64_SIMD
++  if (ctx->use_neon && length >= 4 * CHACHA20_BLOCK_SIZE)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 4;
++
++      nburn = _gcry_chacha20_poly1305_aarch64_blocks4(
++			ctx->input, outbuf, inbuf, nblocks,
++			&c->u_mode.poly1305.ctx.state, inbuf);
++      burn = nburn > burn ? nburn : burn;
++
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_PPC_VEC_POLY1305
++  if (ctx->use_ppc && length >= 4 * CHACHA20_BLOCK_SIZE)
++    {
++      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++      nblocks -= nblocks % 4;
++
++      nburn = _gcry_chacha20_poly1305_ppc8_blocks4(
++			ctx->input, outbuf, inbuf, nblocks,
++			&c->u_mode.poly1305.ctx.state, inbuf);
++      burn = nburn > burn ? nburn : burn;
++
++      length -= nblocks * CHACHA20_BLOCK_SIZE;
++      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++    }
++#endif
++
++#ifdef USE_S390X_VX_POLY1305
++  if (ctx->use_s390x)
++    {
++      if (length >= 8 * CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++	  nblocks -= nblocks % 8;
++
++	  nburn = _gcry_chacha20_poly1305_s390x_vx_blocks8(
++			    ctx->input, outbuf, inbuf, nblocks,
++			    &c->u_mode.poly1305.ctx.state, inbuf);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++
++      if (length >= CHACHA20_BLOCK_SIZE)
++	{
++	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
++
++	  nburn = _gcry_chacha20_poly1305_s390x_vx_blocks4_2_1(
++			    ctx->input, outbuf, inbuf, nblocks,
++			    &c->u_mode.poly1305.ctx.state, inbuf);
++	  burn = nburn > burn ? nburn : burn;
++
++	  length -= nblocks * CHACHA20_BLOCK_SIZE;
++	  outbuf += nblocks * CHACHA20_BLOCK_SIZE;
++	  inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
++	}
++    }
++#endif
++
++  while (length)
++    {
++      size_t currlen = length;
++
++      /* Since checksumming is done before decryption, process input in 24KiB
++       * chunks to keep data loaded in L1 cache for decryption. */
++      if (currlen > 24 * 1024)
++	currlen = 24 * 1024;
++
++      nburn = _gcry_poly1305_update_burn (&c->u_mode.poly1305.ctx, inbuf,
++					  currlen);
++      burn = nburn > burn ? nburn : burn;
++
++      nburn = do_chacha20_encrypt_stream_tail (ctx, outbuf, inbuf, currlen);
++      burn = nburn > burn ? nburn : burn;
++
++      outbuf += currlen;
++      inbuf += currlen;
++      length -= currlen;
++    }
++
++  if (burn)
++    _gcry_burn_stack (burn);
++
++  return 0;
++}
++
++
++static const char *
++selftest (void)
++{
++  byte ctxbuf[sizeof(CHACHA20_context_t) + 15];
++  CHACHA20_context_t *ctx;
++  byte scratch[127 + 1];
++  byte buf[512 + 64 + 4];
++  int i;
++
++  /* From draft-strombergson-chacha-test-vectors */
++  static byte key_1[] = {
++    0xc4, 0x6e, 0xc1, 0xb1, 0x8c, 0xe8, 0xa8, 0x78,
++    0x72, 0x5a, 0x37, 0xe7, 0x80, 0xdf, 0xb7, 0x35,
++    0x1f, 0x68, 0xed, 0x2e, 0x19, 0x4c, 0x79, 0xfb,
++    0xc6, 0xae, 0xbe, 0xe1, 0xa6, 0x67, 0x97, 0x5d
++  };
++  static const byte nonce_1[] =
++    { 0x1a, 0xda, 0x31, 0xd5, 0xcf, 0x68, 0x82, 0x21 };
++  static const byte plaintext_1[127] = {
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++  };
++  static const byte ciphertext_1[127] = {
++    0xf6, 0x3a, 0x89, 0xb7, 0x5c, 0x22, 0x71, 0xf9,
++    0x36, 0x88, 0x16, 0x54, 0x2b, 0xa5, 0x2f, 0x06,
++    0xed, 0x49, 0x24, 0x17, 0x92, 0x30, 0x2b, 0x00,
++    0xb5, 0xe8, 0xf8, 0x0a, 0xe9, 0xa4, 0x73, 0xaf,
++    0xc2, 0x5b, 0x21, 0x8f, 0x51, 0x9a, 0xf0, 0xfd,
++    0xd4, 0x06, 0x36, 0x2e, 0x8d, 0x69, 0xde, 0x7f,
++    0x54, 0xc6, 0x04, 0xa6, 0xe0, 0x0f, 0x35, 0x3f,
++    0x11, 0x0f, 0x77, 0x1b, 0xdc, 0xa8, 0xab, 0x92,
++    0xe5, 0xfb, 0xc3, 0x4e, 0x60, 0xa1, 0xd9, 0xa9,
++    0xdb, 0x17, 0x34, 0x5b, 0x0a, 0x40, 0x27, 0x36,
++    0x85, 0x3b, 0xf9, 0x10, 0xb0, 0x60, 0xbd, 0xf1,
++    0xf8, 0x97, 0xb6, 0x29, 0x0f, 0x01, 0xd1, 0x38,
++    0xae, 0x2c, 0x4c, 0x90, 0x22, 0x5b, 0xa9, 0xea,
++    0x14, 0xd5, 0x18, 0xf5, 0x59, 0x29, 0xde, 0xa0,
++    0x98, 0xca, 0x7a, 0x6c, 0xcf, 0xe6, 0x12, 0x27,
++    0x05, 0x3c, 0x84, 0xe4, 0x9a, 0x4a, 0x33
++  };
++
++  /* 16-byte alignment required for amd64 implementation. */
++  ctx = (CHACHA20_context_t *)((uintptr_t)(ctxbuf + 15) & ~(uintptr_t)15);
++
++  chacha20_setkey (ctx, key_1, sizeof key_1, NULL);
++  chacha20_setiv (ctx, nonce_1, sizeof nonce_1);
++  scratch[sizeof (scratch) - 1] = 0;
++  chacha20_encrypt_stream (ctx, scratch, plaintext_1, sizeof plaintext_1);
++  if (memcmp (scratch, ciphertext_1, sizeof ciphertext_1))
++    return "ChaCha20 encryption test 1 failed.";
++  if (scratch[sizeof (scratch) - 1])
++    return "ChaCha20 wrote too much.";
++  chacha20_setkey (ctx, key_1, sizeof (key_1), NULL);
++  chacha20_setiv (ctx, nonce_1, sizeof nonce_1);
++  chacha20_encrypt_stream (ctx, scratch, scratch, sizeof plaintext_1);
++  if (memcmp (scratch, plaintext_1, sizeof plaintext_1))
++    return "ChaCha20 decryption test 1 failed.";
++
++  for (i = 0; i < sizeof buf; i++)
++    buf[i] = i;
++  chacha20_setkey (ctx, key_1, sizeof key_1, NULL);
++  chacha20_setiv (ctx, nonce_1, sizeof nonce_1);
++  /*encrypt */
++  chacha20_encrypt_stream (ctx, buf, buf, sizeof buf);
++  /*decrypt */
++  chacha20_setkey (ctx, key_1, sizeof key_1, NULL);
++  chacha20_setiv (ctx, nonce_1, sizeof nonce_1);
++  chacha20_encrypt_stream (ctx, buf, buf, 1);
++  chacha20_encrypt_stream (ctx, buf + 1, buf + 1, (sizeof buf) - 1 - 1);
++  chacha20_encrypt_stream (ctx, buf + (sizeof buf) - 1,
++                           buf + (sizeof buf) - 1, 1);
++  for (i = 0; i < sizeof buf; i++)
++    if (buf[i] != (byte) i)
++      return "ChaCha20 encryption test 2 failed.";
++
++  chacha20_setkey (ctx, key_1, sizeof key_1, NULL);
++  chacha20_setiv (ctx, nonce_1, sizeof nonce_1);
++  /* encrypt */
++  for (i = 0; i < sizeof buf; i++)
++    chacha20_encrypt_stream (ctx, &buf[i], &buf[i], 1);
++  /* decrypt */
++  chacha20_setkey (ctx, key_1, sizeof key_1, NULL);
++  chacha20_setiv (ctx, nonce_1, sizeof nonce_1);
++  chacha20_encrypt_stream (ctx, buf, buf, sizeof buf);
++  for (i = 0; i < sizeof buf; i++)
++    if (buf[i] != (byte) i)
++      return "ChaCha20 encryption test 3 failed.";
++
++  return NULL;
++}
++
++
++gcry_cipher_spec_t _gcry_cipher_spec_chacha20 = {
++  GCRY_CIPHER_CHACHA20,
++  {0, 0},                       /* flags */
++  "CHACHA20",                   /* name */
++  NULL,                         /* aliases */
++  NULL,                         /* oids */
++  1,                            /* blocksize in bytes. */
++  CHACHA20_MAX_KEY_SIZE * 8,    /* standard key length in bits. */
++  sizeof (CHACHA20_context_t),
++  chacha20_setkey,
++  NULL,
++  NULL,
++  chacha20_encrypt_stream,
++  chacha20_encrypt_stream,
++  NULL,
++  NULL,
++  chacha20_setiv
++};
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+diff --git a/cipher/chacha20-ppc.c b/cipher/chacha20-ppc.c
+new file mode 100644
+index 00000000..565b7156
+--- /dev/null
++++ b/cipher/chacha20-ppc.c
+@@ -0,0 +1,646 @@
++/* chacha20-ppc.c - PowerPC vector implementation of ChaCha20
++ * Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <config.h>
++
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    defined(USE_CHACHA20) && \
++    __GNUC__ >= 4
++
++#include <altivec.h>
++#include "bufhelp.h"
++#include "poly1305-internal-new.h"
++
++#include "mpi/mpi-internal.h"
++#include "mpi/longlong.h"
++
++
++typedef vector unsigned char vector16x_u8;
++typedef vector unsigned int vector4x_u32;
++typedef vector unsigned long long vector2x_u64;
++
++
++#define ALWAYS_INLINE inline __attribute__((always_inline))
++#define NO_INLINE __attribute__((noinline))
++#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))
++
++#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
++#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
++#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE
++
++
++#ifdef WORDS_BIGENDIAN
++static const vector16x_u8 le_bswap_const =
++  { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
++#endif
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_rol_elems(vector4x_u32 v, unsigned int idx)
++{
++#ifndef WORDS_BIGENDIAN
++  return vec_sld (v, v, (16 - (4 * idx)) & 15);
++#else
++  return vec_sld (v, v, (4 * idx) & 15);
++#endif
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_load_le(unsigned long offset, const unsigned char *ptr)
++{
++  vector4x_u32 vec;
++  vec = vec_vsx_ld (offset, (const u32 *)ptr);
++#ifdef WORDS_BIGENDIAN
++  vec = (vector4x_u32)vec_perm((vector16x_u8)vec, (vector16x_u8)vec,
++			       le_bswap_const);
++#endif
++  return vec;
++}
++
++
++static ASM_FUNC_ATTR_INLINE void
++vec_store_le(vector4x_u32 vec, unsigned long offset, unsigned char *ptr)
++{
++#ifdef WORDS_BIGENDIAN
++  vec = (vector4x_u32)vec_perm((vector16x_u8)vec, (vector16x_u8)vec,
++			       le_bswap_const);
++#endif
++  vec_vsx_st (vec, offset, (u32 *)ptr);
++}
++
++
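++/* Add two vectors as a pair of 64-bit counters held in little-endian word
++ * order; on big-endian the 32-bit halves are swapped around the addition.  */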
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_add_ctr_u64(vector4x_u32 v, vector4x_u32 a)
++{
++#ifdef WORDS_BIGENDIAN
++  static const vector16x_u8 swap32 =
++    { 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11 };
++  vector2x_u64 vec, add, sum;
++
++  vec = (vector2x_u64)vec_perm((vector16x_u8)v, (vector16x_u8)v, swap32);
++  add = (vector2x_u64)vec_perm((vector16x_u8)a, (vector16x_u8)a, swap32);
++  sum = vec + add;
++  return (vector4x_u32)vec_perm((vector16x_u8)sum, (vector16x_u8)sum, swap32);
++#else
++  return (vector4x_u32)((vector2x_u64)(v) + (vector2x_u64)(a));
++#endif
++}
++
++
++/**********************************************************************
++  2-way && 1-way chacha20
++ **********************************************************************/
++
++#define ROTATE(v1,rolv)			\
++	__asm__ ("vrlw %0,%1,%2\n\t" : "=v" (v1) : "v" (v1), "v" (rolv))
++
++#define WORD_ROL(v1,c)			\
++	((v1) = vec_rol_elems((v1), (c)))
++
++#define XOR(ds,s) \
++	((ds) ^= (s))
++
++#define PLUS(ds,s) \
++	((ds) += (s))
++
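++/* Quarter rounds on all four columns of one block; the WORD_ROL element
++ * rotations re-align the rows so that alternating calls process the
++ * columns and the diagonals.  */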
++#define QUARTERROUND4(x0,x1,x2,x3,rol_x1,rol_x2,rol_x3) \
++	PLUS(x0, x1); XOR(x3, x0); ROTATE(x3, rotate_16); \
++	PLUS(x2, x3); XOR(x1, x2); ROTATE(x1, rotate_12); \
++	PLUS(x0, x1); XOR(x3, x0); ROTATE(x3, rotate_8); \
++	PLUS(x2, x3); \
++	  WORD_ROL(x3, rol_x3); \
++		      XOR(x1, x2); \
++	  WORD_ROL(x2, rol_x2); \
++				   ROTATE(x1, rotate_7); \
++	  WORD_ROL(x1, rol_x1);
++
++#define ADD_U64(v,a) \
++	(v = vec_add_ctr_u64(v, a))
++
++unsigned int ASM_FUNC_ATTR
++_gcry_chacha20_ppc8_blocks1(u32 *state, byte *dst, const byte *src,
++			    size_t nblks)
++{
++  vector4x_u32 counter_1 = { 1, 0, 0, 0 };
++  vector4x_u32 rotate_16 = { 16, 16, 16, 16 };
++  vector4x_u32 rotate_12 = { 12, 12, 12, 12 };
++  vector4x_u32 rotate_8 = { 8, 8, 8, 8 };
++  vector4x_u32 rotate_7 = { 7, 7, 7, 7 };
++  vector4x_u32 state0, state1, state2, state3;
++  vector4x_u32 v0, v1, v2, v3;
++  vector4x_u32 v4, v5, v6, v7;
++  int i;
++
++  /* force preload of constants to vector registers */
++  __asm__ ("": "+v" (counter_1) :: "memory");
++  __asm__ ("": "+v" (rotate_16) :: "memory");
++  __asm__ ("": "+v" (rotate_12) :: "memory");
++  __asm__ ("": "+v" (rotate_8) :: "memory");
++  __asm__ ("": "+v" (rotate_7) :: "memory");
++
++  state0 = vec_vsx_ld(0 * 16, state);
++  state1 = vec_vsx_ld(1 * 16, state);
++  state2 = vec_vsx_ld(2 * 16, state);
++  state3 = vec_vsx_ld(3 * 16, state);
++
++  while (nblks >= 2)
++    {
++      v0 = state0;
++      v1 = state1;
++      v2 = state2;
++      v3 = state3;
++
++      v4 = state0;
++      v5 = state1;
++      v6 = state2;
++      v7 = state3;
++      ADD_U64(v7, counter_1);
++
++      for (i = 20; i > 0; i -= 2)
++	{
++	  QUARTERROUND4(v0, v1, v2, v3, 1, 2, 3);
++	  QUARTERROUND4(v4, v5, v6, v7, 1, 2, 3);
++	  QUARTERROUND4(v0, v1, v2, v3, 3, 2, 1);
++	  QUARTERROUND4(v4, v5, v6, v7, 3, 2, 1);
++	}
++
++      v0 += state0;
++      v1 += state1;
++      v2 += state2;
++      v3 += state3;
++      ADD_U64(state3, counter_1); /* update counter */
++      v4 += state0;
++      v5 += state1;
++      v6 += state2;
++      v7 += state3;
++      ADD_U64(state3, counter_1); /* update counter */
++
++      v0 ^= vec_load_le(0 * 16, src);
++      v1 ^= vec_load_le(1 * 16, src);
++      v2 ^= vec_load_le(2 * 16, src);
++      v3 ^= vec_load_le(3 * 16, src);
++      vec_store_le(v0, 0 * 16, dst);
++      vec_store_le(v1, 1 * 16, dst);
++      vec_store_le(v2, 2 * 16, dst);
++      vec_store_le(v3, 3 * 16, dst);
++      src += 64;
++      dst += 64;
++      v4 ^= vec_load_le(0 * 16, src);
++      v5 ^= vec_load_le(1 * 16, src);
++      v6 ^= vec_load_le(2 * 16, src);
++      v7 ^= vec_load_le(3 * 16, src);
++      vec_store_le(v4, 0 * 16, dst);
++      vec_store_le(v5, 1 * 16, dst);
++      vec_store_le(v6, 2 * 16, dst);
++      vec_store_le(v7, 3 * 16, dst);
++      src += 64;
++      dst += 64;
++
++      nblks -= 2;
++    }
++
++  while (nblks)
++    {
++      v0 = state0;
++      v1 = state1;
++      v2 = state2;
++      v3 = state3;
++
++      for (i = 20; i > 0; i -= 2)
++	{
++	  QUARTERROUND4(v0, v1, v2, v3, 1, 2, 3);
++	  QUARTERROUND4(v0, v1, v2, v3, 3, 2, 1);
++	}
++
++      v0 += state0;
++      v1 += state1;
++      v2 += state2;
++      v3 += state3;
++      ADD_U64(state3, counter_1); /* update counter */
++
++      v0 ^= vec_load_le(0 * 16, src);
++      v1 ^= vec_load_le(1 * 16, src);
++      v2 ^= vec_load_le(2 * 16, src);
++      v3 ^= vec_load_le(3 * 16, src);
++      vec_store_le(v0, 0 * 16, dst);
++      vec_store_le(v1, 1 * 16, dst);
++      vec_store_le(v2, 2 * 16, dst);
++      vec_store_le(v3, 3 * 16, dst);
++      src += 64;
++      dst += 64;
++
++      nblks--;
++    }
++
++  vec_vsx_st(state3, 3 * 16, state); /* store counter */
++
++  return 0;
++}
++
++
++/**********************************************************************
++  4-way chacha20
++ **********************************************************************/
++
++/* 4x4 32-bit integer matrix transpose */
++#define transpose_4x4(x0, x1, x2, x3) ({ \
++	vector4x_u32 t1 = vec_mergeh(x0, x2); \
++	vector4x_u32 t2 = vec_mergel(x0, x2); \
++	vector4x_u32 t3 = vec_mergeh(x1, x3); \
++	x3 = vec_mergel(x1, x3); \
++	x0 = vec_mergeh(t1, t3); \
++	x1 = vec_mergel(t1, t3); \
++	x2 = vec_mergeh(t2, x3); \
++	x3 = vec_mergel(t2, x3); \
++      })
++
++#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2)			\
++	PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2);	\
++	    ROTATE(d1, rotate_16); ROTATE(d2, rotate_16);	\
++	PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2);	\
++	    ROTATE(b1, rotate_12); ROTATE(b2, rotate_12);	\
++	PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2);	\
++	    ROTATE(d1, rotate_8); ROTATE(d2, rotate_8);		\
++	PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2);	\
++	    ROTATE(b1, rotate_7); ROTATE(b2, rotate_7);
++
++unsigned int ASM_FUNC_ATTR
++_gcry_chacha20_ppc8_blocks4(u32 *state, byte *dst, const byte *src,
++			    size_t nblks)
++{
++  vector4x_u32 counters_0123 = { 0, 1, 2, 3 };
++  vector4x_u32 counter_4 = { 4, 0, 0, 0 };
++  vector4x_u32 rotate_16 = { 16, 16, 16, 16 };
++  vector4x_u32 rotate_12 = { 12, 12, 12, 12 };
++  vector4x_u32 rotate_8 = { 8, 8, 8, 8 };
++  vector4x_u32 rotate_7 = { 7, 7, 7, 7 };
++  vector4x_u32 state0, state1, state2, state3;
++  vector4x_u32 v0, v1, v2, v3, v4, v5, v6, v7;
++  vector4x_u32 v8, v9, v10, v11, v12, v13, v14, v15;
++  vector4x_u32 tmp;
++  int i;
++
++  /* force preload of constants to vector registers */
++  __asm__ ("": "+v" (counters_0123) :: "memory");
++  __asm__ ("": "+v" (counter_4) :: "memory");
++  __asm__ ("": "+v" (rotate_16) :: "memory");
++  __asm__ ("": "+v" (rotate_12) :: "memory");
++  __asm__ ("": "+v" (rotate_8) :: "memory");
++  __asm__ ("": "+v" (rotate_7) :: "memory");
++
++  state0 = vec_vsx_ld(0 * 16, state);
++  state1 = vec_vsx_ld(1 * 16, state);
++  state2 = vec_vsx_ld(2 * 16, state);
++  state3 = vec_vsx_ld(3 * 16, state);
++
++  do
++    {
++      v0 = vec_splat(state0, 0);
++      v1 = vec_splat(state0, 1);
++      v2 = vec_splat(state0, 2);
++      v3 = vec_splat(state0, 3);
++      v4 = vec_splat(state1, 0);
++      v5 = vec_splat(state1, 1);
++      v6 = vec_splat(state1, 2);
++      v7 = vec_splat(state1, 3);
++      v8 = vec_splat(state2, 0);
++      v9 = vec_splat(state2, 1);
++      v10 = vec_splat(state2, 2);
++      v11 = vec_splat(state2, 3);
++      v12 = vec_splat(state3, 0);
++      v13 = vec_splat(state3, 1);
++      v14 = vec_splat(state3, 2);
++      v15 = vec_splat(state3, 3);
++
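++      /* Give each of the four blocks its own counter; vec_cmplt yields
++       * all-ones (-1) where the 32-bit addition wrapped, so subtracting it
++       * carries into v13.  */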
++      v12 += counters_0123;
++      v13 -= vec_cmplt(v12, counters_0123);
++
++      for (i = 20; i > 0; i -= 2)
++	{
++	  QUARTERROUND2(v0, v4,  v8, v12,   v1, v5,  v9, v13)
++	  QUARTERROUND2(v2, v6, v10, v14,   v3, v7, v11, v15)
++	  QUARTERROUND2(v0, v5, v10, v15,   v1, v6, v11, v12)
++	  QUARTERROUND2(v2, v7,  v8, v13,   v3, v4,  v9, v14)
++	}
++
++      v0 += vec_splat(state0, 0);
++      v1 += vec_splat(state0, 1);
++      v2 += vec_splat(state0, 2);
++      v3 += vec_splat(state0, 3);
++      v4 += vec_splat(state1, 0);
++      v5 += vec_splat(state1, 1);
++      v6 += vec_splat(state1, 2);
++      v7 += vec_splat(state1, 3);
++      v8 += vec_splat(state2, 0);
++      v9 += vec_splat(state2, 1);
++      v10 += vec_splat(state2, 2);
++      v11 += vec_splat(state2, 3);
++      tmp = vec_splat(state3, 0);
++      tmp += counters_0123;
++      v12 += tmp;
++      v13 += vec_splat(state3, 1) - vec_cmplt(tmp, counters_0123);
++      v14 += vec_splat(state3, 2);
++      v15 += vec_splat(state3, 3);
++      ADD_U64(state3, counter_4); /* update counter */
++
++      transpose_4x4(v0, v1, v2, v3);
++      transpose_4x4(v4, v5, v6, v7);
++      transpose_4x4(v8, v9, v10, v11);
++      transpose_4x4(v12, v13, v14, v15);
++
++      v0 ^= vec_load_le((64 * 0 + 16 * 0), src);
++      v1 ^= vec_load_le((64 * 1 + 16 * 0), src);
++      v2 ^= vec_load_le((64 * 2 + 16 * 0), src);
++      v3 ^= vec_load_le((64 * 3 + 16 * 0), src);
++
++      v4 ^= vec_load_le((64 * 0 + 16 * 1), src);
++      v5 ^= vec_load_le((64 * 1 + 16 * 1), src);
++      v6 ^= vec_load_le((64 * 2 + 16 * 1), src);
++      v7 ^= vec_load_le((64 * 3 + 16 * 1), src);
++
++      v8 ^= vec_load_le((64 * 0 + 16 * 2), src);
++      v9 ^= vec_load_le((64 * 1 + 16 * 2), src);
++      v10 ^= vec_load_le((64 * 2 + 16 * 2), src);
++      v11 ^= vec_load_le((64 * 3 + 16 * 2), src);
++
++      v12 ^= vec_load_le((64 * 0 + 16 * 3), src);
++      v13 ^= vec_load_le((64 * 1 + 16 * 3), src);
++      v14 ^= vec_load_le((64 * 2 + 16 * 3), src);
++      v15 ^= vec_load_le((64 * 3 + 16 * 3), src);
++
++      vec_store_le(v0, (64 * 0 + 16 * 0), dst);
++      vec_store_le(v1, (64 * 1 + 16 * 0), dst);
++      vec_store_le(v2, (64 * 2 + 16 * 0), dst);
++      vec_store_le(v3, (64 * 3 + 16 * 0), dst);
++
++      vec_store_le(v4, (64 * 0 + 16 * 1), dst);
++      vec_store_le(v5, (64 * 1 + 16 * 1), dst);
++      vec_store_le(v6, (64 * 2 + 16 * 1), dst);
++      vec_store_le(v7, (64 * 3 + 16 * 1), dst);
++
++      vec_store_le(v8, (64 * 0 + 16 * 2), dst);
++      vec_store_le(v9, (64 * 1 + 16 * 2), dst);
++      vec_store_le(v10, (64 * 2 + 16 * 2), dst);
++      vec_store_le(v11, (64 * 3 + 16 * 2), dst);
++
++      vec_store_le(v12, (64 * 0 + 16 * 3), dst);
++      vec_store_le(v13, (64 * 1 + 16 * 3), dst);
++      vec_store_le(v14, (64 * 2 + 16 * 3), dst);
++      vec_store_le(v15, (64 * 3 + 16 * 3), dst);
++
++      src += 4*64;
++      dst += 4*64;
++
++      nblks -= 4;
++    }
++  while (nblks);
++
++  vec_vsx_st(state3, 3 * 16, state); /* store counter */
++
++  return 0;
++}
++
++
++#if SIZEOF_UNSIGNED_LONG == 8
++
++/**********************************************************************
++  4-way stitched chacha20-poly1305
++ **********************************************************************/
++
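++/* Three-limb add with carry (addc/adde), used for the 130-bit Poly1305
++ * accumulator.  */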
++#define ADD_1305_64(A2, A1, A0, B2, B1, B0) \
++      __asm__ ("addc %0, %3, %0\n" \
++	       "adde %1, %4, %1\n" \
++	       "adde %2, %5, %2\n" \
++	       : "+r" (A0), "+r" (A1), "+r" (A2) \
++	       : "r" (B0), "r" (B1), "r" (B2) \
++	       : "cc" )
++
++#define MUL_MOD_1305_64_PART1(H2, H1, H0, R1, R0, R1_MULT5) do { \
++    /* x = a * r (partial mod 2^130-5) */ \
++    umul_ppmm(x0_hi, x0_lo, H0, R0);  /* h0 * r0 */ \
++    umul_ppmm(x1_hi, x1_lo, H0, R1);  /* h0 * r1 */ \
++    \
++    umul_ppmm(t0_hi, t0_lo, H1, R1_MULT5); /* h1 * r1 mod 2^130-5 */ \
++  } while (0)
++
++#define MUL_MOD_1305_64_PART2(H2, H1, H0, R1, R0, R1_MULT5) do { \
++    add_ssaaaa(x0_hi, x0_lo, x0_hi, x0_lo, t0_hi, t0_lo); \
++    umul_ppmm(t1_hi, t1_lo, H1, R0);       /* h1 * r0 */ \
++    add_ssaaaa(x1_hi, x1_lo, x1_hi, x1_lo, t1_hi, t1_lo); \
++    \
++    t1_lo = H2 * R1_MULT5; /* h2 * r1 mod 2^130-5 */ \
++    t1_hi = H2 * R0;       /* h2 * r0 */ \
++    add_ssaaaa(H0, H1, x1_hi, x1_lo, t1_hi, t1_lo); \
++    \
++    /* carry propagation */ \
++    H2 = H0 & 3; \
++    H0 = (H0 >> 2) * 5; /* msb mod 2^130-5 */ \
++    ADD_1305_64(H2, H1, H0, (u64)0, x0_hi, x0_lo); \
++  } while (0)
++
++#define POLY1305_BLOCK_PART1(in_pos) do { \
++    m0 = buf_get_le64(poly1305_src + (in_pos) + 0); \
++    m1 = buf_get_le64(poly1305_src + (in_pos) + 8); \
++    /* a = h + m */ \
++    ADD_1305_64(h2, h1, h0, m2, m1, m0); \
++    /* h = a * r (partial mod 2^130-5) */ \
++    MUL_MOD_1305_64_PART1(h2, h1, h0, r1, r0, r1_mult5); \
++  } while (0)
++
++#define POLY1305_BLOCK_PART2(in_pos) do { \
++    MUL_MOD_1305_64_PART2(h2, h1, h0, r1, r0, r1_mult5); \
++  } while (0)
++
++unsigned int ASM_FUNC_ATTR
++_gcry_chacha20_poly1305_ppc8_blocks4(u32 *state, byte *dst, const byte *src,
++				     size_t nblks, POLY1305_STATE *st,
++				     const byte *poly1305_src)
++{
++  vector4x_u32 counters_0123 = { 0, 1, 2, 3 };
++  vector4x_u32 counter_4 = { 4, 0, 0, 0 };
++  vector4x_u32 rotate_16 = { 16, 16, 16, 16 };
++  vector4x_u32 rotate_12 = { 12, 12, 12, 12 };
++  vector4x_u32 rotate_8 = { 8, 8, 8, 8 };
++  vector4x_u32 rotate_7 = { 7, 7, 7, 7 };
++  vector4x_u32 state0, state1, state2, state3;
++  vector4x_u32 v0, v1, v2, v3, v4, v5, v6, v7;
++  vector4x_u32 v8, v9, v10, v11, v12, v13, v14, v15;
++  vector4x_u32 tmp;
++  u64 r0, r1, r1_mult5;
++  u64 h0, h1, h2;
++  u64 m0, m1, m2;
++  u64 x0_lo, x0_hi, x1_lo, x1_hi;
++  u64 t0_lo, t0_hi, t1_lo, t1_hi;
++  unsigned int i, o;
++
++  /* load poly1305 state */
++  m2 = 1;
++  h0 = st->h[0] + ((u64)st->h[1] << 32);
++  h1 = st->h[2] + ((u64)st->h[3] << 32);
++  h2 = st->h[4];
++  r0 = st->r[0] + ((u64)st->r[1] << 32);
++  r1 = st->r[2] + ((u64)st->r[3] << 32);
++  r1_mult5 = (r1 >> 2) + r1;
++
++  /* force preload of constants to vector registers */
++  __asm__ ("": "+v" (counters_0123) :: "memory");
++  __asm__ ("": "+v" (counter_4) :: "memory");
++  __asm__ ("": "+v" (rotate_16) :: "memory");
++  __asm__ ("": "+v" (rotate_12) :: "memory");
++  __asm__ ("": "+v" (rotate_8) :: "memory");
++  __asm__ ("": "+v" (rotate_7) :: "memory");
++
++  state0 = vec_vsx_ld(0 * 16, state);
++  state1 = vec_vsx_ld(1 * 16, state);
++  state2 = vec_vsx_ld(2 * 16, state);
++  state3 = vec_vsx_ld(3 * 16, state);
++
++  do
++    {
++      v0 = vec_splat(state0, 0);
++      v1 = vec_splat(state0, 1);
++      v2 = vec_splat(state0, 2);
++      v3 = vec_splat(state0, 3);
++      v4 = vec_splat(state1, 0);
++      v5 = vec_splat(state1, 1);
++      v6 = vec_splat(state1, 2);
++      v7 = vec_splat(state1, 3);
++      v8 = vec_splat(state2, 0);
++      v9 = vec_splat(state2, 1);
++      v10 = vec_splat(state2, 2);
++      v11 = vec_splat(state2, 3);
++      v12 = vec_splat(state3, 0);
++      v13 = vec_splat(state3, 1);
++      v14 = vec_splat(state3, 2);
++      v15 = vec_splat(state3, 3);
++
++      v12 += counters_0123;
++      v13 -= vec_cmplt(v12, counters_0123);
++
++      for (o = 20; o; o -= 10)
++	{
++	  for (i = 8; i; i -= 2)
++	    {
++	      POLY1305_BLOCK_PART1(0 * 16);
++	      QUARTERROUND2(v0, v4,  v8, v12,   v1, v5,  v9, v13)
++	      POLY1305_BLOCK_PART2();
++	      QUARTERROUND2(v2, v6, v10, v14,   v3, v7, v11, v15)
++	      POLY1305_BLOCK_PART1(1 * 16);
++	      poly1305_src += 2 * 16;
++	      QUARTERROUND2(v0, v5, v10, v15,   v1, v6, v11, v12)
++	      POLY1305_BLOCK_PART2();
++	      QUARTERROUND2(v2, v7,  v8, v13,   v3, v4,  v9, v14)
++	    }
++
++	  QUARTERROUND2(v0, v4,  v8, v12,   v1, v5,  v9, v13)
++	  QUARTERROUND2(v2, v6, v10, v14,   v3, v7, v11, v15)
++	  QUARTERROUND2(v0, v5, v10, v15,   v1, v6, v11, v12)
++	  QUARTERROUND2(v2, v7,  v8, v13,   v3, v4,  v9, v14)
++	}
++
++      v0 += vec_splat(state0, 0);
++      v1 += vec_splat(state0, 1);
++      v2 += vec_splat(state0, 2);
++      v3 += vec_splat(state0, 3);
++      v4 += vec_splat(state1, 0);
++      v5 += vec_splat(state1, 1);
++      v6 += vec_splat(state1, 2);
++      v7 += vec_splat(state1, 3);
++      v8 += vec_splat(state2, 0);
++      v9 += vec_splat(state2, 1);
++      v10 += vec_splat(state2, 2);
++      v11 += vec_splat(state2, 3);
++      tmp = vec_splat(state3, 0);
++      tmp += counters_0123;
++      v12 += tmp;
++      v13 += vec_splat(state3, 1) - vec_cmplt(tmp, counters_0123);
++      v14 += vec_splat(state3, 2);
++      v15 += vec_splat(state3, 3);
++      ADD_U64(state3, counter_4); /* update counter */
++
++      transpose_4x4(v0, v1, v2, v3);
++      transpose_4x4(v4, v5, v6, v7);
++      transpose_4x4(v8, v9, v10, v11);
++      transpose_4x4(v12, v13, v14, v15);
++
++      v0 ^= vec_load_le((64 * 0 + 16 * 0), src);
++      v1 ^= vec_load_le((64 * 1 + 16 * 0), src);
++      v2 ^= vec_load_le((64 * 2 + 16 * 0), src);
++      v3 ^= vec_load_le((64 * 3 + 16 * 0), src);
++
++      v4 ^= vec_load_le((64 * 0 + 16 * 1), src);
++      v5 ^= vec_load_le((64 * 1 + 16 * 1), src);
++      v6 ^= vec_load_le((64 * 2 + 16 * 1), src);
++      v7 ^= vec_load_le((64 * 3 + 16 * 1), src);
++
++      v8 ^= vec_load_le((64 * 0 + 16 * 2), src);
++      v9 ^= vec_load_le((64 * 1 + 16 * 2), src);
++      v10 ^= vec_load_le((64 * 2 + 16 * 2), src);
++      v11 ^= vec_load_le((64 * 3 + 16 * 2), src);
++
++      v12 ^= vec_load_le((64 * 0 + 16 * 3), src);
++      v13 ^= vec_load_le((64 * 1 + 16 * 3), src);
++      v14 ^= vec_load_le((64 * 2 + 16 * 3), src);
++      v15 ^= vec_load_le((64 * 3 + 16 * 3), src);
++
++      vec_store_le(v0, (64 * 0 + 16 * 0), dst);
++      vec_store_le(v1, (64 * 1 + 16 * 0), dst);
++      vec_store_le(v2, (64 * 2 + 16 * 0), dst);
++      vec_store_le(v3, (64 * 3 + 16 * 0), dst);
++
++      vec_store_le(v4, (64 * 0 + 16 * 1), dst);
++      vec_store_le(v5, (64 * 1 + 16 * 1), dst);
++      vec_store_le(v6, (64 * 2 + 16 * 1), dst);
++      vec_store_le(v7, (64 * 3 + 16 * 1), dst);
++
++      vec_store_le(v8, (64 * 0 + 16 * 2), dst);
++      vec_store_le(v9, (64 * 1 + 16 * 2), dst);
++      vec_store_le(v10, (64 * 2 + 16 * 2), dst);
++      vec_store_le(v11, (64 * 3 + 16 * 2), dst);
++
++      vec_store_le(v12, (64 * 0 + 16 * 3), dst);
++      vec_store_le(v13, (64 * 1 + 16 * 3), dst);
++      vec_store_le(v14, (64 * 2 + 16 * 3), dst);
++      vec_store_le(v15, (64 * 3 + 16 * 3), dst);
++
++      src += 4*64;
++      dst += 4*64;
++
++      nblks -= 4;
++    }
++  while (nblks);
++
++  vec_vsx_st(state3, 3 * 16, state); /* store counter */
++
++  /* store poly1305 state */
++  st->h[0] = h0;
++  st->h[1] = h0 >> 32;
++  st->h[2] = h1;
++  st->h[3] = h1 >> 32;
++  st->h[4] = h2;
++
++  return 0;
++}
++
++#endif /* SIZEOF_UNSIGNED_LONG == 8 */
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+diff --git a/cipher/chacha20.c b/cipher/chacha20.c
+index ebbfeb24..8eefba7d 100644
+--- a/cipher/chacha20.c
++++ b/cipher/chacha20.c
+@@ -31,6 +31,11 @@
+ 
+ 
+ #include <config.h>
++
++#if !defined(ENABLE_PPC_CRYPTO_SUPPORT) || \
++    !defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) || \
++    !defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
++
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -637,3 +642,5 @@ gcry_cipher_spec_t _gcry_cipher_spec_chacha20 = {
+   NULL,
+   chacha20_setiv
+ };
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+diff --git a/cipher/cipher-internal.h b/cipher/cipher-internal.h
+index a5fd3097..cd45e0a5 100644
+--- a/cipher/cipher-internal.h
++++ b/cipher/cipher-internal.h
+@@ -20,8 +20,15 @@
+ #ifndef G10_CIPHER_INTERNAL_H
+ #define G10_CIPHER_INTERNAL_H
+ 
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    defined(USE_CHACHA20) && \
++    __GNUC__ >= 4
++#include "./poly1305-internal-new.h"
++#else
+ #include "./poly1305-internal.h"
+-
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+ 
+ /* The maximum supported size of a block in bytes.  */
+ #define MAX_BLOCKSIZE 16
+diff --git a/cipher/mpi-new/mpi-asm-defs.h b/cipher/mpi-new/mpi-asm-defs.h
+new file mode 100644
+index 00000000..e607806e
+--- /dev/null
++++ b/cipher/mpi-new/mpi-asm-defs.h
+@@ -0,0 +1,8 @@
++/* This file defines some basic constants for the MPI machinery.
++ * AMD64 compiled for the x32 ABI is special and thus we can't use the
++ * standard values for this ABI.  */
++#if __GNUC__ >= 3 && defined(__x86_64__) && defined(__ILP32__)
++#define BYTES_PER_MPI_LIMB 8
++#else
++#define BYTES_PER_MPI_LIMB  (SIZEOF_UNSIGNED_LONG)
++#endif
+diff --git a/cipher/mpi-new/mpi-inline.h b/cipher/mpi-new/mpi-inline.h
+new file mode 100644
+index 00000000..94e2aec8
+--- /dev/null
++++ b/cipher/mpi-new/mpi-inline.h
+@@ -0,0 +1,161 @@
++/* mpi-inline.h  -  Internal to the Multi Precision Integers
++ * Copyright (C) 1994, 1996, 1998, 1999,
++ *               2001, 2002 Free Software Foundation, Inc.
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
++ *
++ * Note: This code is heavily based on the GNU MP Library.
++ *	 Actually it's the same code with only minor changes in the
++ *	 way the data is stored; this is to support the abstraction
++ *	 of an optional secure memory allocation which may be used
++ *	 to avoid revealing of sensitive data due to paging etc.
++ */
++
++#ifndef G10_MPI_INLINE_H
++#define G10_MPI_INLINE_H
++
++/* Starting with gcc 4.3 "extern inline" conforms in c99 mode to the
++   c99 semantics.  To keep the useful old semantics we use an
++   attribute.  */
++#ifndef G10_MPI_INLINE_DECL
++# ifdef __GNUC_STDC_INLINE__
++#  define G10_MPI_INLINE_DECL  extern inline __attribute__ ((__gnu_inline__))
++# else
++#  define G10_MPI_INLINE_DECL  extern __inline__
++# endif
++#endif
++
++G10_MPI_INLINE_DECL  mpi_limb_t
++_gcry_mpih_add_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
++	       mpi_size_t s1_size, mpi_limb_t s2_limb)
++{
++    mpi_limb_t x;
++
++    x = *s1_ptr++;
++    s2_limb += x;
++    *res_ptr++ = s2_limb;
++    if( s2_limb < x ) { /* sum is less than the left operand: handle carry */
++	while( --s1_size ) {
++	    x = *s1_ptr++ + 1;	/* add carry */
++	    *res_ptr++ = x;	/* and store */
++	    if( x )		/* not 0 (no overflow): we can stop */
++		goto leave;
++	}
++	return 1; /* return carry (size of s1 too small) */
++    }
++
++  leave:
++    if( res_ptr != s1_ptr ) { /* not the same variable */
++	mpi_size_t i;	       /* copy the rest */
++	for( i=0; i < s1_size-1; i++ )
++	    res_ptr[i] = s1_ptr[i];
++    }
++    return 0; /* no carry */
++}
++
++
++
++G10_MPI_INLINE_DECL mpi_limb_t
++_gcry_mpih_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
++			       mpi_ptr_t s2_ptr, mpi_size_t s2_size)
++{
++    mpi_limb_t cy = 0;
++
++    if( s2_size )
++	cy = _gcry_mpih_add_n( res_ptr, s1_ptr, s2_ptr, s2_size );
++
++    if( s1_size - s2_size )
++	cy = _gcry_mpih_add_1( res_ptr + s2_size, s1_ptr + s2_size,
++			    s1_size - s2_size, cy);
++    return cy;
++}
++
++
++G10_MPI_INLINE_DECL mpi_limb_t
++_gcry_mpih_sub_1(mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr,
++	      mpi_size_t s1_size, mpi_limb_t s2_limb )
++{
++    mpi_limb_t x;
++
++    x = *s1_ptr++;
++    s2_limb = x - s2_limb;
++    *res_ptr++ = s2_limb;
++    if( s2_limb > x ) {
++	while( --s1_size ) {
++	    x = *s1_ptr++;
++	    *res_ptr++ = x - 1;
++	    if( x )
++		goto leave;
++	}
++	return 1;
++    }
++
++  leave:
++    if( res_ptr != s1_ptr ) {
++	mpi_size_t i;
++	for( i=0; i < s1_size-1; i++ )
++	    res_ptr[i] = s1_ptr[i];
++    }
++    return 0;
++}
++
++
++
++G10_MPI_INLINE_DECL   mpi_limb_t
++_gcry_mpih_sub( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
++				mpi_ptr_t s2_ptr, mpi_size_t s2_size)
++{
++    mpi_limb_t cy = 0;
++
++    if( s2_size )
++	cy = _gcry_mpih_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size);
++
++    if( s1_size - s2_size )
++	cy = _gcry_mpih_sub_1(res_ptr + s2_size, s1_ptr + s2_size,
++				      s1_size - s2_size, cy);
++    return cy;
++}
++
++/****************
++ * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE.
++ * There are no restrictions on the relative sizes of
++ * the two arguments.
++ * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2.
++ */
++G10_MPI_INLINE_DECL int
++_gcry_mpih_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size )
++{
++    mpi_size_t i;
++    mpi_limb_t op1_word, op2_word;
++
++    for( i = size - 1; i >= 0 ; i--) {
++	op1_word = op1_ptr[i];
++	op2_word = op2_ptr[i];
++	if( op1_word != op2_word )
++	    goto diff;
++    }
++    return 0;
++
++  diff:
++    /* This can *not* be simplified to
++     *	 op1_word - op2_word
++     * since that expression might give signed overflow.  */
++    return (op1_word > op2_word) ? 1 : -1;
++}
++
++
++#endif /*G10_MPI_INLINE_H*/
+diff --git a/cipher/mpi-new/mpi-internal.h b/cipher/mpi-new/mpi-internal.h
+new file mode 100644
+index 00000000..11fcbde4
+--- /dev/null
++++ b/cipher/mpi-new/mpi-internal.h
+@@ -0,0 +1,305 @@
++/* mpi-internal.h  -  Internal to the Multi Precision Integers
++ * Copyright (C) 1994, 1996, 1998, 2000, 2002,
++ *               2003 Free Software Foundation, Inc.
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
++ *
++ * Note: This code is heavily based on the GNU MP Library.
++ *	 Actually it's the same code with only minor changes in the
++ *	 way the data is stored; this is to support the abstraction
++ *	 of an optional secure memory allocation which may be used
++ *	 to avoid revealing of sensitive data due to paging etc.
++ */
++
++#ifndef G10_MPI_INTERNAL_H
++#define G10_MPI_INTERNAL_H
++
++#include "mpi-asm-defs.h"
++
++#ifndef BITS_PER_MPI_LIMB
++#if BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_INT
++  typedef unsigned int mpi_limb_t;
++  typedef   signed int mpi_limb_signed_t;
++#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG
++  typedef unsigned long int mpi_limb_t;
++  typedef   signed long int mpi_limb_signed_t;
++#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG_LONG
++  typedef unsigned long long int mpi_limb_t;
++  typedef   signed long long int mpi_limb_signed_t;
++#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_SHORT
++  typedef unsigned short int mpi_limb_t;
++  typedef   signed short int mpi_limb_signed_t;
++#else
++#error BYTES_PER_MPI_LIMB does not match any C type
++#endif
++#define BITS_PER_MPI_LIMB    (8*BYTES_PER_MPI_LIMB)
++#endif /*BITS_PER_MPI_LIMB*/
++
++#include "mpi.h"
++
++/* If KARATSUBA_THRESHOLD is not already defined, define it to a
++ * value which is good on most machines.  */
++
++/* tested 4, 16, 32 and 64, where 16 gave the best performance when
++ * checking a 768 and a 1024 bit ElGamal signature.
++ * (wk 22.12.97) */
++#ifndef KARATSUBA_THRESHOLD
++#define KARATSUBA_THRESHOLD 16
++#endif
++
++/* The code can't handle KARATSUBA_THRESHOLD smaller than 2.  */
++#if KARATSUBA_THRESHOLD < 2
++#undef KARATSUBA_THRESHOLD
++#define KARATSUBA_THRESHOLD 2
++#endif
++
++
++typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
++typedef int mpi_size_t;        /* (must be a signed type) */
++
++#define ABS(x) (x >= 0 ? x : -x)
++#define MIN(l,o) ((l) < (o) ? (l) : (o))
++#define MAX(h,i) ((h) > (i) ? (h) : (i))
++#define RESIZE_IF_NEEDED(a,b) \
++    do {			   \
++	if( (a)->alloced < (b) )   \
++	    mpi_resize((a), (b));  \
++    } while(0)
++#define RESIZE_AND_CLEAR_IF_NEEDED(a,b) \
++    do {			   \
++	if( (a)->nlimbs < (b) )   \
++	    mpi_resize((a), (b));  \
++    } while(0)
++
++/* Copy N limbs from S to D.  */
++#define MPN_COPY( d, s, n) \
++    do {				\
++	mpi_size_t _i;			\
++	for( _i = 0; _i < (n); _i++ )	\
++	    (d)[_i] = (s)[_i];		\
++    } while(0)
++
++#define MPN_COPY_INCR( d, s, n) 	\
++    do {				\
++	mpi_size_t _i;			\
++	for( _i = 0; _i < (n); _i++ )	\
++	    (d)[_i] = (s)[_i];		\
++    } while (0)
++
++#define MPN_COPY_DECR( d, s, n ) \
++    do {				\
++	mpi_size_t _i;			\
++	for( _i = (n)-1; _i >= 0; _i--) \
++	   (d)[_i] = (s)[_i];		\
++    } while(0)
++
++/* Zero N limbs at D */
++#define MPN_ZERO(d, n) \
++    do {				  \
++	int  _i;			  \
++	for( _i = 0; _i < (n); _i++ )  \
++	    (d)[_i] = 0;		    \
++    } while (0)
++
++#define MPN_NORMALIZE(d, n)  \
++    do {		       \
++	while( (n) > 0 ) {     \
++	    if( (d)[(n)-1] ) \
++		break;	       \
++	    (n)--;	       \
++	}		       \
++    } while(0)
++
++#define MPN_NORMALIZE_NOT_ZERO(d, n) \
++    do {				    \
++	for(;;) {			    \
++	    if( (d)[(n)-1] )		    \
++		break;			    \
++	    (n)--;			    \
++	}				    \
++    } while(0)
++
++#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
++    do {						\
++	if( (size) < KARATSUBA_THRESHOLD )		\
++	    mul_n_basecase (prodp, up, vp, size);	\
++	else						\
++	    mul_n (prodp, up, vp, size, tspace);	\
++    } while (0);
++
++
++/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
++ * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
++ * If this would yield overflow, DI should be the largest possible number
++ * (i.e., only ones).  For correct operation, the most significant bit of D
++ * has to be set.  Put the quotient in Q and the remainder in R.
++ */
++#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
++    do {							    \
++        mpi_limb_t _ql GCC_ATTR_UNUSED;                               \
++	mpi_limb_t _q, _r;                                          \
++	mpi_limb_t _xh, _xl;					    \
++	umul_ppmm (_q, _ql, (nh), (di));			    \
++	_q += (nh);	/* DI is 2**BITS_PER_MPI_LIMB too small */  \
++	umul_ppmm (_xh, _xl, _q, (d));				    \
++	sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl);		    \
++	if( _xh ) {						    \
++	    sub_ddmmss (_xh, _r, _xh, _r, 0, (d));		    \
++	    _q++;						    \
++	    if( _xh) {						    \
++		sub_ddmmss (_xh, _r, _xh, _r, 0, (d));		    \
++		_q++;						    \
++	    }							    \
++	}							    \
++	if( _r >= (d) ) {					    \
++	    _r -= (d);						    \
++	    _q++;						    \
++	}							    \
++	(r) = _r;						    \
++	(q) = _q;						    \
++    } while (0)
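For readers new to the precomputed-inverse trick, the following stand-alone sketch (an editorial illustration, not part of the patch) redoes the same division with 32-bit limbs so that plain 64-bit C arithmetic can stand in for umul_ppmm/sub_ddmmss; the while loop plays the role of the macro's fix-up steps. The helper name udiv_qrnnd_preinv32 and the test values are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Divide the two-limb number (nh:nl) by a normalized divisor d (top bit
 * set, not a power of two) using the approximate inverse
 * di = floor(2^64 / d) - 2^32, i.e. an inverse that is "2^32 too small"
 * just like DI in UDIV_QRNND_PREINV.  Requires nh < d so the quotient
 * fits in one 32-bit limb. */
static void
udiv_qrnnd_preinv32 (uint32_t *q, uint32_t *r,
                     uint32_t nh, uint32_t nl, uint32_t d)
{
  /* for non-power-of-two d, UINT64_MAX / d equals floor(2^64 / d) */
  uint32_t di = (uint32_t)(UINT64_MAX / d - ((uint64_t)1 << 32));
  uint64_t n = ((uint64_t)nh << 32) | nl;
  uint32_t qest = (uint32_t)(((uint64_t)nh * di) >> 32) + nh;
  uint64_t rem = n - (uint64_t)qest * d;

  /* the estimate never overshoots, it only undershoots by a few units */
  while (rem >= d)
    {
      rem -= d;
      qest++;
    }
  *q = qest;
  *r = (uint32_t)rem;
}

int
main (void)
{
  uint32_t d = 0x9e3779b9u;   /* normalized (MSB set), odd */
  uint32_t nh = 0x12345678u, nl = 0x9abcdef0u;
  uint64_t n = ((uint64_t)nh << 32) | nl;
  uint32_t q, r;

  udiv_qrnnd_preinv32 (&q, &r, nh, nl, d);
  printf ("%s\n", (q == (uint32_t)(n / d) && r == (uint32_t)(n % d))
          ? "ok" : "FAIL");
  return 0;
}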
++
++
++/*-- mpiutil.c --*/
++#define mpi_alloc_limb_space(n,f)  _gcry_mpi_alloc_limb_space((n),(f))
++mpi_ptr_t _gcry_mpi_alloc_limb_space( unsigned nlimbs, int sec );
++void _gcry_mpi_free_limb_space( mpi_ptr_t a, unsigned int nlimbs );
++void _gcry_mpi_assign_limb_space( gcry_mpi_t a, mpi_ptr_t ap, unsigned nlimbs );
++
++/*-- mpi-bit.c --*/
++#define mpi_rshift_limbs(a,n)  _gcry_mpi_rshift_limbs ((a), (n))
++#define mpi_lshift_limbs(a,n)  _gcry_mpi_lshift_limbs ((a), (n))
++
++void _gcry_mpi_rshift_limbs( gcry_mpi_t a, unsigned int count );
++void _gcry_mpi_lshift_limbs( gcry_mpi_t a, unsigned int count );
++
++
++/*-- mpih-add.c --*/
++mpi_limb_t _gcry_mpih_add_1(mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr,
++			 mpi_size_t s1_size, mpi_limb_t s2_limb );
++mpi_limb_t _gcry_mpih_add_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
++			  mpi_ptr_t s2_ptr,  mpi_size_t size);
++mpi_limb_t _gcry_mpih_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
++		       mpi_ptr_t s2_ptr, mpi_size_t s2_size);
++
++/*-- mpih-sub.c --*/
++mpi_limb_t _gcry_mpih_sub_1( mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr,
++			  mpi_size_t s1_size, mpi_limb_t s2_limb );
++mpi_limb_t _gcry_mpih_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
++			  mpi_ptr_t s2_ptr, mpi_size_t size);
++mpi_limb_t _gcry_mpih_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
++		       mpi_ptr_t s2_ptr, mpi_size_t s2_size);
++
++/*-- mpih-cmp.c --*/
++int _gcry_mpih_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size );
++
++/*-- mpih-mul.c --*/
++
++struct karatsuba_ctx {
++    struct karatsuba_ctx *next;
++    mpi_ptr_t tspace;
++    unsigned int tspace_nlimbs;
++    mpi_size_t tspace_size;
++    mpi_ptr_t tp;
++    unsigned int tp_nlimbs;
++    mpi_size_t tp_size;
++};
++
++void _gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx );
++
++mpi_limb_t _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
++			     mpi_size_t s1_size, mpi_limb_t s2_limb);
++mpi_limb_t _gcry_mpih_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
++			     mpi_size_t s1_size, mpi_limb_t s2_limb);
++void _gcry_mpih_mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
++						   mpi_size_t size);
++mpi_limb_t _gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
++					 mpi_ptr_t vp, mpi_size_t vsize);
++void _gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size );
++void _gcry_mpih_sqr_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
++						mpi_ptr_t tspace);
++
++void _gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp,
++				 mpi_ptr_t up, mpi_size_t usize,
++				 mpi_ptr_t vp, mpi_size_t vsize,
++				 struct karatsuba_ctx *ctx );
++
++
++/*-- mpih-mul_1.c (or xxx/cpu/ *.S) --*/
++mpi_limb_t _gcry_mpih_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
++			  mpi_size_t s1_size, mpi_limb_t s2_limb);
++
++/*-- mpih-div.c --*/
++mpi_limb_t _gcry_mpih_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
++						 mpi_limb_t divisor_limb);
++mpi_limb_t _gcry_mpih_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs,
++			   mpi_ptr_t np, mpi_size_t nsize,
++			   mpi_ptr_t dp, mpi_size_t dsize);
++mpi_limb_t _gcry_mpih_divmod_1( mpi_ptr_t quot_ptr,
++			     mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
++			     mpi_limb_t divisor_limb);
++
++/*-- mpih-shift.c --*/
++mpi_limb_t _gcry_mpih_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
++							   unsigned cnt);
++mpi_limb_t _gcry_mpih_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
++							   unsigned cnt);
++
++/*-- mpih-const-time.c --*/
++#define mpih_set_cond(w,u,s,o) _gcry_mpih_set_cond ((w),(u),(s),(o))
++#define mpih_add_n_cond(w,u,v,s,o) _gcry_mpih_add_n_cond ((w),(u),(v),(s),(o))
++#define mpih_sub_n_cond(w,u,v,s,o) _gcry_mpih_sub_n_cond ((w),(u),(v),(s),(o))
++#define mpih_swap_cond(u,v,s,o) _gcry_mpih_swap_cond ((u),(v),(s),(o))
++#define mpih_abs_cond(w,u,s,o) _gcry_mpih_abs_cond ((w),(u),(s),(o))
++#define mpih_mod(v,vs,u,us) _gcry_mpih_mod ((v),(vs),(u),(us))
++
++void _gcry_mpih_set_cond (mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
++                          unsigned long op_enable);
++mpi_limb_t _gcry_mpih_add_n_cond (mpi_ptr_t wp, mpi_ptr_t up, mpi_ptr_t vp,
++                                  mpi_size_t usize, unsigned long op_enable);
++mpi_limb_t _gcry_mpih_sub_n_cond (mpi_ptr_t wp, mpi_ptr_t up, mpi_ptr_t vp,
++                                  mpi_size_t usize, unsigned long op_enable);
++void _gcry_mpih_swap_cond (mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t usize,
++                           unsigned long op_enable);
++void _gcry_mpih_abs_cond (mpi_ptr_t wp, mpi_ptr_t up,
++                          mpi_size_t usize, unsigned long op_enable);
++mpi_ptr_t _gcry_mpih_mod (mpi_ptr_t vp, mpi_size_t vsize,
++                          mpi_ptr_t up, mpi_size_t usize);
++int _gcry_mpih_cmp_ui (mpi_ptr_t up, mpi_size_t usize, unsigned long v);
++
++
++/* Define stuff for longlong.h.  */
++#define W_TYPE_SIZE BITS_PER_MPI_LIMB
++  typedef mpi_limb_t   UWtype;
++  typedef unsigned int UHWtype;
++#if defined (__GNUC__)
++  typedef unsigned int UQItype	  __attribute__ ((mode (QI)));
++  typedef	   int SItype	  __attribute__ ((mode (SI)));
++  typedef unsigned int USItype	  __attribute__ ((mode (SI)));
++  typedef	   int DItype	  __attribute__ ((mode (DI)));
++  typedef unsigned int UDItype	  __attribute__ ((mode (DI)));
++#else
++  typedef unsigned char UQItype;
++  typedef	   long SItype;
++  typedef unsigned long USItype;
++#endif
++
++#ifdef __GNUC__
++#include "mpi-inline.h"
++#endif
++
++#endif /*G10_MPI_INTERNAL_H*/
+diff --git a/cipher/poly1305-new.c b/cipher/poly1305-new.c
+new file mode 100644
+index 00000000..56a1a56e
+--- /dev/null
++++ b/cipher/poly1305-new.c
+@@ -0,0 +1,749 @@
++/* poly1305.c  -  Poly1305 internals and generic implementation
++ * Copyright (C) 2014,2017,2018 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <config.h>
++
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    defined(USE_CHACHA20) && \
++    __GNUC__ >= 4
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++
++#include "types.h"
++#include "g10lib.h"
++#include "cipher.h"
++#include "bufhelp.h"
++#include "poly1305-internal-new.h"
++
++#include "mpi-new/mpi-internal.h"
++#include "mpi/longlong.h"
++
++
++static const char *selftest (void);
++
++
++#undef HAVE_ASM_POLY1305_BLOCKS
++
++#undef USE_MPI_64BIT
++#undef USE_MPI_32BIT
++#if BYTES_PER_MPI_LIMB == 8 /* HAVE_TYPE_U64 check dropped: u64 is always provided by types.h */
++# define USE_MPI_64BIT 1
++#elif BYTES_PER_MPI_LIMB == 4
++# define USE_MPI_32BIT 1
++#else
++# error please implement for this limb size.
++#endif
++
++
++/* USE_S390X_ASM indicates whether to enable zSeries code. */
++#undef USE_S390X_ASM
++#if BYTES_PER_MPI_LIMB == 8
++# if defined (__s390x__) && __GNUC__ >= 4 && __ARCH__ >= 9
++#  if defined(HAVE_GCC_INLINE_ASM_S390X)
++#   define USE_S390X_ASM 1
++#  endif /* HAVE_GCC_INLINE_ASM_S390X */
++# endif
++#endif
++
++
++#ifdef USE_S390X_ASM
++
++#define HAVE_ASM_POLY1305_BLOCKS 1
++
++extern unsigned int _gcry_poly1305_s390x_blocks1(void *state,
++						 const byte *buf, size_t len,
++						 byte high_pad);
++
++static unsigned int
++poly1305_blocks (poly1305_context_t *ctx, const byte *buf, size_t len,
++		 byte high_pad)
++{
++  return _gcry_poly1305_s390x_blocks1(&ctx->state, buf, len, high_pad);
++}
++
++#endif /* USE_S390X_ASM */
++
++
++static void poly1305_init (poly1305_context_t *ctx,
++			   const byte key[POLY1305_KEYLEN])
++{
++  POLY1305_STATE *st = &ctx->state;
++
++  ctx->leftover = 0;
++
++  st->h[0] = 0;
++  st->h[1] = 0;
++  st->h[2] = 0;
++  st->h[3] = 0;
++  st->h[4] = 0;
++
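++  /* clamp r: the standard Poly1305 masking of r (RFC 8439, section 2.5) */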
++  st->r[0] = buf_get_le32(key + 0)  & 0x0fffffff;
++  st->r[1] = buf_get_le32(key + 4)  & 0x0ffffffc;
++  st->r[2] = buf_get_le32(key + 8)  & 0x0ffffffc;
++  st->r[3] = buf_get_le32(key + 12) & 0x0ffffffc;
++
++  st->k[0] = buf_get_le32(key + 16);
++  st->k[1] = buf_get_le32(key + 20);
++  st->k[2] = buf_get_le32(key + 24);
++  st->k[3] = buf_get_le32(key + 28);
++}
++
++
++#ifdef USE_MPI_64BIT
++
++#if defined (__aarch64__) && defined(HAVE_CPU_ARCH_ARM) && __GNUC__ >= 4
++
++/* A += B (armv8/aarch64) */
++#define ADD_1305_64(A2, A1, A0, B2, B1, B0) \
++      __asm__ ("adds %0, %3, %0\n" \
++	       "adcs %1, %4, %1\n" \
++	       "adc  %2, %5, %2\n" \
++	       : "+r" (A0), "+r" (A1), "+r" (A2) \
++	       : "r" (B0), "r" (B1), "r" (B2) \
++	       : "cc" )
++
++#endif /* __aarch64__ */
++
++#if defined (__x86_64__) && defined(HAVE_CPU_ARCH_X86) && __GNUC__ >= 4
++
++/* A += B (x86-64) */
++#define ADD_1305_64(A2, A1, A0, B2, B1, B0) \
++      __asm__ ("addq %3, %0\n" \
++	       "adcq %4, %1\n" \
++	       "adcq %5, %2\n" \
++	       : "+r" (A0), "+r" (A1), "+r" (A2) \
++	       : "g" (B0), "g" (B1), "g" (B2) \
++	       : "cc" )
++
++#endif /* __x86_64__ */
++
++#if defined (__powerpc__) && defined(HAVE_CPU_ARCH_PPC) && __GNUC__ >= 4
++
++/* A += B (ppc64) */
++#define ADD_1305_64(A2, A1, A0, B2, B1, B0) \
++      __asm__ ("addc %0, %3, %0\n" \
++	       "adde %1, %4, %1\n" \
++	       "adde %2, %5, %2\n" \
++	       : "+r" (A0), "+r" (A1), "+r" (A2) \
++	       : "r" (B0), "r" (B1), "r" (B2) \
++	       : "cc" )
++
++#endif /* __powerpc__ */
++
++#ifndef ADD_1305_64
++/* A += B (generic, mpi) */
++#  define ADD_1305_64(A2, A1, A0, B2, B1, B0) do { \
++    u64 carry; \
++    add_ssaaaa(carry, A0, 0, A0, 0, B0); \
++    add_ssaaaa(A2, A1, A2, A1, B2, B1); \
++    add_ssaaaa(A2, A1, A2, A1, 0, carry); \
++  } while (0)
++#endif
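The generic ADD_1305_64 fallback above composes the three-limb add out of add_ssaaaa. For illustration only (assuming a compiler that provides unsigned __int128), the same 192-bit add-with-carry can be written as:

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 u128;

/* a += b for 192-bit values stored as three 64-bit limbs {lo, mid, hi},
 * propagating carries exactly as the ADD_1305_64 variants do. */
static void
add_3limb (uint64_t a[3], const uint64_t b[3])
{
  u128 t;

  t = (u128)a[0] + b[0];
  a[0] = (uint64_t)t;
  t = (u128)a[1] + b[1] + (uint64_t)(t >> 64);
  a[1] = (uint64_t)t;
  a[2] = a[2] + b[2] + (uint64_t)(t >> 64);
}

int
main (void)
{
  uint64_t a[3] = { ~0ULL, ~0ULL, 1 };  /* value 2^129 - 1 */
  uint64_t b[3] = { 1, 0, 0 };

  add_3limb (a, b);
  /* carries ripple through: expect hi=2, mid=0, lo=0 (i.e. 2^129) */
  printf ("%llx %llx %llx\n", (unsigned long long)a[2],
          (unsigned long long)a[1], (unsigned long long)a[0]);
  return 0;
}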
++
++/* H = H * R mod 2¹³⁰-5 */
++#define MUL_MOD_1305_64(H2, H1, H0, R1, R0, R1_MULT5) do { \
++    u64 x0_lo, x0_hi, x1_lo, x1_hi; \
++    u64 t0_lo, t0_hi, t1_lo, t1_hi; \
++    \
++    /* x = a * r (partial mod 2^130-5) */ \
++    umul_ppmm(x0_hi, x0_lo, H0, R0);  /* h0 * r0 */ \
++    umul_ppmm(x1_hi, x1_lo, H0, R1);  /* h0 * r1 */ \
++    \
++    umul_ppmm(t0_hi, t0_lo, H1, R1_MULT5); /* h1 * r1 mod 2^130-5 */ \
++    add_ssaaaa(x0_hi, x0_lo, x0_hi, x0_lo, t0_hi, t0_lo); \
++    umul_ppmm(t1_hi, t1_lo, H1, R0);       /* h1 * r0 */ \
++    add_ssaaaa(x1_hi, x1_lo, x1_hi, x1_lo, t1_hi, t1_lo); \
++    \
++    t1_lo = H2 * R1_MULT5; /* h2 * r1 mod 2^130-5 */ \
++    t1_hi = H2 * R0;       /* h2 * r0 */ \
++    add_ssaaaa(H0, H1, x1_hi, x1_lo, t1_hi, t1_lo); \
++    \
++    /* carry propagation */ \
++    H2 = H0 & 3; \
++    H0 = (H0 >> 2) * 5; /* msb mod 2^130-5 */ \
++    ADD_1305_64(H2, H1, H0, (u64)0, x0_hi, x0_lo); \
++  } while (0)
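The "partial mod 2^130-5" comments rely on the identity 2^130 == 5 (mod 2^130-5): anything that overflows past bit 130 can be folded back into the low part after multiplying it by 5. A scaled-down illustration (editorial, not patch code) with k = 26, so that ordinary 64-bit arithmetic suffices:

#include <stdint.h>
#include <stdio.h>

#define K 26
#define P ((1ULL << K) - 5)   /* toy modulus of the same 2^k - 5 shape */

/* Reduce x mod P by repeatedly folding the bits above position K back
 * into the low part, multiplied by 5 (because 2^K == 5 mod P). */
static uint64_t
fold_mod_p (uint64_t x)
{
  while (x >> K)
    x = (x & ((1ULL << K) - 1)) + 5 * (x >> K);
  return x >= P ? x - P : x;   /* final conditional subtraction */
}

int
main (void)
{
  uint64_t x = 0x0123456789abcdefULL;

  printf ("%llu %llu\n",
          (unsigned long long)fold_mod_p (x),
          (unsigned long long)(x % P));   /* the two values must match */
  return 0;
}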
++
++#ifndef HAVE_ASM_POLY1305_BLOCKS
++
++static unsigned int
++poly1305_blocks (poly1305_context_t *ctx, const byte *buf, size_t len,
++		 byte high_pad)
++{
++  POLY1305_STATE *st = &ctx->state;
++  u64 r0, r1, r1_mult5;
++  u64 h0, h1, h2;
++  u64 m0, m1, m2;
++
++  m2 = high_pad;
++
++  h0 = st->h[0] + ((u64)st->h[1] << 32);
++  h1 = st->h[2] + ((u64)st->h[3] << 32);
++  h2 = st->h[4];
++
++  r0 = st->r[0] + ((u64)st->r[1] << 32);
++  r1 = st->r[2] + ((u64)st->r[3] << 32);
++
++  r1_mult5 = (r1 >> 2) + r1;
++
++  m0 = buf_get_le64(buf + 0);
++  m1 = buf_get_le64(buf + 8);
++  buf += POLY1305_BLOCKSIZE;
++  len -= POLY1305_BLOCKSIZE;
++
++  while (len >= POLY1305_BLOCKSIZE)
++    {
++      /* a = h + m */
++      ADD_1305_64(h2, h1, h0, m2, m1, m0);
++
++      m0 = buf_get_le64(buf + 0);
++      m1 = buf_get_le64(buf + 8);
++
++      /* h = a * r (partial mod 2^130-5) */
++      MUL_MOD_1305_64(h2, h1, h0, r1, r0, r1_mult5);
++
++      buf += POLY1305_BLOCKSIZE;
++      len -= POLY1305_BLOCKSIZE;
++    }
++
++  /* a = h + m */
++  ADD_1305_64(h2, h1, h0, m2, m1, m0);
++
++  /* h = a * r (partial mod 2^130-5) */
++  MUL_MOD_1305_64(h2, h1, h0, r1, r0, r1_mult5);
++
++  st->h[0] = h0;
++  st->h[1] = h0 >> 32;
++  st->h[2] = h1;
++  st->h[3] = h1 >> 32;
++  st->h[4] = h2;
++
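++  /* burn_stack: rough upper bound on the stack bytes used above */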
++  return 6 * sizeof (void *) + 18 * sizeof (u64);
++}
++
++#endif /* !HAVE_ASM_POLY1305_BLOCKS */
++
++static unsigned int poly1305_final (poly1305_context_t *ctx,
++				    byte mac[POLY1305_TAGLEN])
++{
++  POLY1305_STATE *st = &ctx->state;
++  unsigned int burn = 0;
++  u64 u, carry;
++  u64 k0, k1;
++  u64 h0, h1;
++  u64 h2;
++
++  /* process the remaining block */
++  if (ctx->leftover)
++    {
++      ctx->buffer[ctx->leftover++] = 1;
++      if (ctx->leftover < POLY1305_BLOCKSIZE)
++	{
++	  memset (&ctx->buffer[ctx->leftover], 0,
++		  POLY1305_BLOCKSIZE - ctx->leftover);
++	  ctx->leftover = POLY1305_BLOCKSIZE;
++	}
++      burn = poly1305_blocks (ctx, ctx->buffer, POLY1305_BLOCKSIZE, 0);
++    }
++
++  h0 = st->h[0] + ((u64)st->h[1] << 32);
++  h1 = st->h[2] + ((u64)st->h[3] << 32);
++  h2 = st->h[4];
++
++  k0 = st->k[0] + ((u64)st->k[1] << 32);
++  k1 = st->k[2] + ((u64)st->k[3] << 32);
++
++  /* check if h is more than 2^130-5, by adding 5. */
++  add_ssaaaa(carry, u, 0, h0, 0, 5);
++  add_ssaaaa(carry, u, 0, carry, 0, h1);
++  u = (carry + h2) >> 2; /* u == 0 or 1 */
++
++  /* minus 2^130-5 ... (+5) */
++  u = (-u) & 5;
++  add_ssaaaa(h1, h0, h1, h0, 0, u);
++
++  /* add high part of key + h */
++  add_ssaaaa(h1, h0, h1, h0, k1, k0);
++  buf_put_le64(mac + 0, h0);
++  buf_put_le64(mac + 8, h1);
++
++  /* burn_stack */
++  return 4 * sizeof (void *) + 7 * sizeof (u64) + burn;
++}
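The final reduction above decides "is h already below 2^130-5?" without branching, by adding 5 and inspecting the bit above position 130. The same idea in the k = 26 toy setting (again an editorial illustration, not patch code):

#include <stdint.h>
#include <stdio.h>

#define K 26
#define P ((1ULL << K) - 5)

/* Branchless final reduction for h < 2^K: adding 5 makes bit K act as
 * the predicate "h >= 2^K - 5", and (0 - u) & 5 adds 5 only in that
 * case; keeping the low K bits then completes the subtraction of P. */
static uint64_t
final_reduce (uint64_t h)
{
  uint64_t u = ((h + 5) >> K) & 1;
  return (h + ((0 - u) & 5)) & ((1ULL << K) - 1);
}

int
main (void)
{
  uint64_t h;

  for (h = 0; h < (1ULL << K); h++)
    if (final_reduce (h) != h % P)
      {
        printf ("FAIL at %llu\n", (unsigned long long)h);
        return 1;
      }
  printf ("ok\n");
  return 0;
}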
++
++#endif /* USE_MPI_64BIT */
++
++#ifdef USE_MPI_32BIT
++
++#ifdef HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS
++
++/* HI:LO += A * B (arm) */
++#define UMUL_ADD_32(HI, LO, A, B) \
++      __asm__ ("umlal %1, %0, %4, %5" \
++	       : "=r" (HI), "=r" (LO) \
++	       : "0" (HI), "1" (LO), "r" (A), "r" (B) )
++
++/* A += B (arm) */
++#define ADD_1305_32(A4, A3, A2, A1, A0, B4, B3, B2, B1, B0) \
++      __asm__ ("adds %0, %0, %5\n" \
++	       "adcs %1, %1, %6\n" \
++	       "adcs %2, %2, %7\n" \
++	       "adcs %3, %3, %8\n" \
++	       "adc %4, %4, %9\n" \
++	       : "+r" (A0), "+r" (A1), "+r" (A2), "+r" (A3), "+r" (A4) \
++	       : "r" (B0), "r" (B1), "r" (B2), "r" (B3), "r" (B4) \
++	       : "cc" )
++
++#endif /* HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS */
++
++#if defined (__i386__) && defined(HAVE_CPU_ARCH_X86) && __GNUC__ >= 5
++/* Note: ADD_1305_32 below does not compile on GCC-4.7 */
++
++/* A += B (i386) */
++#define ADD_1305_32(A4, A3, A2, A1, A0, B4, B3, B2, B1, B0) \
++      __asm__ ("addl %5, %0\n" \
++	       "adcl %6, %1\n" \
++	       "adcl %7, %2\n" \
++	       "adcl %8, %3\n" \
++	       "adcl %9, %4\n" \
++	       : "+r" (A0), "+r" (A1), "+r" (A2), "+r" (A3), "+r" (A4) \
++	       : "g" (B0), "g" (B1), "g" (B2), "g" (B3), "g" (B4) \
++	       : "cc" )
++
++#endif /* __i386__ */
++
++#ifndef UMUL_ADD_32
++/* HI:LO += A * B (generic, mpi) */
++#  define UMUL_ADD_32(HI, LO, A, B) do { \
++    u32 t_lo, t_hi; \
++    umul_ppmm(t_hi, t_lo, A, B); \
++    add_ssaaaa(HI, LO, HI, LO, t_hi, t_lo); \
++  } while (0)
++#endif
++
++#ifndef ADD_1305_32
++/* A += B (generic, mpi) */
++#  define ADD_1305_32(A4, A3, A2, A1, A0, B4, B3, B2, B1, B0) do { \
++    u32 carry0, carry1, carry2; \
++    add_ssaaaa(carry0, A0, 0, A0, 0, B0); \
++    add_ssaaaa(carry1, A1, 0, A1, 0, B1); \
++    add_ssaaaa(carry1, A1, carry1, A1, 0, carry0); \
++    add_ssaaaa(carry2, A2, 0, A2, 0, B2); \
++    add_ssaaaa(carry2, A2, carry2, A2, 0, carry1); \
++    add_ssaaaa(A4, A3, A4, A3, B4, B3); \
++    add_ssaaaa(A4, A3, A4, A3, 0, carry2); \
++  } while (0)
++#endif
++
++/* H = H * R mod 2¹³⁰-5 */
++#define MUL_MOD_1305_32(H4, H3, H2, H1, H0, R3, R2, R1, R0, \
++                        R3_MULT5, R2_MULT5, R1_MULT5) do { \
++    u32 x0_lo, x0_hi, x1_lo, x1_hi, x2_lo, x2_hi, x3_lo, x3_hi; \
++    u32 t0_lo, t0_hi; \
++    \
++    /* x = a * r (partial mod 2^130-5) */ \
++    umul_ppmm(x0_hi, x0_lo, H0, R0);  /* h0 * r0 */ \
++    umul_ppmm(x1_hi, x1_lo, H0, R1);  /* h0 * r1 */ \
++    umul_ppmm(x2_hi, x2_lo, H0, R2);  /* h0 * r2 */ \
++    umul_ppmm(x3_hi, x3_lo, H0, R3);  /* h0 * r3 */ \
++    \
++    UMUL_ADD_32(x0_hi, x0_lo, H1, R3_MULT5); /* h1 * r3 mod 2^130-5 */ \
++    UMUL_ADD_32(x1_hi, x1_lo, H1, R0);       /* h1 * r0 */ \
++    UMUL_ADD_32(x2_hi, x2_lo, H1, R1);       /* h1 * r1 */ \
++    UMUL_ADD_32(x3_hi, x3_lo, H1, R2);       /* h1 * r2 */ \
++    \
++    UMUL_ADD_32(x0_hi, x0_lo, H2, R2_MULT5); /* h2 * r2 mod 2^130-5 */ \
++    UMUL_ADD_32(x1_hi, x1_lo, H2, R3_MULT5); /* h2 * r3 mod 2^130-5 */ \
++    UMUL_ADD_32(x2_hi, x2_lo, H2, R0);       /* h2 * r0 */ \
++    UMUL_ADD_32(x3_hi, x3_lo, H2, R1);       /* h2 * r1 */ \
++    \
++    UMUL_ADD_32(x0_hi, x0_lo, H3, R1_MULT5); /* h3 * r1 mod 2^130-5 */ \
++    H1 = x0_hi; \
++    UMUL_ADD_32(x1_hi, x1_lo, H3, R2_MULT5); /* h3 * r2 mod 2^130-5 */ \
++    UMUL_ADD_32(x2_hi, x2_lo, H3, R3_MULT5); /* h3 * r3 mod 2^130-5 */ \
++    UMUL_ADD_32(x3_hi, x3_lo, H3, R0);       /* h3 * r0 */ \
++    \
++    t0_lo = H4 * R1_MULT5; /* h4 * r1 mod 2^130-5 */ \
++    t0_hi = H4 * R2_MULT5; /* h4 * r2 mod 2^130-5 */ \
++    add_ssaaaa(H2, x1_lo, x1_hi, x1_lo, 0, t0_lo); \
++    add_ssaaaa(H3, x2_lo, x2_hi, x2_lo, 0, t0_hi); \
++    t0_lo = H4 * R3_MULT5; /* h4 * r3 mod 2^130-5 */ \
++    t0_hi = H4 * R0;       /* h4 * r0 */ \
++    add_ssaaaa(H4, x3_lo, x3_hi, x3_lo, t0_hi, t0_lo); \
++    \
++    /* carry propagation */ \
++    H0 = (H4 >> 2) * 5; /* msb mod 2^130-5 */ \
++    H4 = H4 & 3; \
++    ADD_1305_32(H4, H3, H2, H1, H0, 0, x3_lo, x2_lo, x1_lo, x0_lo); \
++  } while (0)
++
++#ifndef HAVE_ASM_POLY1305_BLOCKS
++
++static unsigned int
++poly1305_blocks (poly1305_context_t *ctx, const byte *buf, size_t len,
++		 byte high_pad)
++{
++  POLY1305_STATE *st = &ctx->state;
++  u32 r1_mult5, r2_mult5, r3_mult5;
++  u32 h0, h1, h2, h3, h4;
++  u32 m0, m1, m2, m3, m4;
++
++  m4 = high_pad;
++
++  h0 = st->h[0];
++  h1 = st->h[1];
++  h2 = st->h[2];
++  h3 = st->h[3];
++  h4 = st->h[4];
++
++  r1_mult5 = (st->r[1] >> 2) + st->r[1];
++  r2_mult5 = (st->r[2] >> 2) + st->r[2];
++  r3_mult5 = (st->r[3] >> 2) + st->r[3];
++
++  while (len >= POLY1305_BLOCKSIZE)
++    {
++      m0 = buf_get_le32(buf + 0);
++      m1 = buf_get_le32(buf + 4);
++      m2 = buf_get_le32(buf + 8);
++      m3 = buf_get_le32(buf + 12);
++
++      /* a = h + m */
++      ADD_1305_32(h4, h3, h2, h1, h0, m4, m3, m2, m1, m0);
++
++      /* h = a * r (partial mod 2^130-5) */
++      MUL_MOD_1305_32(h4, h3, h2, h1, h0,
++		      st->r[3], st->r[2], st->r[1], st->r[0],
++		      r3_mult5, r2_mult5, r1_mult5);
++
++      buf += POLY1305_BLOCKSIZE;
++      len -= POLY1305_BLOCKSIZE;
++    }
++
++  st->h[0] = h0;
++  st->h[1] = h1;
++  st->h[2] = h2;
++  st->h[3] = h3;
++  st->h[4] = h4;
++
++  return 6 * sizeof (void *) + 28 * sizeof (u32);
++}
++
++#endif /* !HAVE_ASM_POLY1305_BLOCKS */
++
++static unsigned int poly1305_final (poly1305_context_t *ctx,
++				    byte mac[POLY1305_TAGLEN])
++{
++  POLY1305_STATE *st = &ctx->state;
++  unsigned int burn = 0;
++  u32 carry, tmp0, tmp1, tmp2, u;
++  u32 h4, h3, h2, h1, h0;
++
++  /* process the remaining block */
++  if (ctx->leftover)
++    {
++      ctx->buffer[ctx->leftover++] = 1;
++      if (ctx->leftover < POLY1305_BLOCKSIZE)
++	{
++	  memset (&ctx->buffer[ctx->leftover], 0,
++		  POLY1305_BLOCKSIZE - ctx->leftover);
++	  ctx->leftover = POLY1305_BLOCKSIZE;
++	}
++      burn = poly1305_blocks (ctx, ctx->buffer, POLY1305_BLOCKSIZE, 0);
++    }
++
++  h0 = st->h[0];
++  h1 = st->h[1];
++  h2 = st->h[2];
++  h3 = st->h[3];
++  h4 = st->h[4];
++
++  /* check if h is more than 2^130-5, by adding 5. */
++  add_ssaaaa(carry, tmp0, 0, h0, 0, 5);
++  add_ssaaaa(carry, tmp0, 0, carry, 0, h1);
++  add_ssaaaa(carry, tmp0, 0, carry, 0, h2);
++  add_ssaaaa(carry, tmp0, 0, carry, 0, h3);
++  u = (carry + h4) >> 2; /* u == 0 or 1 */
++
++  /* minus 2^130-5 ... (+5) */
++  u = (-u) & 5;
++  add_ssaaaa(carry, h0, 0, h0, 0, u);
++  add_ssaaaa(carry, h1, 0, h1, 0, carry);
++  add_ssaaaa(carry, h2, 0, h2, 0, carry);
++  add_ssaaaa(carry, h3, 0, h3, 0, carry);
++
++  /* add high part of key + h */
++  add_ssaaaa(tmp0, h0, 0, h0, 0, st->k[0]);
++  add_ssaaaa(tmp1, h1, 0, h1, 0, st->k[1]);
++  add_ssaaaa(tmp1, h1, tmp1, h1, 0, tmp0);
++  add_ssaaaa(tmp2, h2, 0, h2, 0, st->k[2]);
++  add_ssaaaa(tmp2, h2, tmp2, h2, 0, tmp1);
++  add_ssaaaa(carry, h3, 0, h3, 0, st->k[3]);
++  h3 += tmp2;
++
++  buf_put_le32(mac + 0, h0);
++  buf_put_le32(mac + 4, h1);
++  buf_put_le32(mac + 8, h2);
++  buf_put_le32(mac + 12, h3);
++
++  /* burn_stack */
++  return 4 * sizeof (void *) + 10 * sizeof (u32) + burn;
++}
++
++#endif /* USE_MPI_32BIT */
++
++
++unsigned int
++_gcry_poly1305_update_burn (poly1305_context_t *ctx, const byte *m,
++			    size_t bytes)
++{
++  unsigned int burn = 0;
++
++  /* handle leftover */
++  if (ctx->leftover)
++    {
++      size_t want = (POLY1305_BLOCKSIZE - ctx->leftover);
++      if (want > bytes)
++	want = bytes;
++      buf_cpy (ctx->buffer + ctx->leftover, m, want);
++      bytes -= want;
++      m += want;
++      ctx->leftover += want;
++      if (ctx->leftover < POLY1305_BLOCKSIZE)
++	return 0;
++      burn = poly1305_blocks (ctx, ctx->buffer, POLY1305_BLOCKSIZE, 1);
++      ctx->leftover = 0;
++    }
++
++  /* process full blocks */
++  if (bytes >= POLY1305_BLOCKSIZE)
++    {
++      size_t nblks = bytes / POLY1305_BLOCKSIZE;
++      burn = poly1305_blocks (ctx, m, nblks * POLY1305_BLOCKSIZE, 1);
++      m += nblks * POLY1305_BLOCKSIZE;
++      bytes -= nblks * POLY1305_BLOCKSIZE;
++    }
++
++  /* store leftover */
++  if (bytes)
++    {
++      buf_cpy (ctx->buffer + ctx->leftover, m, bytes);
++      ctx->leftover += bytes;
++    }
++
++  return burn;
++}
++
++
++void
++_gcry_poly1305_update (poly1305_context_t *ctx, const byte *m, size_t bytes)
++{
++  unsigned int burn;
++
++  burn = _gcry_poly1305_update_burn (ctx, m, bytes);
++
++  if (burn)
++    _gcry_burn_stack (burn);
++}
++
++
++void
++_gcry_poly1305_finish (poly1305_context_t *ctx, byte mac[POLY1305_TAGLEN])
++{
++  unsigned int burn;
++
++  burn = poly1305_final (ctx, mac);
++
++  _gcry_burn_stack (burn);
++}
++
++
++gcry_err_code_t
++_gcry_poly1305_init (poly1305_context_t * ctx, const byte * key,
++		     size_t keylen)
++{
++  static int initialized;
++  static const char *selftest_failed;
++
++  if (!initialized)
++    {
++      initialized = 1;
++      selftest_failed = selftest ();
++      if (selftest_failed)
++	log_error ("Poly1305 selftest failed (%s)\n", selftest_failed);
++    }
++
++  if (keylen != POLY1305_KEYLEN)
++    return GPG_ERR_INV_KEYLEN;
++
++  if (selftest_failed)
++    return GPG_ERR_SELFTEST_FAILED;
++
++  poly1305_init (ctx, key);
++
++  return 0;
++}
++
++
++static void
++poly1305_auth (byte mac[POLY1305_TAGLEN], const byte * m, size_t bytes,
++	       const byte * key)
++{
++  poly1305_context_t ctx;
++
++  memset (&ctx, 0, sizeof (ctx));
++
++  _gcry_poly1305_init (&ctx, key, POLY1305_KEYLEN);
++  _gcry_poly1305_update (&ctx, m, bytes);
++  _gcry_poly1305_finish (&ctx, mac);
++
++  wipememory (&ctx, sizeof (ctx));
++}
++
++
++static const char *
++selftest (void)
++{
++  /* example from nacl */
++  static const byte nacl_key[POLY1305_KEYLEN] = {
++    0xee, 0xa6, 0xa7, 0x25, 0x1c, 0x1e, 0x72, 0x91,
++    0x6d, 0x11, 0xc2, 0xcb, 0x21, 0x4d, 0x3c, 0x25,
++    0x25, 0x39, 0x12, 0x1d, 0x8e, 0x23, 0x4e, 0x65,
++    0x2d, 0x65, 0x1f, 0xa4, 0xc8, 0xcf, 0xf8, 0x80,
++  };
++
++  static const byte nacl_msg[131] = {
++    0x8e, 0x99, 0x3b, 0x9f, 0x48, 0x68, 0x12, 0x73,
++    0xc2, 0x96, 0x50, 0xba, 0x32, 0xfc, 0x76, 0xce,
++    0x48, 0x33, 0x2e, 0xa7, 0x16, 0x4d, 0x96, 0xa4,
++    0x47, 0x6f, 0xb8, 0xc5, 0x31, 0xa1, 0x18, 0x6a,
++    0xc0, 0xdf, 0xc1, 0x7c, 0x98, 0xdc, 0xe8, 0x7b,
++    0x4d, 0xa7, 0xf0, 0x11, 0xec, 0x48, 0xc9, 0x72,
++    0x71, 0xd2, 0xc2, 0x0f, 0x9b, 0x92, 0x8f, 0xe2,
++    0x27, 0x0d, 0x6f, 0xb8, 0x63, 0xd5, 0x17, 0x38,
++    0xb4, 0x8e, 0xee, 0xe3, 0x14, 0xa7, 0xcc, 0x8a,
++    0xb9, 0x32, 0x16, 0x45, 0x48, 0xe5, 0x26, 0xae,
++    0x90, 0x22, 0x43, 0x68, 0x51, 0x7a, 0xcf, 0xea,
++    0xbd, 0x6b, 0xb3, 0x73, 0x2b, 0xc0, 0xe9, 0xda,
++    0x99, 0x83, 0x2b, 0x61, 0xca, 0x01, 0xb6, 0xde,
++    0x56, 0x24, 0x4a, 0x9e, 0x88, 0xd5, 0xf9, 0xb3,
++    0x79, 0x73, 0xf6, 0x22, 0xa4, 0x3d, 0x14, 0xa6,
++    0x59, 0x9b, 0x1f, 0x65, 0x4c, 0xb4, 0x5a, 0x74,
++    0xe3, 0x55, 0xa5
++  };
++
++  static const byte nacl_mac[16] = {
++    0xf3, 0xff, 0xc7, 0x70, 0x3f, 0x94, 0x00, 0xe5,
++    0x2a, 0x7d, 0xfb, 0x4b, 0x3d, 0x33, 0x05, 0xd9
++  };
++
++  /* generates a final value of (2^130 - 2), which reduces to 3 mod 2^130-5 */
++  static const byte wrap_key[POLY1305_KEYLEN] = {
++    0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++  };
++
++  static const byte wrap_msg[16] = {
++    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++  };
++
++  static const byte wrap_mac[16] = {
++    0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++  };
++
++  /* MAC of the MACs of messages of length 0 to 255, where every key and
++   * message byte is set to the message length
++   */
++  static const byte total_key[POLY1305_KEYLEN] = {
++    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++    0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9,
++    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++  };
++
++  static const byte total_mac[16] = {
++    0x64, 0xaf, 0xe2, 0xe8, 0xd6, 0xad, 0x7b, 0xbd,
++    0xd2, 0x87, 0xf9, 0x7c, 0x44, 0x62, 0x3d, 0x39
++  };
++
++  poly1305_context_t ctx;
++  poly1305_context_t total_ctx;
++  byte all_key[POLY1305_KEYLEN];
++  byte all_msg[256];
++  byte mac[16];
++  size_t i, j;
++
++  memset (&ctx, 0, sizeof (ctx));
++  memset (&total_ctx, 0, sizeof (total_ctx));
++
++  memset (mac, 0, sizeof (mac));
++  poly1305_auth (mac, nacl_msg, sizeof (nacl_msg), nacl_key);
++  if (memcmp (nacl_mac, mac, sizeof (nacl_mac)) != 0)
++    return "Poly1305 test 1 failed.";
++
++  /* The SSE2/AVX implementations use a 32-byte block size but also support
++   * 64-byte blocks, so make sure everything still works when varying between
++   * them */
++  memset (mac, 0, sizeof (mac));
++  _gcry_poly1305_init (&ctx, nacl_key, POLY1305_KEYLEN);
++  _gcry_poly1305_update (&ctx, nacl_msg + 0, 32);
++  _gcry_poly1305_update (&ctx, nacl_msg + 32, 64);
++  _gcry_poly1305_update (&ctx, nacl_msg + 96, 16);
++  _gcry_poly1305_update (&ctx, nacl_msg + 112, 8);
++  _gcry_poly1305_update (&ctx, nacl_msg + 120, 4);
++  _gcry_poly1305_update (&ctx, nacl_msg + 124, 2);
++  _gcry_poly1305_update (&ctx, nacl_msg + 126, 1);
++  _gcry_poly1305_update (&ctx, nacl_msg + 127, 1);
++  _gcry_poly1305_update (&ctx, nacl_msg + 128, 1);
++  _gcry_poly1305_update (&ctx, nacl_msg + 129, 1);
++  _gcry_poly1305_update (&ctx, nacl_msg + 130, 1);
++  _gcry_poly1305_finish (&ctx, mac);
++  if (memcmp (nacl_mac, mac, sizeof (nacl_mac)) != 0)
++    return "Poly1305 test 2 failed.";
++
++  memset (mac, 0, sizeof (mac));
++  poly1305_auth (mac, wrap_msg, sizeof (wrap_msg), wrap_key);
++  if (memcmp (wrap_mac, mac, sizeof (nacl_mac)) != 0)
++    return "Poly1305 test 3 failed.";
++
++  _gcry_poly1305_init (&total_ctx, total_key, POLY1305_KEYLEN);
++  for (i = 0; i < 256; i++)
++    {
++      /* set key and message to 'i,i,i..' */
++      for (j = 0; j < sizeof (all_key); j++)
++	all_key[j] = i;
++      for (j = 0; j < i; j++)
++	all_msg[j] = i;
++      poly1305_auth (mac, all_msg, i, all_key);
++      _gcry_poly1305_update (&total_ctx, mac, 16);
++    }
++  _gcry_poly1305_finish (&total_ctx, mac);
++  if (memcmp (total_mac, mac, sizeof (total_mac)) != 0)
++    return "Poly1305 test 4 failed.";
++
++  return NULL;
++}
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+diff --git a/cipher/poly1305.c b/cipher/poly1305.c
+index 22255fb1..b45a9dc8 100644
+--- a/cipher/poly1305.c
++++ b/cipher/poly1305.c
+@@ -23,6 +23,11 @@
+  */
+ 
+ #include <config.h>
++
++#if !defined(ENABLE_PPC_CRYPTO_SUPPORT) || \
++    !defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) || \
++    !defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
++
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -641,3 +646,5 @@ selftest (void)
+ 
+   return NULL;
+ }
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+diff --git a/configure.ac b/configure.ac
+index 9bcb1318..397c2f19 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -2504,6 +2504,18 @@ if test "$found" = "1" ; then
+          GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-ssse3-amd64.lo"
+          GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-avx2-amd64.lo"
+       ;;
++      powerpc64le-*-*)
++         # Build with the ppc8 vector implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-ppc.lo chacha20-new.lo"
++      ;;
++      powerpc64-*-*)
++         # Build with the ppc8 vector implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-ppc.lo chacha20-new.lo"
++      ;;
++      powerpc-*-*)
++         # Build with the ppc8 vector implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-ppc.lo chacha20-new.lo"
++      ;;
+    esac
+ 
+    if test x"$neonsupport" = xyes ; then
+@@ -2518,6 +2530,18 @@ case "${host}" in
+       GCRYPT_CIPHERS="$GCRYPT_CIPHERS poly1305-sse2-amd64.lo"
+       GCRYPT_CIPHERS="$GCRYPT_CIPHERS poly1305-avx2-amd64.lo"
+    ;;
++   powerpc64le-*-*)
++      # Build with the ppc8 vector implementation
++      GCRYPT_CIPHERS="$GCRYPT_CIPHERS poly1305-new.lo"
++   ;;
++   powerpc64-*-*)
++      # Build with the ppc8 vector implementation
++      GCRYPT_CIPHERS="$GCRYPT_CIPHERS poly1305-new.lo"
++   ;;
++   powerpc-*-*)
++      # Build with the ppc8 vector implementation
++      GCRYPT_CIPHERS="$GCRYPT_CIPHERS poly1305-new.lo"
++   ;;
+ esac
+ 
+ if test x"$neonsupport" = xyes ; then
+diff --git a/mpi/longlong.h b/mpi/longlong.h
+index d6958f3b..c0f24c85 100644
+--- a/mpi/longlong.h
++++ b/mpi/longlong.h
+@@ -1088,7 +1088,6 @@ typedef unsigned int UTItype __attribute__ ((mode (TI)));
+ /* Powerpc 64 bit support taken from gmp-4.1.2. */
+ /* We should test _IBMR2 here when we add assembly support for the system
+    vendor compilers.  */
+-#if 0 /* Not yet enabled because we don't have hardware for a test. */
+ #if (defined (_ARCH_PPC) || defined (__powerpc__)) && W_TYPE_SIZE == 64
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+   do {									\
+@@ -1141,7 +1140,6 @@ typedef unsigned int UTItype __attribute__ ((mode (TI)));
+ #define SMUL_TIME 14  /* ??? */
+ #define UDIV_TIME 120 /* ??? */
+ #endif /* 64-bit PowerPC.  */
+-#endif /* if 0 */
+ 
+ /***************************************
+  **************  PYR  ******************
+
+diff --git a/cipher/poly1305-internal-new.h b/cipher/poly1305-internal-new.h
+new file mode 100644
+index 00000000..c0f24c85 100644
+--- /dev/null
++++ b/cipher/poly1305-internal-new.h
+@@ -0,0 +1,64 @@
++/* poly1305-internal.h  -  Poly1305 internals
++ * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef G10_POLY1305_INTERNAL_H
++#define G10_POLY1305_INTERNAL_H
++
++#include <config.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include "types.h"
++#include "g10lib.h"
++#include "cipher.h"
++#include "bufhelp.h"
++
++#define POLY1305_TAGLEN 16
++#define POLY1305_KEYLEN 32
++#define POLY1305_BLOCKSIZE 16
++
++
++typedef struct
++{
++  u32 k[4];
++  u32 r[4];
++  u32 h[5];
++} POLY1305_STATE;
++
++typedef struct poly1305_context_s
++{
++  POLY1305_STATE state;
++  byte buffer[POLY1305_BLOCKSIZE];
++  unsigned int leftover;
++} poly1305_context_t;
++
++
++gcry_err_code_t _gcry_poly1305_init (poly1305_context_t *ctx, const byte *key,
++				     size_t keylen);
++
++void _gcry_poly1305_finish (poly1305_context_t *ctx,
++			     byte mac[POLY1305_TAGLEN]);
++
++void _gcry_poly1305_update (poly1305_context_t *ctx, const byte *buf,
++			     size_t buflen);
++
++unsigned int _gcry_poly1305_update_burn (poly1305_context_t *ctx,
++					 const byte *m, size_t bytes);
++
++#endif /* G10_POLY1305_INTERNAL_H */
+
+-- 
+2.27.0
+
diff --git a/SOURCES/libgcrypt-1.8.5-ppc-crc32.patch b/SOURCES/libgcrypt-1.8.5-ppc-crc32.patch
new file mode 100644
index 0000000..16baed6
--- /dev/null
+++ b/SOURCES/libgcrypt-1.8.5-ppc-crc32.patch
@@ -0,0 +1,794 @@
+diff --git a/cipher/Makefile.am b/cipher/Makefile.am
+index cb41c251..1728e9f9 100644
+--- a/cipher/Makefile.am
++++ b/cipher/Makefile.am
+@@ -67,7 +67,7 @@ cast5.c cast5-amd64.S cast5-arm.S \
+ chacha20.c chacha20-sse2-amd64.S chacha20-ssse3-amd64.S chacha20-avx2-amd64.S \
+   chacha20-armv7-neon.S \
+ crc.c \
+-  crc-intel-pclmul.c \
++  crc-intel-pclmul.c crc-ppc.c \
+ des.c des-amd64.S \
+ dsa.c \
+ elgamal.c \
+@@ -159,3 +159,9 @@ sha512-ppc.o: $(srcdir)/sha512-ppc.c Makefile
+ 
+ sha512-ppc.lo: $(srcdir)/sha512-ppc.c Makefile
+ 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++crc-ppc.o: $(srcdir)/crc-ppc.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++crc-ppc.lo: $(srcdir)/crc-ppc.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
+diff --git a/cipher/crc-ppc.c b/cipher/crc-ppc.c
+new file mode 100644
+index 00000000..4d7f0add
+--- /dev/null
++++ b/cipher/crc-ppc.c
+@@ -0,0 +1,619 @@
++/* crc-ppc.c - POWER8 vpmsum accelerated CRC implementation
++ * Copyright (C) 2019-2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
++ *
++ */
++
++#include <config.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++
++#include "g10lib.h"
++
++#include "bithelp.h"
++#include "bufhelp.h"
++
++
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    __GNUC__ >= 4
++
++#include <altivec.h>
++#include "bufhelp.h"
++
++
++#define ALWAYS_INLINE inline __attribute__((always_inline))
++#define NO_INLINE __attribute__((noinline))
++#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))
++
++#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
++#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
++#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE
++
++#define ALIGNED_64 __attribute__ ((aligned (64)))
++
++
++typedef vector unsigned char vector16x_u8;
++typedef vector unsigned int vector4x_u32;
++typedef vector unsigned long long vector2x_u64;
++
++
++/* Constants structure for generic reflected/non-reflected CRC32 PMULL
++ * functions. */
++struct crc32_consts_s
++{
++  /* k: { x^(32*17), x^(32*15), x^(32*5), x^(32*3), x^(32*2), 0 } mod P(x) */
++  unsigned long long k[6];
++  /* my_p: { floor(x^64 / P(x)), P(x) } */
++  unsigned long long my_p[2];
++};
++
++/* PMULL constants for CRC32 and CRC32RFC1510. */
++static const struct crc32_consts_s crc32_consts ALIGNED_64 =
++{
++  { /* k[6] = reverse_33bits( x^(32*y) mod P(x) ) */
++    U64_C(0x154442bd4), U64_C(0x1c6e41596), /* y = { 17, 15 } */
++    U64_C(0x1751997d0), U64_C(0x0ccaa009e), /* y = { 5, 3 } */
++    U64_C(0x163cd6124), 0                   /* y = 2 */
++  },
++  { /* my_p[2] = reverse_33bits ( { floor(x^64 / P(x)), P(x) } ) */
++    U64_C(0x1f7011641), U64_C(0x1db710641)
++  }
++};
++
++/* PMULL constants for CRC24RFC2440 (polynomial multiplied with x⁸). */
++static const struct crc32_consts_s crc24rfc2440_consts ALIGNED_64 =
++{
++  { /* k[6] = x^(32*y) mod P(x) << 32 */
++    U64_C(0x08289a00) << 32, U64_C(0x74b44a00) << 32, /* y = { 17, 15 } */
++    U64_C(0xc4b14d00) << 32, U64_C(0xfd7e0c00) << 32, /* y = { 5, 3 } */
++    U64_C(0xd9fe8c00) << 32, 0                        /* y = 2 */
++  },
++  { /* my_p[2] = { floor(x^64 / P(x)), P(x) } */
++    U64_C(0x1f845fe24), U64_C(0x1864cfb00)
++  }
++};
++
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++asm_vpmsumd(vector2x_u64 a, vector2x_u64 b)
++{
++  __asm__("vpmsumd %0, %1, %2"
++	  : "=v" (a)
++	  : "v" (a), "v" (b));
++  return a;
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++asm_swap_u64(vector2x_u64 a)
++{
++  __asm__("xxswapd %x0, %x1"
++	  : "=wa" (a)
++	  : "wa" (a));
++  return a;
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_sld_u32(vector4x_u32 a, vector4x_u32 b, unsigned int idx)
++{
++  return vec_sld (a, b, (4 * idx) & 15);
++}
++
++
++static const byte crc32_partial_fold_input_mask[16 + 16] ALIGNED_64 =
++  {
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++  };
++static const byte crc32_shuf_shift[3 * 16] ALIGNED_64 =
++  {
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++    0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
++    0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++  };
++static const byte crc32_refl_shuf_shift[3 * 16] ALIGNED_64 =
++  {
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++    0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
++  };
++static const vector16x_u8 bswap_const ALIGNED_64 =
++  { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
++
++
++#define CRC_VEC_SWAP(v) ({ vector2x_u64 __vecu64 = (v); \
++                           vec_perm(__vecu64, __vecu64, bswap_const); })
++
++#ifdef WORDS_BIGENDIAN
++# define CRC_VEC_U64_DEF(lo, hi) { (hi), (lo) }
++# define CRC_VEC_U64_LOAD(offs, ptr) \
++          asm_swap_u64(vec_vsx_ld((offs), (const unsigned long long *)(ptr)))
++# define CRC_VEC_U64_LOAD_LE(offs, ptr) \
++	  CRC_VEC_SWAP(vec_vsx_ld((offs), (const unsigned long long *)(ptr)))
++# define CRC_VEC_U64_LOAD_BE(offs, ptr) \
++         vec_vsx_ld((offs), (const unsigned long long *)(ptr))
++# define CRC_VEC_SWAP_TO_LE(v) CRC_VEC_SWAP(v)
++# define CRC_VEC_SWAP_TO_BE(v) (v)
++# define VEC_U64_LO 1
++# define VEC_U64_HI 0
++#else
++# define CRC_VEC_U64_DEF(lo, hi) { (lo), (hi) }
++# define CRC_VEC_U64_LOAD(offs, ptr) \
++	  vec_vsx_ld((offs), (const unsigned long long *)(ptr))
++# define CRC_VEC_U64_LOAD_LE(offs, ptr) CRC_VEC_U64_LOAD((offs), (ptr))
++# define CRC_VEC_U64_LOAD_BE(offs, ptr) asm_vec_u64_load_be(offs, ptr)
++# define CRC_VEC_SWAP_TO_LE(v) (v)
++# define CRC_VEC_SWAP_TO_BE(v) CRC_VEC_SWAP(v)
++# define VEC_U64_LO 0
++# define VEC_U64_HI 1
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++asm_vec_u64_load_be(unsigned int offset, const void *ptr)
++{
++  static const vector16x_u8 vec_load_le_const =
++    { ~7, ~6, ~5, ~4, ~3, ~2, ~1, ~0, ~15, ~14, ~13, ~12, ~11, ~10, ~9, ~8 };
++  vector2x_u64 vecu64;
++
++#if __GNUC__ >= 4
++  if (__builtin_constant_p (offset) && offset == 0)
++    __asm__ ("lxvd2x %%vs32,0,%1\n\t"
++	     "vperm %0,%%v0,%%v0,%2\n\t"
++	     : "=v" (vecu64)
++	     : "r" ((uintptr_t)(ptr)), "v" (vec_load_le_const)
++	     : "memory", "v0");
++#endif
++  else
++    __asm__ ("lxvd2x %%vs32,%1,%2\n\t"
++	     "vperm %0,%%v0,%%v0,%3\n\t"
++	     : "=v" (vecu64)
++	     : "r" (offset), "r" ((uintptr_t)(ptr)),
++	       "v" (vec_load_le_const)
++	     : "memory", "r0", "v0");
++
++  return vecu64;
++}
++#endif
++
++
++static ASM_FUNC_ATTR_INLINE void
++crc32r_ppc8_ce_bulk (u32 *pcrc, const byte *inbuf, size_t inlen,
++		     const struct crc32_consts_s *consts)
++{
++  vector4x_u32 zero = { 0, 0, 0, 0 };
++  vector2x_u64 low_64bit_mask = CRC_VEC_U64_DEF((u64)-1, 0);
++  vector2x_u64 low_32bit_mask = CRC_VEC_U64_DEF((u32)-1, 0);
++  vector2x_u64 my_p = CRC_VEC_U64_LOAD(0, &consts->my_p[0]);
++  vector2x_u64 k1k2 = CRC_VEC_U64_LOAD(0, &consts->k[1 - 1]);
++  vector2x_u64 k3k4 = CRC_VEC_U64_LOAD(0, &consts->k[3 - 1]);
++  vector2x_u64 k4lo = CRC_VEC_U64_DEF(k3k4[VEC_U64_HI], 0);
++  vector2x_u64 k5lo = CRC_VEC_U64_LOAD(0, &consts->k[5 - 1]);
++  vector2x_u64 crc = CRC_VEC_U64_DEF(*pcrc, 0);
++  vector2x_u64 crc0, crc1, crc2, crc3;
++  vector2x_u64 v0;
++
++  if (inlen >= 8 * 16)
++    {
++      crc0 = CRC_VEC_U64_LOAD_LE(0 * 16, inbuf);
++      crc0 ^= crc;
++      crc1 = CRC_VEC_U64_LOAD_LE(1 * 16, inbuf);
++      crc2 = CRC_VEC_U64_LOAD_LE(2 * 16, inbuf);
++      crc3 = CRC_VEC_U64_LOAD_LE(3 * 16, inbuf);
++
++      inbuf += 4 * 16;
++      inlen -= 4 * 16;
++
++      /* Fold by 4. */
++      while (inlen >= 4 * 16)
++	{
++	  v0 = CRC_VEC_U64_LOAD_LE(0 * 16, inbuf);
++	  crc0 = asm_vpmsumd(crc0, k1k2) ^ v0;
++
++	  v0 = CRC_VEC_U64_LOAD_LE(1 * 16, inbuf);
++	  crc1 = asm_vpmsumd(crc1, k1k2) ^ v0;
++
++	  v0 = CRC_VEC_U64_LOAD_LE(2 * 16, inbuf);
++	  crc2 = asm_vpmsumd(crc2, k1k2) ^ v0;
++
++	  v0 = CRC_VEC_U64_LOAD_LE(3 * 16, inbuf);
++	  crc3 = asm_vpmsumd(crc3, k1k2) ^ v0;
++
++	  inbuf += 4 * 16;
++	  inlen -= 4 * 16;
++	}
++
++      /* Fold 4 to 1. */
++      crc1 ^= asm_vpmsumd(crc0, k3k4);
++      crc2 ^= asm_vpmsumd(crc1, k3k4);
++      crc3 ^= asm_vpmsumd(crc2, k3k4);
++      crc = crc3;
++    }
++  else
++    {
++      v0 = CRC_VEC_U64_LOAD_LE(0, inbuf);
++      crc ^= v0;
++
++      inbuf += 16;
++      inlen -= 16;
++    }
++
++  /* Fold by 1. */
++  while (inlen >= 16)
++    {
++      v0 = CRC_VEC_U64_LOAD_LE(0, inbuf);
++      crc = asm_vpmsumd(k3k4, crc);
++      crc ^= v0;
++
++      inbuf += 16;
++      inlen -= 16;
++    }
++
++  /* Partial fold. */
++  if (inlen)
++    {
++      /* Load last input and add padding zeros. */
++      vector2x_u64 mask = CRC_VEC_U64_LOAD_LE(inlen, crc32_partial_fold_input_mask);
++      vector2x_u64 shl_shuf = CRC_VEC_U64_LOAD_LE(inlen, crc32_refl_shuf_shift);
++      vector2x_u64 shr_shuf = CRC_VEC_U64_LOAD_LE(inlen + 16, crc32_refl_shuf_shift);
++
++      v0 = CRC_VEC_U64_LOAD_LE(inlen - 16, inbuf);
++      v0 &= mask;
++
++      crc = CRC_VEC_SWAP_TO_LE(crc);
++      v0 |= (vector2x_u64)vec_perm((vector16x_u8)crc, (vector16x_u8)zero,
++				   (vector16x_u8)shr_shuf);
++      crc = (vector2x_u64)vec_perm((vector16x_u8)crc, (vector16x_u8)zero,
++				   (vector16x_u8)shl_shuf);
++      crc = asm_vpmsumd(k3k4, crc);
++      crc ^= v0;
++
++      inbuf += inlen;
++      inlen -= inlen;
++    }
++
++  /* Final fold. */
++
++  /* reduce 128-bits to 96-bits */
++  v0 = asm_swap_u64(crc);
++  v0 &= low_64bit_mask;
++  crc = asm_vpmsumd(k4lo, crc);
++  crc ^= v0;
++
++  /* reduce 96-bits to 64-bits */
++  v0 = (vector2x_u64)vec_sld_u32((vector4x_u32)crc,
++				 (vector4x_u32)crc, 3);  /* [x0][x3][x2][x1] */
++  v0 &= low_64bit_mask;                                  /* [00][00][x2][x1] */
++  crc = crc & low_32bit_mask;                            /* [00][00][00][x0] */
++  crc = v0 ^ asm_vpmsumd(k5lo, crc);                     /* [00][00][xx][xx] */
++
++  /* barrett reduction */
++  v0 = crc << 32;                                        /* [00][00][x0][00] */
++  v0 = asm_vpmsumd(my_p, v0);
++  v0 = asm_swap_u64(v0);
++  v0 = asm_vpmsumd(my_p, v0);
++  crc = (vector2x_u64)vec_sld_u32((vector4x_u32)crc,
++				  zero, 1);              /* [00][x1][x0][00] */
++  crc ^= v0;
++
++  *pcrc = (u32)crc[VEC_U64_HI];
++}
++
++
++static ASM_FUNC_ATTR_INLINE u32
++crc32r_ppc8_ce_reduction_4 (u32 data, u32 crc,
++			    const struct crc32_consts_s *consts)
++{
++  vector4x_u32 zero = { 0, 0, 0, 0 };
++  vector2x_u64 my_p = CRC_VEC_U64_LOAD(0, &consts->my_p[0]);
++  vector2x_u64 v0 = CRC_VEC_U64_DEF((u64)data, 0);
++  v0 = asm_vpmsumd(v0, my_p);                          /* [00][00][xx][xx] */
++  v0 = (vector2x_u64)vec_sld_u32((vector4x_u32)v0,
++				 zero, 3);             /* [x0][00][00][00] */
++  v0 = (vector2x_u64)vec_sld_u32((vector4x_u32)v0,
++				 (vector4x_u32)v0, 3); /* [00][x0][00][00] */
++  v0 = asm_vpmsumd(v0, my_p);                          /* [00][00][xx][xx] */
++  return (v0[VEC_U64_LO] >> 32) ^ crc;
++}
++
++
++static ASM_FUNC_ATTR_INLINE void
++crc32r_less_than_16 (u32 *pcrc, const byte *inbuf, size_t inlen,
++		     const struct crc32_consts_s *consts)
++{
++  u32 crc = *pcrc;
++  u32 data;
++
++  while (inlen >= 4)
++    {
++      data = buf_get_le32(inbuf);
++      data ^= crc;
++
++      inlen -= 4;
++      inbuf += 4;
++
++      crc = crc32r_ppc8_ce_reduction_4 (data, 0, consts);
++    }
++
++  switch (inlen)
++    {
++    case 0:
++      break;
++    case 1:
++      data = inbuf[0];
++      data ^= crc;
++      data <<= 24;
++      crc >>= 8;
++      crc = crc32r_ppc8_ce_reduction_4 (data, crc, consts);
++      break;
++    case 2:
++      data = inbuf[0] << 0;
++      data |= inbuf[1] << 8;
++      data ^= crc;
++      data <<= 16;
++      crc >>= 16;
++      crc = crc32r_ppc8_ce_reduction_4 (data, crc, consts);
++      break;
++    case 3:
++      data = inbuf[0] << 0;
++      data |= inbuf[1] << 8;
++      data |= inbuf[2] << 16;
++      data ^= crc;
++      data <<= 8;
++      crc >>= 24;
++      crc = crc32r_ppc8_ce_reduction_4 (data, crc, consts);
++      break;
++    }
++
++  *pcrc = crc;
++}
++
++
++static ASM_FUNC_ATTR_INLINE void
++crc32_ppc8_ce_bulk (u32 *pcrc, const byte *inbuf, size_t inlen,
++		    const struct crc32_consts_s *consts)
++{
++  vector4x_u32 zero = { 0, 0, 0, 0 };
++  vector2x_u64 low_96bit_mask = CRC_VEC_U64_DEF(~0, ~((u64)(u32)-1 << 32));
++  vector2x_u64 p_my = asm_swap_u64(CRC_VEC_U64_LOAD(0, &consts->my_p[0]));
++  vector2x_u64 p_my_lo, p_my_hi;
++  vector2x_u64 k2k1 = asm_swap_u64(CRC_VEC_U64_LOAD(0, &consts->k[1 - 1]));
++  vector2x_u64 k4k3 = asm_swap_u64(CRC_VEC_U64_LOAD(0, &consts->k[3 - 1]));
++  vector2x_u64 k4hi = CRC_VEC_U64_DEF(0, consts->k[4 - 1]);
++  vector2x_u64 k5hi = CRC_VEC_U64_DEF(0, consts->k[5 - 1]);
++  vector2x_u64 crc = CRC_VEC_U64_DEF(0, _gcry_bswap64(*pcrc));
++  vector2x_u64 crc0, crc1, crc2, crc3;
++  vector2x_u64 v0;
++
++  if (inlen >= 8 * 16)
++    {
++      crc0 = CRC_VEC_U64_LOAD_BE(0 * 16, inbuf);
++      crc0 ^= crc;
++      crc1 = CRC_VEC_U64_LOAD_BE(1 * 16, inbuf);
++      crc2 = CRC_VEC_U64_LOAD_BE(2 * 16, inbuf);
++      crc3 = CRC_VEC_U64_LOAD_BE(3 * 16, inbuf);
++
++      inbuf += 4 * 16;
++      inlen -= 4 * 16;
++
++      /* Fold by 4. */
++      while (inlen >= 4 * 16)
++	{
++	  v0 = CRC_VEC_U64_LOAD_BE(0 * 16, inbuf);
++	  crc0 = asm_vpmsumd(crc0, k2k1) ^ v0;
++
++	  v0 = CRC_VEC_U64_LOAD_BE(1 * 16, inbuf);
++	  crc1 = asm_vpmsumd(crc1, k2k1) ^ v0;
++
++	  v0 = CRC_VEC_U64_LOAD_BE(2 * 16, inbuf);
++	  crc2 = asm_vpmsumd(crc2, k2k1) ^ v0;
++
++	  v0 = CRC_VEC_U64_LOAD_BE(3 * 16, inbuf);
++	  crc3 = asm_vpmsumd(crc3, k2k1) ^ v0;
++
++	  inbuf += 4 * 16;
++	  inlen -= 4 * 16;
++	}
++
++      /* Fold 4 to 1. */
++      crc1 ^= asm_vpmsumd(crc0, k4k3);
++      crc2 ^= asm_vpmsumd(crc1, k4k3);
++      crc3 ^= asm_vpmsumd(crc2, k4k3);
++      crc = crc3;
++    }
++  else
++    {
++      v0 = CRC_VEC_U64_LOAD_BE(0, inbuf);
++      crc ^= v0;
++
++      inbuf += 16;
++      inlen -= 16;
++    }
++
++  /* Fold by 1. */
++  while (inlen >= 16)
++    {
++      v0 = CRC_VEC_U64_LOAD_BE(0, inbuf);
++      crc = asm_vpmsumd(k4k3, crc);
++      crc ^= v0;
++
++      inbuf += 16;
++      inlen -= 16;
++    }
++
++  /* Partial fold. */
++  if (inlen)
++    {
++      /* Load last input and add padding zeros. */
++      vector2x_u64 mask = CRC_VEC_U64_LOAD_LE(inlen, crc32_partial_fold_input_mask);
++      vector2x_u64 shl_shuf = CRC_VEC_U64_LOAD_LE(32 - inlen, crc32_refl_shuf_shift);
++      vector2x_u64 shr_shuf = CRC_VEC_U64_LOAD_LE(inlen + 16, crc32_shuf_shift);
++
++      v0 = CRC_VEC_U64_LOAD_LE(inlen - 16, inbuf);
++      v0 &= mask;
++
++      crc = CRC_VEC_SWAP_TO_LE(crc);
++      crc2 = (vector2x_u64)vec_perm((vector16x_u8)crc, (vector16x_u8)zero,
++				    (vector16x_u8)shr_shuf);
++      v0 |= crc2;
++      v0 = CRC_VEC_SWAP(v0);
++      crc = (vector2x_u64)vec_perm((vector16x_u8)crc, (vector16x_u8)zero,
++				   (vector16x_u8)shl_shuf);
++      crc = asm_vpmsumd(k4k3, crc);
++      crc ^= v0;
++
++      inbuf += inlen;
++      inlen -= inlen;
++    }
++
++  /* Final fold. */
++
++  /* reduce 128-bits to 96-bits */
++  v0 = (vector2x_u64)vec_sld_u32((vector4x_u32)crc,
++				 (vector4x_u32)zero, 2);
++  crc = asm_vpmsumd(k4hi, crc);
++  crc ^= v0; /* bottom 32-bit are zero */
++
++  /* reduce 96-bits to 64-bits */
++  v0 = crc & low_96bit_mask;    /* [00][x2][x1][00] */
++  crc >>= 32;                   /* [00][x3][00][x0] */
++  crc = asm_vpmsumd(k5hi, crc); /* [00][xx][xx][00] */
++  crc ^= v0;                    /* top and bottom 32-bit are zero */
++
++  /* barrett reduction */
++  p_my_hi = p_my;
++  p_my_lo = p_my;
++  p_my_hi[VEC_U64_LO] = 0;
++  p_my_lo[VEC_U64_HI] = 0;
++  v0 = crc >> 32;                                        /* [00][00][00][x1] */
++  crc = asm_vpmsumd(p_my_hi, crc);                       /* [00][xx][xx][xx] */
++  crc = (vector2x_u64)vec_sld_u32((vector4x_u32)crc,
++				  (vector4x_u32)crc, 3); /* [x0][00][x2][x1] */
++  crc = asm_vpmsumd(p_my_lo, crc);                       /* [00][xx][xx][xx] */
++  crc ^= v0;
++
++  *pcrc = _gcry_bswap32(crc[VEC_U64_LO]);
++}
++
++
++static ASM_FUNC_ATTR_INLINE u32
++crc32_ppc8_ce_reduction_4 (u32 data, u32 crc,
++			   const struct crc32_consts_s *consts)
++{
++  vector2x_u64 my_p = CRC_VEC_U64_LOAD(0, &consts->my_p[0]);
++  vector2x_u64 v0 = CRC_VEC_U64_DEF((u64)data << 32, 0);
++  v0 = asm_vpmsumd(v0, my_p); /* [00][x1][x0][00] */
++  v0[VEC_U64_LO] = 0;         /* [00][x1][00][00] */
++  v0 = asm_vpmsumd(v0, my_p); /* [00][00][xx][xx] */
++  return _gcry_bswap32(v0[VEC_U64_LO]) ^ crc;
++}
++
++
++static ASM_FUNC_ATTR_INLINE void
++crc32_less_than_16 (u32 *pcrc, const byte *inbuf, size_t inlen,
++		    const struct crc32_consts_s *consts)
++{
++  u32 crc = *pcrc;
++  u32 data;
++
++  while (inlen >= 4)
++    {
++      data = buf_get_le32(inbuf);
++      data ^= crc;
++      data = _gcry_bswap32(data);
++
++      inlen -= 4;
++      inbuf += 4;
++
++      crc = crc32_ppc8_ce_reduction_4 (data, 0, consts);
++    }
++
++  switch (inlen)
++    {
++    case 0:
++      break;
++    case 1:
++      data = inbuf[0];
++      data ^= crc;
++      data = data & 0xffU;
++      crc = crc >> 8;
++      crc = crc32_ppc8_ce_reduction_4 (data, crc, consts);
++      break;
++    case 2:
++      data = inbuf[0] << 0;
++      data |= inbuf[1] << 8;
++      data ^= crc;
++      data = _gcry_bswap32(data << 16);
++      crc = crc >> 16;
++      crc = crc32_ppc8_ce_reduction_4 (data, crc, consts);
++      break;
++    case 3:
++      data = inbuf[0] << 0;
++      data |= inbuf[1] << 8;
++      data |= inbuf[2] << 16;
++      data ^= crc;
++      data = _gcry_bswap32(data << 8);
++      crc = crc >> 24;
++      crc = crc32_ppc8_ce_reduction_4 (data, crc, consts);
++      break;
++    }
++
++  *pcrc = crc;
++}
++
++void ASM_FUNC_ATTR
++_gcry_crc32_ppc8_vpmsum (u32 *pcrc, const byte *inbuf, size_t inlen)
++{
++  const struct crc32_consts_s *consts = &crc32_consts;
++
++  if (!inlen)
++    return;
++
++  if (inlen >= 16)
++    crc32r_ppc8_ce_bulk (pcrc, inbuf, inlen, consts);
++  else
++    crc32r_less_than_16 (pcrc, inbuf, inlen, consts);
++}
++
++void ASM_FUNC_ATTR
++_gcry_crc24rfc2440_ppc8_vpmsum (u32 *pcrc, const byte *inbuf, size_t inlen)
++{
++  const struct crc32_consts_s *consts = &crc24rfc2440_consts;
++
++  if (!inlen)
++    return;
++
++  /* Note: *pcrc is kept in input byte order. */
++
++  if (inlen >= 16)
++    crc32_ppc8_ce_bulk (pcrc, inbuf, inlen, consts);
++  else
++    crc32_less_than_16 (pcrc, inbuf, inlen, consts);
++}
++
++#endif
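
The bulk and tail routines above must agree with the plain bit-at-a-time CRC32. For cross-checking, a minimal reflected (LSB-first) reference over the standard 0xEDB88320 polynomial is sketched below; it is illustrative only, not part of the patch, and it assumes the caller applies the usual initial/final XOR exactly as libgcrypt's CRC context code does.

#include <stddef.h>
#include <stdint.h>

/* Bitwise reflected CRC32 update (polynomial 0xEDB88320).  Sketch only:
 * feed it the same running CRC value that _gcry_crc32_ppc8_vpmsum updates
 * and the results should match for any split of the input. */
static uint32_t
crc32_bitwise_update (uint32_t crc, const unsigned char *buf, size_t len)
{
  while (len--)
    {
      crc ^= *buf++;
      for (int i = 0; i < 8; i++)
        crc = (crc >> 1) ^ (0xEDB88320U & (0U - (crc & 1U)));
    }
  return crc;
}
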
+diff --git a/cipher/crc.c b/cipher/crc.c
+index a1ce50b6..bbb159ce 100644
+--- a/cipher/crc.c
++++ b/cipher/crc.c
+@@ -43,11 +43,27 @@
+ #endif /* USE_INTEL_PCLMUL */
+ 
+ 
++/* USE_PPC_VPMSUM indicates whether to enable PowerPC vector
++ * accelerated code. */
++#undef USE_PPC_VPMSUM
++#ifdef ENABLE_PPC_CRYPTO_SUPPORT
++# if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++     defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
++#  if __GNUC__ >= 4
++#   define USE_PPC_VPMSUM 1
++#  endif
++# endif
++#endif /* USE_PPC_VPMSUM */
++
++
+ typedef struct
+ {
+   u32 CRC;
+ #ifdef USE_INTEL_PCLMUL
+   unsigned int use_pclmul:1;           /* Intel PCLMUL shall be used.  */
++#endif
++#ifdef USE_PPC_VPMSUM
++  unsigned int use_vpmsum:1;           /* POWER vpmsum shall be used. */
+ #endif
+   byte buf[4];
+ }
+@@ -61,6 +77,20 @@ void _gcry_crc24rfc2440_intel_pclmul (u32 *pcrc, const byte *inbuf,
+ 				      size_t inlen);
+ #endif
+ 
++#ifdef USE_ARM_PMULL
++/*-- crc-armv8-ce.c --*/
++void _gcry_crc32_armv8_ce_pmull (u32 *pcrc, const byte *inbuf, size_t inlen);
++void _gcry_crc24rfc2440_armv8_ce_pmull (u32 *pcrc, const byte *inbuf,
++					size_t inlen);
++#endif
++
++#ifdef USE_PPC_VPMSUM
++/*-- crc-ppc.c --*/
++void _gcry_crc32_ppc8_vpmsum (u32 *pcrc, const byte *inbuf, size_t inlen);
++void _gcry_crc24rfc2440_ppc8_vpmsum (u32 *pcrc, const byte *inbuf,
++				     size_t inlen);
++#endif
++
+ 
+ /*
+  * Code generated by universal_crc by Danjel McGougan
+@@ -361,11 +391,13 @@ static void
+ crc32_init (void *context, unsigned int flags)
+ {
+   CRC_CONTEXT *ctx = (CRC_CONTEXT *) context;
+-#ifdef USE_INTEL_PCLMUL
+   u32 hwf = _gcry_get_hw_features ();
+-
++#ifdef USE_INTEL_PCLMUL
+   ctx->use_pclmul = (hwf & HWF_INTEL_SSE4_1) && (hwf & HWF_INTEL_PCLMUL);
+ #endif
++#ifdef USE_PPC_VPMSUM
++  ctx->use_vpmsum = !!(hwf & HWF_PPC_ARCH_2_07);
++#endif
+ 
+   (void)flags;
+ 
+@@ -386,6 +418,13 @@ crc32_write (void *context, const void *inbuf_arg, size_t inlen)
+       return;
+     }
+ #endif
++#ifdef USE_PPC_VPMSUM
++  if (ctx->use_vpmsum)
++    {
++      _gcry_crc32_ppc8_vpmsum(&ctx->CRC, inbuf, inlen);
++      return;
++    }
++#endif
+ 
+   if (!inbuf || !inlen)
+     return;
+@@ -444,6 +483,10 @@ crc32rfc1510_init (void *context, unsigned int flags)
+ 
+   ctx->use_pclmul = (hwf & HWF_INTEL_SSE4_1) && (hwf & HWF_INTEL_PCLMUL);
+ #endif
++#ifdef USE_PPC_VPMSUM
++  u32 hwf = _gcry_get_hw_features ();
++  ctx->use_vpmsum = !!(hwf & HWF_PPC_ARCH_2_07);
++#endif
+ 
+   (void)flags;
+ 
+@@ -774,6 +817,10 @@ crc24rfc2440_init (void *context, unsigned int flags)
+ 
+   ctx->use_pclmul = (hwf & HWF_INTEL_SSE4_1) && (hwf & HWF_INTEL_PCLMUL);
+ #endif
++#ifdef USE_PPC_VPMSUM
++  u32 hwf = _gcry_get_hw_features ();
++  ctx->use_vpmsum = !!(hwf & HWF_PPC_ARCH_2_07);
++#endif
+ 
+   (void)flags;
+ 
+@@ -794,6 +841,13 @@ crc24rfc2440_write (void *context, const void *inbuf_arg, size_t inlen)
+       return;
+     }
+ #endif
++#ifdef USE_PPC_VPMSUM
++  if (ctx->use_vpmsum)
++    {
++      _gcry_crc24rfc2440_ppc8_vpmsum(&ctx->CRC, inbuf, inlen);
++      return;
++    }
++#endif
+ 
+   if (!inbuf || !inlen)
+     return;
+diff --git a/configure.ac b/configure.ac
+index 953a20e9..b6b6455a 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1916,6 +1916,7 @@ AC_CACHE_CHECK([whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto
+ 		    "vadduwm %v0, %v1, %v22;\n"
+ 		    "vshasigmaw %v0, %v1, 0, 15;\n"
+ 		    "vshasigmad %v0, %v1, 0, 15;\n"
++		    "vpmsumd %v11, %v11, %v11;\n"
+ 		  );
+             ]])],
+           [gcry_cv_gcc_inline_asm_ppc_altivec=yes])
+@@ -2556,6 +2557,15 @@ if test "$found" = "1" ; then
+          # Build with the assembly implementation
+          GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc-intel-pclmul.lo"
+       ;;
++      powerpc64le-*-*)
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS crc-ppc.lo"
++      ;;
++      powerpc64-*-*)
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS crc-ppc.lo"
++      ;;
++      powerpc-*-*)
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS crc-ppc.lo"
++      ;;
+    esac
+ fi
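
Because the runtime selection happens inside crc32_init()/crc32_write(), the POWER8 vpmsum path is transparent to callers: the ordinary gcry_md interface keeps working unchanged and simply runs faster on ARCH_2_07 hardware. A minimal usage sketch follows (error handling trimmed; libgcrypt version check and initialization are assumed to be done elsewhere).

#include <stddef.h>
#include <string.h>
#include <gcrypt.h>

/* Compute the CRC32 digest of a buffer via the gcry_md API; the
 * accelerated backend, when available, is picked automatically. */
static int
crc32_digest (const void *buf, size_t len, unsigned char out[4])
{
  gcry_md_hd_t hd;

  if (gcry_md_open (&hd, GCRY_MD_CRC32, 0))
    return -1;
  gcry_md_write (hd, buf, len);
  memcpy (out, gcry_md_read (hd, GCRY_MD_CRC32), 4);
  gcry_md_close (hd);
  return 0;
}
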
diff --git a/SOURCES/libgcrypt-1.8.5-ppc-sha2.patch b/SOURCES/libgcrypt-1.8.5-ppc-sha2.patch
new file mode 100644
index 0000000..071a60a
--- /dev/null
+++ b/SOURCES/libgcrypt-1.8.5-ppc-sha2.patch
@@ -0,0 +1,2138 @@
+diff --git a/cipher/Makefile.am b/cipher/Makefile.am
+index 85a5b5fb..cb41c251 100644
+--- a/cipher/Makefile.am
++++ b/cipher/Makefile.am
+@@ -94,9 +94,9 @@ serpent.c serpent-sse2-amd64.S serpent-avx2-amd64.S serpent-armv7-neon.S \
+ sha1.c sha1-ssse3-amd64.S sha1-avx-amd64.S sha1-avx-bmi2-amd64.S \
+   sha1-armv7-neon.S sha1-armv8-aarch32-ce.S sha1-armv8-aarch64-ce.S \
+ sha256.c sha256-ssse3-amd64.S sha256-avx-amd64.S sha256-avx2-bmi2-amd64.S \
+-  sha256-armv8-aarch32-ce.S sha256-armv8-aarch64-ce.S \
++  sha256-armv8-aarch32-ce.S sha256-armv8-aarch64-ce.S sha256-ppc.c \
+ sha512.c sha512-ssse3-amd64.S sha512-avx-amd64.S sha512-avx2-bmi2-amd64.S \
+-  sha512-armv7-neon.S sha512-arm.S \
++  sha512-armv7-neon.S sha512-arm.S sha512-ppc.c \
+ keccak.c keccak_permute_32.h keccak_permute_64.h keccak-armv7-neon.S \
+ stribog.c \
+ tiger.c \
+@@ -148,4 +148,14 @@ rijndael-ppc9le.o: $(srcdir)/rijndael-ppc9le.c Makefile
+ rijndael-ppc9le.lo: $(srcdir)/rijndael-ppc9le.c Makefile
+ 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
+ 
++sha256-ppc.o: $(srcdir)/sha256-ppc.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++sha256-ppc.lo: $(srcdir)/sha256-ppc.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
+ 
++sha512-ppc.o: $(srcdir)/sha512-ppc.c Makefile
++	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< `
++
++sha512-ppc.lo: $(srcdir)/sha512-ppc.c Makefile
++	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< `
+diff --git a/cipher/sha256-ppc.c b/cipher/sha256-ppc.c
+new file mode 100644
+index 00000000..a9b59714
+--- /dev/null
++++ b/cipher/sha256-ppc.c
+@@ -0,0 +1,795 @@
++/* sha256-ppc.c - PowerPC vcrypto implementation of SHA-256 transform
++ * Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <config.h>
++
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    defined(USE_SHA256) && \
++    __GNUC__ >= 4
++
++#include <altivec.h>
++#include "bufhelp.h"
++
++
++typedef vector unsigned char vector16x_u8;
++typedef vector unsigned int vector4x_u32;
++typedef vector unsigned long long vector2x_u64;
++
++
++#define ALWAYS_INLINE inline __attribute__((always_inline))
++#define NO_INLINE __attribute__((noinline))
++#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))
++
++#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
++#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
++#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE
++
++
++static const u32 K[64] =
++  {
++#define TBL(v) v
++    TBL(0x428a2f98), TBL(0x71374491), TBL(0xb5c0fbcf), TBL(0xe9b5dba5),
++    TBL(0x3956c25b), TBL(0x59f111f1), TBL(0x923f82a4), TBL(0xab1c5ed5),
++    TBL(0xd807aa98), TBL(0x12835b01), TBL(0x243185be), TBL(0x550c7dc3),
++    TBL(0x72be5d74), TBL(0x80deb1fe), TBL(0x9bdc06a7), TBL(0xc19bf174),
++    TBL(0xe49b69c1), TBL(0xefbe4786), TBL(0x0fc19dc6), TBL(0x240ca1cc),
++    TBL(0x2de92c6f), TBL(0x4a7484aa), TBL(0x5cb0a9dc), TBL(0x76f988da),
++    TBL(0x983e5152), TBL(0xa831c66d), TBL(0xb00327c8), TBL(0xbf597fc7),
++    TBL(0xc6e00bf3), TBL(0xd5a79147), TBL(0x06ca6351), TBL(0x14292967),
++    TBL(0x27b70a85), TBL(0x2e1b2138), TBL(0x4d2c6dfc), TBL(0x53380d13),
++    TBL(0x650a7354), TBL(0x766a0abb), TBL(0x81c2c92e), TBL(0x92722c85),
++    TBL(0xa2bfe8a1), TBL(0xa81a664b), TBL(0xc24b8b70), TBL(0xc76c51a3),
++    TBL(0xd192e819), TBL(0xd6990624), TBL(0xf40e3585), TBL(0x106aa070),
++    TBL(0x19a4c116), TBL(0x1e376c08), TBL(0x2748774c), TBL(0x34b0bcb5),
++    TBL(0x391c0cb3), TBL(0x4ed8aa4a), TBL(0x5b9cca4f), TBL(0x682e6ff3),
++    TBL(0x748f82ee), TBL(0x78a5636f), TBL(0x84c87814), TBL(0x8cc70208),
++    TBL(0x90befffa), TBL(0xa4506ceb), TBL(0xbef9a3f7), TBL(0xc67178f2)
++#undef TBL
++  };
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_rol_elems(vector4x_u32 v, unsigned int idx)
++{
++#ifndef WORDS_BIGENDIAN
++  return vec_sld (v, v, (16 - (4 * idx)) & 15);
++#else
++  return vec_sld (v, v, (4 * idx) & 15);
++#endif
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_merge_idx0_elems(vector4x_u32 v0, vector4x_u32 v1,
++		     vector4x_u32 v2, vector4x_u32 v3)
++{
++  return (vector4x_u32)vec_mergeh ((vector2x_u64) vec_mergeh(v0, v1),
++				   (vector2x_u64) vec_mergeh(v2, v3));
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_ror_u32(vector4x_u32 v, unsigned int shift)
++{
++  return (v >> (shift & 31)) ^ (v << ((32 - shift) & 31));
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector4x_u32
++vec_vshasigma_u32(vector4x_u32 v, unsigned int a, unsigned int b)
++{
++  asm ("vshasigmaw %0,%1,%2,%3"
++       : "=v" (v)
++       : "v" (v), "g" (a), "g" (b)
++       : "memory");
++  return v;
++}
++
++
++/* SHA2 round in vector registers */
++#define R(a,b,c,d,e,f,g,h,k,w) do                             \
++    {                                                         \
++      t1  = (h);                                              \
++      t1 += ((k) + (w));                                      \
++      t1 += Cho((e),(f),(g));                                 \
++      t1 += Sum1((e));                                        \
++      t2  = Sum0((a));                                        \
++      t2 += Maj((a),(b),(c));                                 \
++      d  += t1;                                               \
++      h   = t1 + t2;                                          \
++    } while (0)
++
++#define Cho(b, c, d)  (vec_sel(d, c, b))
++
++#define Maj(c, d, b)  (vec_sel(c, b, c ^ d))
++
++#define Sum0(x)       (vec_vshasigma_u32(x, 1, 0))
++
++#define Sum1(x)       (vec_vshasigma_u32(x, 1, 15))
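
/* For reference (not part of the patch): with ST=1 the vshasigmaw
 * instruction computes the SHA-256 "big sigma" functions on each 32-bit
 * element, so Sum0/Sum1 above correspond to the scalar FIPS 180-4
 * definitions (ror denoting a 32-bit rotate right):
 *
 *   Sum0(x) = ror(x,  2) ^ ror(x, 13) ^ ror(x, 22)   -- SIX mask 0
 *   Sum1(x) = ror(x,  6) ^ ror(x, 11) ^ ror(x, 25)   -- SIX mask 15
 */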
++
++
++/* Message expansion on general purpose registers */
++#define S0(x) (ror ((x), 7) ^ ror ((x), 18) ^ ((x) >> 3))
++#define S1(x) (ror ((x), 17) ^ ror ((x), 19) ^ ((x) >> 10))
++
++#define I(i) ( w[i] = buf_get_be32(data + i * 4) )
++#define W(i) ({ w[i&0x0f] +=    w[(i-7) &0x0f];  \
++		w[i&0x0f] += S0(w[(i-15)&0x0f]); \
++		w[i&0x0f] += S1(w[(i-2) &0x0f]); \
++		w[i&0x0f]; })
++
++#define I2(i) ( w2[i] = buf_get_be32(64 + data + i * 4), I(i) )
++#define W2(i) ({ w2[i]  = w2[i-7];       \
++		 w2[i] += S1(w2[i-2]);   \
++		 w2[i] += S0(w2[i-15]);  \
++		 w2[i] += w2[i-16];      \
++		 W(i); })
++#define R2(i) ( w2[i] )
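
/* Illustrative scalar equivalent of the I()/W() expansion macros above
 * (a sketch, not part of the patch): the SHA-256 message schedule kept in
 * a 16-word ring buffer and updated in place, per FIPS 180-4.  The
 * sketch_* names are invented here and <stdint.h> is assumed. */
static inline uint32_t
sketch_ror32 (uint32_t x, unsigned int r)
{
  return (x >> r) | (x << ((32 - r) & 31));
}

static uint32_t
sketch_schedule_word (uint32_t w[16], unsigned int t) /* valid for t >= 16 */
{
  uint32_t w15 = w[(t - 15) & 15];
  uint32_t w02 = w[(t - 2) & 15];

  w[t & 15] += (sketch_ror32 (w15, 7) ^ sketch_ror32 (w15, 18) ^ (w15 >> 3))   /* S0 */
             + (sketch_ror32 (w02, 17) ^ sketch_ror32 (w02, 19) ^ (w02 >> 10)) /* S1 */
             + w[(t - 7) & 15];
  return w[t & 15];
}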
++
++
++unsigned int ASM_FUNC_ATTR
++_gcry_sha256_transform_ppc8(u32 state[8], const unsigned char *data,
++			    size_t nblks)
++{
++  /* GPRs are used for message expansion because the vector intrinsics
++   * based variant generates slower code. */
++  vector4x_u32 h0, h1, h2, h3, h4, h5, h6, h7;
++  vector4x_u32 h0_h3, h4_h7;
++  vector4x_u32 a, b, c, d, e, f, g, h, t1, t2;
++  u32 w[16];
++  u32 w2[64];
++
++  h0_h3 = vec_vsx_ld (4 * 0, state);
++  h4_h7 = vec_vsx_ld (4 * 4, state);
++
++  h0 = h0_h3;
++  h1 = vec_rol_elems (h0_h3, 1);
++  h2 = vec_rol_elems (h0_h3, 2);
++  h3 = vec_rol_elems (h0_h3, 3);
++  h4 = h4_h7;
++  h5 = vec_rol_elems (h4_h7, 1);
++  h6 = vec_rol_elems (h4_h7, 2);
++  h7 = vec_rol_elems (h4_h7, 3);
++
++  while (nblks >= 2)
++    {
++      a = h0;
++      b = h1;
++      c = h2;
++      d = h3;
++      e = h4;
++      f = h5;
++      g = h6;
++      h = h7;
++
++      R(a, b, c, d, e, f, g, h, K[0], I2(0));
++      R(h, a, b, c, d, e, f, g, K[1], I2(1));
++      R(g, h, a, b, c, d, e, f, K[2], I2(2));
++      R(f, g, h, a, b, c, d, e, K[3], I2(3));
++      R(e, f, g, h, a, b, c, d, K[4], I2(4));
++      R(d, e, f, g, h, a, b, c, K[5], I2(5));
++      R(c, d, e, f, g, h, a, b, K[6], I2(6));
++      R(b, c, d, e, f, g, h, a, K[7], I2(7));
++      R(a, b, c, d, e, f, g, h, K[8], I2(8));
++      R(h, a, b, c, d, e, f, g, K[9], I2(9));
++      R(g, h, a, b, c, d, e, f, K[10], I2(10));
++      R(f, g, h, a, b, c, d, e, K[11], I2(11));
++      R(e, f, g, h, a, b, c, d, K[12], I2(12));
++      R(d, e, f, g, h, a, b, c, K[13], I2(13));
++      R(c, d, e, f, g, h, a, b, K[14], I2(14));
++      R(b, c, d, e, f, g, h, a, K[15], I2(15));
++      data += 64 * 2;
++
++      R(a, b, c, d, e, f, g, h, K[16], W2(16));
++      R(h, a, b, c, d, e, f, g, K[17], W2(17));
++      R(g, h, a, b, c, d, e, f, K[18], W2(18));
++      R(f, g, h, a, b, c, d, e, K[19], W2(19));
++      R(e, f, g, h, a, b, c, d, K[20], W2(20));
++      R(d, e, f, g, h, a, b, c, K[21], W2(21));
++      R(c, d, e, f, g, h, a, b, K[22], W2(22));
++      R(b, c, d, e, f, g, h, a, K[23], W2(23));
++      R(a, b, c, d, e, f, g, h, K[24], W2(24));
++      R(h, a, b, c, d, e, f, g, K[25], W2(25));
++      R(g, h, a, b, c, d, e, f, K[26], W2(26));
++      R(f, g, h, a, b, c, d, e, K[27], W2(27));
++      R(e, f, g, h, a, b, c, d, K[28], W2(28));
++      R(d, e, f, g, h, a, b, c, K[29], W2(29));
++      R(c, d, e, f, g, h, a, b, K[30], W2(30));
++      R(b, c, d, e, f, g, h, a, K[31], W2(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W2(32));
++      R(h, a, b, c, d, e, f, g, K[33], W2(33));
++      R(g, h, a, b, c, d, e, f, K[34], W2(34));
++      R(f, g, h, a, b, c, d, e, K[35], W2(35));
++      R(e, f, g, h, a, b, c, d, K[36], W2(36));
++      R(d, e, f, g, h, a, b, c, K[37], W2(37));
++      R(c, d, e, f, g, h, a, b, K[38], W2(38));
++      R(b, c, d, e, f, g, h, a, K[39], W2(39));
++      R(a, b, c, d, e, f, g, h, K[40], W2(40));
++      R(h, a, b, c, d, e, f, g, K[41], W2(41));
++      R(g, h, a, b, c, d, e, f, K[42], W2(42));
++      R(f, g, h, a, b, c, d, e, K[43], W2(43));
++      R(e, f, g, h, a, b, c, d, K[44], W2(44));
++      R(d, e, f, g, h, a, b, c, K[45], W2(45));
++      R(c, d, e, f, g, h, a, b, K[46], W2(46));
++      R(b, c, d, e, f, g, h, a, K[47], W2(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W2(48));
++      R(h, a, b, c, d, e, f, g, K[49], W2(49));
++      R(g, h, a, b, c, d, e, f, K[50], W2(50));
++      R(f, g, h, a, b, c, d, e, K[51], W2(51));
++      R(e, f, g, h, a, b, c, d, K[52], W2(52));
++      R(d, e, f, g, h, a, b, c, K[53], W2(53));
++      R(c, d, e, f, g, h, a, b, K[54], W2(54));
++      R(b, c, d, e, f, g, h, a, K[55], W2(55));
++      R(a, b, c, d, e, f, g, h, K[56], W2(56));
++      R(h, a, b, c, d, e, f, g, K[57], W2(57));
++      R(g, h, a, b, c, d, e, f, K[58], W2(58));
++      R(f, g, h, a, b, c, d, e, K[59], W2(59));
++      R(e, f, g, h, a, b, c, d, K[60], W2(60));
++      R(d, e, f, g, h, a, b, c, K[61], W2(61));
++      R(c, d, e, f, g, h, a, b, K[62], W2(62));
++      R(b, c, d, e, f, g, h, a, K[63], W2(63));
++
++      h0 += a;
++      h1 += b;
++      h2 += c;
++      h3 += d;
++      h4 += e;
++      h5 += f;
++      h6 += g;
++      h7 += h;
++
++      a = h0;
++      b = h1;
++      c = h2;
++      d = h3;
++      e = h4;
++      f = h5;
++      g = h6;
++      h = h7;
++
++      R(a, b, c, d, e, f, g, h, K[0], R2(0));
++      R(h, a, b, c, d, e, f, g, K[1], R2(1));
++      R(g, h, a, b, c, d, e, f, K[2], R2(2));
++      R(f, g, h, a, b, c, d, e, K[3], R2(3));
++      R(e, f, g, h, a, b, c, d, K[4], R2(4));
++      R(d, e, f, g, h, a, b, c, K[5], R2(5));
++      R(c, d, e, f, g, h, a, b, K[6], R2(6));
++      R(b, c, d, e, f, g, h, a, K[7], R2(7));
++      R(a, b, c, d, e, f, g, h, K[8], R2(8));
++      R(h, a, b, c, d, e, f, g, K[9], R2(9));
++      R(g, h, a, b, c, d, e, f, K[10], R2(10));
++      R(f, g, h, a, b, c, d, e, K[11], R2(11));
++      R(e, f, g, h, a, b, c, d, K[12], R2(12));
++      R(d, e, f, g, h, a, b, c, K[13], R2(13));
++      R(c, d, e, f, g, h, a, b, K[14], R2(14));
++      R(b, c, d, e, f, g, h, a, K[15], R2(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], R2(16));
++      R(h, a, b, c, d, e, f, g, K[17], R2(17));
++      R(g, h, a, b, c, d, e, f, K[18], R2(18));
++      R(f, g, h, a, b, c, d, e, K[19], R2(19));
++      R(e, f, g, h, a, b, c, d, K[20], R2(20));
++      R(d, e, f, g, h, a, b, c, K[21], R2(21));
++      R(c, d, e, f, g, h, a, b, K[22], R2(22));
++      R(b, c, d, e, f, g, h, a, K[23], R2(23));
++      R(a, b, c, d, e, f, g, h, K[24], R2(24));
++      R(h, a, b, c, d, e, f, g, K[25], R2(25));
++      R(g, h, a, b, c, d, e, f, K[26], R2(26));
++      R(f, g, h, a, b, c, d, e, K[27], R2(27));
++      R(e, f, g, h, a, b, c, d, K[28], R2(28));
++      R(d, e, f, g, h, a, b, c, K[29], R2(29));
++      R(c, d, e, f, g, h, a, b, K[30], R2(30));
++      R(b, c, d, e, f, g, h, a, K[31], R2(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], R2(32));
++      R(h, a, b, c, d, e, f, g, K[33], R2(33));
++      R(g, h, a, b, c, d, e, f, K[34], R2(34));
++      R(f, g, h, a, b, c, d, e, K[35], R2(35));
++      R(e, f, g, h, a, b, c, d, K[36], R2(36));
++      R(d, e, f, g, h, a, b, c, K[37], R2(37));
++      R(c, d, e, f, g, h, a, b, K[38], R2(38));
++      R(b, c, d, e, f, g, h, a, K[39], R2(39));
++      R(a, b, c, d, e, f, g, h, K[40], R2(40));
++      R(h, a, b, c, d, e, f, g, K[41], R2(41));
++      R(g, h, a, b, c, d, e, f, K[42], R2(42));
++      R(f, g, h, a, b, c, d, e, K[43], R2(43));
++      R(e, f, g, h, a, b, c, d, K[44], R2(44));
++      R(d, e, f, g, h, a, b, c, K[45], R2(45));
++      R(c, d, e, f, g, h, a, b, K[46], R2(46));
++      R(b, c, d, e, f, g, h, a, K[47], R2(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], R2(48));
++      R(h, a, b, c, d, e, f, g, K[49], R2(49));
++      R(g, h, a, b, c, d, e, f, K[50], R2(50));
++      R(f, g, h, a, b, c, d, e, K[51], R2(51));
++      R(e, f, g, h, a, b, c, d, K[52], R2(52));
++      R(d, e, f, g, h, a, b, c, K[53], R2(53));
++      R(c, d, e, f, g, h, a, b, K[54], R2(54));
++      R(b, c, d, e, f, g, h, a, K[55], R2(55));
++      R(a, b, c, d, e, f, g, h, K[56], R2(56));
++      R(h, a, b, c, d, e, f, g, K[57], R2(57));
++      R(g, h, a, b, c, d, e, f, K[58], R2(58));
++      R(f, g, h, a, b, c, d, e, K[59], R2(59));
++      R(e, f, g, h, a, b, c, d, K[60], R2(60));
++      R(d, e, f, g, h, a, b, c, K[61], R2(61));
++      R(c, d, e, f, g, h, a, b, K[62], R2(62));
++      R(b, c, d, e, f, g, h, a, K[63], R2(63));
++
++      h0 += a;
++      h1 += b;
++      h2 += c;
++      h3 += d;
++      h4 += e;
++      h5 += f;
++      h6 += g;
++      h7 += h;
++
++      nblks -= 2;
++    }
++
++  while (nblks)
++    {
++      a = h0;
++      b = h1;
++      c = h2;
++      d = h3;
++      e = h4;
++      f = h5;
++      g = h6;
++      h = h7;
++
++      R(a, b, c, d, e, f, g, h, K[0], I(0));
++      R(h, a, b, c, d, e, f, g, K[1], I(1));
++      R(g, h, a, b, c, d, e, f, K[2], I(2));
++      R(f, g, h, a, b, c, d, e, K[3], I(3));
++      R(e, f, g, h, a, b, c, d, K[4], I(4));
++      R(d, e, f, g, h, a, b, c, K[5], I(5));
++      R(c, d, e, f, g, h, a, b, K[6], I(6));
++      R(b, c, d, e, f, g, h, a, K[7], I(7));
++      R(a, b, c, d, e, f, g, h, K[8], I(8));
++      R(h, a, b, c, d, e, f, g, K[9], I(9));
++      R(g, h, a, b, c, d, e, f, K[10], I(10));
++      R(f, g, h, a, b, c, d, e, K[11], I(11));
++      R(e, f, g, h, a, b, c, d, K[12], I(12));
++      R(d, e, f, g, h, a, b, c, K[13], I(13));
++      R(c, d, e, f, g, h, a, b, K[14], I(14));
++      R(b, c, d, e, f, g, h, a, K[15], I(15));
++      data += 64;
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      h0 += a;
++      h1 += b;
++      h2 += c;
++      h3 += d;
++      h4 += e;
++      h5 += f;
++      h6 += g;
++      h7 += h;
++
++      nblks--;
++    }
++
++  h0_h3 = vec_merge_idx0_elems (h0, h1, h2, h3);
++  h4_h7 = vec_merge_idx0_elems (h4, h5, h6, h7);
++  vec_vsx_st (h0_h3, 4 * 0, state);
++  vec_vsx_st (h4_h7, 4 * 4, state);
++
++  return sizeof(w2) + sizeof(w);
++}
++#undef R
++#undef Cho
++#undef Maj
++#undef Sum0
++#undef Sum1
++#undef S0
++#undef S1
++#undef I
++#undef W
++#undef I2
++#undef W2
++#undef R2
++
++
++/* SHA2 round in general purpose registers */
++#define R(a,b,c,d,e,f,g,h,k,w) do                                 \
++          {                                                       \
++            t1 = (h) + Sum1((e)) + Cho((e),(f),(g)) + ((k) + (w));\
++            t2 = Sum0((a)) + Maj((a),(b),(c));                    \
++            d += t1;                                              \
++            h  = t1 + t2;                                         \
++          } while (0)
++
++#define Cho(x, y, z)  ((x & y) + (~x & z))
++
++#define Maj(z, x, y)  ((x & y) + (z & (x ^ y)))
++
++#define Sum0(x)       (ror (x, 2) ^ ror (x ^ ror (x, 22-13), 13))
++
++#define Sum1(x)       (ror (x, 6) ^ ror (x, 11) ^ ror (x, 25))
++
++
++/* Message expansion on general purpose registers */
++#define S0(x) (ror ((x), 7) ^ ror ((x), 18) ^ ((x) >> 3))
++#define S1(x) (ror ((x), 17) ^ ror ((x), 19) ^ ((x) >> 10))
++
++#define I(i) ( w[i] = buf_get_be32(data + i * 4) )
++#define WN(i) ({ w[i&0x0f] +=    w[(i-7) &0x0f];  \
++		 w[i&0x0f] += S0(w[(i-15)&0x0f]); \
++		 w[i&0x0f] += S1(w[(i-2) &0x0f]); \
++		 w[i&0x0f]; })
++#define W(i) ({ u32 r = w[i&0x0f]; WN(i); r; })
++#define L(i) w[i&0x0f]
++
++
++unsigned int ASM_FUNC_ATTR
++_gcry_sha256_transform_ppc9(u32 state[8], const unsigned char *data,
++			    size_t nblks)
++{
++  /* GPRs are used for the round function and message expansion because the
++   * vector intrinsics based variant generates slower code on POWER9. */
++  u32 a, b, c, d, e, f, g, h, t1, t2;
++  u32 w[16];
++
++  a = state[0];
++  b = state[1];
++  c = state[2];
++  d = state[3];
++  e = state[4];
++  f = state[5];
++  g = state[6];
++  h = state[7];
++
++  while (nblks >= 2)
++    {
++      I(0); I(1); I(2); I(3);
++      I(4); I(5); I(6); I(7);
++      I(8); I(9); I(10); I(11);
++      I(12); I(13); I(14); I(15);
++      data += 64;
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], L(48));
++      R(h, a, b, c, d, e, f, g, K[49], L(49));
++      R(g, h, a, b, c, d, e, f, K[50], L(50));
++      R(f, g, h, a, b, c, d, e, K[51], L(51));
++      I(0); I(1); I(2); I(3);
++      R(e, f, g, h, a, b, c, d, K[52], L(52));
++      R(d, e, f, g, h, a, b, c, K[53], L(53));
++      R(c, d, e, f, g, h, a, b, K[54], L(54));
++      R(b, c, d, e, f, g, h, a, K[55], L(55));
++      I(4); I(5); I(6); I(7);
++      R(a, b, c, d, e, f, g, h, K[56], L(56));
++      R(h, a, b, c, d, e, f, g, K[57], L(57));
++      R(g, h, a, b, c, d, e, f, K[58], L(58));
++      R(f, g, h, a, b, c, d, e, K[59], L(59));
++      I(8); I(9); I(10); I(11);
++      R(e, f, g, h, a, b, c, d, K[60], L(60));
++      R(d, e, f, g, h, a, b, c, K[61], L(61));
++      R(c, d, e, f, g, h, a, b, K[62], L(62));
++      R(b, c, d, e, f, g, h, a, K[63], L(63));
++      I(12); I(13); I(14); I(15);
++      data += 64;
++
++      a += state[0];
++      b += state[1];
++      c += state[2];
++      d += state[3];
++      e += state[4];
++      f += state[5];
++      g += state[6];
++      h += state[7];
++      state[0] = a;
++      state[1] = b;
++      state[2] = c;
++      state[3] = d;
++      state[4] = e;
++      state[5] = f;
++      state[6] = g;
++      state[7] = h;
++
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], L(48));
++      R(h, a, b, c, d, e, f, g, K[49], L(49));
++      R(g, h, a, b, c, d, e, f, K[50], L(50));
++      R(f, g, h, a, b, c, d, e, K[51], L(51));
++      R(e, f, g, h, a, b, c, d, K[52], L(52));
++      R(d, e, f, g, h, a, b, c, K[53], L(53));
++      R(c, d, e, f, g, h, a, b, K[54], L(54));
++      R(b, c, d, e, f, g, h, a, K[55], L(55));
++      R(a, b, c, d, e, f, g, h, K[56], L(56));
++      R(h, a, b, c, d, e, f, g, K[57], L(57));
++      R(g, h, a, b, c, d, e, f, K[58], L(58));
++      R(f, g, h, a, b, c, d, e, K[59], L(59));
++      R(e, f, g, h, a, b, c, d, K[60], L(60));
++      R(d, e, f, g, h, a, b, c, K[61], L(61));
++      R(c, d, e, f, g, h, a, b, K[62], L(62));
++      R(b, c, d, e, f, g, h, a, K[63], L(63));
++
++      a += state[0];
++      b += state[1];
++      c += state[2];
++      d += state[3];
++      e += state[4];
++      f += state[5];
++      g += state[6];
++      h += state[7];
++      state[0] = a;
++      state[1] = b;
++      state[2] = c;
++      state[3] = d;
++      state[4] = e;
++      state[5] = f;
++      state[6] = g;
++      state[7] = h;
++
++      nblks -= 2;
++    }
++
++  while (nblks)
++    {
++      I(0); I(1); I(2); I(3);
++      I(4); I(5); I(6); I(7);
++      I(8); I(9); I(10); I(11);
++      I(12); I(13); I(14); I(15);
++      data += 64;
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], L(48));
++      R(h, a, b, c, d, e, f, g, K[49], L(49));
++      R(g, h, a, b, c, d, e, f, K[50], L(50));
++      R(f, g, h, a, b, c, d, e, K[51], L(51));
++      R(e, f, g, h, a, b, c, d, K[52], L(52));
++      R(d, e, f, g, h, a, b, c, K[53], L(53));
++      R(c, d, e, f, g, h, a, b, K[54], L(54));
++      R(b, c, d, e, f, g, h, a, K[55], L(55));
++      R(a, b, c, d, e, f, g, h, K[56], L(56));
++      R(h, a, b, c, d, e, f, g, K[57], L(57));
++      R(g, h, a, b, c, d, e, f, K[58], L(58));
++      R(f, g, h, a, b, c, d, e, K[59], L(59));
++      R(e, f, g, h, a, b, c, d, K[60], L(60));
++      R(d, e, f, g, h, a, b, c, K[61], L(61));
++      R(c, d, e, f, g, h, a, b, K[62], L(62));
++      R(b, c, d, e, f, g, h, a, K[63], L(63));
++
++      a += state[0];
++      b += state[1];
++      c += state[2];
++      d += state[3];
++      e += state[4];
++      f += state[5];
++      g += state[6];
++      h += state[7];
++      state[0] = a;
++      state[1] = b;
++      state[2] = c;
++      state[3] = d;
++      state[4] = e;
++      state[5] = f;
++      state[6] = g;
++      state[7] = h;
++
++      nblks--;
++    }
++
++  return sizeof(w);
++}
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
+diff --git a/cipher/sha256.c b/cipher/sha256.c
+index d174321d..6d49b6c2 100644
+--- a/cipher/sha256.c
++++ b/cipher/sha256.c
+@@ -90,6 +90,18 @@
+ # endif
+ #endif
+ 
++/* USE_PPC_CRYPTO indicates whether to enable PowerPC vector crypto
++ * accelerated code. */
++#undef USE_PPC_CRYPTO
++#ifdef ENABLE_PPC_CRYPTO_SUPPORT
++# if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++     defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
++#  if __GNUC__ >= 4
++#   define USE_PPC_CRYPTO 1
++#  endif
++# endif
++#endif
++
+ 
+ typedef struct {
+   gcry_md_block_ctx_t bctx;
+@@ -108,28 +120,41 @@ typedef struct {
+ #endif
+ } SHA256_CONTEXT;
+ 
++#ifdef USE_PPC_CRYPTO
++unsigned int _gcry_sha256_transform_ppc8(u32 state[8],
++					 const unsigned char *input_data,
++					 size_t num_blks);
++
++unsigned int _gcry_sha256_transform_ppc9(u32 state[8],
++					 const unsigned char *input_data,
++					 size_t num_blks);
++
++static unsigned int
++do_sha256_transform_ppc8(void *ctx, const unsigned char *data, size_t nblks)
++{
++  SHA256_CONTEXT *hd = ctx;
++  return _gcry_sha256_transform_ppc8 (&hd->h0, data, nblks);
++}
++
++static unsigned int
++do_sha256_transform_ppc9(void *ctx, const unsigned char *data, size_t nblks)
++{
++  SHA256_CONTEXT *hd = ctx;
++  return _gcry_sha256_transform_ppc9 (&hd->h0, data, nblks);
++}
++#endif
++
++
+ 
+ static unsigned int
+ transform (void *c, const unsigned char *data, size_t nblks);
+ 
+ 
+ static void
+-sha256_init (void *context, unsigned int flags)
++sha256_common_init (SHA256_CONTEXT *hd)
+ {
+-  SHA256_CONTEXT *hd = context;
+   unsigned int features = _gcry_get_hw_features ();
+ 
+-  (void)flags;
+-
+-  hd->h0 = 0x6a09e667;
+-  hd->h1 = 0xbb67ae85;
+-  hd->h2 = 0x3c6ef372;
+-  hd->h3 = 0xa54ff53a;
+-  hd->h4 = 0x510e527f;
+-  hd->h5 = 0x9b05688c;
+-  hd->h6 = 0x1f83d9ab;
+-  hd->h7 = 0x5be0cd19;
+-
+   hd->bctx.nblocks = 0;
+   hd->bctx.nblocks_high = 0;
+   hd->bctx.count = 0;
+@@ -149,16 +174,41 @@ sha256_init (void *context, unsigned int flags)
+ #endif
+ #ifdef USE_ARM_CE
+   hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
++#endif
++#ifdef USE_PPC_CRYPTO
++  if ((features & HWF_PPC_VCRYPTO) != 0)
++    hd->bctx.bwrite = do_sha256_transform_ppc8;
++  if ((features & HWF_PPC_VCRYPTO) != 0 && (features & HWF_PPC_ARCH_3_00) != 0)
++    hd->bctx.bwrite = do_sha256_transform_ppc9;
+ #endif
+   (void)features;
+ }
+ 
+ 
++static void
++sha256_init (void *context, unsigned int flags)
++{
++  SHA256_CONTEXT *hd = context;
++
++  (void)flags;
++
++  hd->h0 = 0x6a09e667;
++  hd->h1 = 0xbb67ae85;
++  hd->h2 = 0x3c6ef372;
++  hd->h3 = 0xa54ff53a;
++  hd->h4 = 0x510e527f;
++  hd->h5 = 0x9b05688c;
++  hd->h6 = 0x1f83d9ab;
++  hd->h7 = 0x5be0cd19;
++
++  sha256_common_init (hd);
++}
++
++
+ static void
+ sha224_init (void *context, unsigned int flags)
+ {
+   SHA256_CONTEXT *hd = context;
+-  unsigned int features = _gcry_get_hw_features ();
+ 
+   (void)flags;
+ 
+@@ -171,27 +221,7 @@ sha224_init (void *context, unsigned int flags)
+   hd->h6 = 0x64f98fa7;
+   hd->h7 = 0xbefa4fa4;
+ 
+-  hd->bctx.nblocks = 0;
+-  hd->bctx.nblocks_high = 0;
+-  hd->bctx.count = 0;
+-  hd->bctx.blocksize = 64;
+-  hd->bctx.bwrite = transform;
+-
+-#ifdef USE_SSSE3
+-  hd->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+-#endif
+-#ifdef USE_AVX
+-  /* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
+-   * Therefore use this implementation on Intel CPUs only. */
+-  hd->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
+-#endif
+-#ifdef USE_AVX2
+-  hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+-#endif
+-#ifdef USE_ARM_CE
+-  hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
+-#endif
+-  (void)features;
++  sha256_common_init (hd);
+ }
+ 
+ 
+diff --git a/cipher/sha512-ppc.c b/cipher/sha512-ppc.c
+new file mode 100644
+index 00000000..a758e1ea
+--- /dev/null
++++ b/cipher/sha512-ppc.c
+@@ -0,0 +1,921 @@
++/* sha512-ppc.c - PowerPC vcrypto implementation of SHA-512 transform
++ * Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <config.h>
++
++#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
++    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
++    defined(USE_SHA512) && \
++    __GNUC__ >= 4
++
++#include <altivec.h>
++#include "bufhelp.h"
++
++
++typedef vector unsigned char vector16x_u8;
++typedef vector unsigned long long vector2x_u64;
++
++
++#define ALWAYS_INLINE inline __attribute__((always_inline))
++#define NO_INLINE __attribute__((noinline))
++#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))
++
++#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
++#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
++#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE
++
++
++static const u64 K[80] =
++  {
++    U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd),
++    U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc),
++    U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019),
++    U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118),
++    U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe),
++    U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2),
++    U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1),
++    U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694),
++    U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3),
++    U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65),
++    U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483),
++    U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5),
++    U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210),
++    U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4),
++    U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725),
++    U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70),
++    U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926),
++    U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df),
++    U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8),
++    U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b),
++    U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001),
++    U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30),
++    U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910),
++    U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8),
++    U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53),
++    U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8),
++    U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb),
++    U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3),
++    U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60),
++    U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec),
++    U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9),
++    U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b),
++    U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207),
++    U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178),
++    U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6),
++    U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b),
++    U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493),
++    U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c),
++    U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a),
++    U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817)
++  };
++
++
++static ASM_FUNC_ATTR_INLINE u64
++ror64 (u64 v, u64 shift)
++{
++  return (v >> (shift & 63)) ^ (v << ((64 - shift) & 63));
++}
++
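
ror64() above is a plain 64-bit rotate right; for shift counts 1..63 the two shifted halves occupy disjoint bit positions, so XOR behaves like OR. A quick standalone check of the behaviour (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t
    ror64 (uint64_t v, unsigned int shift)
    {
      /* Same formula as the patch, for shift values 1..63. */
      return (v >> (shift & 63)) ^ (v << ((64 - shift) & 63));
    }

    int
    main (void)
    {
      /* Rotating right by 8 moves the low byte to the top. */
      assert (ror64 (0x0123456789abcdefULL, 8) == 0xef0123456789abcdULL);
      /* Rotate right by n equals rotate left by 64 - n. */
      assert (ror64 (0xdeadbeefcafef00dULL, 23)
              == (0xdeadbeefcafef00dULL << 41 | 0xdeadbeefcafef00dULL >> 23));
      return 0;
    }
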
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++vec_rol_elems(vector2x_u64 v, unsigned int idx)
++{
++#ifndef WORDS_BIGENDIAN
++  return vec_sld (v, v, (16 - (8 * idx)) & 15);
++#else
++  return vec_sld (v, v, (8 * idx) & 15);
++#endif
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++vec_merge_idx0_elems(vector2x_u64 v0, vector2x_u64 v1)
++{
++  return vec_mergeh (v0, v1);
++}
++
++
++static ASM_FUNC_ATTR_INLINE vector2x_u64
++vec_vshasigma_u64(vector2x_u64 v, unsigned int a, unsigned int b)
++{
++  asm ("vshasigmad %0,%1,%2,%3"
++       : "=v" (v)
++       : "v" (v), "g" (a), "g" (b)
++       : "memory");
++  return v;
++}
++
++
++/* SHA2 round in vector registers */
++#define R(a,b,c,d,e,f,g,h,k,w) do                             \
++    {                                                         \
++      t1  = (h);                                              \
++      t1 += ((k) + (w));                                      \
++      t1 += Cho((e),(f),(g));                                 \
++      t1 += Sum1((e));                                        \
++      t2  = Sum0((a));                                        \
++      t2 += Maj((a),(b),(c));                                 \
++      d  += t1;                                               \
++      h   = t1 + t2;                                          \
++    } while (0)
++
++#define Cho(b, c, d)  (vec_sel(d, c, b))
++
++#define Maj(c, d, b)  (vec_sel(c, b, c ^ d))
++
++#define Sum0(x)       (vec_vshasigma_u64(x, 1, 0))
++
++#define Sum1(x)       (vec_vshasigma_u64(x, 1, 15))
++
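
Cho() and Maj() above are expressed as single vec_sel() bit-selects instead of the textbook three-term formulas. A scalar sketch of why the two formulations agree, with sel(a, b, m) modelling vec_sel (take bits of b where m is 1, otherwise bits of a):

    #include <assert.h>
    #include <stdint.h>

    /* Scalar model of vec_sel: result takes bits from b where mask is 1. */
    static uint64_t
    sel (uint64_t a, uint64_t b, uint64_t mask)
    {
      return (a & ~mask) | (b & mask);
    }

    int
    main (void)
    {
      uint64_t x = 0x0123456789abcdefULL;
      uint64_t y = 0xfedcba9876543210ULL;
      uint64_t z = 0x0f0f0f0f0f0f0f0fULL;

      /* Ch(x,y,z) = (x & y) ^ (~x & z)  ==  sel(z, y, x)  (Cho above). */
      assert (((x & y) ^ (~x & z)) == sel (z, y, x));

      /* Maj(x,y,z) = (x & y) ^ (x & z) ^ (y & z) == sel(x, z, x ^ y)
       * (Maj above, as invoked with the round variables a, b, c).      */
      assert (((x & y) ^ (x & z) ^ (y & z)) == sel (x, z, x ^ y));

      return 0;
    }
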
++
++/* Message expansion on general purpose registers */
++#define S0(x) (ror64 ((x), 1) ^ ror64 ((x), 8) ^ ((x) >> 7))
++#define S1(x) (ror64 ((x), 19) ^ ror64 ((x), 61) ^ ((x) >> 6))
++
++#define I(i) ( w[i] = buf_get_be64(data + i * 8) )
++#define WN(i) ({ w[i&0x0f] +=    w[(i-7) &0x0f];  \
++		 w[i&0x0f] += S0(w[(i-15)&0x0f]); \
++		 w[i&0x0f] += S1(w[(i-2) &0x0f]); \
++		 w[i&0x0f]; })
++#define W(i) ({ u64 r = w[i&0x0f]; WN(i); r; })
++#define L(i) w[i&0x0f]
++
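
The schedule above keeps only a 16-word window: W(i) returns w[i mod 16] and then folds the next expanded word into that slot via WN(i), while L(i) merely reads the slot (sufficient for the final 16 rounds, which is also what lets the two-block loop below interleave the next block's I() loads there). A scalar sketch comparing the rolling window with a plain 80-entry schedule, illustrative only:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t ror64 (uint64_t v, unsigned int s)
    { return (v >> s) | (v << (64 - s)); }

    static uint64_t s0 (uint64_t x)
    { return ror64 (x, 1) ^ ror64 (x, 8) ^ (x >> 7); }

    static uint64_t s1 (uint64_t x)
    { return ror64 (x, 19) ^ ror64 (x, 61) ^ (x >> 6); }

    int
    main (void)
    {
      uint64_t full[80], w[16];
      int i;

      /* Arbitrary block contents for the comparison. */
      for (i = 0; i < 16; i++)
        full[i] = w[i] = 0x0101010101010101ULL * (uint64_t)(i + 1);

      /* Straightforward 80-entry schedule. */
      for (i = 16; i < 80; i++)
        full[i] = full[i - 16] + s0 (full[i - 15])
                  + full[i - 7] + s1 (full[i - 2]);

      /* Rolling 16-word window, as the W()/WN() macros do with i & 0x0f. */
      for (i = 16; i < 80; i++)
        {
          w[i & 15] += s0 (w[(i - 15) & 15]) + w[(i - 7) & 15]
                       + s1 (w[(i - 2) & 15]);
          assert (w[i & 15] == full[i]);
        }
      return 0;
    }
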
++
++unsigned int ASM_FUNC_ATTR
++_gcry_sha512_transform_ppc8(u64 state[8],
++			    const unsigned char *data, size_t nblks)
++{
++  /* GPRs are used for message expansion because a vector-intrinsics-based
++   * variant generates slower code. */
++  vector2x_u64 h0, h1, h2, h3, h4, h5, h6, h7;
++  vector2x_u64 a, b, c, d, e, f, g, h, t1, t2;
++  u64 w[16];
++
++  h0 = vec_vsx_ld (8 * 0, (unsigned long long *)state);
++  h1 = vec_rol_elems (h0, 1);
++  h2 = vec_vsx_ld (8 * 2, (unsigned long long *)state);
++  h3 = vec_rol_elems (h2, 1);
++  h4 = vec_vsx_ld (8 * 4, (unsigned long long *)state);
++  h5 = vec_rol_elems (h4, 1);
++  h6 = vec_vsx_ld (8 * 6, (unsigned long long *)state);
++  h7 = vec_rol_elems (h6, 1);
++
++  while (nblks >= 2)
++    {
++      a = h0;
++      b = h1;
++      c = h2;
++      d = h3;
++      e = h4;
++      f = h5;
++      g = h6;
++      h = h7;
++
++      I(0); I(1); I(2); I(3);
++      I(4); I(5); I(6); I(7);
++      I(8); I(9); I(10); I(11);
++      I(12); I(13); I(14); I(15);
++      data += 128;
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      R(a, b, c, d, e, f, g, h, K[64], L(64));
++      R(h, a, b, c, d, e, f, g, K[65], L(65));
++      R(g, h, a, b, c, d, e, f, K[66], L(66));
++      R(f, g, h, a, b, c, d, e, K[67], L(67));
++      I(0); I(1); I(2); I(3);
++      R(e, f, g, h, a, b, c, d, K[68], L(68));
++      R(d, e, f, g, h, a, b, c, K[69], L(69));
++      R(c, d, e, f, g, h, a, b, K[70], L(70));
++      R(b, c, d, e, f, g, h, a, K[71], L(71));
++      I(4); I(5); I(6); I(7);
++      R(a, b, c, d, e, f, g, h, K[72], L(72));
++      R(h, a, b, c, d, e, f, g, K[73], L(73));
++      R(g, h, a, b, c, d, e, f, K[74], L(74));
++      R(f, g, h, a, b, c, d, e, K[75], L(75));
++      I(8); I(9); I(10); I(11);
++      R(e, f, g, h, a, b, c, d, K[76], L(76));
++      R(d, e, f, g, h, a, b, c, K[77], L(77));
++      R(c, d, e, f, g, h, a, b, K[78], L(78));
++      R(b, c, d, e, f, g, h, a, K[79], L(79));
++      I(12); I(13); I(14); I(15);
++      data += 128;
++
++      h0 += a;
++      h1 += b;
++      h2 += c;
++      h3 += d;
++      h4 += e;
++      h5 += f;
++      h6 += g;
++      h7 += h;
++      a = h0;
++      b = h1;
++      c = h2;
++      d = h3;
++      e = h4;
++      f = h5;
++      g = h6;
++      h = h7;
++
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      R(a, b, c, d, e, f, g, h, K[64], L(64));
++      R(h, a, b, c, d, e, f, g, K[65], L(65));
++      R(g, h, a, b, c, d, e, f, K[66], L(66));
++      R(f, g, h, a, b, c, d, e, K[67], L(67));
++      R(e, f, g, h, a, b, c, d, K[68], L(68));
++      R(d, e, f, g, h, a, b, c, K[69], L(69));
++      R(c, d, e, f, g, h, a, b, K[70], L(70));
++      R(b, c, d, e, f, g, h, a, K[71], L(71));
++      R(a, b, c, d, e, f, g, h, K[72], L(72));
++      R(h, a, b, c, d, e, f, g, K[73], L(73));
++      R(g, h, a, b, c, d, e, f, K[74], L(74));
++      R(f, g, h, a, b, c, d, e, K[75], L(75));
++      R(e, f, g, h, a, b, c, d, K[76], L(76));
++      R(d, e, f, g, h, a, b, c, K[77], L(77));
++      R(c, d, e, f, g, h, a, b, K[78], L(78));
++      R(b, c, d, e, f, g, h, a, K[79], L(79));
++
++      h0 += a;
++      h1 += b;
++      h2 += c;
++      h3 += d;
++      h4 += e;
++      h5 += f;
++      h6 += g;
++      h7 += h;
++
++      nblks -= 2;
++    }
++
++  while (nblks)
++    {
++      a = h0;
++      b = h1;
++      c = h2;
++      d = h3;
++      e = h4;
++      f = h5;
++      g = h6;
++      h = h7;
++
++      I(0); I(1); I(2); I(3);
++      I(4); I(5); I(6); I(7);
++      I(8); I(9); I(10); I(11);
++      I(12); I(13); I(14); I(15);
++      data += 128;
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      R(a, b, c, d, e, f, g, h, K[64], L(64));
++      R(h, a, b, c, d, e, f, g, K[65], L(65));
++      R(g, h, a, b, c, d, e, f, K[66], L(66));
++      R(f, g, h, a, b, c, d, e, K[67], L(67));
++      R(e, f, g, h, a, b, c, d, K[68], L(68));
++      R(d, e, f, g, h, a, b, c, K[69], L(69));
++      R(c, d, e, f, g, h, a, b, K[70], L(70));
++      R(b, c, d, e, f, g, h, a, K[71], L(71));
++      R(a, b, c, d, e, f, g, h, K[72], L(72));
++      R(h, a, b, c, d, e, f, g, K[73], L(73));
++      R(g, h, a, b, c, d, e, f, K[74], L(74));
++      R(f, g, h, a, b, c, d, e, K[75], L(75));
++      R(e, f, g, h, a, b, c, d, K[76], L(76));
++      R(d, e, f, g, h, a, b, c, K[77], L(77));
++      R(c, d, e, f, g, h, a, b, K[78], L(78));
++      R(b, c, d, e, f, g, h, a, K[79], L(79));
++
++      h0 += a;
++      h1 += b;
++      h2 += c;
++      h3 += d;
++      h4 += e;
++      h5 += f;
++      h6 += g;
++      h7 += h;
++
++      nblks--;
++    }
++
++  h0 = vec_merge_idx0_elems (h0, h1);
++  h2 = vec_merge_idx0_elems (h2, h3);
++  h4 = vec_merge_idx0_elems (h4, h5);
++  h6 = vec_merge_idx0_elems (h6, h7);
++  vec_vsx_st (h0, 8 * 0, (unsigned long long *)state);
++  vec_vsx_st (h2, 8 * 2, (unsigned long long *)state);
++  vec_vsx_st (h4, 8 * 4, (unsigned long long *)state);
++  vec_vsx_st (h6, 8 * 6, (unsigned long long *)state);
++
++  return sizeof(w);
++}
++#undef R
++#undef Cho
++#undef Maj
++#undef Sum0
++#undef Sum1
++#undef S0
++#undef S1
++#undef I
++#undef W
++#undef I2
++#undef W2
++#undef R2
++
++
++/* SHA2 round in general purpose registers */
++#define R(a,b,c,d,e,f,g,h,k,w) do                                 \
++          {                                                       \
++            t1 = (h) + Sum1((e)) + Cho((e),(f),(g)) + ((k) + (w));\
++            t2 = Sum0((a)) + Maj((a),(b),(c));                    \
++            d += t1;                                              \
++            h  = t1 + t2;                                         \
++          } while (0)
++
++#define Cho(x, y, z)  ((x & y) + (~x & z))
++
++#define Maj(z, x, y)  ((x & y) + (z & (x ^ y)))
++
++#define Sum0(x)       (ror64(x, 28) ^ ror64(x ^ ror64(x, 39-34), 34))
++
++#define Sum1(x)       (ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41))
++
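
The GPR variant of Sum0() above merges the textbook three rotations into two using ror(x,34) ^ ror(x,39) == ror(x ^ ror(x,5), 34), and Cho()/Maj() use '+' where the two terms can never have a set bit in common, so addition, OR and XOR all coincide. A small scalar check of both points (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t ror64 (uint64_t v, unsigned int s)
    { return (v >> s) | (v << (64 - s)); }

    int
    main (void)
    {
      uint64_t x = 0x6a09e667f3bcc908ULL;   /* any values work */
      uint64_t y = 0xbb67ae8584caa73bULL;
      uint64_t z = 0x3c6ef372fe94f82bULL;

      /* Two-rotation Sum0 equals the textbook three-rotation form. */
      assert ((ror64 (x, 28) ^ ror64 (x ^ ror64 (x, 39 - 34), 34))
              == (ror64 (x, 28) ^ ror64 (x, 34) ^ ror64 (x, 39)));

      /* (x & y) and (~x & z) are bit-disjoint, so '+' acts like '^'. */
      assert (((x & y) + (~x & z)) == ((x & y) ^ (~x & z)));

      /* (x & y) and (z & (x ^ y)) are bit-disjoint as well. */
      assert (((x & y) + (z & (x ^ y))) == ((x & y) ^ (z & (x ^ y))));

      return 0;
    }
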
++
++/* Message expansion on general purpose registers */
++#define S0(x) (ror64 ((x), 1) ^ ror64 ((x), 8) ^ ((x) >> 7))
++#define S1(x) (ror64 ((x), 19) ^ ror64 ((x), 61) ^ ((x) >> 6))
++
++#define I(i) ( w[i] = buf_get_be64(data + i * 8) )
++#define WN(i) ({ w[i&0x0f] +=    w[(i-7) &0x0f];  \
++		 w[i&0x0f] += S0(w[(i-15)&0x0f]); \
++		 w[i&0x0f] += S1(w[(i-2) &0x0f]); \
++		 w[i&0x0f]; })
++#define W(i) ({ u64 r = w[i&0x0f]; WN(i); r; })
++#define L(i) w[i&0x0f]
++
++
++unsigned int ASM_FUNC_ATTR
++_gcry_sha512_transform_ppc9(u64 state[8], const unsigned char *data,
++			    size_t nblks)
++{
++  /* GPRs are used for the round function and message expansion because a
++   * vector-intrinsics-based variant generates slower code on POWER9. */
++  u64 a, b, c, d, e, f, g, h, t1, t2;
++  u64 w[16];
++
++  a = state[0];
++  b = state[1];
++  c = state[2];
++  d = state[3];
++  e = state[4];
++  f = state[5];
++  g = state[6];
++  h = state[7];
++
++  while (nblks >= 2)
++    {
++      I(0); I(1); I(2); I(3);
++      I(4); I(5); I(6); I(7);
++      I(8); I(9); I(10); I(11);
++      I(12); I(13); I(14); I(15);
++      data += 128;
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      R(a, b, c, d, e, f, g, h, K[64], L(64));
++      R(h, a, b, c, d, e, f, g, K[65], L(65));
++      R(g, h, a, b, c, d, e, f, K[66], L(66));
++      R(f, g, h, a, b, c, d, e, K[67], L(67));
++      I(0); I(1); I(2); I(3);
++      R(e, f, g, h, a, b, c, d, K[68], L(68));
++      R(d, e, f, g, h, a, b, c, K[69], L(69));
++      R(c, d, e, f, g, h, a, b, K[70], L(70));
++      R(b, c, d, e, f, g, h, a, K[71], L(71));
++      I(4); I(5); I(6); I(7);
++      R(a, b, c, d, e, f, g, h, K[72], L(72));
++      R(h, a, b, c, d, e, f, g, K[73], L(73));
++      R(g, h, a, b, c, d, e, f, K[74], L(74));
++      R(f, g, h, a, b, c, d, e, K[75], L(75));
++      I(8); I(9); I(10); I(11);
++      R(e, f, g, h, a, b, c, d, K[76], L(76));
++      R(d, e, f, g, h, a, b, c, K[77], L(77));
++      R(c, d, e, f, g, h, a, b, K[78], L(78));
++      R(b, c, d, e, f, g, h, a, K[79], L(79));
++      I(12); I(13); I(14); I(15);
++      data += 128;
++
++      a += state[0];
++      b += state[1];
++      c += state[2];
++      d += state[3];
++      e += state[4];
++      f += state[5];
++      g += state[6];
++      h += state[7];
++      state[0] = a;
++      state[1] = b;
++      state[2] = c;
++      state[3] = d;
++      state[4] = e;
++      state[5] = f;
++      state[6] = g;
++      state[7] = h;
++
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      R(a, b, c, d, e, f, g, h, K[64], L(64));
++      R(h, a, b, c, d, e, f, g, K[65], L(65));
++      R(g, h, a, b, c, d, e, f, K[66], L(66));
++      R(f, g, h, a, b, c, d, e, K[67], L(67));
++      R(e, f, g, h, a, b, c, d, K[68], L(68));
++      R(d, e, f, g, h, a, b, c, K[69], L(69));
++      R(c, d, e, f, g, h, a, b, K[70], L(70));
++      R(b, c, d, e, f, g, h, a, K[71], L(71));
++      R(a, b, c, d, e, f, g, h, K[72], L(72));
++      R(h, a, b, c, d, e, f, g, K[73], L(73));
++      R(g, h, a, b, c, d, e, f, K[74], L(74));
++      R(f, g, h, a, b, c, d, e, K[75], L(75));
++      R(e, f, g, h, a, b, c, d, K[76], L(76));
++      R(d, e, f, g, h, a, b, c, K[77], L(77));
++      R(c, d, e, f, g, h, a, b, K[78], L(78));
++      R(b, c, d, e, f, g, h, a, K[79], L(79));
++
++      a += state[0];
++      b += state[1];
++      c += state[2];
++      d += state[3];
++      e += state[4];
++      f += state[5];
++      g += state[6];
++      h += state[7];
++      state[0] = a;
++      state[1] = b;
++      state[2] = c;
++      state[3] = d;
++      state[4] = e;
++      state[5] = f;
++      state[6] = g;
++      state[7] = h;
++
++      nblks -= 2;
++    }
++
++  while (nblks)
++    {
++      I(0); I(1); I(2); I(3);
++      I(4); I(5); I(6); I(7);
++      I(8); I(9); I(10); I(11);
++      I(12); I(13); I(14); I(15);
++      data += 128;
++      R(a, b, c, d, e, f, g, h, K[0], W(0));
++      R(h, a, b, c, d, e, f, g, K[1], W(1));
++      R(g, h, a, b, c, d, e, f, K[2], W(2));
++      R(f, g, h, a, b, c, d, e, K[3], W(3));
++      R(e, f, g, h, a, b, c, d, K[4], W(4));
++      R(d, e, f, g, h, a, b, c, K[5], W(5));
++      R(c, d, e, f, g, h, a, b, K[6], W(6));
++      R(b, c, d, e, f, g, h, a, K[7], W(7));
++      R(a, b, c, d, e, f, g, h, K[8], W(8));
++      R(h, a, b, c, d, e, f, g, K[9], W(9));
++      R(g, h, a, b, c, d, e, f, K[10], W(10));
++      R(f, g, h, a, b, c, d, e, K[11], W(11));
++      R(e, f, g, h, a, b, c, d, K[12], W(12));
++      R(d, e, f, g, h, a, b, c, K[13], W(13));
++      R(c, d, e, f, g, h, a, b, K[14], W(14));
++      R(b, c, d, e, f, g, h, a, K[15], W(15));
++
++      R(a, b, c, d, e, f, g, h, K[16], W(16));
++      R(h, a, b, c, d, e, f, g, K[17], W(17));
++      R(g, h, a, b, c, d, e, f, K[18], W(18));
++      R(f, g, h, a, b, c, d, e, K[19], W(19));
++      R(e, f, g, h, a, b, c, d, K[20], W(20));
++      R(d, e, f, g, h, a, b, c, K[21], W(21));
++      R(c, d, e, f, g, h, a, b, K[22], W(22));
++      R(b, c, d, e, f, g, h, a, K[23], W(23));
++      R(a, b, c, d, e, f, g, h, K[24], W(24));
++      R(h, a, b, c, d, e, f, g, K[25], W(25));
++      R(g, h, a, b, c, d, e, f, K[26], W(26));
++      R(f, g, h, a, b, c, d, e, K[27], W(27));
++      R(e, f, g, h, a, b, c, d, K[28], W(28));
++      R(d, e, f, g, h, a, b, c, K[29], W(29));
++      R(c, d, e, f, g, h, a, b, K[30], W(30));
++      R(b, c, d, e, f, g, h, a, K[31], W(31));
++
++      R(a, b, c, d, e, f, g, h, K[32], W(32));
++      R(h, a, b, c, d, e, f, g, K[33], W(33));
++      R(g, h, a, b, c, d, e, f, K[34], W(34));
++      R(f, g, h, a, b, c, d, e, K[35], W(35));
++      R(e, f, g, h, a, b, c, d, K[36], W(36));
++      R(d, e, f, g, h, a, b, c, K[37], W(37));
++      R(c, d, e, f, g, h, a, b, K[38], W(38));
++      R(b, c, d, e, f, g, h, a, K[39], W(39));
++      R(a, b, c, d, e, f, g, h, K[40], W(40));
++      R(h, a, b, c, d, e, f, g, K[41], W(41));
++      R(g, h, a, b, c, d, e, f, K[42], W(42));
++      R(f, g, h, a, b, c, d, e, K[43], W(43));
++      R(e, f, g, h, a, b, c, d, K[44], W(44));
++      R(d, e, f, g, h, a, b, c, K[45], W(45));
++      R(c, d, e, f, g, h, a, b, K[46], W(46));
++      R(b, c, d, e, f, g, h, a, K[47], W(47));
++
++      R(a, b, c, d, e, f, g, h, K[48], W(48));
++      R(h, a, b, c, d, e, f, g, K[49], W(49));
++      R(g, h, a, b, c, d, e, f, K[50], W(50));
++      R(f, g, h, a, b, c, d, e, K[51], W(51));
++      R(e, f, g, h, a, b, c, d, K[52], W(52));
++      R(d, e, f, g, h, a, b, c, K[53], W(53));
++      R(c, d, e, f, g, h, a, b, K[54], W(54));
++      R(b, c, d, e, f, g, h, a, K[55], W(55));
++      R(a, b, c, d, e, f, g, h, K[56], W(56));
++      R(h, a, b, c, d, e, f, g, K[57], W(57));
++      R(g, h, a, b, c, d, e, f, K[58], W(58));
++      R(f, g, h, a, b, c, d, e, K[59], W(59));
++      R(e, f, g, h, a, b, c, d, K[60], W(60));
++      R(d, e, f, g, h, a, b, c, K[61], W(61));
++      R(c, d, e, f, g, h, a, b, K[62], W(62));
++      R(b, c, d, e, f, g, h, a, K[63], W(63));
++
++      R(a, b, c, d, e, f, g, h, K[64], L(64));
++      R(h, a, b, c, d, e, f, g, K[65], L(65));
++      R(g, h, a, b, c, d, e, f, K[66], L(66));
++      R(f, g, h, a, b, c, d, e, K[67], L(67));
++      R(e, f, g, h, a, b, c, d, K[68], L(68));
++      R(d, e, f, g, h, a, b, c, K[69], L(69));
++      R(c, d, e, f, g, h, a, b, K[70], L(70));
++      R(b, c, d, e, f, g, h, a, K[71], L(71));
++      R(a, b, c, d, e, f, g, h, K[72], L(72));
++      R(h, a, b, c, d, e, f, g, K[73], L(73));
++      R(g, h, a, b, c, d, e, f, K[74], L(74));
++      R(f, g, h, a, b, c, d, e, K[75], L(75));
++      R(e, f, g, h, a, b, c, d, K[76], L(76));
++      R(d, e, f, g, h, a, b, c, K[77], L(77));
++      R(c, d, e, f, g, h, a, b, K[78], L(78));
++      R(b, c, d, e, f, g, h, a, K[79], L(79));
++
++      a += state[0];
++      b += state[1];
++      c += state[2];
++      d += state[3];
++      e += state[4];
++      f += state[5];
++      g += state[6];
++      h += state[7];
++      state[0] = a;
++      state[1] = b;
++      state[2] = c;
++      state[3] = d;
++      state[4] = e;
++      state[5] = f;
++      state[6] = g;
++      state[7] = h;
++
++      nblks--;
++    }
++
++  return sizeof(w);
++}
++
++#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
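
Both transforms above return sizeof(w), i.e. the amount of stack scratch that held message-derived data, so the caller knows how many bytes to scrub after processing secret input. A hedged sketch of that calling convention; burn_stack() here is a simplified stand-in, not libgcrypt's internal stack-wiping routine:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint64_t u64;

    /* Simplified stand-in: clears up to sizeof(buf) bytes of this helper's
     * own frame, which sits roughly where the callee's frame was. */
    static void
    burn_stack (unsigned int bytes)
    {
      char buf[128];
      unsigned int n = bytes < sizeof (buf) ? bytes : sizeof (buf);
      memset (buf, 0, n);
      /* Keep the memset from being optimized away. */
      __asm__ volatile ("" : : "r" (buf) : "memory");
    }

    /* A toy transform that keeps data-derived words in a local buffer and
     * reports its size back to the caller. */
    static unsigned int
    toy_transform (u64 state[8], const unsigned char *data, size_t nblks)
    {
      u64 w[16];
      size_t i;

      while (nblks--)
        {
          for (i = 0; i < 16; i++)
            {
              memcpy (&w[i], data + 8 * i, 8);
              state[i & 7] ^= w[i];        /* placeholder mixing step */
            }
          data += 128;
        }
      return sizeof (w);                   /* bytes of stack to wipe */
    }

    int
    main (void)
    {
      u64 st[8] = { 0 };
      unsigned char block[128] = { 1, 2, 3 };
      unsigned int burn = toy_transform (st, block, 1);
      burn_stack (burn);                   /* caller scrubs the scratch */
      return 0;
    }
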
+diff --git a/cipher/sha512.c b/cipher/sha512.c
+index 06e8a2b9..b8035eca 100644
+--- a/cipher/sha512.c
++++ b/cipher/sha512.c
+@@ -104,6 +104,19 @@
+ #endif
+ 
+ 
++/* USE_PPC_CRYPTO indicates whether to enable PowerPC vector crypto
++ * accelerated code. */
++#undef USE_PPC_CRYPTO
++#ifdef ENABLE_PPC_CRYPTO_SUPPORT
++# if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
++     defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
++#  if __GNUC__ >= 4
++#   define USE_PPC_CRYPTO 1
++#  endif
++# endif
++#endif
++
++
+ typedef struct
+ {
+   u64 h0, h1, h2, h3, h4, h5, h6, h7;
+@@ -130,6 +143,31 @@ typedef struct
+ static unsigned int
+ transform (void *context, const unsigned char *data, size_t nblks);
+ 
++#ifdef USE_PPC_CRYPTO
++unsigned int _gcry_sha512_transform_ppc8(u64 state[8],
++					 const unsigned char *input_data,
++					 size_t num_blks);
++
++unsigned int _gcry_sha512_transform_ppc9(u64 state[8],
++					 const unsigned char *input_data,
++					 size_t num_blks);
++
++static unsigned int
++do_sha512_transform_ppc8(void *ctx, const unsigned char *data, size_t nblks)
++{
++  SHA512_CONTEXT *hd = ctx;
++  return _gcry_sha512_transform_ppc8 (&hd->state.h0, data, nblks);
++}
++
++static unsigned int
++do_sha512_transform_ppc9(void *ctx, const unsigned char *data, size_t nblks)
++{
++  SHA512_CONTEXT *hd = ctx;
++  return _gcry_sha512_transform_ppc9 (&hd->state.h0, data, nblks);
++}
++#endif
++
++
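
The wrappers above adapt the generic (void *ctx, ...) block-write signature to the low-level entry points, which take the chaining state as a plain u64[8]; passing &hd->state.h0 works because the eight chaining words are consecutive u64 members. A minimal sketch of the same adapter idea with hypothetical names, not libgcrypt's actual context layout:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t u64;

    /* Hypothetical digest context: eight consecutive u64 chaining words. */
    typedef struct
    {
      u64 h[8];              /* contiguous, so h is a valid u64[8] */
      size_t count;
    } toy_ctx;

    /* Low-level core that only sees the chaining words. */
    static unsigned int
    core_transform (u64 state[8], const unsigned char *data, size_t nblks)
    {
      size_t i;
      while (nblks--)
        {
          for (i = 0; i < 8; i++)
            state[i] += data[i];           /* placeholder update */
          data += 128;
        }
      return 0;
    }

    /* Wrapper matching a generic (void *ctx, ...) callback signature. */
    static unsigned int
    do_transform (void *ctx, const unsigned char *data, size_t nblks)
    {
      toy_ctx *hd = ctx;
      return core_transform (hd->h, data, nblks);
    }

    int
    main (void)
    {
      toy_ctx c = { { 1, 2, 3, 4, 5, 6, 7, 8 }, 0 };
      unsigned char block[128] = { 10 };
      do_transform (&c, block, 1);
      assert (c.h[0] == 11);               /* 1 + block[0] */
      return 0;
    }
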
+ static void
+ sha512_init (void *context, unsigned int flags)
+ {
+@@ -166,6 +204,12 @@ sha512_init (void *context, unsigned int flags)
+ #ifdef USE_AVX2
+   ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+ #endif
++#ifdef USE_PPC_CRYPTO
++  if ((features & HWF_PPC_VCRYPTO) != 0)
++    ctx->bctx.bwrite = do_sha512_transform_ppc8;
++  if ((features & HWF_PPC_VCRYPTO) != 0 && (features & HWF_PPC_ARCH_3_00) != 0)
++    ctx->bctx.bwrite = do_sha512_transform_ppc9;
++#endif
+ 
+   (void)features;
+ }
+diff --git a/configure.ac b/configure.ac
+index 06e122c9..953a20e9 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1840,6 +1840,115 @@ if test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
+ fi
+ 
+ 
++#
++# Check whether compiler supports PowerPC AltiVec/VSX intrinsics
++#
++AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX intrinsics],
++      [gcry_cv_cc_ppc_altivec],
++      [if test "$mpi_cpu_arch" != "ppc" ; then
++	gcry_cv_cc_ppc_altivec="n/a"
++      else
++	gcry_cv_cc_ppc_altivec=no
++	AC_COMPILE_IFELSE([AC_LANG_SOURCE(
++	[[#include <altivec.h>
++	  typedef vector unsigned char block;
++	  block fn(block in)
++	  {
++	    block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
++	    return vec_cipher_be (t, in);
++	  }
++	  ]])],
++	[gcry_cv_cc_ppc_altivec=yes])
++      fi])
++if test "$gcry_cv_cc_ppc_altivec" = "yes" ; then
++    AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1,
++	    [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics])
++fi
++
++_gcc_cflags_save=$CFLAGS
++CFLAGS="$CFLAGS -maltivec -mvsx -mcrypto"
++
++if test "$gcry_cv_cc_ppc_altivec" = "no" &&
++    test "$mpi_cpu_arch" = "ppc" ; then
++  AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags],
++    [gcry_cv_cc_ppc_altivec_cflags],
++    [gcry_cv_cc_ppc_altivec_cflags=no
++    AC_COMPILE_IFELSE([AC_LANG_SOURCE(
++      [[#include <altivec.h>
++	typedef vector unsigned char block;
++	block fn(block in)
++	{
++	  block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
++	  return vec_cipher_be (t, in);
++	}]])],
++      [gcry_cv_cc_ppc_altivec_cflags=yes])])
++  if test "$gcry_cv_cc_ppc_altivec_cflags" = "yes" ; then
++    AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1,
++	      [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics])
++    AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC_WITH_CFLAGS,1,
++	      [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags])
++  fi
++fi
++
++AM_CONDITIONAL(ENABLE_PPC_VCRYPTO_EXTRA_CFLAGS,
++	       test "$gcry_cv_cc_ppc_altivec_cflags" = "yes")
++
++# Restore flags.
++CFLAGS=$_gcc_cflags_save;
++
++
++#
++# Check whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto instructions
++#
++AC_CACHE_CHECK([whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto instructions],
++       [gcry_cv_gcc_inline_asm_ppc_altivec],
++       [if test "$mpi_cpu_arch" != "ppc" ; then
++          gcry_cv_gcc_inline_asm_ppc_altivec="n/a"
++        else
++          gcry_cv_gcc_inline_asm_ppc_altivec=no
++          AC_COMPILE_IFELSE([AC_LANG_SOURCE(
++          [[__asm__(".globl testfn;\n"
++		    "testfn:\n"
++		    "stvx %v31,%r12,%r0;\n"
++		    "lvx  %v20,%r12,%r0;\n"
++		    "vcipher %v0, %v1, %v22;\n"
++		    "lxvw4x %vs32, %r0, %r1;\n"
++		    "vadduwm %v0, %v1, %v22;\n"
++		    "vshasigmaw %v0, %v1, 0, 15;\n"
++		    "vshasigmad %v0, %v1, 0, 15;\n"
++		  );
++            ]])],
++          [gcry_cv_gcc_inline_asm_ppc_altivec=yes])
++        fi])
++if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" ; then
++   AC_DEFINE(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC,1,
++     [Defined if inline assembler supports PowerPC AltiVec/VSX/crypto instructions])
++fi
++
++
++#
++# Check whether GCC inline assembler supports PowerISA 3.00 instructions
++#
++AC_CACHE_CHECK([whether GCC inline assembler supports PowerISA 3.00 instructions],
++       [gcry_cv_gcc_inline_asm_ppc_arch_3_00],
++       [if test "$mpi_cpu_arch" != "ppc" ; then
++          gcry_cv_gcc_inline_asm_ppc_arch_3_00="n/a"
++        else
++          gcry_cv_gcc_inline_asm_ppc_arch_3_00=no
++          AC_COMPILE_IFELSE([AC_LANG_SOURCE(
++          [[__asm__(".globl testfn;\n"
++		    "testfn:\n"
++		    "stxvb16x %r1,%v12,%v30;\n"
++		  );
++            ]])],
++          [gcry_cv_gcc_inline_asm_ppc_arch_3_00=yes])
++        fi])
++if test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
++   AC_DEFINE(HAVE_GCC_INLINE_ASM_PPC_ARCH_3_00,1,
++     [Defined if inline assembler supports PowerISA 3.00 instructions])
++fi
++
++
+ #######################################
+ #### Checks for library functions. ####
+ #######################################
+@@ -2510,6 +2619,19 @@ if test "$found" = "1" ; then
+          # Build with the assembly implementation
+          GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-armv8-aarch64-ce.lo"
+       ;;
++      powerpc64le-*-*)
++         # Build with the crypto extension implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha256-ppc.lo"
++      ;;
++      powerpc64-*-*)
++         # Big-Endian.
++         # Build with the crypto extension implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha256-ppc.lo"
++      ;;
++      powerpc-*-*)
++         # Big-Endian.
++         # Build with the crypto extension implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha256-ppc.lo"
+    esac
+ fi
+ 
+@@ -2529,6 +2651,19 @@ if test "$found" = "1" ; then
+          # Build with the assembly implementation
+          GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-arm.lo"
+       ;;
++      powerpc64le-*-*)
++         # Build with the crypto extension implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha512-ppc.lo"
++      ;;
++      powerpc64-*-*)
++         # Big-Endian.
++         # Build with the crypto extension implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha512-ppc.lo"
++      ;;
++      powerpc-*-*)
++         # Big-Endian.
++         # Build with the crypto extension implementation
++         GCRYPT_CIPHERS="$GCRYPT_CIPHERS sha512-ppc.lo"
+    esac
+ 
+    if test x"$neonsupport" = xyes ; then
diff --git a/SOURCES/libgcrypt-1.9.3-CVE-2021-33560.patch b/SOURCES/libgcrypt-1.9.3-CVE-2021-33560.patch
new file mode 100644
index 0000000..2161840
--- /dev/null
+++ b/SOURCES/libgcrypt-1.9.3-CVE-2021-33560.patch
@@ -0,0 +1,100 @@
+commit 3462280f2e23e16adf3ed5176e0f2413d8861320
+Author: NIIBE Yutaka <gniibe@fsij.org>
+Date:   Fri May 21 11:15:07 2021 +0900
+
+    cipher: Fix ElGamal encryption for other implementations.
+    
+    * cipher/elgamal.c (gen_k): Remove support of smaller K.
+    (do_encrypt): Never use smaller K.
+    (sign): Follow the change of gen_k.
+    
+    --
+    
+    Cherry-pick master commit of:
+            632d80ef30e13de6926d503aa697f92b5dbfbc5e
+    
+    This change basically reverts encryption changes in two commits:
+    
+            74386120dad6b3da62db37f7044267c8ef34689b
+            78531373a342aeb847950f404343a05e36022065
+    
+    Use of a smaller K for the ephemeral key in ElGamal encryption is only
+    safe when we can guarantee that the recipient's key was generated by our
+    implementation (or a compatible one).
+    
+    For detail, please see:
+    
+        Luca De Feo, Bertram Poettering, Alessandro Sorniotti,
+        "On the (in)security of ElGamal in OpenPGP";
+        in the proceedings of CCS'2021.
+    
+    CVE-id: CVE-2021-33560
+    GnuPG-bug-id: 5328
+    Suggested-by: Luca De Feo, Bertram Poettering, Alessandro Sorniotti
+    Signed-off-by: NIIBE Yutaka <gniibe@fsij.org>
+
+diff --git a/cipher/elgamal.c b/cipher/elgamal.c
+index 9835122f..eead4502 100644
+--- a/cipher/elgamal.c
++++ b/cipher/elgamal.c
+@@ -66,7 +66,7 @@ static const char *elg_names[] =
+ 
+ 
+ static int test_keys (ELG_secret_key *sk, unsigned int nbits, int nodie);
+-static gcry_mpi_t gen_k (gcry_mpi_t p, int small_k);
++static gcry_mpi_t gen_k (gcry_mpi_t p);
+ static gcry_err_code_t generate (ELG_secret_key *sk, unsigned nbits,
+                                  gcry_mpi_t **factors);
+ static int  check_secret_key (ELG_secret_key *sk);
+@@ -189,11 +189,10 @@ test_keys ( ELG_secret_key *sk, unsigned int nbits, int nodie )
+ 
+ /****************
+  * Generate a random secret exponent k from prime p, so that k is
+- * relatively prime to p-1.  With SMALL_K set, k will be selected for
+- * better encryption performance - this must never be used signing!
++ * relatively prime to p-1.
+  */
+ static gcry_mpi_t
+-gen_k( gcry_mpi_t p, int small_k )
++gen_k( gcry_mpi_t p )
+ {
+   gcry_mpi_t k = mpi_alloc_secure( 0 );
+   gcry_mpi_t temp = mpi_alloc( mpi_get_nlimbs(p) );
+@@ -202,18 +201,7 @@ gen_k( gcry_mpi_t p, int small_k )
+   unsigned int nbits, nbytes;
+   char *rndbuf = NULL;
+ 
+-  if (small_k)
+-    {
+-      /* Using a k much lesser than p is sufficient for encryption and
+-       * it greatly improves the encryption performance.  We use
+-       * Wiener's table and add a large safety margin. */
+-      nbits = wiener_map( orig_nbits ) * 3 / 2;
+-      if( nbits >= orig_nbits )
+-        BUG();
+-    }
+-  else
+-    nbits = orig_nbits;
+-
++  nbits = orig_nbits;
+ 
+   nbytes = (nbits+7)/8;
+   if( DBG_CIPHER )
+@@ -492,7 +480,7 @@ do_encrypt(gcry_mpi_t a, gcry_mpi_t b, gcry_mpi_t input, ELG_public_key *pkey )
+    * error code.
+    */
+ 
+-  k = gen_k( pkey->p, 1 );
++  k = gen_k( pkey->p );
+   mpi_powm (a, pkey->g, k, pkey->p);
+ 
+   /* b = (y^k * input) mod p
+@@ -608,7 +596,7 @@ sign(gcry_mpi_t a, gcry_mpi_t b, gcry_mpi_t input, ELG_secret_key *skey )
+     *
+     */
+     mpi_sub_ui(p_1, p_1, 1);
+-    k = gen_k( skey->p, 0 /* no small K ! */ );
++    k = gen_k( skey->p );
+     mpi_powm( a, skey->g, k, skey->p );
+     mpi_mul(t, skey->x, a );
+     mpi_subm(t, input, t, p_1 );
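
The patch above (CVE-2021-33560) makes ElGamal encryption draw the ephemeral exponent k at the full bit length of p, instead of the shortened k previously used for speed. The toy sketch below shows where k enters the computation; the parameters, RNG and integer sizes are deliberately tiny and non-cryptographic, purely to illustrate the flow, and are not libgcrypt's MPI code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Modular exponentiation; modulus is small enough that products fit
     * in 64 bits. */
    static uint64_t
    powm (uint64_t base, uint64_t exp, uint64_t mod)
    {
      uint64_t result = 1;
      base %= mod;
      while (exp)
        {
          if (exp & 1)
            result = (result * base) % mod;
          base = (base * base) % mod;
          exp >>= 1;
        }
      return result;
    }

    static uint64_t
    gcd (uint64_t a, uint64_t b)
    {
      while (b) { uint64_t t = a % b; a = b; b = t; }
      return a;
    }

    /* Pick k from the full range [1, p-2] with gcd(k, p-1) == 1, mirroring
     * the patched gen_k(): no "small k" shortcut. */
    static uint64_t
    gen_k (uint64_t p)
    {
      uint64_t k;
      do
        k = 1 + ((((uint64_t) rand () << 16) ^ (uint64_t) rand ()) % (p - 2));
      while (gcd (k, p - 1) != 1);        /* toy RNG, NOT cryptographic */
      return k;
    }

    int
    main (void)
    {
      uint64_t p = 2147483647;      /* toy prime (2^31 - 1) */
      uint64_t g = 16807;           /* toy generator */
      uint64_t x = 123456789;       /* secret key */
      uint64_t y = powm (g, x, p);  /* public key */
      uint64_t m = 42;              /* message */

      uint64_t k = gen_k (p);
      uint64_t a = powm (g, k, p);            /* a = g^k mod p     */
      uint64_t b = (powm (y, k, p) * m) % p;  /* b = y^k * m mod p */

      /* Decryption recovers m: m = b * a^(p-1-x) mod p. */
      uint64_t s = powm (a, p - 1 - x, p);
      assert ((b * s) % p == m);
      return 0;
    }
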
diff --git a/SPECS/libgcrypt.spec b/SPECS/libgcrypt.spec
index f41c9c9..52437c7 100644
--- a/SPECS/libgcrypt.spec
+++ b/SPECS/libgcrypt.spec
@@ -1,6 +1,6 @@
 Name: libgcrypt
 Version: 1.8.5
-Release: 4%{?dist}
+Release: 6%{?dist}
 URL: http://www.gnupg.org/
 Source0: libgcrypt-%{version}-hobbled.tar.xz
 # The original libgcrypt sources now contain potentially patented ECC
@@ -53,6 +53,20 @@ Patch30: libgcrypt-1.8.5-fips-module.patch
 Patch31: libgcrypt-1.8.5-aes-perf.patch
 # FIPS selftest for PBKDF2
 Patch32: libgcrypt-1.8.5-kdf-selftest.patch
+# ppc64 performance for SHA2 (#1855231)
+Patch33: libgcrypt-1.8.5-ppc-sha2.patch
+# ppc64 performance for CRC32 (#1855231)
+Patch34: libgcrypt-1.8.5-ppc-crc32.patch
+# ppc64 bugfixes (#1855231)
+Patch35: libgcrypt-1.8.5-ppc-bugfix.patch
+# ppc64 performance for AES-GCM (#1855231)
+Patch36: libgcrypt-1.8.5-ppc-aes-gcm.patch
+# Fix ElGamal encryption, CVE-2021-33560 (#1971421)
+Patch37: libgcrypt-1.9.3-CVE-2021-33560.patch
+# We can use HW optimizations in FIPS (#1976137)
+Patch38: libgcrypt-1.8.5-fips-hwfeatures.patch
+# ppc64 performance for ChaCha20 and Poly1305 (#1855231)
+Patch39: libgcrypt-1.8.5-ppc-chacha20-poly1305.patch
 
 %define gcrylibdir %{_libdir}
 
@@ -106,6 +120,13 @@ applications using libgcrypt.
 %patch30 -p1 -b .fips-module
 %patch31 -p1 -b .aes-perf
 %patch32 -p1 -b .kdf-selftest
+%patch33 -p1 -b .ppc-sha2
+%patch34 -p1 -b .ppc-crc32
+%patch35 -p1 -b .ppc-bugfix
+%patch36 -p1 -b .ppc-aes-gcm
+%patch37 -p1 -b .CVE-2021-33560
+%patch38 -p1 -b .hw-fips
+%patch39 -p1 -b .ppc-chacha
 
 cp %{SOURCE4} cipher/
 cp %{SOURCE5} %{SOURCE6} tests/
@@ -221,6 +242,14 @@ exit 0
 %license COPYING
 
 %changelog
+* Mon Jun 28 2021 Jakub Jelen <jjelen@redhat.com> - 1.8.5-6
+- Fix for CVE-2021-33560 (#1971421)
+- Enable HW optimizations in FIPS (#1976137)
+- Performance enhancements for ChaCha20 and Poly1305 (#1855231)
+
+* Thu May 13 2021 Jakub Jelen <jjelen@redhat.com> - 1.8.5-5
+- Performance enhancements for AES-GCM, CRC32 and SHA2 (#1855231)
+
 * Mon Jun 15 2020 Tomáš Mráz <tmraz@redhat.com> 1.8.5-4
 - add PBKDF2 selftest for FIPS POST