diff --git a/SOURCES/openssl-1.0.2i-fips.patch b/SOURCES/openssl-1.0.2i-fips.patch
index 669922c..9a2dd6c 100644
--- a/SOURCES/openssl-1.0.2i-fips.patch
+++ b/SOURCES/openssl-1.0.2i-fips.patch
@@ -997,7 +997,7 @@ diff -up openssl-1.0.2i/crypto/dsa/dsa.h.fips openssl-1.0.2i/crypto/dsa/dsa.h
  # define DSA_R_INVALID_DIGEST_TYPE                        106
 -# define DSA_R_INVALID_PARAMETERS                         112
 +# define DSA_R_INVALID_PARAMETERS                         212
-+# define DSA_R_KEY_SIZE_INVALID                           113
++# define DSA_R_KEY_SIZE_INVALID                           201
 +# define DSA_R_KEY_SIZE_TOO_SMALL                         110
  # define DSA_R_MISSING_PARAMETERS                         101
  # define DSA_R_MODULUS_TOO_LARGE                          103
diff --git a/SOURCES/openssl-1.0.2k-cve-2017-3735.patch b/SOURCES/openssl-1.0.2k-cve-2017-3735.patch
new file mode 100644
index 0000000..19b4b4b
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-cve-2017-3735.patch
@@ -0,0 +1,20 @@
+diff -up openssl-1.0.2k/crypto/x509v3/v3_addr.c.overread openssl-1.0.2k/crypto/x509v3/v3_addr.c
+--- openssl-1.0.2k/crypto/x509v3/v3_addr.c.overread	2017-01-26 14:22:04.000000000 +0100
++++ openssl-1.0.2k/crypto/x509v3/v3_addr.c	2018-06-18 13:49:30.001625137 +0200
+@@ -130,10 +130,12 @@ static int length_from_afi(const unsigne
+  */
+ unsigned int v3_addr_get_afi(const IPAddressFamily *f)
+ {
+-    return ((f != NULL &&
+-             f->addressFamily != NULL && f->addressFamily->data != NULL)
+-            ? ((f->addressFamily->data[0] << 8) | (f->addressFamily->data[1]))
+-            : 0);
++    if (f == NULL
++            || f->addressFamily == NULL
++            || f->addressFamily->data == NULL
++            || f->addressFamily->length < 2)
++        return 0;
++    return (f->addressFamily->data[0] << 8) | f->addressFamily->data[1];
+ }
+ 
+ /*
diff --git a/SOURCES/openssl-1.0.2k-cve-2018-0495.patch b/SOURCES/openssl-1.0.2k-cve-2018-0495.patch
new file mode 100644
index 0000000..78bba65
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-cve-2018-0495.patch
@@ -0,0 +1,896 @@
+diff -up openssl-1.0.2k/crypto/bn/bn_div.c.rohnp-fix openssl-1.0.2k/crypto/bn/bn_div.c
+--- openssl-1.0.2k/crypto/bn/bn_div.c.rohnp-fix	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/bn/bn_div.c	2018-08-14 10:57:21.592518702 +0200
+@@ -290,6 +290,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const
+     wnum.neg = 0;
+     wnum.d = &(snum->d[loop]);
+     wnum.top = div_n;
++    wnum.flags = BN_FLG_STATIC_DATA;
+     /*
+      * only needed when BN_ucmp messes up the values between top and max
+      */
+diff -up openssl-1.0.2k/crypto/bn/bn_exp.c.rohnp-fix openssl-1.0.2k/crypto/bn/bn_exp.c
+--- openssl-1.0.2k/crypto/bn/bn_exp.c.rohnp-fix	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/bn/bn_exp.c	2018-08-14 10:57:21.596518798 +0200
+@@ -466,17 +466,17 @@ int BN_mod_exp_mont(BIGNUM *rr, const BI
+         ret = 1;
+         goto err;
+     }
+-    if (!BN_to_montgomery(val[0], aa, mont, ctx))
++    if (!bn_to_mont_fixed_top(val[0], aa, mont, ctx))
+         goto err;               /* 1 */
+ 
+     window = BN_window_bits_for_exponent_size(bits);
+     if (window > 1) {
+-        if (!BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx))
++        if (!bn_mul_mont_fixed_top(d, val[0], val[0], mont, ctx))
+             goto err;           /* 2 */
+         j = 1 << (window - 1);
+         for (i = 1; i < j; i++) {
+             if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
+-                !BN_mod_mul_montgomery(val[i], val[i - 1], d, mont, ctx))
++                !bn_mul_mont_fixed_top(val[i], val[i - 1], d, mont, ctx))
+                 goto err;
+         }
+     }
+@@ -498,19 +498,15 @@ int BN_mod_exp_mont(BIGNUM *rr, const BI
+         for (i = 1; i < j; i++)
+             r->d[i] = (~m->d[i]) & BN_MASK2;
+         r->top = j;
+-        /*
+-         * Upper words will be zero if the corresponding words of 'm' were
+-         * 0xfff[...], so decrement r->top accordingly.
+-         */
+-        bn_correct_top(r);
++        r->flags |= BN_FLG_FIXED_TOP;
+     } else
+ #endif
+-    if (!BN_to_montgomery(r, BN_value_one(), mont, ctx))
++    if (!bn_to_mont_fixed_top(r, BN_value_one(), mont, ctx))
+         goto err;
+     for (;;) {
+         if (BN_is_bit_set(p, wstart) == 0) {
+             if (!start) {
+-                if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
++                if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
+                     goto err;
+             }
+             if (wstart == 0)
+@@ -541,12 +537,12 @@ int BN_mod_exp_mont(BIGNUM *rr, const BI
+         /* add the 'bytes above' */
+         if (!start)
+             for (i = 0; i < j; i++) {
+-                if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
++                if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
+                     goto err;
+             }
+ 
+         /* wvalue will be an odd number < 2^window */
+-        if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx))
++        if (!bn_mul_mont_fixed_top(r, r, val[wvalue >> 1], mont, ctx))
+             goto err;
+ 
+         /* move the 'window' down further */
+@@ -556,6 +552,11 @@ int BN_mod_exp_mont(BIGNUM *rr, const BI
+         if (wstart < 0)
+             break;
+     }
++    /*
++     * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
++     * removes padding [if any] and makes return value suitable for public
++     * API consumer.
++     */
+ #if defined(SPARC_T4_MONT)
+     if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
+         j = mont->N.top;        /* borrow j */
+@@ -674,7 +675,7 @@ static int MOD_EXP_CTIME_COPY_FROM_PREBU
+     }
+ 
+     b->top = top;
+-    bn_correct_top(b);
++    b->flags |= BN_FLG_FIXED_TOP;
+     return 1;
+ }
+ 
+@@ -841,16 +842,16 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+         tmp.top = top;
+     } else
+ #endif
+-    if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx))
++    if (!bn_to_mont_fixed_top(&tmp, BN_value_one(), mont, ctx))
+         goto err;
+ 
+     /* prepare a^1 in Montgomery domain */
+     if (a->neg || BN_ucmp(a, m) >= 0) {
+         if (!BN_mod(&am, a, m, ctx))
+             goto err;
+-        if (!BN_to_montgomery(&am, &am, mont, ctx))
++        if (!bn_to_mont_fixed_top(&am, &am, mont, ctx))
+             goto err;
+-    } else if (!BN_to_montgomery(&am, a, mont, ctx))
++    } else if (!bn_to_mont_fixed_top(&am, a, mont, ctx))
+         goto err;
+ 
+ #if defined(SPARC_T4_MONT)
+@@ -1117,14 +1118,14 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+          * performance advantage of sqr over mul).
+          */
+         if (window > 1) {
+-            if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx))
++            if (!bn_mul_mont_fixed_top(&tmp, &am, &am, mont, ctx))
+                 goto err;
+             if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2,
+                                               window))
+                 goto err;
+             for (i = 3; i < numPowers; i++) {
+                 /* Calculate a^i = a^(i-1) * a */
+-                if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx))
++                if (!bn_mul_mont_fixed_top(&tmp, &am, &tmp, mont, ctx))
+                     goto err;
+                 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i,
+                                                   window))
+@@ -1148,7 +1149,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+ 
+             /* Scan the window, squaring the result as we go */
+             for (i = 0; i < window; i++, bits--) {
+-                if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx))
++                if (!bn_mul_mont_fixed_top(&tmp, &tmp, &tmp, mont, ctx))
+                     goto err;
+                 wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
+             }
+@@ -1161,12 +1162,16 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+                 goto err;
+ 
+             /* Multiply the result into the intermediate result */
+-            if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx))
++            if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx))
+                 goto err;
+         }
+     }
+ 
+-    /* Convert the final result from montgomery to standard format */
++    /*
++     * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
++     * removes padding [if any] and makes return value suitable for public
++     * API consumer.
++     */
+ #if defined(SPARC_T4_MONT)
+     if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
+         am.d[0] = 1;            /* borrow am */
+diff -up openssl-1.0.2k/crypto/bn/bn.h.rohnp-fix openssl-1.0.2k/crypto/bn/bn.h
+--- openssl-1.0.2k/crypto/bn/bn.h.rohnp-fix	2018-06-20 17:44:01.752387208 +0200
++++ openssl-1.0.2k/crypto/bn/bn.h	2018-08-14 10:57:21.592518702 +0200
+@@ -702,6 +702,16 @@ BIGNUM *bn_dup_expand(const BIGNUM *a, i
+ /* We only need assert() when debugging */
+ #  include <assert.h>
+ 
++/*
++ * The new BN_FLG_FIXED_TOP flag marks vectors that were not treated with
++ * bn_correct_top, in other words such vectors are permitted to have zeros
++ * in most significant limbs. Such vectors are used internally to achieve
++ * execution time invariance for critical operations with private keys.
++ * It's BN_DEBUG-only flag, because user application is not supposed to
++ * observe it anyway. Moreover, optimizing compiler would actually remove
++ * all operations manipulating the bit in question in non-BN_DEBUG build.
++ */
++#  define BN_FLG_FIXED_TOP 0x10000
+ #  ifdef BN_DEBUG_RAND
+ /* To avoid "make update" cvs wars due to BN_DEBUG, use some tricks */
+ #   ifndef RAND_pseudo_bytes
+@@ -734,8 +744,10 @@ int RAND_pseudo_bytes(unsigned char *buf
+         do { \
+                 const BIGNUM *_bnum2 = (a); \
+                 if (_bnum2 != NULL) { \
+-                        assert((_bnum2->top == 0) || \
+-                                (_bnum2->d[_bnum2->top - 1] != 0)); \
++                        int _top = _bnum2->top; \
++                        assert((_top == 0) || \
++                               (_bnum2->flags & BN_FLG_FIXED_TOP) || \
++                               (_bnum2->d[_top - 1] != 0)); \
+                         bn_pollute(_bnum2); \
+                 } \
+         } while(0)
+@@ -753,6 +765,7 @@ int RAND_pseudo_bytes(unsigned char *buf
+ 
+ # else                          /* !BN_DEBUG */
+ 
++#  define BN_FLG_FIXED_TOP 0
+ #  define bn_pollute(a)
+ #  define bn_check_top(a)
+ #  define bn_fix_top(a)           bn_correct_top(a)
+diff -up openssl-1.0.2k/crypto/bn/bn_lcl.h.rohnp-fix openssl-1.0.2k/crypto/bn/bn_lcl.h
+--- openssl-1.0.2k/crypto/bn/bn_lcl.h.rohnp-fix	2018-06-20 17:44:01.748387114 +0200
++++ openssl-1.0.2k/crypto/bn/bn_lcl.h	2018-08-14 10:57:21.596518798 +0200
+@@ -113,6 +113,7 @@
+ # define HEADER_BN_LCL_H
+ 
+ # include <openssl/bn.h>
++# include "bn_int.h"
+ 
+ #ifdef  __cplusplus
+ extern "C" {
+diff -up openssl-1.0.2k/crypto/bn/bn_lib.c.rohnp-fix openssl-1.0.2k/crypto/bn/bn_lib.c
+--- openssl-1.0.2k/crypto/bn/bn_lib.c.rohnp-fix	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/bn/bn_lib.c	2018-08-14 10:57:21.592518702 +0200
+@@ -290,8 +290,6 @@ static BN_ULONG *bn_expand_internal(cons
+     const BN_ULONG *B;
+     int i;
+ 
+-    bn_check_top(b);
+-
+     if (words > (INT_MAX / (4 * BN_BITS2))) {
+         BNerr(BN_F_BN_EXPAND_INTERNAL, BN_R_BIGNUM_TOO_LONG);
+         return NULL;
+@@ -425,8 +423,6 @@ BIGNUM *bn_dup_expand(const BIGNUM *b, i
+ 
+ BIGNUM *bn_expand2(BIGNUM *b, int words)
+ {
+-    bn_check_top(b);
+-
+     if (words > b->dmax) {
+         BN_ULONG *a = bn_expand_internal(b, words);
+         if (!a)
+@@ -460,7 +456,6 @@ BIGNUM *bn_expand2(BIGNUM *b, int words)
+         assert(A == &(b->d[b->dmax]));
+     }
+ #endif
+-    bn_check_top(b);
+     return b;
+ }
+ 
+@@ -572,6 +567,7 @@ void BN_clear(BIGNUM *a)
+         OPENSSL_cleanse(a->d, a->dmax * sizeof(a->d[0]));
+     a->top = 0;
+     a->neg = 0;
++    a->flags &= ~BN_FLG_FIXED_TOP;
+ }
+ 
+ BN_ULONG BN_get_word(const BIGNUM *a)
+@@ -592,6 +588,7 @@ int BN_set_word(BIGNUM *a, BN_ULONG w)
+     a->neg = 0;
+     a->d[0] = w;
+     a->top = (w ? 1 : 0);
++    a->flags &= ~BN_FLG_FIXED_TOP;
+     bn_check_top(a);
+     return (1);
+ }
+@@ -738,6 +735,7 @@ int BN_set_bit(BIGNUM *a, int n)
+         for (k = a->top; k < i + 1; k++)
+             a->d[k] = 0;
+         a->top = i + 1;
++        a->flags &= ~BN_FLG_FIXED_TOP;
+     }
+ 
+     a->d[i] |= (((BN_ULONG)1) << j);
+diff -up openssl-1.0.2k/crypto/bn/bn_mod.c.rohnp-fix openssl-1.0.2k/crypto/bn/bn_mod.c
+--- openssl-1.0.2k/crypto/bn/bn_mod.c.rohnp-fix	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/bn/bn_mod.c	2018-08-14 10:57:21.601518919 +0200
+@@ -149,18 +149,73 @@ int BN_mod_add(BIGNUM *r, const BIGNUM *
+ 
+ /*
+  * BN_mod_add variant that may be used if both a and b are non-negative and
+- * less than m
++ * less than m. The original algorithm was
++ *
++ *    if (!BN_uadd(r, a, b))
++ *       return 0;
++ *    if (BN_ucmp(r, m) >= 0)
++ *       return BN_usub(r, r, m);
++ *
++ * which is replaced with addition, subtracting modulus, and conditional
++ * move depending on whether or not subtraction borrowed.
+  */
+-int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+-                     const BIGNUM *m)
++int bn_mod_add_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
++                         const BIGNUM *m)
+ {
+-    if (!BN_uadd(r, a, b))
++    size_t i, ai, bi, mtop = m->top;
++    BN_ULONG storage[1024 / BN_BITS2];
++    BN_ULONG carry, temp, mask, *rp, *tp = storage;
++    const BN_ULONG *ap, *bp;
++
++    if (bn_wexpand(r, m->top) == NULL)
+         return 0;
+-    if (BN_ucmp(r, m) >= 0)
+-        return BN_usub(r, r, m);
++
++    if (mtop > sizeof(storage) / sizeof(storage[0])
++        && (tp = OPENSSL_malloc(mtop * sizeof(BN_ULONG))) == NULL)
++        return 0;
++
++    ap = a->d != NULL ? a->d : tp;
++    bp = b->d != NULL ? b->d : tp;
++
++    for (i = 0, ai = 0, bi = 0, carry = 0; i < mtop;) {
++        mask = (BN_ULONG)0 - ((i - a->top) >> (8 * sizeof(i) - 1));
++        temp = ((ap[ai] & mask) + carry) & BN_MASK2;
++        carry = (temp < carry);
++
++        mask = (BN_ULONG)0 - ((i - b->top) >> (8 * sizeof(i) - 1));
++        tp[i] = ((bp[bi] & mask) + temp) & BN_MASK2;
++        carry += (tp[i] < temp);
++
++        i++;
++        ai += (i - a->dmax) >> (8 * sizeof(i) - 1);
++        bi += (i - b->dmax) >> (8 * sizeof(i) - 1);
++    }
++    rp = r->d;
++    carry -= bn_sub_words(rp, tp, m->d, mtop);
++    for (i = 0; i < mtop; i++) {
++        rp[i] = (carry & tp[i]) | (~carry & rp[i]);
++        ((volatile BN_ULONG *)tp)[i] = 0;
++    }
++    r->top = mtop;
++    r->neg = 0;
++
++    if (tp != storage)
++        OPENSSL_free(tp);
++
+     return 1;
+ }
+ 
++int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
++                     const BIGNUM *m)
++{
++    int ret = bn_mod_add_fixed_top(r, a, b, m);
++
++    if (ret)
++        bn_correct_top(r);
++
++    return ret;
++}
++
+ int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m,
+                BN_CTX *ctx)
+ {
+diff -up openssl-1.0.2k/crypto/bn/bn_mont.c.rohnp-fix openssl-1.0.2k/crypto/bn/bn_mont.c
+--- openssl-1.0.2k/crypto/bn/bn_mont.c.rohnp-fix	2018-08-14 10:57:21.589518629 +0200
++++ openssl-1.0.2k/crypto/bn/bn_mont.c	2018-08-14 11:15:11.425320301 +0200
+@@ -56,7 +56,7 @@
+  * [including the GNU Public Licence.]
+  */
+ /* ====================================================================
+- * Copyright (c) 1998-2006 The OpenSSL Project.  All rights reserved.
++ * Copyright (c) 1998-2018 The OpenSSL Project.  All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions
+@@ -123,12 +123,23 @@
+ #define MONT_WORD               /* use the faster word-based algorithm */
+ 
+ #ifdef MONT_WORD
+-static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont);
++static int bn_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont);
+ #endif
+ 
+ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+                           BN_MONT_CTX *mont, BN_CTX *ctx)
+ {
++    int ret = bn_mul_mont_fixed_top(r, a, b, mont, ctx);
++
++    bn_correct_top(r);
++    bn_check_top(r);
++
++    return ret;
++}
++
++int bn_mul_mont_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
++                          BN_MONT_CTX *mont, BN_CTX *ctx)
++{
+     BIGNUM *tmp;
+     int ret = 0;
+ #if defined(OPENSSL_BN_ASM_MONT) && defined(MONT_WORD)
+@@ -140,8 +151,8 @@ int BN_mod_mul_montgomery(BIGNUM *r, con
+         if (bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
+             r->neg = a->neg ^ b->neg;
+             r->top = num;
+-            bn_correct_top(r);
+-            return (1);
++            r->flags |= BN_FLG_FIXED_TOP;
++            return 1;
+         }
+     }
+ #endif
+@@ -161,13 +172,12 @@ int BN_mod_mul_montgomery(BIGNUM *r, con
+     }
+     /* reduce from aRR to aR */
+ #ifdef MONT_WORD
+-    if (!BN_from_montgomery_word(r, tmp, mont))
++    if (!bn_from_montgomery_word(r, tmp, mont))
+         goto err;
+ #else
+     if (!BN_from_montgomery(r, tmp, mont, ctx))
+         goto err;
+ #endif
+-    bn_check_top(r);
+     ret = 1;
+  err:
+     BN_CTX_end(ctx);
+@@ -175,7 +185,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, con
+ }
+ 
+ #ifdef MONT_WORD
+-static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
++static int bn_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
+ {
+     BIGNUM *n;
+     BN_ULONG *ap, *np, *rp, n0, v, carry;
+@@ -205,28 +215,16 @@ static int BN_from_montgomery_word(BIGNU
+ # endif
+ 
+     r->top = max;
++    r->flags |= BN_FLG_FIXED_TOP;
+     n0 = mont->n0[0];
+ 
+-# ifdef BN_COUNT
+-    fprintf(stderr, "word BN_from_montgomery_word %d * %d\n", nl, nl);
+-# endif
++    /*
++     * Add multiples of |n| to |r| until R = 2^(nl * BN_BITS2) divides it. On
++     * input, we had |r| < |n| * R, so now |r| < 2 * |n| * R. Note that |r|
++     * includes |carry| which is stored separately.
++     */
+     for (carry = 0, i = 0; i < nl; i++, rp++) {
+-# ifdef __TANDEM
+-        {
+-            long long t1;
+-            long long t2;
+-            long long t3;
+-            t1 = rp[0] * (n0 & 0177777);
+-            t2 = 037777600000l;
+-            t2 = n0 & t2;
+-            t3 = rp[0] & 0177777;
+-            t2 = (t3 * t2) & BN_MASK2;
+-            t1 = t1 + t2;
+-            v = bn_mul_add_words(rp, np, nl, (BN_ULONG)t1);
+-        }
+-# else
+         v = bn_mul_add_words(rp, np, nl, (rp[0] * n0) & BN_MASK2);
+-# endif
+         v = (v + carry + rp[nl]) & BN_MASK2;
+         carry |= (v != rp[nl]);
+         carry &= (v <= rp[nl]);
+@@ -236,52 +234,27 @@ static int BN_from_montgomery_word(BIGNU
+     if (bn_wexpand(ret, nl) == NULL)
+         return (0);
+     ret->top = nl;
++    ret->flags |= BN_FLG_FIXED_TOP;
+     ret->neg = r->neg;
+ 
+     rp = ret->d;
+-    ap = &(r->d[nl]);
+ 
+-# define BRANCH_FREE 1
+-# if BRANCH_FREE
+-    {
+-        BN_ULONG *nrp;
+-        size_t m;
++    /*
++     * Shift |nl| words to divide by R. We have |ap| < 2 * |n|. Note that |ap|
++     * includes |carry| which is stored separately.
++     */
++    ap = &(r->d[nl]);
+ 
+-        v = bn_sub_words(rp, ap, np, nl) - carry;
+-        /*
+-         * if subtraction result is real, then trick unconditional memcpy
+-         * below to perform in-place "refresh" instead of actual copy.
+-         */
+-        m = (0 - (size_t)v);
+-        nrp =
+-            (BN_ULONG *)(((PTR_SIZE_INT) rp & ~m) | ((PTR_SIZE_INT) ap & m));
+-
+-        for (i = 0, nl -= 4; i < nl; i += 4) {
+-            BN_ULONG t1, t2, t3, t4;
+-
+-            t1 = nrp[i + 0];
+-            t2 = nrp[i + 1];
+-            t3 = nrp[i + 2];
+-            ap[i + 0] = 0;
+-            t4 = nrp[i + 3];
+-            ap[i + 1] = 0;
+-            rp[i + 0] = t1;
+-            ap[i + 2] = 0;
+-            rp[i + 1] = t2;
+-            ap[i + 3] = 0;
+-            rp[i + 2] = t3;
+-            rp[i + 3] = t4;
+-        }
+-        for (nl += 4; i < nl; i++)
+-            rp[i] = nrp[i], ap[i] = 0;
++    carry -= bn_sub_words(rp, ap, np, nl);
++    /*
++     * |carry| is -1 if |ap| - |np| underflowed or zero if it did not. Note
++     * |carry| cannot be 1. That would imply the subtraction did not fit in
++     * |nl| words, and we know at most one subtraction is needed.
++     */
++    for (i = 0; i < nl; i++) {
++        rp[i] = (carry & ap[i]) | (~carry & rp[i]);
++        ap[i] = 0;
+     }
+-# else
+-    if (bn_sub_words(rp, ap, np, nl) - carry)
+-        memcpy(rp, ap, nl * sizeof(BN_ULONG));
+-# endif
+-    bn_correct_top(r);
+-    bn_correct_top(ret);
+-    bn_check_top(ret);
+ 
+     return (1);
+ }
+@@ -295,8 +268,11 @@ int BN_from_montgomery(BIGNUM *ret, cons
+     BIGNUM *t;
+ 
+     BN_CTX_start(ctx);
+-    if ((t = BN_CTX_get(ctx)) && BN_copy(t, a))
+-        retn = BN_from_montgomery_word(ret, t, mont);
++    if ((t = BN_CTX_get(ctx)) && BN_copy(t, a)) {
++        retn = bn_from_montgomery_word(ret, t, mont);
++        bn_correct_top(ret);
++        bn_check_top(ret);
++    }
+     BN_CTX_end(ctx);
+ #else                           /* !MONT_WORD */
+     BIGNUM *t1, *t2;
+@@ -334,6 +310,12 @@ int BN_from_montgomery(BIGNUM *ret, cons
+     return (retn);
+ }
+ 
++int bn_to_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont,
++                         BN_CTX *ctx)
++{
++    return bn_mul_mont_fixed_top(r, a, &(mont->RR), mont, ctx);
++}
++
+ BN_MONT_CTX *BN_MONT_CTX_new(void)
+ {
+     BN_MONT_CTX *ret;
+@@ -370,7 +352,7 @@ void BN_MONT_CTX_free(BN_MONT_CTX *mont)
+ 
+ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
+ {
+-    int ret = 0;
++    int i, ret = 0;
+     BIGNUM *Ri, *R;
+ 
+     if (BN_is_zero(mod))
+@@ -382,6 +364,8 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, c
+     R = &(mont->RR);            /* grab RR as a temp */
+     if (!BN_copy(&(mont->N), mod))
+         goto err;               /* Set N */
++    if (BN_get_flags(mod, BN_FLG_CONSTTIME) != 0)
++        BN_set_flags(&(mont->N), BN_FLG_CONSTTIME);
+     mont->N.neg = 0;
+ 
+ #ifdef MONT_WORD
+@@ -394,6 +378,9 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, c
+         tmod.dmax = 2;
+         tmod.neg = 0;
+ 
++        if (BN_get_flags(mod, BN_FLG_CONSTTIME) != 0)
++            BN_set_flags(&tmod, BN_FLG_CONSTTIME);
++
+         mont->ri = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
+ 
+ # if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)
+@@ -496,6 +483,11 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, c
+     if (!BN_mod(&(mont->RR), &(mont->RR), &(mont->N), ctx))
+         goto err;
+ 
++    for (i = mont->RR.top, ret = mont->N.top; i < ret; i++)
++        mont->RR.d[i] = 0;
++    mont->RR.top = ret;
++    mont->RR.flags |= BN_FLG_FIXED_TOP;
++
+     ret = 1;
+  err:
+     BN_CTX_end(ctx);
+diff -up openssl-1.0.2k/crypto/bn/bn_sqr.c.rohnp-fix openssl-1.0.2k/crypto/bn/bn_sqr.c
+--- openssl-1.0.2k/crypto/bn/bn_sqr.c.rohnp-fix	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/bn/bn_sqr.c	2018-08-14 10:57:21.593518726 +0200
+@@ -135,14 +135,8 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, B
+     }
+ 
+     rr->neg = 0;
+-    /*
+-     * If the most-significant half of the top word of 'a' is zero, then the
+-     * square of 'a' will max-1 words.
+-     */
+-    if (a->d[al - 1] == (a->d[al - 1] & BN_MASK2l))
+-        rr->top = max - 1;
+-    else
+-        rr->top = max;
++    rr->top = max;
++    bn_correct_top(rr);
+     if (r != rr && BN_copy(r, rr) == NULL)
+         goto err;
+ 
+diff -up openssl-1.0.2k/crypto/bn_int.h.rohnp-fix openssl-1.0.2k/crypto/bn_int.h
+--- openssl-1.0.2k/crypto/bn_int.h.rohnp-fix	2018-08-14 10:57:21.597518822 +0200
++++ openssl-1.0.2k/crypto/bn_int.h	2018-08-14 10:57:21.599518871 +0200
+@@ -0,0 +1,13 @@
++/*
++ * Some BIGNUM functions assume most significant limb to be non-zero, which
++ * is customarily arranged by bn_correct_top. Output from below functions
++ * is not processed with bn_correct_top, and for this reason it may not be
++ * returned out of public API. It may only be passed internally into other
++ * functions known to support non-minimal or zero-padded BIGNUMs.
++ */
++int bn_mul_mont_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
++                          BN_MONT_CTX *mont, BN_CTX *ctx);
++int bn_to_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont,
++                         BN_CTX *ctx);
++int bn_mod_add_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
++                         const BIGNUM *m);
+diff -up openssl-1.0.2k/crypto/dsa/dsa_ossl.c.rohnp-fix openssl-1.0.2k/crypto/dsa/dsa_ossl.c
+--- openssl-1.0.2k/crypto/dsa/dsa_ossl.c.rohnp-fix	2018-06-20 17:44:02.153396702 +0200
++++ openssl-1.0.2k/crypto/dsa/dsa_ossl.c	2018-06-20 17:44:02.577406741 +0200
+@@ -136,8 +136,7 @@ const DSA_METHOD *DSA_OpenSSL(void)
+ static DSA_SIG *dsa_do_sign(const unsigned char *dgst, int dlen, DSA *dsa)
+ {
+     BIGNUM *kinv = NULL, *r = NULL, *s = NULL;
+-    BIGNUM m;
+-    BIGNUM xr;
++    BIGNUM *m, *blind, *blindm, *tmp;
+     BN_CTX *ctx = NULL;
+     int reason = ERR_R_BN_LIB;
+     DSA_SIG *ret = NULL;
+@@ -156,9 +155,6 @@ static DSA_SIG *dsa_do_sign(const unsign
+     }
+ #endif
+ 
+-    BN_init(&m);
+-    BN_init(&xr);
+-
+     if (!dsa->p || !dsa->q || !dsa->g) {
+         reason = DSA_R_MISSING_PARAMETERS;
+         goto err;
+@@ -170,6 +166,14 @@ static DSA_SIG *dsa_do_sign(const unsign
+     ctx = BN_CTX_new();
+     if (ctx == NULL)
+         goto err;
++    BN_CTX_start(ctx);
++    m = BN_CTX_get(ctx);
++    blind = BN_CTX_get(ctx);
++    blindm = BN_CTX_get(ctx);
++    tmp = BN_CTX_get(ctx);
++    if (tmp == NULL)
++        goto err;
++
+  redo:
+     if ((dsa->kinv == NULL) || (dsa->r == NULL)) {
+         if (!DSA_sign_setup(dsa, ctx, &kinv, &r))
+@@ -189,20 +193,52 @@ static DSA_SIG *dsa_do_sign(const unsign
+          * 4.2
+          */
+         dlen = BN_num_bytes(dsa->q);
+-    if (BN_bin2bn(dgst, dlen, &m) == NULL)
++    if (BN_bin2bn(dgst, dlen, m) == NULL)
+         goto err;
+ 
+-    /* Compute  s = inv(k) (m + xr) mod q */
+-    if (!BN_mod_mul(&xr, dsa->priv_key, r, dsa->q, ctx))
+-        goto err;               /* s = xr */
+-    if (!BN_add(s, &xr, &m))
+-        goto err;               /* s = m + xr */
+-    if (BN_cmp(s, dsa->q) > 0)
+-        if (!BN_sub(s, s, dsa->q))
++    /*
++     * The normal signature calculation is:
++     *
++     *   s := k^-1 * (m + r * priv_key) mod q
++     *
++     * We will blind this to protect against side channel attacks
++     *
++     *   s := blind^-1 * k^-1 * (blind * m + blind * r * priv_key) mod q
++     */
++
++    /* Generate a blinding value */
++    do {
++        if (!BN_rand(blind, BN_num_bits(dsa->q) - 1, -1, 0))
+             goto err;
++    } while (BN_is_zero(blind));
++    BN_set_flags(blind, BN_FLG_CONSTTIME);
++    BN_set_flags(blindm, BN_FLG_CONSTTIME);
++    BN_set_flags(tmp, BN_FLG_CONSTTIME);
++
++    /* tmp := blind * priv_key * r mod q */
++    if (!BN_mod_mul(tmp, blind, dsa->priv_key, dsa->q, ctx))
++        goto err;
++    if (!BN_mod_mul(tmp, tmp, r, dsa->q, ctx))
++        goto err;
++
++    /* blindm := blind * m mod q */
++    if (!BN_mod_mul(blindm, blind, m, dsa->q, ctx))
++        goto err;
++
++    /* s : = (blind * priv_key * r) + (blind * m) mod q */
++    if (!BN_mod_add_quick(s, tmp, blindm, dsa->q))
++        goto err;
++
++    /* s := s * k^-1 mod q */
+     if (!BN_mod_mul(s, s, kinv, dsa->q, ctx))
+         goto err;
+ 
++    /* s:= s * blind^-1 mod q */
++    if (BN_mod_inverse(blind, blind, dsa->q, ctx) == NULL)
++        goto err;
++    if (!BN_mod_mul(s, s, blind, dsa->q, ctx))
++        goto err;
++
+     /*
+      * Redo if r or s is zero as required by FIPS 186-3: this is very
+      * unlikely.
+@@ -226,13 +262,12 @@ static DSA_SIG *dsa_do_sign(const unsign
+         BN_free(r);
+         BN_free(s);
+     }
+-    if (ctx != NULL)
++    if (ctx != NULL) {
++        BN_CTX_end(ctx);
+         BN_CTX_free(ctx);
+-    BN_clear_free(&m);
+-    BN_clear_free(&xr);
+-    if (kinv != NULL)           /* dsa->kinv is NULL now if we used it */
+-        BN_clear_free(kinv);
+-    return (ret);
++    }
++    BN_clear_free(kinv);
++    return ret;
+ }
+ 
+ static int dsa_sign_setup(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp,
+diff -up openssl-1.0.2k/crypto/ecdsa/ecs_ossl.c.rohnp-fix openssl-1.0.2k/crypto/ecdsa/ecs_ossl.c
+--- openssl-1.0.2k/crypto/ecdsa/ecs_ossl.c.rohnp-fix	2018-06-20 17:44:02.205397934 +0200
++++ openssl-1.0.2k/crypto/ecdsa/ecs_ossl.c	2018-08-14 11:18:02.062439755 +0200
+@@ -63,6 +63,7 @@
+ #ifdef OPENSSL_FIPS
+ # include <openssl/fips.h>
+ #endif
++#include "bn_int.h"
+ 
+ static ECDSA_SIG *ecdsa_do_sign(const unsigned char *dgst, int dlen,
+                                 const BIGNUM *, const BIGNUM *,
+@@ -98,6 +99,7 @@ static int ecdsa_sign_setup(EC_KEY *ecke
+     EC_POINT *tmp_point = NULL;
+     const EC_GROUP *group;
+     int ret = 0;
++    int order_bits;
+ 
+     if (eckey == NULL || (group = EC_KEY_get0_group(eckey)) == NULL) {
+         ECDSAerr(ECDSA_F_ECDSA_SIGN_SETUP, ERR_R_PASSED_NULL_PARAMETER);
+@@ -129,6 +131,13 @@ static int ecdsa_sign_setup(EC_KEY *ecke
+         goto err;
+     }
+ 
++    /* Preallocate space */
++    order_bits = BN_num_bits(order);
++    if (!BN_set_bit(k, order_bits)
++        || !BN_set_bit(r, order_bits)
++        || !BN_set_bit(X, order_bits))
++        goto err;
++
+     do {
+         /* get random k */
+         do
+@@ -142,13 +151,19 @@ static int ecdsa_sign_setup(EC_KEY *ecke
+         /*
+          * We do not want timing information to leak the length of k, so we
+          * compute G*k using an equivalent scalar of fixed bit-length.
++         *
++         * We unconditionally perform both of these additions to prevent a
++         * small timing information leakage.  We then choose the sum that is
++         * one bit longer than the order.  This guarantees the code
++         * path used in the constant time implementations elsewhere.
++         *
++         * TODO: revisit the BN_copy aiming for a memory access agnostic
++         * conditional copy.
+          */
+-
+-        if (!BN_add(k, k, order))
++        if (!BN_add(r, k, order)
++            || !BN_add(X, r, order)
++            || !BN_copy(k, BN_num_bits(r) > order_bits ? r : X))
+             goto err;
+-        if (BN_num_bits(k) <= BN_num_bits(order))
+-            if (!BN_add(k, k, order))
+-                goto err;
+ 
+         /* compute r the x-coordinate of generator * k */
+         if (!EC_POINT_mul(group, tmp_point, k, NULL, NULL, ctx)) {
+@@ -240,13 +255,14 @@ static ECDSA_SIG *ecdsa_do_sign(const un
+                                 EC_KEY *eckey)
+ {
+     int ok = 0, i;
+-    BIGNUM *kinv = NULL, *s, *m = NULL, *tmp = NULL, *order = NULL;
++    BIGNUM *kinv = NULL, *s, *m = NULL, *order = NULL;
+     const BIGNUM *ckinv;
+     BN_CTX *ctx = NULL;
+     const EC_GROUP *group;
+     ECDSA_SIG *ret;
+     ECDSA_DATA *ecdsa;
+     const BIGNUM *priv_key;
++    BN_MONT_CTX *mont_data;
+ 
+ #ifdef OPENSSL_FIPS
+     if (FIPS_selftest_failed()) {
+@@ -272,7 +288,7 @@ static ECDSA_SIG *ecdsa_do_sign(const un
+     s = ret->s;
+ 
+     if ((ctx = BN_CTX_new()) == NULL || (order = BN_new()) == NULL ||
+-        (tmp = BN_new()) == NULL || (m = BN_new()) == NULL) {
++        (m = BN_new()) == NULL) {
+         ECDSAerr(ECDSA_F_ECDSA_DO_SIGN, ERR_R_MALLOC_FAILURE);
+         goto err;
+     }
+@@ -281,6 +297,8 @@ static ECDSA_SIG *ecdsa_do_sign(const un
+         ECDSAerr(ECDSA_F_ECDSA_DO_SIGN, ERR_R_EC_LIB);
+         goto err;
+     }
++    mont_data = EC_GROUP_get_mont_data(group);
++
+     i = BN_num_bits(order);
+     /*
+      * Need to truncate digest if it is too long: first truncate whole bytes.
+@@ -311,21 +329,33 @@ static ECDSA_SIG *ecdsa_do_sign(const un
+             }
+         }
+ 
+-        if (!BN_mod_mul(tmp, priv_key, ret->r, order, ctx)) {
+-            ECDSAerr(ECDSA_F_ECDSA_DO_SIGN, ERR_R_BN_LIB);
++        /*
++         * With only one multiplicant being in Montgomery domain
++         * multiplication yields real result without post-conversion.
++         * Also note that all operations but last are performed with
++         * zero-padded vectors. Last operation, BN_mod_mul_montgomery
++         * below, returns user-visible value with removed zero padding.
++         */
++        if (!bn_to_mont_fixed_top(s, ret->r, mont_data, ctx)
++            || !bn_mul_mont_fixed_top(s, s, priv_key, mont_data, ctx)) {
+             goto err;
+         }
+-        if (!BN_mod_add_quick(s, tmp, m, order)) {
++        if (!bn_mod_add_fixed_top(s, s, m, order)) {
+             ECDSAerr(ECDSA_F_ECDSA_DO_SIGN, ERR_R_BN_LIB);
+             goto err;
+         }
+-        if (!BN_mod_mul(s, s, ckinv, order, ctx)) {
++        /*
++         * |s| can still be larger than modulus, because |m| can be. In
++         * such case we count on Montgomery reduction to tie it up.
++         */
++        if (!bn_to_mont_fixed_top(s, s, mont_data, ctx)
++            || !BN_mod_mul_montgomery(s, s, ckinv, mont_data, ctx)) {
+             ECDSAerr(ECDSA_F_ECDSA_DO_SIGN, ERR_R_BN_LIB);
+             goto err;
+         }
+         if (BN_is_zero(s)) {
+             /*
+-             * if kinv and r have been supplied by the caller don't to
++             * if kinv and r have been supplied by the caller don't
+              * generate new kinv and r values
+              */
+             if (in_kinv != NULL && in_r != NULL) {
+@@ -349,8 +379,6 @@ static ECDSA_SIG *ecdsa_do_sign(const un
+         BN_CTX_free(ctx);
+     if (m)
+         BN_clear_free(m);
+-    if (tmp)
+-        BN_clear_free(tmp);
+     if (order)
+         BN_free(order);
+     if (kinv)
+diff -up openssl-1.0.2k/crypto/Makefile.rohnp-fix openssl-1.0.2k/crypto/Makefile
+--- openssl-1.0.2k/crypto/Makefile.rohnp-fix	2018-06-20 17:44:02.467404137 +0200
++++ openssl-1.0.2k/crypto/Makefile	2018-08-14 10:57:21.595518774 +0200
+@@ -45,7 +45,7 @@ SRC= $(LIBSRC)
+ EXHEADER= crypto.h opensslv.h opensslconf.h ebcdic.h symhacks.h \
+ 	ossl_typ.h
+ HEADER=	cryptlib.h buildinf.h md32_common.h o_time.h o_str.h o_dir.h \
+-	constant_time_locl.h $(EXHEADER)
++	constant_time_locl.h bn_int.h $(EXHEADER)
+ 
+ ALL=    $(GENERAL) $(SRC) $(HEADER)
+ 
diff --git a/SOURCES/openssl-1.0.2k-cve-2018-0732.patch b/SOURCES/openssl-1.0.2k-cve-2018-0732.patch
new file mode 100644
index 0000000..1b8920f
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-cve-2018-0732.patch
@@ -0,0 +1,24 @@
+diff -up openssl-1.0.2k/crypto/dh/dh_key.c.large-dh openssl-1.0.2k/crypto/dh/dh_key.c
+--- openssl-1.0.2k/crypto/dh/dh_key.c.large-dh	2018-06-18 13:46:24.268137362 +0200
++++ openssl-1.0.2k/crypto/dh/dh_key.c	2018-06-18 13:59:04.605497462 +0200
+@@ -133,7 +133,7 @@ static int generate_key(DH *dh)
+     int ok = 0;
+     int generate_new_key = 0;
+     unsigned l;
+-    BN_CTX *ctx;
++    BN_CTX *ctx = NULL;
+     BN_MONT_CTX *mont = NULL;
+     BIGNUM *pub_key = NULL, *priv_key = NULL;
+ 
+@@ -145,6 +145,11 @@ static int generate_key(DH *dh)
+     }
+ #endif
+ 
++    if (BN_num_bits(dh->p) > OPENSSL_DH_MAX_MODULUS_BITS) {
++        DHerr(DH_F_GENERATE_KEY, DH_R_MODULUS_TOO_LARGE);
++        return 0;
++    }
++
+     ctx = BN_CTX_new();
+     if (ctx == NULL)
+         goto err;
diff --git a/SOURCES/openssl-1.0.2k-cve-2018-0737.patch b/SOURCES/openssl-1.0.2k-cve-2018-0737.patch
new file mode 100644
index 0000000..bd9381f
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-cve-2018-0737.patch
@@ -0,0 +1,274 @@
+diff -up openssl-1.0.2k/crypto/rsa/rsa_gen.c.gen-timing openssl-1.0.2k/crypto/rsa/rsa_gen.c
+--- openssl-1.0.2k/crypto/rsa/rsa_gen.c.gen-timing	2018-06-18 13:46:24.323138691 +0200
++++ openssl-1.0.2k/crypto/rsa/rsa_gen.c	2018-06-18 14:53:26.361975922 +0200
+@@ -1,6 +1,6 @@
+ /* crypto/rsa/rsa_gen.c */
+ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+- * Copyright (C) 2013 Red Hat, Inc.
++ * Copyright (C) 2013, 2018 Red Hat, Inc.
+  * All rights reserved.
+  *
+  * This package is an SSL implementation written
+@@ -175,14 +175,13 @@ static int FIPS_rsa_builtin_keygen(RSA *
+                                    BN_GENCB *cb)
+ {
+     BIGNUM *r0 = NULL, *r1 = NULL, *r2 = NULL, *r3 = NULL, *tmp;
+-    BIGNUM local_r0, local_d, local_p;
+-    BIGNUM *pr0, *d, *p;
+     BN_CTX *ctx = NULL;
+     int ok = -1;
+     int i;
+     int n = 0;
+     int test = 0;
+     int pbits = bits / 2;
++    unsigned long error = 0;
+ 
+     if (FIPS_selftest_failed()) {
+         FIPSerr(FIPS_F_RSA_BUILTIN_KEYGEN, FIPS_R_FIPS_SELFTEST_FAILED);
+@@ -251,6 +250,12 @@ static int FIPS_rsa_builtin_keygen(RSA *
+     if (!BN_is_zero(rsa->p) && !BN_is_zero(rsa->q))
+         test = 1;
+ 
++    BN_set_flags(r0, BN_FLG_CONSTTIME);
++    BN_set_flags(r1, BN_FLG_CONSTTIME);
++    BN_set_flags(r2, BN_FLG_CONSTTIME);
++    BN_set_flags(rsa->p, BN_FLG_CONSTTIME);
++    BN_set_flags(rsa->q, BN_FLG_CONSTTIME);
++
+  retry:
+     /* generate p and q */
+     for (i = 0; i < 5 * pbits; i++) {
+@@ -266,9 +271,9 @@ static int FIPS_rsa_builtin_keygen(RSA *
+ 
+         if (!BN_sub(r2, rsa->p, BN_value_one()))
+             goto err;
+-        if (!BN_gcd(r1, r2, rsa->e, ctx))
+-            goto err;
+-        if (BN_is_one(r1)) {
++        ERR_set_mark();
++        if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
++            /* GCD == 1 since inverse exists */
+             int r;
+             r = BN_is_prime_fasttest_ex(rsa->p, pbits > 1024 ? 4 : 5, ctx, 0,
+                                         cb);
+@@ -276,8 +281,16 @@ static int FIPS_rsa_builtin_keygen(RSA *
+                 goto err;
+             if (r > 0)
+                 break;
++        } else {
++            error = ERR_peek_last_error();
++            if (ERR_GET_LIB(error) == ERR_LIB_BN
++                && ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
++                /* GCD != 1 */
++                ERR_pop_to_mark();
++            } else {
++                goto err;
++            }
+         }
+-
+         if (!BN_GENCB_call(cb, 2, n++))
+             goto err;
+     }
+@@ -309,9 +322,9 @@ static int FIPS_rsa_builtin_keygen(RSA *
+ 
+         if (!BN_sub(r2, rsa->q, BN_value_one()))
+             goto err;
+-        if (!BN_gcd(r1, r2, rsa->e, ctx))
+-            goto err;
+-        if (BN_is_one(r1)) {
++        ERR_set_mark();
++        if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
++            /* GCD == 1 since inverse exists */
+             int r;
+             r = BN_is_prime_fasttest_ex(rsa->q, pbits > 1024 ? 4 : 5, ctx, 0,
+                                         cb);
+@@ -319,8 +332,16 @@ static int FIPS_rsa_builtin_keygen(RSA *
+                 goto err;
+             if (r > 0)
+                 break;
++        } else {
++            error = ERR_peek_last_error();
++            if (ERR_GET_LIB(error) == ERR_LIB_BN
++                && ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
++                /* GCD != 1 */
++                ERR_pop_to_mark();
++            } else {
++                goto err;
++            }
+         }
+-
+         if (!BN_GENCB_call(cb, 2, n++))
+             goto err;
+     }
+@@ -355,51 +376,44 @@ static int FIPS_rsa_builtin_keygen(RSA *
+     if (!BN_sub(r2, rsa->q, BN_value_one()))
+         goto err;               /* q-1 */
+ 
++    /* note that computing gcd is not safe to timing attacks */
+     if (!BN_gcd(r0, r1, r2, ctx))
+         goto err;
+-    if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
+-        pr0 = &local_r0;
+-        BN_with_flags(pr0, r0, BN_FLG_CONSTTIME);
+-    } else
+-        pr0 = r0;
+-    if (!BN_div(r0, NULL, r1, pr0, ctx))
+-        goto err;
+-    if (!BN_mul(r0, r0, r2, ctx))
+-        goto err;               /* lcm(p-1, q-1) */
+-
+-    if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
+-        pr0 = &local_r0;
+-        BN_with_flags(pr0, r0, BN_FLG_CONSTTIME);
+-    } else
+-        pr0 = r0;
+-    if (!BN_mod_inverse(rsa->d, rsa->e, pr0, ctx))
+-        goto err;               /* d */
++
++    {
++        if (!BN_div(r0, NULL, r1, r0, ctx))
++            goto err;
++
++        if (!BN_mul(r0, r0, r2, ctx)) /* lcm(p-1, q-1) */
++            goto err;
++
++        if (!BN_mod_inverse(rsa->d, rsa->e, r0, ctx)) /* d */
++            goto err;
++    }
+ 
+     if (BN_num_bits(rsa->d) < pbits)
+         goto retry;             /* d is too small */
+ 
+-    /* set up d for correct BN_FLG_CONSTTIME flag */
+-    if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
+-        d = &local_d;
+-        BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
+-    } else
+-        d = rsa->d;
++    {
++        BIGNUM *d = BN_new();
+ 
+-    /* calculate d mod (p-1) */
+-    if (!BN_mod(rsa->dmp1, d, r1, ctx))
+-        goto err;
++        if (d == NULL)
++            goto err;
++        BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
+ 
+-    /* calculate d mod (q-1) */
+-    if (!BN_mod(rsa->dmq1, d, r2, ctx))
+-        goto err;
++        if (/* calculate d mod (p-1) */
++            !BN_mod(rsa->dmp1, d, r1, ctx)
++            /* calculate d mod (q-1) */
++            || !BN_mod(rsa->dmq1, d, r2, ctx)) {
++            BN_free(d);
++            goto err;
++        }
++        /* We MUST free d before any further use of rsa->d */
++        BN_free(d);
++    }
+ 
+     /* calculate inverse of q mod p */
+-    if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
+-        p = &local_p;
+-        BN_with_flags(p, rsa->p, BN_FLG_CONSTTIME);
+-    } else
+-        p = rsa->p;
+-    if (!BN_mod_inverse(rsa->iqmp, rsa->q, p, ctx))
++    if (!BN_mod_inverse(rsa->iqmp, rsa->q, rsa->p, ctx))
+         goto err;
+ 
+     if (fips_rsa_pairwise_fail)
+@@ -431,6 +445,17 @@ static int rsa_builtin_keygen(RSA *rsa,
+     BIGNUM *pr0, *d, *p;
+     int bitsp, bitsq, ok = -1, n = 0;
+     BN_CTX *ctx = NULL;
++    unsigned long error = 0;
++
++    /*
++     * When generating ridiculously small keys, we can get stuck
++     * continually regenerating the same prime values.
++     */
++    if (bits < 16) {
++        ok = 0;             /* we set our own err */
++        RSAerr(RSA_F_RSA_BUILTIN_KEYGEN, RSA_R_KEY_SIZE_TOO_SMALL);
++        goto err;
++    }
+ 
+ #ifdef OPENSSL_FIPS
+     if (FIPS_module_mode()) {
+@@ -483,45 +508,55 @@ static int rsa_builtin_keygen(RSA *rsa,
+     if (BN_copy(rsa->e, e_value) == NULL)
+         goto err;
+ 
++    BN_set_flags(rsa->p, BN_FLG_CONSTTIME);
++    BN_set_flags(rsa->q, BN_FLG_CONSTTIME);
++    BN_set_flags(r2, BN_FLG_CONSTTIME);
+     /* generate p and q */
+     for (;;) {
+         if (!BN_generate_prime_ex(rsa->p, bitsp, 0, NULL, NULL, cb))
+             goto err;
+         if (!BN_sub(r2, rsa->p, BN_value_one()))
+             goto err;
+-        if (!BN_gcd(r1, r2, rsa->e, ctx))
+-            goto err;
+-        if (BN_is_one(r1))
++        ERR_set_mark();
++        if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
++            /* GCD == 1 since inverse exists */
+             break;
++        }
++        error = ERR_peek_last_error();
++        if (ERR_GET_LIB(error) == ERR_LIB_BN
++            && ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
++            /* GCD != 1 */
++            ERR_pop_to_mark();
++        } else {
++            goto err;
++        }
+         if (!BN_GENCB_call(cb, 2, n++))
+             goto err;
+     }
+     if (!BN_GENCB_call(cb, 3, 0))
+         goto err;
+     for (;;) {
+-        /*
+-         * When generating ridiculously small keys, we can get stuck
+-         * continually regenerating the same prime values. Check for this and
+-         * bail if it happens 3 times.
+-         */
+-        unsigned int degenerate = 0;
+         do {
+             if (!BN_generate_prime_ex(rsa->q, bitsq, 0, NULL, NULL, cb))
+                 goto err;
+             if (!BN_sub(r2, rsa->q, rsa->p))
+                 goto err;
+-        } while ((BN_ucmp(r2, r3) <= 0) && (++degenerate < 3));
+-        if (degenerate == 3) {
+-            ok = 0;             /* we set our own err */
+-            RSAerr(RSA_F_RSA_BUILTIN_KEYGEN, RSA_R_KEY_SIZE_TOO_SMALL);
+-            goto err;
+-        }
++        } while (BN_ucmp(r2, r3) <= 0);
+         if (!BN_sub(r2, rsa->q, BN_value_one()))
+             goto err;
+-        if (!BN_gcd(r1, r2, rsa->e, ctx))
+-            goto err;
+-        if (BN_is_one(r1))
++        ERR_set_mark();
++        if (BN_mod_inverse(r1, r2, rsa->e, ctx) != NULL) {
++            /* GCD == 1 since inverse exists */
+             break;
++        }
++        error = ERR_peek_last_error();
++        if (ERR_GET_LIB(error) == ERR_LIB_BN
++            && ERR_GET_REASON(error) == BN_R_NO_INVERSE) {
++            /* GCD != 1 */
++            ERR_pop_to_mark();
++        } else {
++            goto err;
++        }
+         if (!BN_GENCB_call(cb, 2, n++))
+             goto err;
+     }
diff --git a/SOURCES/openssl-1.0.2k-cve-2018-0739.patch b/SOURCES/openssl-1.0.2k-cve-2018-0739.patch
new file mode 100644
index 0000000..14d5e0b
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-cve-2018-0739.patch
@@ -0,0 +1,217 @@
+diff -up openssl-1.0.2k/crypto/asn1/asn1_err.c.asn1-recursive openssl-1.0.2k/crypto/asn1/asn1_err.c
+--- openssl-1.0.2k/crypto/asn1/asn1_err.c.asn1-recursive	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/asn1/asn1_err.c	2018-06-18 15:08:18.333412753 +0200
+@@ -279,6 +279,7 @@ static ERR_STRING_DATA ASN1_str_reasons[
+     {ERR_REASON(ASN1_R_MSTRING_NOT_UNIVERSAL), "mstring not universal"},
+     {ERR_REASON(ASN1_R_MSTRING_WRONG_TAG), "mstring wrong tag"},
+     {ERR_REASON(ASN1_R_NESTED_ASN1_STRING), "nested asn1 string"},
++    {ERR_REASON(ASN1_R_NESTED_TOO_DEEP), "nested too deep"},
+     {ERR_REASON(ASN1_R_NON_HEX_CHARACTERS), "non hex characters"},
+     {ERR_REASON(ASN1_R_NOT_ASCII_FORMAT), "not ascii format"},
+     {ERR_REASON(ASN1_R_NOT_ENOUGH_DATA), "not enough data"},
+diff -up openssl-1.0.2k/crypto/asn1/asn1.h.asn1-recursive openssl-1.0.2k/crypto/asn1/asn1.h
+--- openssl-1.0.2k/crypto/asn1/asn1.h.asn1-recursive	2018-06-18 13:46:23.857127431 +0200
++++ openssl-1.0.2k/crypto/asn1/asn1.h	2018-06-18 15:07:53.915826715 +0200
+@@ -1365,6 +1365,7 @@ void ERR_load_ASN1_strings(void);
+ # define ASN1_R_MSTRING_NOT_UNIVERSAL                     139
+ # define ASN1_R_MSTRING_WRONG_TAG                         140
+ # define ASN1_R_NESTED_ASN1_STRING                        197
++# define ASN1_R_NESTED_TOO_DEEP                           219
+ # define ASN1_R_NON_HEX_CHARACTERS                        141
+ # define ASN1_R_NOT_ASCII_FORMAT                          190
+ # define ASN1_R_NOT_ENOUGH_DATA                           142
+diff -up openssl-1.0.2k/crypto/asn1/tasn_dec.c.asn1-recursive openssl-1.0.2k/crypto/asn1/tasn_dec.c
+--- openssl-1.0.2k/crypto/asn1/tasn_dec.c.asn1-recursive	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/asn1/tasn_dec.c	2018-06-18 15:14:28.978308482 +0200
+@@ -4,7 +4,7 @@
+  * 2000.
+  */
+ /* ====================================================================
+- * Copyright (c) 2000-2005 The OpenSSL Project.  All rights reserved.
++ * Copyright (c) 2000-2018 The OpenSSL Project.  All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions
+@@ -65,6 +65,14 @@
+ #include <openssl/buffer.h>
+ #include <openssl/err.h>
+ 
++/*
++ * Constructed types with a recursive definition (such as can be found in PKCS7)
++ * could eventually exceed the stack given malicious input with excessive
++ * recursion. Therefore we limit the stack depth. This is the maximum number of
++ * recursive invocations of asn1_item_embed_d2i().
++ */
++#define ASN1_MAX_CONSTRUCTED_NEST 30
++
+ static int asn1_check_eoc(const unsigned char **in, long len);
+ static int asn1_find_end(const unsigned char **in, long len, char inf);
+ 
+@@ -81,11 +89,11 @@ static int asn1_check_tlen(long *olen, i
+ static int asn1_template_ex_d2i(ASN1_VALUE **pval,
+                                 const unsigned char **in, long len,
+                                 const ASN1_TEMPLATE *tt, char opt,
+-                                ASN1_TLC *ctx);
++                                ASN1_TLC *ctx, int depth);
+ static int asn1_template_noexp_d2i(ASN1_VALUE **val,
+                                    const unsigned char **in, long len,
+                                    const ASN1_TEMPLATE *tt, char opt,
+-                                   ASN1_TLC *ctx);
++                                   ASN1_TLC *ctx, int depth);
+ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
+                                  const unsigned char **in, long len,
+                                  const ASN1_ITEM *it,
+@@ -154,17 +162,16 @@ int ASN1_template_d2i(ASN1_VALUE **pval,
+ {
+     ASN1_TLC c;
+     asn1_tlc_clear_nc(&c);
+-    return asn1_template_ex_d2i(pval, in, len, tt, 0, &c);
++    return asn1_template_ex_d2i(pval, in, len, tt, 0, &c, 0);
+ }
+ 
+ /*
+  * Decode an item, taking care of IMPLICIT tagging, if any. If 'opt' set and
+  * tag mismatch return -1 to handle OPTIONAL
+  */
+-
+-int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
+-                     const ASN1_ITEM *it,
+-                     int tag, int aclass, char opt, ASN1_TLC *ctx)
++static int asn1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in,
++                            long len, const ASN1_ITEM *it, int tag, int aclass,
++                            char opt, ASN1_TLC *ctx, int depth)
+ {
+     const ASN1_TEMPLATE *tt, *errtt = NULL;
+     const ASN1_COMPAT_FUNCS *cf;
+@@ -189,6 +196,11 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval,
+     else
+         asn1_cb = 0;
+ 
++    if (++depth > ASN1_MAX_CONSTRUCTED_NEST) {
++        ASN1err(ASN1_F_ASN1_ITEM_EX_D2I, ASN1_R_NESTED_TOO_DEEP);
++        goto err;
++    }
++
+     switch (it->itype) {
+     case ASN1_ITYPE_PRIMITIVE:
+         if (it->templates) {
+@@ -204,7 +216,7 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval,
+                 goto err;
+             }
+             return asn1_template_ex_d2i(pval, in, len,
+-                                        it->templates, opt, ctx);
++                                        it->templates, opt, ctx, depth);
+         }
+         return asn1_d2i_ex_primitive(pval, in, len, it,
+                                      tag, aclass, opt, ctx);
+@@ -326,7 +338,7 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval,
+             /*
+              * We mark field as OPTIONAL so its absence can be recognised.
+              */
+-            ret = asn1_template_ex_d2i(pchptr, &p, len, tt, 1, ctx);
++            ret = asn1_template_ex_d2i(pchptr, &p, len, tt, 1, ctx, depth);
+             /* If field not present, try the next one */
+             if (ret == -1)
+                 continue;
+@@ -444,7 +456,8 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval,
+              * attempt to read in field, allowing each to be OPTIONAL
+              */
+ 
+-            ret = asn1_template_ex_d2i(pseqval, &p, len, seqtt, isopt, ctx);
++            ret = asn1_template_ex_d2i(pseqval, &p, len, seqtt, isopt, ctx,
++                                       depth);
+             if (!ret) {
+                 errtt = seqtt;
+                 goto err;
+@@ -514,6 +527,13 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval,
+     return 0;
+ }
+ 
++int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
++                     const ASN1_ITEM *it,
++                     int tag, int aclass, char opt, ASN1_TLC *ctx)
++{
++    return asn1_item_ex_d2i(pval, in, len, it, tag, aclass, opt, ctx, 0);
++}
++
+ /*
+  * Templates are handled with two separate functions. One handles any
+  * EXPLICIT tag and the other handles the rest.
+@@ -522,7 +542,7 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval,
+ static int asn1_template_ex_d2i(ASN1_VALUE **val,
+                                 const unsigned char **in, long inlen,
+                                 const ASN1_TEMPLATE *tt, char opt,
+-                                ASN1_TLC *ctx)
++                                ASN1_TLC *ctx, int depth)
+ {
+     int flags, aclass;
+     int ret;
+@@ -557,7 +577,7 @@ static int asn1_template_ex_d2i(ASN1_VAL
+             return 0;
+         }
+         /* We've found the field so it can't be OPTIONAL now */
+-        ret = asn1_template_noexp_d2i(val, &p, len, tt, 0, ctx);
++        ret = asn1_template_noexp_d2i(val, &p, len, tt, 0, ctx, depth);
+         if (!ret) {
+             ASN1err(ASN1_F_ASN1_TEMPLATE_EX_D2I, ERR_R_NESTED_ASN1_ERROR);
+             return 0;
+@@ -581,7 +601,7 @@ static int asn1_template_ex_d2i(ASN1_VAL
+             }
+         }
+     } else
+-        return asn1_template_noexp_d2i(val, in, inlen, tt, opt, ctx);
++        return asn1_template_noexp_d2i(val, in, inlen, tt, opt, ctx, depth);
+ 
+     *in = p;
+     return 1;
+@@ -594,7 +614,7 @@ static int asn1_template_ex_d2i(ASN1_VAL
+ static int asn1_template_noexp_d2i(ASN1_VALUE **val,
+                                    const unsigned char **in, long len,
+                                    const ASN1_TEMPLATE *tt, char opt,
+-                                   ASN1_TLC *ctx)
++                                   ASN1_TLC *ctx, int depth)
+ {
+     int flags, aclass;
+     int ret;
+@@ -665,14 +685,15 @@ static int asn1_template_noexp_d2i(ASN1_
+                 break;
+             }
+             skfield = NULL;
+-            if (!ASN1_item_ex_d2i(&skfield, &p, len,
+-                                  ASN1_ITEM_ptr(tt->item), -1, 0, 0, ctx)) {
++            if (!asn1_item_ex_d2i(&skfield, &p, len, ASN1_ITEM_ptr(tt->item),
++                                  -1, 0, 0, ctx, depth)) {
+                 ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I,
+                         ERR_R_NESTED_ASN1_ERROR);
+                 goto err;
+             }
+             len -= p - q;
+             if (!sk_ASN1_VALUE_push((STACK_OF(ASN1_VALUE) *)*val, skfield)) {
++                ASN1_item_ex_free(&skfield, ASN1_ITEM_ptr(tt->item));
+                 ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_MALLOC_FAILURE);
+                 goto err;
+             }
+@@ -683,9 +704,8 @@ static int asn1_template_noexp_d2i(ASN1_
+         }
+     } else if (flags & ASN1_TFLG_IMPTAG) {
+         /* IMPLICIT tagging */
+-        ret = ASN1_item_ex_d2i(val, &p, len,
+-                               ASN1_ITEM_ptr(tt->item), tt->tag, aclass, opt,
+-                               ctx);
++        ret = asn1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item), tt->tag,
++                               aclass, opt, ctx, depth);
+         if (!ret) {
+             ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
+             goto err;
+@@ -693,8 +713,9 @@ static int asn1_template_noexp_d2i(ASN1_
+             return -1;
+     } else {
+         /* Nothing special */
+-        ret = ASN1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item),
+-                               -1, tt->flags & ASN1_TFLG_COMBINE, opt, ctx);
++        ret = asn1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item),
++                               -1, tt->flags & ASN1_TFLG_COMBINE, opt, ctx,
++                               depth);
+         if (!ret) {
+             ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
+             goto err;
diff --git a/SOURCES/openssl-1.0.2k-name-sensitive.patch b/SOURCES/openssl-1.0.2k-name-sensitive.patch
new file mode 100644
index 0000000..909ce30
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-name-sensitive.patch
@@ -0,0 +1,57 @@
+diff -up openssl-1.0.2k/ssl/ssl_cert.c.name-sensitive openssl-1.0.2k/ssl/ssl_cert.c
+--- openssl-1.0.2k/ssl/ssl_cert.c.name-sensitive	2017-01-26 14:22:04.000000000 +0100
++++ openssl-1.0.2k/ssl/ssl_cert.c	2018-06-18 13:43:12.452502627 +0200
+@@ -855,9 +855,33 @@ int SSL_CTX_add_client_CA(SSL_CTX *ctx,
+     return (add_client_CA(&(ctx->client_CA), x));
+ }
+ 
+-static int xname_cmp(const X509_NAME *const *a, const X509_NAME *const *b)
++static int xname_cmp(const X509_NAME *a, const X509_NAME *b)
+ {
+-    return (X509_NAME_cmp(*a, *b));
++    unsigned char *abuf = NULL, *bbuf = NULL;
++    int alen, blen, ret;
++
++    /* X509_NAME_cmp() itself casts away constness in this way, so
++     * assume it's safe:
++     */
++    alen = i2d_X509_NAME((X509_NAME *)a, &abuf);
++    blen = i2d_X509_NAME((X509_NAME *)b, &bbuf);
++
++    if (alen < 0 || blen < 0)
++        ret = -2;
++    else if (alen != blen)
++        ret = alen - blen;
++    else /* alen == blen */
++        ret = memcmp(abuf, bbuf, alen);
++
++    OPENSSL_free(abuf);
++    OPENSSL_free(bbuf);
++
++    return ret;
++}
++
++static int xname_sk_cmp(const X509_NAME *const *a, const X509_NAME *const *b)
++{
++    return xname_cmp(*a, *b);
+ }
+ 
+ #ifndef OPENSSL_NO_STDIO
+@@ -876,7 +900,7 @@ STACK_OF(X509_NAME) *SSL_load_client_CA_
+     X509_NAME *xn = NULL;
+     STACK_OF(X509_NAME) *ret = NULL, *sk;
+ 
+-    sk = sk_X509_NAME_new(xname_cmp);
++    sk = sk_X509_NAME_new(xname_sk_cmp);
+ 
+     in = BIO_new(BIO_s_file_internal());
+ 
+@@ -948,7 +972,7 @@ int SSL_add_file_cert_subjects_to_stack(
+     int ret = 1;
+     int (*oldcmp) (const X509_NAME *const *a, const X509_NAME *const *b);
+ 
+-    oldcmp = sk_X509_NAME_set_cmp_func(stack, xname_cmp);
++    oldcmp = sk_X509_NAME_set_cmp_func(stack, xname_sk_cmp);
+ 
+     in = BIO_new(BIO_s_file_internal());
+ 
diff --git a/SOURCES/openssl-1.0.2k-s390x-update.patch b/SOURCES/openssl-1.0.2k-s390x-update.patch
new file mode 100644
index 0000000..8712de5
--- /dev/null
+++ b/SOURCES/openssl-1.0.2k-s390x-update.patch
@@ -0,0 +1,1368 @@
+diff -up openssl-1.0.2k/crypto/aes/asm/aes-s390x.pl.s390x-update openssl-1.0.2k/crypto/aes/asm/aes-s390x.pl
+--- openssl-1.0.2k/crypto/aes/asm/aes-s390x.pl.s390x-update	2018-06-18 12:20:47.197994636 +0200
++++ openssl-1.0.2k/crypto/aes/asm/aes-s390x.pl	2018-06-18 13:27:37.109817203 +0200
+@@ -122,6 +122,8 @@ sub _data_word()
+ }
+ 
+ $code=<<___;
++#include "s390x_arch.h"
++
+ .text
+ 
+ .type	AES_Te,\@object
+@@ -814,13 +816,10 @@ $code.=<<___ if (!$softonly);
+ 	ar	%r5,%r0
+ 
+ 	larl	%r1,OPENSSL_s390xcap_P
+-	lg	%r0,0(%r1)
+-	tmhl	%r0,0x4000	# check for message-security assist
+-	jz	.Lekey_internal
+-
+ 	llihh	%r0,0x8000
+ 	srlg	%r0,%r0,0(%r5)
+-	ng	%r0,48(%r1)	# check kmc capability vector
++	ng	%r0,S390X_KM(%r1)  # check availability of both km...
++	ng	%r0,S390X_KMC(%r1) # ...and kmc support for given key length
+ 	jz	.Lekey_internal
+ 
+ 	lmg	%r0,%r1,0($inp)	# just copy 128 bits...
+@@ -1443,7 +1442,7 @@ $code.=<<___ if (0);	######### kmctr cod
+ 	larl	%r1,OPENSSL_s390xcap_P
+ 	llihh	%r0,0x8000	# check if kmctr supports the function code
+ 	srlg	%r0,%r0,0($s0)
+-	ng	%r0,64(%r1)	# check kmctr capability vector
++	ng	%r0,S390X_KMCTR(%r1)	# check kmctr capability vector
+ 	lgr	%r0,$s0
+ 	lgr	%r1,$s1
+ 	jz	.Lctr32_km_loop
+@@ -1593,7 +1592,7 @@ $code.=<<___ if(1);
+ 	larl	%r1,OPENSSL_s390xcap_P
+ 	llihh	%r0,0x8000
+ 	srlg	%r0,%r0,32($s1)		# check for 32+function code
+-	ng	%r0,32(%r1)		# check km capability vector
++	ng	%r0,S390X_KM(%r1)	# check km capability vector
+ 	lgr	%r0,$s0			# restore the function code
+ 	la	%r1,0($key1)		# restore $key1
+ 	jz	.Lxts_km_vanilla
+@@ -2220,7 +2219,6 @@ ___
+ }
+ $code.=<<___;
+ .string	"AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+-.comm	OPENSSL_s390xcap_P,80,8
+ ___
+ 
+ $code =~ s/\`([^\`]*)\`/eval $1/gem;
+diff -up openssl-1.0.2k/crypto/aes/Makefile.s390x-update openssl-1.0.2k/crypto/aes/Makefile
+--- openssl-1.0.2k/crypto/aes/Makefile.s390x-update	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/aes/Makefile	2018-06-18 13:27:37.108817179 +0200
+@@ -92,6 +92,8 @@ aesv8-armx.S:	asm/aesv8-armx.pl
+ 	$(PERL) asm/aesv8-armx.pl $(PERLASM_SCHEME) $@
+ aesv8-armx.o:	aesv8-armx.S
+ 
++aes-s390x.o:	aes-s390x.S
++
+ # GNU make "catch all"
+ aes-%.S:	asm/aes-%.pl;	$(PERL) $< $(PERLASM_SCHEME) > $@
+ aes-armv4.o:	aes-armv4.S
+diff -up openssl-1.0.2k/crypto/evp/e_aes.c.s390x-update openssl-1.0.2k/crypto/evp/e_aes.c
+--- openssl-1.0.2k/crypto/evp/e_aes.c.s390x-update	2018-06-18 12:20:47.104992361 +0200
++++ openssl-1.0.2k/crypto/evp/e_aes.c	2018-06-18 13:28:07.033543735 +0200
+@@ -854,6 +854,723 @@ static const EVP_CIPHER aes_##keylen##_#
+ const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
+ { return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }
+ 
++#elif defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
++/*
++ * IBM S390X support
++ */
++# include "s390x_arch.h"
++
++typedef struct {
++    union {
++        double align;
++        /*-
++         * KMA-GCM-AES parameter block
++         * (see z/Architecture Principles of Operation SA22-7832-11)
++         */
++        struct {
++            unsigned char reserved[12];
++            union {
++                unsigned int w;
++                unsigned char b[4];
++            } cv;
++            union {
++                unsigned long long g[2];
++                unsigned char b[16];
++            } t;
++            unsigned char h[16];
++            unsigned long long taadl;
++            unsigned long long tpcl;
++            union {
++                unsigned long long g[2];
++                unsigned int w[4];
++            } j0;
++            unsigned char k[32];
++        } param;
++    } kma;
++    unsigned int fc;
++    int key_set;
++
++    unsigned char *iv;
++    int ivlen;
++    int iv_set;
++    int iv_gen;
++
++    int taglen;
++
++    unsigned char ares[16];
++    unsigned char mres[16];
++    unsigned char kres[16];
++    int areslen;
++    int mreslen;
++    int kreslen;
++
++    int tls_aad_len;
++} S390X_AES_GCM_CTX;
++
++# define S390X_aes_128_CAPABLE ((OPENSSL_s390xcap_P.km[0] &	\
++                                 S390X_CAPBIT(S390X_AES_128)) &&\
++                                (OPENSSL_s390xcap_P.kmc[0] &	\
++                                 S390X_CAPBIT(S390X_AES_128)))
++# define S390X_aes_192_CAPABLE ((OPENSSL_s390xcap_P.km[0] &	\
++                                 S390X_CAPBIT(S390X_AES_192)) &&\
++                                (OPENSSL_s390xcap_P.kmc[0] &	\
++                                 S390X_CAPBIT(S390X_AES_192)))
++# define S390X_aes_256_CAPABLE ((OPENSSL_s390xcap_P.km[0] &	\
++                                 S390X_CAPBIT(S390X_AES_256)) &&\
++                                (OPENSSL_s390xcap_P.kmc[0] &	\
++                                 S390X_CAPBIT(S390X_AES_256)))
++
++# define s390x_aes_init_key aes_init_key
++static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
++                              const unsigned char *iv, int enc);
++
++# define S390X_aes_128_cbc_CAPABLE	1	/* checked by callee */
++# define S390X_aes_192_cbc_CAPABLE	1
++# define S390X_aes_256_cbc_CAPABLE	1
++
++# define s390x_aes_cbc_cipher aes_cbc_cipher
++static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++
++# define S390X_aes_128_ecb_CAPABLE	0
++# define S390X_aes_192_ecb_CAPABLE	0
++# define S390X_aes_256_ecb_CAPABLE	0
++
++# define s390x_aes_ecb_cipher aes_ecb_cipher
++static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++
++# define S390X_aes_128_ofb_CAPABLE	0
++# define S390X_aes_192_ofb_CAPABLE	0
++# define S390X_aes_256_ofb_CAPABLE	0
++
++# define s390x_aes_ofb_cipher aes_ofb_cipher
++static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++
++# define S390X_aes_128_cfb_CAPABLE	0
++# define S390X_aes_192_cfb_CAPABLE	0
++# define S390X_aes_256_cfb_CAPABLE	0
++
++# define s390x_aes_cfb_cipher aes_cfb_cipher
++static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++
++# define S390X_aes_128_cfb8_CAPABLE	0
++# define S390X_aes_192_cfb8_CAPABLE	0
++# define S390X_aes_256_cfb8_CAPABLE	0
++
++# define s390x_aes_cfb8_cipher aes_cfb8_cipher
++static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                 const unsigned char *in, size_t len);
++
++# define S390X_aes_128_cfb1_CAPABLE	0
++# define S390X_aes_192_cfb1_CAPABLE	0
++# define S390X_aes_256_cfb1_CAPABLE	0
++
++# define s390x_aes_cfb1_cipher aes_cfb1_cipher
++static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                 const unsigned char *in, size_t len);
++
++# define S390X_aes_128_ctr_CAPABLE	1	/* checked by callee */
++# define S390X_aes_192_ctr_CAPABLE	1
++# define S390X_aes_256_ctr_CAPABLE	1
++
++# define s390x_aes_ctr_cipher aes_ctr_cipher
++static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++
++# define S390X_aes_128_gcm_CAPABLE (S390X_aes_128_CAPABLE &&		\
++                                    (OPENSSL_s390xcap_P.kma[0] &	\
++                                     S390X_CAPBIT(S390X_AES_128)))
++# define S390X_aes_192_gcm_CAPABLE (S390X_aes_192_CAPABLE &&		\
++                                    (OPENSSL_s390xcap_P.kma[0] &	\
++                                     S390X_CAPBIT(S390X_AES_192)))
++# define S390X_aes_256_gcm_CAPABLE (S390X_aes_256_CAPABLE &&		\
++                                    (OPENSSL_s390xcap_P.kma[0] &	\
++                                     S390X_CAPBIT(S390X_AES_256)))
++
++/* iv + padding length for iv lengths != 12 */
++# define S390X_gcm_ivpadlen(i)	((((i) + 15) >> 4 << 4) + 16)
++
++static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
++                             size_t len)
++{
++    unsigned long long alen;
++    int n, rem;
++
++    if (ctx->kma.param.tpcl)
++        return -2;
++
++    alen = ctx->kma.param.taadl + len;
++    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
++        return -1;
++    ctx->kma.param.taadl = alen;
++
++    n = ctx->areslen;
++    if (n) {
++        while (n && len) {
++            ctx->ares[n] = *aad;
++            n = (n + 1) & 0xf;
++            ++aad;
++            --len;
++        }
++        /* ctx->ares contains a complete block if offset has wrapped around */
++        if (!n) {
++            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
++            ctx->fc |= S390X_KMA_HS;
++        }
++        ctx->areslen = n;
++    }
++
++    rem = len & 0xf;
++
++    len &= ~0xf;
++    if (len) {
++        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
++        aad += len;
++        ctx->fc |= S390X_KMA_HS;
++    }
++
++    if (rem) {
++        ctx->areslen = rem;
++
++        do {
++            --rem;
++            ctx->ares[rem] = aad[rem];
++        } while (rem);
++    }
++    return 0;
++}
++
++static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
++                         unsigned char *out, size_t len)
++{
++    const unsigned char *inptr;
++    unsigned long long mlen;
++    union {
++        unsigned int w[4];
++        unsigned char b[16];
++    } buf;
++    size_t inlen;
++    int n, rem, i;
++
++    mlen = ctx->kma.param.tpcl + len;
++    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
++        return -1;
++    ctx->kma.param.tpcl = mlen;
++
++    n = ctx->mreslen;
++    if (n) {
++        inptr = in;
++        inlen = len;
++        while (n && inlen) {
++            ctx->mres[n] = *inptr;
++            n = (n + 1) & 0xf;
++            ++inptr;
++            --inlen;
++        }
++        /* ctx->mres contains a complete block if offset has wrapped around */
++        if (!n) {
++            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
++                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
++            ctx->fc |= S390X_KMA_HS;
++            ctx->areslen = 0;
++
++            /* previous call already encrypted/decrypted its remainder,
++             * see comment below */
++            n = ctx->mreslen;
++            while (n) {
++                *out = buf.b[n];
++                n = (n + 1) & 0xf;
++                ++out;
++                ++in;
++                --len;
++            }
++            ctx->mreslen = 0;
++        }
++    }
++
++    rem = len & 0xf;
++
++    len &= ~0xf;
++    if (len) {
++        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
++                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
++        in += len;
++        out += len;
++        ctx->fc |= S390X_KMA_HS;
++        ctx->areslen = 0;
++    }
++
++    /*-
++     * If there is a remainder, it has to be saved such that it can be
++     * processed by kma later. However, we also have to do the for-now
++     * unauthenticated encryption/decryption part here and now...
++     */
++    if (rem) {
++        if (!ctx->mreslen) {
++            buf.w[0] = ctx->kma.param.j0.w[0];
++            buf.w[1] = ctx->kma.param.j0.w[1];
++            buf.w[2] = ctx->kma.param.j0.w[2];
++            buf.w[3] = ctx->kma.param.cv.w + 1;
++            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
++        }
++
++        n = ctx->mreslen;
++        for (i = 0; i < rem; i++) {
++            ctx->mres[n + i] = in[i];
++            out[i] = in[i] ^ ctx->kres[n + i];
++        }
++
++        ctx->mreslen += rem;
++    }
++    return 0;
++}
++
++static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
++                                const unsigned char *iv)
++{
++    ctx->kma.param.t.g[0] = 0;
++    ctx->kma.param.t.g[1] = 0;
++    ctx->kma.param.tpcl = 0;
++    ctx->kma.param.taadl = 0;
++    ctx->mreslen = 0;
++    ctx->areslen = 0;
++    ctx->kreslen = 0;
++
++    if (ctx->ivlen == 12) {
++        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
++        ctx->kma.param.j0.w[3] = 1;
++        ctx->kma.param.cv.w = 1;
++    } else {
++        /* ctx->iv has the right size and is already padded. */
++        memcpy(ctx->iv, iv, ctx->ivlen);
++        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
++                  ctx->fc, &ctx->kma.param);
++        ctx->fc |= S390X_KMA_HS;
++
++        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
++        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
++        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
++        ctx->kma.param.t.g[0] = 0;
++        ctx->kma.param.t.g[1] = 0;
++    }
++}
++
++static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
++{
++    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
++    S390X_AES_GCM_CTX *gctx_out;
++    EVP_CIPHER_CTX *out;
++    unsigned char *buf, *iv;
++    int ivlen, enc, len;
++
++    switch (type) {
++    case EVP_CTRL_INIT:
++        ivlen = c->cipher->iv_len;;
++        iv = c->iv;
++        gctx->key_set = 0;
++        gctx->iv_set = 0;
++        gctx->ivlen = ivlen;
++        gctx->iv = iv;
++        gctx->taglen = -1;
++        gctx->iv_gen = 0;
++        gctx->tls_aad_len = -1;
++        return 1;
++
++    case EVP_CTRL_GCM_SET_IVLEN:
++        if (arg <= 0)
++            return 0;
++
++        if (arg != 12) {
++            iv = c->iv;
++            len = S390X_gcm_ivpadlen(arg);
++
++            /* Allocate memory for iv if needed. */
++            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
++                if (gctx->iv != iv)
++                    OPENSSL_free(gctx->iv);
++
++                gctx->iv = OPENSSL_malloc(len);
++                if (gctx->iv == NULL)
++                    return 0;
++            }
++            /* Add padding. */
++            memset(gctx->iv + arg, 0, len - arg - 8);
++            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
++        }
++        gctx->ivlen = arg;
++        return 1;
++
++    case EVP_CTRL_GCM_SET_TAG:
++        buf = c->buf;
++        enc = c->encrypt;
++        if (arg <= 0 || arg > 16 || enc)
++            return 0;
++
++        memcpy(buf, ptr, arg);
++        gctx->taglen = arg;
++        return 1;
++
++    case EVP_CTRL_GCM_GET_TAG:
++        enc = c->encrypt;
++        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
++            return 0;
++
++        memcpy(ptr, gctx->kma.param.t.b, arg);
++        return 1;
++
++    case EVP_CTRL_GCM_SET_IV_FIXED:
++        /* Special case: -1 length restores whole iv */
++        if (arg == -1) {
++            memcpy(gctx->iv, ptr, gctx->ivlen);
++            gctx->iv_gen = 1;
++            return 1;
++        }
++        /*
++         * Fixed field must be at least 4 bytes and invocation field at least
++         * 8.
++         */
++        if ((arg < 4) || (gctx->ivlen - arg) < 8)
++            return 0;
++
++        if (arg)
++            memcpy(gctx->iv, ptr, arg);
++
++        enc = c->encrypt;
++        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
++            return 0;
++
++        gctx->iv_gen = 1;
++        return 1;
++
++    case EVP_CTRL_GCM_IV_GEN:
++        if (gctx->iv_gen == 0 || gctx->key_set == 0)
++            return 0;
++
++        s390x_aes_gcm_setiv(gctx, gctx->iv);
++
++        if (arg <= 0 || arg > gctx->ivlen)
++            arg = gctx->ivlen;
++
++        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
++        /*
++         * Invocation field will be at least 8 bytes in size and so no need
++         * to check wrap around or increment more than last 8 bytes.
++         */
++        (*(unsigned long long *)(gctx->iv + gctx->ivlen - 8))++;
++        gctx->iv_set = 1;
++        return 1;
++
++    case EVP_CTRL_GCM_SET_IV_INV:
++        enc = c->encrypt;
++        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
++            return 0;
++
++        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
++        s390x_aes_gcm_setiv(gctx, gctx->iv);
++        gctx->iv_set = 1;
++        return 1;
++
++    case EVP_CTRL_AEAD_TLS1_AAD:
++        /* Save the aad for later use. */
++        if (arg != EVP_AEAD_TLS1_AAD_LEN)
++            return 0;
++
++        buf = c->buf;
++        memcpy(buf, ptr, arg);
++        gctx->tls_aad_len = arg;
++
++        len = buf[arg - 2] << 8 | buf[arg - 1];
++        /* Correct length for explicit iv. */
++        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
++            return 0;
++        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
++
++        /* If decrypting correct for tag too. */
++        enc = c->encrypt;
++        if (!enc) {
++            if (len < EVP_GCM_TLS_TAG_LEN)
++                return 0;
++            len -= EVP_GCM_TLS_TAG_LEN;
++        }
++        buf[arg - 2] = len >> 8;
++        buf[arg - 1] = len & 0xff;
++        /* Extra padding: tag appended to record. */
++        return EVP_GCM_TLS_TAG_LEN;
++
++    case EVP_CTRL_COPY:
++        out = ptr;
++        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);
++        iv = c->iv;
++
++        if (gctx->iv == iv) {
++            gctx_out->iv = out->iv;
++        } else {
++            len = S390X_gcm_ivpadlen(gctx->ivlen);
++
++            gctx_out->iv = OPENSSL_malloc(len);
++            if (gctx_out->iv == NULL)
++                return 0;
++
++            memcpy(gctx_out->iv, gctx->iv, len);
++        }
++        return 1;
++
++    default:
++        return -1;
++    }
++}
++
++static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
++                                  const unsigned char *key,
++                                  const unsigned char *iv, int enc)
++{
++    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
++    int keylen;
++
++    if (iv == NULL && key == NULL)
++        return 1;
++
++    if (key != NULL) {
++        keylen = EVP_CIPHER_CTX_key_length(ctx);
++        memcpy(&gctx->kma.param.k, key, keylen);
++
++        /* Convert key size to function code. */
++        gctx->fc = S390X_AES_128 + (((keylen << 3) - 128) >> 6);
++        if (!enc)
++            gctx->fc |= S390X_DECRYPT;
++
++        if (iv == NULL && gctx->iv_set)
++            iv = gctx->iv;
++
++        if (iv != NULL) {
++            s390x_aes_gcm_setiv(gctx, iv);
++            gctx->iv_set = 1;
++        }
++        gctx->key_set = 1;
++    } else {
++        if (gctx->key_set)
++            s390x_aes_gcm_setiv(gctx, iv);
++        else
++            memcpy(gctx->iv, iv, gctx->ivlen);
++
++        gctx->iv_set = 1;
++        gctx->iv_gen = 0;
++    }
++    return 1;
++}
++
++static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                    const unsigned char *in, size_t len)
++{
++    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
++    const unsigned char *buf = ctx->buf;
++    const int enc = ctx->encrypt;
++    int rv = -1;
++
++    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
++        return -1;
++
++    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
++                                     : EVP_CTRL_GCM_SET_IV_INV,
++                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
++        goto err;
++
++    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
++    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
++    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
++
++    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
++    gctx->kma.param.tpcl = len << 3;
++    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
++              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
++
++    if (enc) {
++        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
++        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
++    } else {
++        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
++                          EVP_GCM_TLS_TAG_LEN)) {
++            OPENSSL_cleanse(out, len);
++            goto err;
++        }
++        rv = len;
++    }
++err:
++    gctx->iv_set = 0;
++    gctx->tls_aad_len = -1;
++    return rv;
++}
++
++static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len)
++{
++    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
++    unsigned char *buf, tmp[16];
++    int enc;
++
++    if (!gctx->key_set)
++        return -1;
++
++    if (gctx->tls_aad_len >= 0)
++        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);
++
++    if (!gctx->iv_set)
++        return -1;
++
++    if (in != NULL) {
++        if (out == NULL) {
++            if (s390x_aes_gcm_aad(gctx, in, len))
++                return -1;
++        } else {
++            if (s390x_aes_gcm(gctx, in, out, len))
++                return -1;
++        }
++        return len;
++    } else {
++        gctx->kma.param.taadl <<= 3;
++        gctx->kma.param.tpcl <<= 3;
++        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
++                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
++        /* recall that we already did en-/decrypt gctx->mres
++         * and returned it to caller... */
++        OPENSSL_cleanse(tmp, gctx->mreslen);
++        gctx->iv_set = 0;
++
++        enc = ctx->encrypt;
++        if (enc) {
++            gctx->taglen = 16;
++        } else {
++            if (gctx->taglen < 0)
++                return -1;
++
++            buf = ctx->buf;
++            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
++                return -1;
++        }
++        return 0;
++    }
++}
++
++static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
++{
++    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
++    const unsigned char *iv;
++
++    if (gctx == NULL)
++        return 0;
++
++    iv = c->iv;
++    if (iv != gctx->iv)
++        OPENSSL_free(gctx->iv);
++
++    OPENSSL_cleanse(gctx, sizeof(*gctx));
++    return 1;
++}
++
++# define S390X_AES_XTS_CTX		EVP_AES_XTS_CTX
++# define S390X_aes_128_xts_CAPABLE	1	/* checked by callee */
++# define S390X_aes_256_xts_CAPABLE	1
++
++# define s390x_aes_xts_init_key aes_xts_init_key
++static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
++                                  const unsigned char *key,
++                                  const unsigned char *iv, int enc);
++# define s390x_aes_xts_cipher aes_xts_cipher
++static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++# define s390x_aes_xts_ctrl aes_xts_ctrl
++static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
++# define s390x_aes_xts_cleanup aes_xts_cleanup
++
++# define S390X_AES_CCM_CTX		EVP_AES_CCM_CTX
++# define S390X_aes_128_ccm_CAPABLE	0
++# define S390X_aes_192_ccm_CAPABLE	0
++# define S390X_aes_256_ccm_CAPABLE	0
++
++# define s390x_aes_ccm_init_key aes_ccm_init_key
++static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
++                                  const unsigned char *key,
++                                  const unsigned char *iv, int enc);
++# define s390x_aes_ccm_cipher aes_ccm_cipher
++static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
++                                const unsigned char *in, size_t len);
++# define s390x_aes_ccm_ctrl aes_ccm_ctrl
++static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
++# define s390x_aes_ccm_cleanup aes_ccm_cleanup
++
++# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,	\
++                              MODE,flags)				\
++static const EVP_CIPHER s390x_aes_##keylen##_##mode = {			\
++    nid##_##keylen##_##nmode,blocksize,					\
++    keylen / 8,								\
++    ivlen,								\
++    flags | EVP_CIPH_##MODE##_MODE,					\
++    s390x_aes_init_key,							\
++    s390x_aes_##mode##_cipher,						\
++    NULL,								\
++    sizeof(EVP_AES_KEY),						\
++    NULL,								\
++    NULL,								\
++    NULL,								\
++    NULL								\
++};									\
++static const EVP_CIPHER aes_##keylen##_##mode = {			\
++    nid##_##keylen##_##nmode,						\
++    blocksize,								\
++    keylen / 8,								\
++    ivlen,								\
++    flags | EVP_CIPH_##MODE##_MODE,					\
++    aes_init_key,							\
++    aes_##mode##_cipher,						\
++    NULL,								\
++    sizeof(EVP_AES_KEY),						\
++    NULL,NULL,NULL,NULL							\
++};									\
++const EVP_CIPHER *EVP_aes_##keylen##_##mode(void)			\
++{									\
++    return S390X_aes_##keylen##_##mode##_CAPABLE ?			\
++           &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode;	\
++}
++
++# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
++static const EVP_CIPHER s390x_aes_##keylen##_##mode = {			\
++    nid##_##keylen##_##mode,						\
++    blocksize,								\
++    (EVP_CIPH_##MODE##_MODE == EVP_CIPH_XTS_MODE ? 2 : 1) * keylen / 8,	\
++    ivlen,								\
++    flags | EVP_CIPH_##MODE##_MODE,					\
++    s390x_aes_##mode##_init_key,					\
++    s390x_aes_##mode##_cipher,						\
++    s390x_aes_##mode##_cleanup,						\
++    sizeof(S390X_AES_##MODE##_CTX),					\
++    NULL,								\
++    NULL,								\
++    s390x_aes_##mode##_ctrl,						\
++    NULL								\
++};									\
++static const EVP_CIPHER aes_##keylen##_##mode = {			\
++    nid##_##keylen##_##mode,blocksize,					\
++    (EVP_CIPH_##MODE##_MODE == EVP_CIPH_XTS_MODE ? 2 : 1) * keylen / 8,	\
++    ivlen,								\
++    flags | EVP_CIPH_##MODE##_MODE,					\
++    aes_##mode##_init_key,						\
++    aes_##mode##_cipher,						\
++    aes_##mode##_cleanup,						\
++    sizeof(EVP_AES_##MODE##_CTX),					\
++    NULL,								\
++    NULL,								\
++    aes_##mode##_ctrl,							\
++    NULL								\
++};									\
++const EVP_CIPHER *EVP_aes_##keylen##_##mode(void)			\
++{									\
++    return S390X_aes_##keylen##_##mode##_CAPABLE ?			\
++           &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode;	\
++}
++
+ # else
+ 
+ #  define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
+diff -up openssl-1.0.2k/crypto/Makefile.s390x-update openssl-1.0.2k/crypto/Makefile
+--- openssl-1.0.2k/crypto/Makefile.s390x-update	2018-06-18 12:20:49.079040659 +0200
++++ openssl-1.0.2k/crypto/Makefile	2018-06-18 13:27:53.065204592 +0200
+@@ -77,6 +77,7 @@ alphacpuid.s:	alphacpuid.pl
+ 	(preproc=$$$$.$@.S; trap "rm $$preproc" INT; \
+ 	$(PERL) alphacpuid.pl > $$preproc && \
+ 	$(CC) -E -P $$preproc > $@ && rm $$preproc)
++s390xcpuid.S:	s390xcpuid.pl;	$(PERL) s390xcpuid.pl $(PERLASM_SCHEME) $@
+ 
+ testapps:
+ 	[ -z "$(THIS)" ] || (	if echo $(SDIRS) | fgrep ' des '; \
+diff -up openssl-1.0.2k/crypto/modes/asm/ghash-s390x.pl.s390x-update openssl-1.0.2k/crypto/modes/asm/ghash-s390x.pl
+--- openssl-1.0.2k/crypto/modes/asm/ghash-s390x.pl.s390x-update	2018-06-18 12:20:47.170993976 +0200
++++ openssl-1.0.2k/crypto/modes/asm/ghash-s390x.pl	2018-06-18 13:27:37.110817228 +0200
+@@ -73,6 +73,8 @@ $rem_4bit="%r14";
+ $sp="%r15";
+ 
+ $code.=<<___;
++#include "s390x_arch.h"
++
+ .text
+ 
+ .globl	gcm_gmult_4bit
+@@ -85,12 +87,13 @@ $code.=<<___ if(!$softonly && 0);	# hard
+ 	tmhl	%r0,0x4000	# check for message-security-assist
+ 	jz	.Lsoft_gmult
+ 	lghi	%r0,0
+-	lg	%r1,24(%r1)	# load second word of kimd capabilities vector
++	lg	%r1,S390X_KIMD+8(%r1)	# load second word of kimd capabilities
++					#  vector
+ 	tmhh	%r1,0x4000	# check for function 65
+ 	jz	.Lsoft_gmult
+ 	stg	%r0,16($sp)	# arrange 16 bytes of zero input
+ 	stg	%r0,24($sp)
+-	lghi	%r0,65		# function 65
++	lghi	%r0,S390X_GHASH	# function 65
+ 	la	%r1,0($Xi)	# H lies right after Xi in gcm128_context
+ 	la	$inp,16($sp)
+ 	lghi	$len,16
+@@ -119,16 +122,11 @@ gcm_ghash_4bit:
+ ___
+ $code.=<<___ if(!$softonly);
+ 	larl	%r1,OPENSSL_s390xcap_P
+-	lg	%r0,0(%r1)
+-	tmhl	%r0,0x4000	# check for message-security-assist
+-	jz	.Lsoft_ghash
+-	lghi	%r0,0
+-	la	%r1,16($sp)
+-	.long	0xb93e0004	# kimd %r0,%r4
+-	lg	%r1,24($sp)
+-	tmhh	%r1,0x4000	# check for function 65
++	lg	%r0,S390X_KIMD+8(%r1)	# load second word of kimd capabilities
++					#  vector
++	tmhh	%r0,0x4000	# check for function 65
+ 	jz	.Lsoft_ghash
+-	lghi	%r0,65		# function 65
++	lghi	%r0,S390X_GHASH	# function 65
+ 	la	%r1,0($Xi)	# H lies right after Xi in gcm128_context
+ 	.long	0xb93e0004	# kimd %r0,$inp
+ 	brc	1,.-4		# pay attention to "partial completion"
+diff -up openssl-1.0.2k/crypto/modes/Makefile.s390x-update openssl-1.0.2k/crypto/modes/Makefile
+--- openssl-1.0.2k/crypto/modes/Makefile.s390x-update	2018-06-18 12:20:47.020990305 +0200
++++ openssl-1.0.2k/crypto/modes/Makefile	2018-06-18 13:27:37.110817228 +0200
+@@ -71,6 +71,8 @@ ghash-%.S:	asm/ghash-%.pl;	$(PERL) $< $(
+ ghash-armv4.o:	ghash-armv4.S
+ ghashv8-armx.o:	ghashv8-armx.S
+ 
++ghash-s390x.o:	ghash-s390x.S
++
+ files:
+ 	$(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
+ 
+diff -up openssl-1.0.2k/crypto/sha/asm/sha1-s390x.pl.s390x-update openssl-1.0.2k/crypto/sha/asm/sha1-s390x.pl
+--- openssl-1.0.2k/crypto/sha/asm/sha1-s390x.pl.s390x-update	2018-06-18 12:20:47.174994073 +0200
++++ openssl-1.0.2k/crypto/sha/asm/sha1-s390x.pl	2018-06-18 13:27:37.112817276 +0200
+@@ -152,6 +152,8 @@ ___
+ }
+ 
+ $code.=<<___;
++#include "s390x_arch.h"
++
+ .text
+ .align	64
+ .type	Ktable,\@object
+@@ -164,10 +166,7 @@ sha1_block_data_order:
+ ___
+ $code.=<<___ if ($kimdfunc);
+ 	larl	%r1,OPENSSL_s390xcap_P
+-	lg	%r0,0(%r1)
+-	tmhl	%r0,0x4000	# check for message-security assist
+-	jz	.Lsoftware
+-	lg	%r0,16(%r1)	# check kimd capabilities
++	lg	%r0,S390X_KIMD(%r1)	# check kimd capabilities
+ 	tmhh	%r0,`0x8000>>$kimdfunc`
+ 	jz	.Lsoftware
+ 	lghi	%r0,$kimdfunc
+@@ -234,7 +233,6 @@ $code.=<<___;
+ 	br	%r14
+ .size	sha1_block_data_order,.-sha1_block_data_order
+ .string	"SHA1 block transform for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+-.comm	OPENSSL_s390xcap_P,80,8
+ ___
+ 
+ $code =~ s/\`([^\`]*)\`/eval $1/gem;
+diff -up openssl-1.0.2k/crypto/sha/asm/sha512-s390x.pl.s390x-update openssl-1.0.2k/crypto/sha/asm/sha512-s390x.pl
+--- openssl-1.0.2k/crypto/sha/asm/sha512-s390x.pl.s390x-update	2018-06-18 12:20:47.179994196 +0200
++++ openssl-1.0.2k/crypto/sha/asm/sha512-s390x.pl	2018-06-18 13:27:37.112817276 +0200
+@@ -163,6 +163,8 @@ ___
+ }
+ 
+ $code.=<<___;
++#include "s390x_arch.h"
++
+ .text
+ .align	64
+ .type	$Table,\@object
+@@ -237,10 +239,7 @@ $Func:
+ ___
+ $code.=<<___ if ($kimdfunc);
+ 	larl	%r1,OPENSSL_s390xcap_P
+-	lg	%r0,0(%r1)
+-	tmhl	%r0,0x4000	# check for message-security assist
+-	jz	.Lsoftware
+-	lg	%r0,16(%r1)	# check kimd capabilities
++	lg	%r0,S390X_KIMD(%r1)	# check kimd capabilities
+ 	tmhh	%r0,`0x8000>>$kimdfunc`
+ 	jz	.Lsoftware
+ 	lghi	%r0,$kimdfunc
+@@ -308,7 +307,6 @@ $code.=<<___;
+ 	br	%r14
+ .size	$Func,.-$Func
+ .string	"SHA${label} block transform for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+-.comm	OPENSSL_s390xcap_P,80,8
+ ___
+ 
+ $code =~ s/\`([^\`]*)\`/eval $1/gem;
+diff -up openssl-1.0.2k/crypto/sha/Makefile.s390x-update openssl-1.0.2k/crypto/sha/Makefile
+--- openssl-1.0.2k/crypto/sha/Makefile.s390x-update	2018-06-18 12:20:49.482050519 +0200
++++ openssl-1.0.2k/crypto/sha/Makefile	2018-06-18 13:27:37.112817276 +0200
+@@ -100,6 +100,10 @@ sha1-armv8.o:		sha1-armv8.S
+ sha256-armv8.o:		sha256-armv8.S
+ sha512-armv8.o:		sha512-armv8.S
+ 
++sha1-s390x.o:		sha1-s390x.S
++sha256-s390x.o:		sha256-s390x.S
++sha512-s390x.o:		sha512-s390x.S
++
+ files:
+ 	$(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
+ 
+diff -up openssl-1.0.2k/crypto/s390x_arch.h.s390x-update openssl-1.0.2k/crypto/s390x_arch.h
+--- openssl-1.0.2k/crypto/s390x_arch.h.s390x-update	2018-06-18 13:27:37.110817228 +0200
++++ openssl-1.0.2k/crypto/s390x_arch.h	2018-06-18 13:27:53.066204616 +0200
+@@ -0,0 +1,93 @@
++/*
++ * Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
++ *
++ * Licensed under the OpenSSL license (the "License").  You may not use
++ * this file except in compliance with the License.  You can obtain a copy
++ * in the file LICENSE in the source distribution or at
++ * https://www.openssl.org/source/license.html
++ */
++
++#ifndef S390X_ARCH_H
++# define S390X_ARCH_H
++
++# ifndef __ASSEMBLER__
++
++void s390x_km(const unsigned char *in, size_t len, unsigned char *out,
++              unsigned int fc, void *param);
++void s390x_kma(const unsigned char *aad, size_t alen, const unsigned char *in,
++               size_t len, unsigned char *out, unsigned int fc, void *param);
++
++/*
++ * The field elements of OPENSSL_s390xcap_P are the 64-bit words returned by
++ * the STFLE instruction followed by the 64-bit word pairs returned by
++ * instructions' QUERY functions. If STFLE returns fewer data or an instruction
++ * is not supported, the corresponding field elements are zero.
++ */
++struct OPENSSL_s390xcap_st {
++    unsigned long long stfle[4];
++    unsigned long long kimd[2];
++    unsigned long long klmd[2];
++    unsigned long long km[2];
++    unsigned long long kmc[2];
++    unsigned long long kmac[2];
++    unsigned long long kmctr[2];
++    unsigned long long kmo[2];
++    unsigned long long kmf[2];
++    unsigned long long prno[2];
++    unsigned long long kma[2];
++};
++
++extern struct OPENSSL_s390xcap_st OPENSSL_s390xcap_P;
++
++/* convert facility bit number or function code to bit mask */
++#  define S390X_CAPBIT(i)	(1ULL << (63 - (i) % 64))
++
++# endif
++
++/* OPENSSL_s390xcap_P offsets [bytes] */
++# define S390X_STFLE		0x00
++# define S390X_KIMD		0x20
++# define S390X_KLMD		0x30
++# define S390X_KM		0x40
++# define S390X_KMC		0x50
++# define S390X_KMAC		0x60
++# define S390X_KMCTR		0x70
++# define S390X_KMO		0x80
++# define S390X_KMF		0x90
++# define S390X_PRNO		0xa0
++# define S390X_KMA		0xb0
++
++/* Facility Bit Numbers */
++# define S390X_VX		129
++# define S390X_VXD		134
++# define S390X_VXE		135
++
++/* Function Codes */
++
++/* all instructions */
++# define S390X_QUERY		0
++
++/* kimd/klmd */
++# define S390X_SHA3_224		32
++# define S390X_SHA3_256		33
++# define S390X_SHA3_384		34
++# define S390X_SHA3_512		35
++# define S390X_SHAKE_128	36
++# define S390X_SHAKE_256	37
++# define S390X_GHASH		65
++
++/* km/kmc/kmac/kmctr/kmo/kmf/kma */
++# define S390X_AES_128		18
++# define S390X_AES_192		19
++# define S390X_AES_256		20
++
++/* prno */
++# define S390X_TRNG		114
++
++/* Register 0 Flags */
++# define S390X_DECRYPT		0x80
++# define S390X_KMA_LPC		0x100
++# define S390X_KMA_LAAD		0x200
++# define S390X_KMA_HS		0x400
++
++#endif
+diff -up openssl-1.0.2k/crypto/s390xcap.c.s390x-update openssl-1.0.2k/crypto/s390xcap.c
+--- openssl-1.0.2k/crypto/s390xcap.c.s390x-update	2017-01-26 14:22:03.000000000 +0100
++++ openssl-1.0.2k/crypto/s390xcap.c	2018-06-18 13:27:37.111817252 +0200
+@@ -4,8 +4,7 @@
+ #include <setjmp.h>
+ #include <signal.h>
+ #include "cryptlib.h"
+-
+-extern unsigned long OPENSSL_s390xcap_P[];
++#include "s390x_arch.h"
+ 
+ static sigjmp_buf ill_jmp;
+ static void ill_handler(int sig)
+@@ -13,30 +12,48 @@ static void ill_handler(int sig)
+     siglongjmp(ill_jmp, sig);
+ }
+ 
+-unsigned long OPENSSL_s390x_facilities(void);
++void OPENSSL_s390x_facilities(void);
++void OPENSSL_vx_probe(void);
++
++struct OPENSSL_s390xcap_st OPENSSL_s390xcap_P;
+ 
+ void OPENSSL_cpuid_setup(void)
+ {
+     sigset_t oset;
+     struct sigaction ill_act, oact;
+ 
+-    if (OPENSSL_s390xcap_P[0])
++    if (OPENSSL_s390xcap_P.stfle[0])
+         return;
+ 
+-    OPENSSL_s390xcap_P[0] = 1UL << (8 * sizeof(unsigned long) - 1);
++    /* set a bit that will not be tested later */
++    OPENSSL_s390xcap_P.stfle[0] |= S390X_CAPBIT(0);
+ 
+     memset(&ill_act, 0, sizeof(ill_act));
+     ill_act.sa_handler = ill_handler;
+     sigfillset(&ill_act.sa_mask);
+     sigdelset(&ill_act.sa_mask, SIGILL);
++    sigdelset(&ill_act.sa_mask, SIGFPE);
+     sigdelset(&ill_act.sa_mask, SIGTRAP);
+     sigprocmask(SIG_SETMASK, &ill_act.sa_mask, &oset);
+     sigaction(SIGILL, &ill_act, &oact);
++    sigaction(SIGFPE, &ill_act, &oact);
+ 
+     /* protection against missing store-facility-list-extended */
+     if (sigsetjmp(ill_jmp, 1) == 0)
+         OPENSSL_s390x_facilities();
+ 
++    /* protection against disabled vector facility */
++    if ((OPENSSL_s390xcap_P.stfle[2] & S390X_CAPBIT(S390X_VX))
++        && (sigsetjmp(ill_jmp, 1) == 0)) {
++        OPENSSL_vx_probe();
++    } else {
++        OPENSSL_s390xcap_P.stfle[2] &= ~(S390X_CAPBIT(S390X_VX)
++                                         | S390X_CAPBIT(S390X_VXD)
++                                         | S390X_CAPBIT(S390X_VXE));
++    }
++
++    sigaction(SIGFPE, &oact, NULL);
++
+     sigaction(SIGILL, &oact, NULL);
+     sigprocmask(SIG_SETMASK, &oset, NULL);
+ }
+diff -up openssl-1.0.2k/crypto/s390xcpuid.pl.s390x-update openssl-1.0.2k/crypto/s390xcpuid.pl
+--- openssl-1.0.2k/crypto/s390xcpuid.pl.s390x-update	2018-06-18 13:27:53.067204641 +0200
++++ openssl-1.0.2k/crypto/s390xcpuid.pl	2018-06-18 13:27:53.067204641 +0200
+@@ -0,0 +1,259 @@
++#! /usr/bin/env perl
++
++$flavour = shift;
++
++if ($flavour =~ /3[12]/) {
++	$SIZE_T=4;
++	$g="";
++} else {
++	$SIZE_T=8;
++	$g="g";
++}
++
++while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
++open STDOUT,">$output";
++
++$ra="%r14";
++$sp="%r15";
++$stdframe=16*$SIZE_T+4*8;
++
++$code=<<___;
++#include "s390x_arch.h"
++
++.text
++
++.globl	OPENSSL_s390x_facilities
++.type	OPENSSL_s390x_facilities,\@function
++.align	16
++OPENSSL_s390x_facilities:
++	lghi	%r0,0
++	larl	%r4,OPENSSL_s390xcap_P
++
++	stg	%r0,S390X_STFLE+8(%r4)	# wipe capability vectors
++	stg	%r0,S390X_STFLE+16(%r4)
++	stg	%r0,S390X_STFLE+24(%r4)
++	stg	%r0,S390X_KIMD(%r4)
++	stg	%r0,S390X_KIMD+8(%r4)
++	stg	%r0,S390X_KLMD(%r4)
++	stg	%r0,S390X_KLMD+8(%r4)
++	stg	%r0,S390X_KM(%r4)
++	stg	%r0,S390X_KM+8(%r4)
++	stg	%r0,S390X_KMC(%r4)
++	stg	%r0,S390X_KMC+8(%r4)
++	stg	%r0,S390X_KMAC(%r4)
++	stg	%r0,S390X_KMAC+8(%r4)
++	stg	%r0,S390X_KMCTR(%r4)
++	stg	%r0,S390X_KMCTR+8(%r4)
++	stg	%r0,S390X_KMO(%r4)
++	stg	%r0,S390X_KMO+8(%r4)
++	stg	%r0,S390X_KMF(%r4)
++	stg	%r0,S390X_KMF+8(%r4)
++	stg	%r0,S390X_PRNO(%r4)
++	stg	%r0,S390X_PRNO+8(%r4)
++	stg	%r0,S390X_KMA(%r4)
++	stg	%r0,S390X_KMA+8(%r4)
++
++	.long	0xb2b04000		# stfle	0(%r4)
++	brc	8,.Ldone
++	lghi	%r0,1
++	.long	0xb2b04000		# stfle 0(%r4)
++	brc	8,.Ldone
++	lghi	%r0,2
++	.long	0xb2b04000		# stfle 0(%r4)
++.Ldone:
++	lmg	%r2,%r3,S390X_STFLE(%r4)
++	tmhl	%r2,0x4000		# check for message-security-assist
++ 	jz	.Lret
++
++	lghi	%r0,S390X_QUERY		# query kimd capabilities
++	la	%r1,S390X_KIMD(%r4)
++	.long	0xb93e0002		# kimd %r0,%r2
++
++	lghi	%r0,S390X_QUERY		# query klmd capabilities
++	la	%r1,S390X_KLMD(%r4)
++	.long	0xb93f0002		# klmd %r0,%r2
++
++	lghi	%r0,S390X_QUERY		# query km capability vector
++	la	%r1,S390X_KM(%r4)
++	.long	0xb92e0042		# km %r4,%r2
++
++	lghi	%r0,S390X_QUERY		# query kmc capability vector
++	la	%r1,S390X_KMC(%r4)
++	.long	0xb92f0042		# kmc %r4,%r2
++
++	lghi	%r0,S390X_QUERY		# query kmac capability vector
++	la	%r1,S390X_KMAC(%r4)
++	.long	0xb91e0042		# kmac %r4,%r2
++
++	tmhh	%r3,0x0004		# check for message-security-assist-4
++ 	jz	.Lret
++
++	lghi	%r0,S390X_QUERY		# query kmctr capability vector
++	la	%r1,S390X_KMCTR(%r4)
++	.long	0xb92d2042		# kmctr %r4,%r2,%r2
++
++	lghi	%r0,S390X_QUERY		# query kmo capability vector
++	la	%r1,S390X_KMO(%r4)
++	.long	0xb92b0042		# kmo %r4,%r2
++
++	lghi	%r0,S390X_QUERY		# query kmf capability vector
++	la	%r1,S390X_KMF(%r4)
++	.long	0xb92a0042		# kmf %r4,%r2
++
++	tml	%r2,0x40		# check for message-security-assist-5
++	jz	.Lret
++
++	lghi	%r0,S390X_QUERY		# query prno capability vector
++	la	%r1,S390X_PRNO(%r4)
++	.long	0xb93c0042		# prno %r4,%r2
++
++	lg	%r2,S390X_STFLE+16(%r4)
++	tmhl	%r2,0x2000		# check for message-security-assist-8
++	jz	.Lret
++
++	lghi	%r0,S390X_QUERY		# query kma capability vector
++	la	%r1,S390X_KMA(%r4)
++	.long	0xb9294022		# kma %r2,%r4,%r2
++
++.Lret:
++	br	$ra
++.size	OPENSSL_s390x_facilities,.-OPENSSL_s390x_facilities
++
++.globl	OPENSSL_rdtsc
++.type	OPENSSL_rdtsc,\@function
++.align	16
++OPENSSL_rdtsc:
++	stck	16($sp)
++	lg	%r2,16($sp)
++	br	$ra
++.size	OPENSSL_rdtsc,.-OPENSSL_rdtsc
++
++.globl	OPENSSL_atomic_add
++.type	OPENSSL_atomic_add,\@function
++.align	16
++OPENSSL_atomic_add:
++	l	%r1,0(%r2)
++.Lspin:	lr	%r0,%r1
++	ar	%r0,%r3
++	cs	%r1,%r0,0(%r2)
++	brc	4,.Lspin
++	lgfr	%r2,%r0		# OpenSSL expects the new value
++	br	$ra
++.size	OPENSSL_atomic_add,.-OPENSSL_atomic_add
++
++.globl	OPENSSL_wipe_cpu
++.type	OPENSSL_wipe_cpu,\@function
++.align	16
++OPENSSL_wipe_cpu:
++	xgr	%r0,%r0
++	xgr	%r1,%r1
++	lgr	%r2,$sp
++	xgr	%r3,%r3
++	xgr	%r4,%r4
++	lzdr	%f0
++	lzdr	%f1
++	lzdr	%f2
++	lzdr	%f3
++	lzdr	%f4
++	lzdr	%f5
++	lzdr	%f6
++	lzdr	%f7
++	br	$ra
++.size	OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
++
++.globl	OPENSSL_cleanse
++.type	OPENSSL_cleanse,\@function
++.align	16
++OPENSSL_cleanse:
++#if !defined(__s390x__) && !defined(__s390x)
++	llgfr	%r3,%r3
++#endif
++	lghi	%r4,15
++	lghi	%r0,0
++	clgr	%r3,%r4
++	jh	.Lot
++	clgr	%r3,%r0
++	bcr	8,%r14
++.Little:
++	stc	%r0,0(%r2)
++	la	%r2,1(%r2)
++	brctg	%r3,.Little
++	br	%r14
++.align	4
++.Lot:	tmll	%r2,7
++	jz	.Laligned
++	stc	%r0,0(%r2)
++	la	%r2,1(%r2)
++	brctg	%r3,.Lot
++.Laligned:
++	srlg	%r4,%r3,3
++.Loop:	stg	%r0,0(%r2)
++	la	%r2,8(%r2)
++	brctg	%r4,.Loop
++	lghi	%r4,7
++	ngr	%r3,%r4
++	jnz	.Little
++	br	$ra
++.size	OPENSSL_cleanse,.-OPENSSL_cleanse
++
++.globl	OPENSSL_vx_probe
++.type	OPENSSL_vx_probe,\@function
++.align	16
++OPENSSL_vx_probe:
++	.word	0xe700,0x0000,0x0044	# vzero %v0
++	br	$ra
++.size	OPENSSL_vx_probe,.-OPENSSL_vx_probe
++___
++
++################
++# void s390x_km(const unsigned char *in, size_t len, unsigned char *out,
++#               unsigned int fc, void *param)
++{
++my ($in,$len,$out,$fc,$param) = map("%r$_",(2..6));
++$code.=<<___;
++.globl	s390x_km
++.type	s390x_km,\@function
++.align	16
++s390x_km:
++	lr	%r0,$fc
++	l${g}r	%r1,$param
++
++	.long	0xb92e0042	# km $out,$in
++	brc	1,.-4		# pay attention to "partial completion"
++
++	br	$ra
++.size	s390x_km,.-s390x_km
++___
++}
++
++################
++# void s390x_kma(const unsigned char *aad, size_t alen,
++#                const unsigned char *in, size_t len,
++#                unsigned char *out, unsigned int fc, void *param)
++{
++my ($aad,$alen,$in,$len,$out) = map("%r$_",(2..6));
++$code.=<<___;
++.globl	s390x_kma
++.type	s390x_kma,\@function
++.align	16
++s390x_kma:
++	st${g}	$out,6*$SIZE_T($sp)
++	lm${g}	%r0,%r1,$stdframe($sp)
++
++	.long	0xb9292064	# kma $out,$aad,$in
++	brc	1,.-4		# pay attention to "partial completion"
++
++	l${g}	$out,6*$SIZE_T($sp)
++	br	$ra
++.size	s390x_kma,.-s390x_kma
++___
++}
++
++$code.=<<___;
++.section	.init
++	brasl	$ra,OPENSSL_cpuid_setup
++___
++
++$code =~ s/\`([^\`]*)\`/eval $1/gem;
++print $code;
++close STDOUT;	# force flush
diff --git a/SPECS/openssl.spec b/SPECS/openssl.spec
index f65ee91..62e886e 100644
--- a/SPECS/openssl.spec
+++ b/SPECS/openssl.spec
@@ -16,14 +16,14 @@
 
 # Arches on which we need to prevent arch conflicts on opensslconf.h, must
 # also be handled in opensslconf-new.h.
-%define multilib_arches %{ix86} ia64 %{mips} ppc %{power64} s390 s390x sparcv9 sparc64 x86_64
+%define multilib_arches %{ix86} ia64 %{mips} ppc ppc64 s390 s390x sparcv9 sparc64 x86_64
 
 %global _performance_build 1
 
 Summary: Utilities from the general purpose cryptography library with TLS implementation
 Name: openssl
 Version: 1.0.2k
-Release: 12%{?dist}
+Release: 16%{?dist}
 Epoch: 1
 # We have to remove certain patented algorithms from the openssl source
 # tarball with the hobble-openssl script which is included below.
@@ -97,6 +97,13 @@ Patch85: openssl-1.0.2k-req-x509.patch
 Patch86: openssl-1.0.2k-cve-2017-3736.patch
 Patch87: openssl-1.0.2k-cve-2017-3737.patch
 Patch88: openssl-1.0.2k-cve-2017-3738.patch
+Patch89: openssl-1.0.2k-s390x-update.patch
+Patch100: openssl-1.0.2k-name-sensitive.patch
+Patch101: openssl-1.0.2k-cve-2017-3735.patch
+Patch102: openssl-1.0.2k-cve-2018-0732.patch
+Patch103: openssl-1.0.2k-cve-2018-0737.patch
+Patch104: openssl-1.0.2k-cve-2018-0739.patch
+Patch105: openssl-1.0.2k-cve-2018-0495.patch
 
 License: OpenSSL
 Group: System Environment/Libraries
@@ -226,6 +233,13 @@ cp %{SOURCE12} %{SOURCE13} crypto/ec/
 %patch86 -p1 -b .mont5-carry
 %patch87 -p1 -b .ssl-err
 %patch88 -p1 -b .rsaz-overflow
+%patch89 -p1 -b .s390x-update
+%patch100 -p1 -b .name-sensitive
+%patch101 -p1 -b .overread
+%patch102 -p1 -b .large-dh
+%patch103 -p1 -b .gen-timing
+%patch104 -p1 -b .asn1-recursive
+%patch105 -p1 -b .rohnp-fix
 
 sed -i 's/SHLIB_VERSION_NUMBER "1.0.0"/SHLIB_VERSION_NUMBER "%{version}"/' crypto/opensslv.h
 
@@ -525,6 +539,21 @@ rm -rf $RPM_BUILD_ROOT/%{_libdir}/fipscanister.*
 %postun libs -p /sbin/ldconfig
 
 %changelog
+* Tue Aug 14 2018 Tomáš Mráz <tmraz@redhat.com> 1.0.2k-16
+- fix CVE-2018-0495 - ROHNP - Key Extraction Side Channel on DSA, ECDSA
+- fix incorrect error message on FIPS DSA parameter generation (#1603597)
+
+* Tue Jun 19 2018 Tomáš Mráz <tmraz@redhat.com> 1.0.2k-14
+- ppc64le is not a multilib architecture (#1585004)
+
+* Mon Jun 18 2018 Tomáš Mráz <tmraz@redhat.com> 1.0.2k-13
+- add S390x assembler updates
+- make CA name list comparison function case sensitive (#1548401)
+- fix CVE-2017-3735 - possible one byte overread with X.509 IPAddressFamily
+- fix CVE-2018-0732 - large prime DH DoS of TLS client
+- fix CVE-2018-0737 - RSA key generation cache timing vulnerability
+- fix CVE-2018-0739 - stack overflow parsing recursive ASN.1 structure
+
 * Wed Dec 13 2017 Tomáš Mráz <tmraz@redhat.com> 1.0.2k-12
 - fix CVE-2017-3737 - incorrect handling of fatal error state
 - fix CVE-2017-3738 - AVX2 Montgomery multiplication bug with 1024 bit modulus