Blame SOURCES/openssl-1.0.2k-fix-9-lives.patch

450916
diff -up openssl-1.0.2k/crypto/bn/bn_blind.c.9-lives openssl-1.0.2k/crypto/bn/bn_blind.c
450916
--- openssl-1.0.2k/crypto/bn/bn_blind.c.9-lives	2017-01-26 14:22:03.000000000 +0100
450916
+++ openssl-1.0.2k/crypto/bn/bn_blind.c	2019-04-05 10:50:56.136104388 +0200
450916
@@ -206,10 +206,15 @@ int BN_BLINDING_update(BN_BLINDING *b, B
450916
         if (!BN_BLINDING_create_param(b, NULL, NULL, ctx, NULL, NULL))
450916
             goto err;
450916
     } else if (!(b->flags & BN_BLINDING_NO_UPDATE)) {
450916
-        if (!BN_mod_mul(b->A, b->A, b->A, b->mod, ctx))
450916
-            goto err;
450916
-        if (!BN_mod_mul(b->Ai, b->Ai, b->Ai, b->mod, ctx))
450916
-            goto err;
450916
+        if (b->m_ctx != NULL) {
450916
+            if (!bn_mul_mont_fixed_top(b->Ai, b->Ai, b->Ai, b->m_ctx, ctx)
450916
+                || !bn_mul_mont_fixed_top(b->A, b->A, b->A, b->m_ctx, ctx))
450916
+                goto err;
450916
+        } else {
450916
+            if (!BN_mod_mul(b->Ai, b->Ai, b->Ai, b->mod, ctx)
450916
+                || !BN_mod_mul(b->A, b->A, b->A, b->mod, ctx))
450916
+                goto err;
450916
+        }
450916
     }
450916
 
450916
     ret = 1;
450916
@@ -241,13 +246,13 @@ int BN_BLINDING_convert_ex(BIGNUM *n, BI
450916
     else if (!BN_BLINDING_update(b, ctx))
450916
         return (0);
450916
 
450916
-    if (r != NULL) {
450916
-        if (!BN_copy(r, b->Ai))
450916
-            ret = 0;
450916
-    }
450916
+    if (r != NULL && (BN_copy(r, b->Ai) == NULL))
450916
+        return 0;
450916
 
450916
-    if (!BN_mod_mul(n, n, b->A, b->mod, ctx))
450916
-        ret = 0;
450916
+    if (b->m_ctx != NULL)
450916
+        ret = BN_mod_mul_montgomery(n, n, b->A, b->m_ctx, ctx);
450916
+    else
450916
+        ret = BN_mod_mul(n, n, b->A, b->mod, ctx);
450916
 
450916
     return ret;
450916
 }
450916
@@ -264,14 +269,29 @@ int BN_BLINDING_invert_ex(BIGNUM *n, con
450916
 
450916
     bn_check_top(n);
450916
 
450916
-    if (r != NULL)
450916
-        ret = BN_mod_mul(n, n, r, b->mod, ctx);
450916
-    else {
450916
-        if (b->Ai == NULL) {
450916
-            BNerr(BN_F_BN_BLINDING_INVERT_EX, BN_R_NOT_INITIALIZED);
450916
-            return (0);
450916
+    if (r == NULL && (r = b->Ai) == NULL) {
450916
+        BNerr(BN_F_BN_BLINDING_INVERT_EX, BN_R_NOT_INITIALIZED);
450916
+        return 0;
450916
+    }
450916
+
450916
+    if (b->m_ctx != NULL) {
450916
+        /* ensure that BN_mod_mul_montgomery takes pre-defined path */
450916
+        if (n->dmax >= r->top) {
450916
+            size_t i, rtop = r->top, ntop = n->top;
450916
+            BN_ULONG mask;
450916
+
450916
+            for (i = 0; i < rtop; i++) {
450916
+                mask = (BN_ULONG)0 - ((i - ntop) >> (8 * sizeof(i) - 1));
450916
+                n->d[i] &= mask;
450916
+            }
450916
+            mask = (BN_ULONG)0 - ((rtop - ntop) >> (8 * sizeof(ntop) - 1));
450916
+            /* always true, if (rtop >= ntop) n->top = r->top; */
450916
+            n->top = (int)(rtop & ~mask) | (ntop & mask);
450916
+            n->flags |= (BN_FLG_FIXED_TOP & ~mask);
450916
         }
450916
-        ret = BN_mod_mul(n, n, b->Ai, b->mod, ctx);
450916
+        ret = BN_mod_mul_montgomery(n, n, r, b->m_ctx, ctx);
450916
+    } else {
450916
+        ret = BN_mod_mul(n, n, r, b->mod, ctx);
450916
     }
450916
 
450916
     bn_check_top(n);
450916
@@ -366,14 +386,19 @@ BN_BLINDING *BN_BLINDING_create_param(BN
450916
     } while (1);
450916
 
450916
     if (ret->bn_mod_exp != NULL && ret->m_ctx != NULL) {
450916
-        if (!ret->bn_mod_exp
450916
-            (ret->A, ret->A, ret->e, ret->mod, ctx, ret->m_ctx))
450916
+        if (!ret->bn_mod_exp(ret->A, ret->A, ret->e, ret->mod, ctx, ret->m_ctx))
450916
             goto err;
450916
     } else {
450916
         if (!BN_mod_exp(ret->A, ret->A, ret->e, ret->mod, ctx))
450916
             goto err;
450916
     }
450916
 
450916
+    if (ret->m_ctx != NULL) {
450916
+        if (!bn_to_mont_fixed_top(ret->Ai, ret->Ai, ret->m_ctx, ctx)
450916
+            || !bn_to_mont_fixed_top(ret->A, ret->A, ret->m_ctx, ctx))
450916
+            goto err;
450916
+    }
450916
+
450916
     return ret;
450916
  err:
450916
     if (b == NULL && ret != NULL) {
450916
diff -up openssl-1.0.2k/crypto/bn/bn_lib.c.9-lives openssl-1.0.2k/crypto/bn/bn_lib.c
450916
--- openssl-1.0.2k/crypto/bn/bn_lib.c.9-lives	2019-04-05 10:50:56.128104529 +0200
450916
+++ openssl-1.0.2k/crypto/bn/bn_lib.c	2019-04-05 10:50:56.136104388 +0200
450916
@@ -144,74 +144,47 @@ const BIGNUM *BN_value_one(void)
450916
 
450916
 int BN_num_bits_word(BN_ULONG l)
450916
 {
450916
-    static const unsigned char bits[256] = {
450916
-        0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
450916
-        5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
450916
-        6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
450916
-        6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
450916
-        7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
450916
-        7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
450916
-        7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
450916
-        7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
450916
-    };
450916
-
450916
-#if defined(SIXTY_FOUR_BIT_LONG)
450916
-    if (l & 0xffffffff00000000L) {
450916
-        if (l & 0xffff000000000000L) {
450916
-            if (l & 0xff00000000000000L) {
450916
-                return (bits[(int)(l >> 56)] + 56);
450916
-            } else
450916
-                return (bits[(int)(l >> 48)] + 48);
450916
-        } else {
450916
-            if (l & 0x0000ff0000000000L) {
450916
-                return (bits[(int)(l >> 40)] + 40);
450916
-            } else
450916
-                return (bits[(int)(l >> 32)] + 32);
450916
-        }
450916
-    } else
450916
-#else
450916
-# ifdef SIXTY_FOUR_BIT
450916
-    if (l & 0xffffffff00000000LL) {
450916
-        if (l & 0xffff000000000000LL) {
450916
-            if (l & 0xff00000000000000LL) {
450916
-                return (bits[(int)(l >> 56)] + 56);
450916
-            } else
450916
-                return (bits[(int)(l >> 48)] + 48);
450916
-        } else {
450916
-            if (l & 0x0000ff0000000000LL) {
450916
-                return (bits[(int)(l >> 40)] + 40);
450916
-            } else
450916
-                return (bits[(int)(l >> 32)] + 32);
450916
-        }
450916
-    } else
450916
-# endif
450916
-#endif
450916
-    {
450916
-#if defined(THIRTY_TWO_BIT) || defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
450916
-        if (l & 0xffff0000L) {
450916
-            if (l & 0xff000000L)
450916
-                return (bits[(int)(l >> 24L)] + 24);
450916
-            else
450916
-                return (bits[(int)(l >> 16L)] + 16);
450916
-        } else
450916
-#endif
450916
-        {
450916
-#if defined(THIRTY_TWO_BIT) || defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
450916
-            if (l & 0xff00L)
450916
-                return (bits[(int)(l >> 8)] + 8);
450916
-            else
450916
+    BN_ULONG x, mask;
450916
+    int bits = (l != 0);
450916
+
450916
+#if BN_BITS2 > 32
450916
+    x = l >> 32;
450916
+    mask = (0 - x) & BN_MASK2;
450916
+    mask = (0 - (mask >> (BN_BITS2 - 1)));
450916
+    bits += 32 & mask;
450916
+    l ^= (x ^ l) & mask;
450916
 #endif
450916
-                return (bits[(int)(l)]);
450916
-        }
450916
-    }
450916
+
450916
+    x = l >> 16;
450916
+    mask = (0 - x) & BN_MASK2;
450916
+    mask = (0 - (mask >> (BN_BITS2 - 1)));
450916
+    bits += 16 & mask;
450916
+    l ^= (x ^ l) & mask;
450916
+
450916
+    x = l >> 8;
450916
+    mask = (0 - x) & BN_MASK2;
450916
+    mask = (0 - (mask >> (BN_BITS2 - 1)));
450916
+    bits += 8 & mask;
450916
+    l ^= (x ^ l) & mask;
450916
+
450916
+    x = l >> 4;
450916
+    mask = (0 - x) & BN_MASK2;
450916
+    mask = (0 - (mask >> (BN_BITS2 - 1)));
450916
+    bits += 4 & mask;
450916
+    l ^= (x ^ l) & mask;
450916
+
450916
+    x = l >> 2;
450916
+    mask = (0 - x) & BN_MASK2;
450916
+    mask = (0 - (mask >> (BN_BITS2 - 1)));
450916
+    bits += 2 & mask;
450916
+    l ^= (x ^ l) & mask;
450916
+
450916
+    x = l >> 1;
450916
+    mask = (0 - x) & BN_MASK2;
450916
+    mask = (0 - (mask >> (BN_BITS2 - 1)));
450916
+    bits += 1 & mask;
450916
+
450916
+    return bits;
450916
 }
450916
 
450916
 int BN_num_bits(const BIGNUM *a)
450916
@@ -519,12 +492,18 @@ BIGNUM *BN_copy(BIGNUM *a, const BIGNUM
450916
     memcpy(a->d, b->d, sizeof(b->d[0]) * b->top);
450916
 #endif
450916
 
450916
-    a->top = b->top;
450916
     a->neg = b->neg;
450916
+    a->top = b->top;
450916
+    a->flags |= b->flags & BN_FLG_FIXED_TOP;
450916
     bn_check_top(a);
450916
     return (a);
450916
 }
450916
 
450916
+#define FLAGS_DATA(flags) ((flags) & (BN_FLG_STATIC_DATA \
450916
+                                    | BN_FLG_CONSTTIME   \
450916
+                                    | BN_FLG_FIXED_TOP))
450916
+#define FLAGS_STRUCT(flags) ((flags) & (BN_FLG_MALLOCED))
450916
+
450916
 void BN_swap(BIGNUM *a, BIGNUM *b)
450916
 {
450916
     int flags_old_a, flags_old_b;
450916
@@ -552,10 +531,8 @@ void BN_swap(BIGNUM *a, BIGNUM *b)
450916
     b->dmax = tmp_dmax;
450916
     b->neg = tmp_neg;
450916
 
450916
-    a->flags =
450916
-        (flags_old_a & BN_FLG_MALLOCED) | (flags_old_b & BN_FLG_STATIC_DATA);
450916
-    b->flags =
450916
-        (flags_old_b & BN_FLG_MALLOCED) | (flags_old_a & BN_FLG_STATIC_DATA);
450916
+    a->flags = FLAGS_STRUCT(flags_old_a) | FLAGS_DATA(flags_old_b);
450916
+    b->flags = FLAGS_STRUCT(flags_old_b) | FLAGS_DATA(flags_old_a);
450916
     bn_check_top(a);
450916
     bn_check_top(b);
450916
 }
450916
@@ -637,6 +614,55 @@ BIGNUM *BN_bin2bn(const unsigned char *s
450916
 }
450916
 
450916
 /* ignore negative */
450916
+static int bn2binpad(const BIGNUM *a, unsigned char *to, int tolen)
450916
+{
450916
+    int n;
450916
+    size_t i, lasti, j, atop, mask;
450916
+    BN_ULONG l;
450916
+
450916
+    /*
450916
+     * In case |a| is fixed-top, BN_num_bytes can return bogus length,
450916
+     * but it's assumed that fixed-top inputs ought to be "nominated"
450916
+     * even for padded output, so it works out...
450916
+     */
450916
+    n = BN_num_bytes(a);
450916
+    if (tolen == -1) {
450916
+        tolen = n;
450916
+    } else if (tolen < n) {     /* uncommon/unlike case */
450916
+        BIGNUM temp = *a;
450916
+
450916
+        bn_correct_top(&temp);
450916
+        n = BN_num_bytes(&temp);
450916
+        if (tolen < n)
450916
+            return -1;
450916
+    }
450916
+
450916
+    /* Swipe through whole available data and don't give away padded zero. */
450916
+    atop = a->dmax * BN_BYTES;
450916
+    if (atop == 0) {
450916
+        OPENSSL_cleanse(to, tolen);
450916
+        return tolen;
450916
+    }
450916
+
450916
+    lasti = atop - 1;
450916
+    atop = a->top * BN_BYTES;
450916
+    for (i = 0, j = 0, to += tolen; j < (size_t)tolen; j++) {
450916
+        l = a->d[i / BN_BYTES];
450916
+        mask = 0 - ((j - atop) >> (8 * sizeof(i) - 1));
450916
+        *--to = (unsigned char)(l >> (8 * (i % BN_BYTES)) & mask);
450916
+        i += (i - lasti) >> (8 * sizeof(i) - 1); /* stay on last limb */
450916
+    }
450916
+
450916
+    return tolen;
450916
+}
450916
+
450916
+int bn_bn2binpad(const BIGNUM *a, unsigned char *to, int tolen)
450916
+{
450916
+    if (tolen < 0)
450916
+        return -1;
450916
+    return bn2binpad(a, to, tolen);
450916
+}
450916
+
450916
 int BN_bn2bin(const BIGNUM *a, unsigned char *to)
450916
 {
450916
     int n, i;
450916
@@ -810,6 +836,9 @@ int bn_cmp_words(const BN_ULONG *a, cons
450916
     int i;
450916
     BN_ULONG aa, bb;
450916
 
450916
+    if (n == 0)
450916
+        return 0;
450916
+
450916
     aa = a[n - 1];
450916
     bb = b[n - 1];
450916
     if (aa != bb)
450916
diff -up openssl-1.0.2k/crypto/bn/bn_mod.c.9-lives openssl-1.0.2k/crypto/bn/bn_mod.c
450916
--- openssl-1.0.2k/crypto/bn/bn_mod.c.9-lives	2019-04-05 10:50:56.125104581 +0200
450916
+++ openssl-1.0.2k/crypto/bn/bn_mod.c	2019-04-05 10:50:56.136104388 +0200
450916
@@ -197,6 +197,7 @@ int bn_mod_add_fixed_top(BIGNUM *r, cons
450916
         ((volatile BN_ULONG *)tp)[i] = 0;
450916
     }
450916
     r->top = mtop;
450916
+    r->flags |= BN_FLG_FIXED_TOP;
450916
     r->neg = 0;
450916
 
450916
     if (tp != storage)
450916
@@ -225,6 +226,70 @@ int BN_mod_sub(BIGNUM *r, const BIGNUM *
450916
 }
450916
 
450916
 /*
450916
+ * BN_mod_sub variant that may be used if both a and b are non-negative,
450916
+ * a is less than m, while b is of same bit width as m. It's implemented
450916
+ * as subtraction followed by two conditional additions.
450916
+ *
450916
+ * 0 <= a < m
450916
+ * 0 <= b < 2^w < 2*m
450916
+ *
450916
+ * after subtraction
450916
+ *
450916
+ * -2*m < r = a - b < m
450916
+ *
450916
+ * Thus it takes up to two conditional additions to make |r| positive.
450916
+ */
450916
+int bn_mod_sub_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
450916
+                         const BIGNUM *m)
450916
+{
450916
+    size_t i, ai, bi, mtop = m->top;
450916
+    BN_ULONG borrow, carry, ta, tb, mask, *rp;
450916
+    const BN_ULONG *ap, *bp;
450916
+
450916
+    if (bn_wexpand(r, m->top) == NULL)
450916
+        return 0;
450916
+
450916
+    rp = r->d;
450916
+    ap = a->d != NULL ? a->d : rp;
450916
+    bp = b->d != NULL ? b->d : rp;
450916
+
450916
+    for (i = 0, ai = 0, bi = 0, borrow = 0; i < mtop;) {
450916
+        mask = (BN_ULONG)0 - ((i - a->top) >> (8 * sizeof(i) - 1));
450916
+        ta = ap[ai] & mask;
450916
+
450916
+        mask = (BN_ULONG)0 - ((i - b->top) >> (8 * sizeof(i) - 1));
450916
+        tb = bp[bi] & mask;
450916
+        rp[i] = ta - tb - borrow;
450916
+        if (ta != tb)
450916
+            borrow = (ta < tb);
450916
+
450916
+        i++;
450916
+        ai += (i - a->dmax) >> (8 * sizeof(i) - 1);
450916
+        bi += (i - b->dmax) >> (8 * sizeof(i) - 1);
450916
+    }
450916
+    ap = m->d;
450916
+    for (i = 0, mask = 0 - borrow, carry = 0; i < mtop; i++) {
450916
+        ta = ((ap[i] & mask) + carry) & BN_MASK2;
450916
+        carry = (ta < carry);
450916
+        rp[i] = (rp[i] + ta) & BN_MASK2;
450916
+        carry += (rp[i] < ta);
450916
+    }
450916
+    borrow -= carry;
450916
+    for (i = 0, mask = 0 - borrow, carry = 0; i < mtop; i++) {
450916
+        ta = ((ap[i] & mask) + carry) & BN_MASK2;
450916
+        carry = (ta < carry);
450916
+        rp[i] = (rp[i] + ta) & BN_MASK2;
450916
+        carry += (rp[i] < ta);
450916
+    }
450916
+
450916
+    r->top = mtop;
450916
+    r->flags |= BN_FLG_FIXED_TOP;
450916
+    r->neg = 0;
450916
+
450916
+    return 1;
450916
+}
450916
+
450916
+/*
450916
  * BN_mod_sub variant that may be used if both a and b are non-negative and
450916
  * less than m
450916
  */
450916
diff -up openssl-1.0.2k/crypto/bn/bn_mont.c.9-lives openssl-1.0.2k/crypto/bn/bn_mont.c
450916
--- openssl-1.0.2k/crypto/bn/bn_mont.c.9-lives	2019-04-05 10:50:56.125104581 +0200
450916
+++ openssl-1.0.2k/crypto/bn/bn_mont.c	2019-04-05 10:50:56.137104370 +0200
450916
@@ -164,10 +164,10 @@ int bn_mul_mont_fixed_top(BIGNUM *r, con
450916
 
450916
     bn_check_top(tmp);
450916
     if (a == b) {
450916
-        if (!BN_sqr(tmp, a, ctx))
450916
+        if (!bn_sqr_fixed_top(tmp, a, ctx))
450916
             goto err;
450916
     } else {
450916
-        if (!BN_mul(tmp, a, b, ctx))
450916
+        if (!bn_mul_fixed_top(tmp, a, b, ctx))
450916
             goto err;
450916
     }
450916
     /* reduce from aRR to aR */
450916
@@ -190,6 +190,7 @@ static int bn_from_montgomery_word(BIGNU
450916
     BIGNUM *n;
450916
     BN_ULONG *ap, *np, *rp, n0, v, carry;
450916
     int nl, max, i;
450916
+    unsigned int rtop;
450916
 
450916
     n = &(mont->N);
450916
     nl = n->top;
450916
@@ -207,12 +208,10 @@ static int bn_from_montgomery_word(BIGNU
450916
     rp = r->d;
450916
 
450916
     /* clear the top words of T */
450916
-# if 1
450916
-    for (i = r->top; i < max; i++) /* memset? XXX */
450916
-        rp[i] = 0;
450916
-# else
450916
-    memset(&(rp[r->top]), 0, (max - r->top) * sizeof(BN_ULONG));
450916
-# endif
450916
+    for (rtop = r->top, i = 0; i < max; i++) {
450916
+        v = (BN_ULONG)0 - ((i - rtop) >> (8 * sizeof(rtop) - 1));
450916
+        rp[i] &= v;
450916
+    }
450916
 
450916
     r->top = max;
450916
     r->flags |= BN_FLG_FIXED_TOP;
450916
@@ -263,6 +262,18 @@ static int bn_from_montgomery_word(BIGNU
450916
 int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
450916
                        BN_CTX *ctx)
450916
 {
450916
+    int retn;
450916
+
450916
+    retn = bn_from_mont_fixed_top(ret, a, mont, ctx);
450916
+    bn_correct_top(ret);
450916
+    bn_check_top(ret);
450916
+
450916
+    return retn;
450916
+}
450916
+
450916
+int bn_from_mont_fixed_top(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
450916
+                           BN_CTX *ctx)
450916
+{
450916
     int retn = 0;
450916
 #ifdef MONT_WORD
450916
     BIGNUM *t;
450916
@@ -270,8 +281,6 @@ int BN_from_montgomery(BIGNUM *ret, cons
450916
     BN_CTX_start(ctx);
450916
     if ((t = BN_CTX_get(ctx)) && BN_copy(t, a)) {
450916
         retn = bn_from_montgomery_word(ret, t, mont);
450916
-        bn_correct_top(ret);
450916
-        bn_check_top(ret);
450916
     }
450916
     BN_CTX_end(ctx);
450916
 #else                           /* !MONT_WORD */
450916
diff -up openssl-1.0.2k/crypto/bn/bn_mul.c.9-lives openssl-1.0.2k/crypto/bn/bn_mul.c
450916
--- openssl-1.0.2k/crypto/bn/bn_mul.c.9-lives	2017-01-26 14:22:03.000000000 +0100
450916
+++ openssl-1.0.2k/crypto/bn/bn_mul.c	2019-04-05 10:50:56.137104370 +0200
450916
@@ -936,6 +936,16 @@ void bn_mul_high(BN_ULONG *r, BN_ULONG *
450916
 
450916
 int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
450916
 {
450916
+    int ret = bn_mul_fixed_top(r, a, b, ctx);
450916
+
450916
+    bn_correct_top(r);
450916
+    bn_check_top(r);
450916
+
450916
+    return ret;
450916
+}
450916
+
450916
+int bn_mul_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
450916
+{
450916
     int ret = 0;
450916
     int top, al, bl;
450916
     BIGNUM *rr;
450916
@@ -1032,46 +1042,6 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, c
450916
             rr->top = top;
450916
             goto end;
450916
         }
450916
-# if 0
450916
-        if (i == 1 && !BN_get_flags(b, BN_FLG_STATIC_DATA)) {
450916
-            BIGNUM *tmp_bn = (BIGNUM *)b;
450916
-            if (bn_wexpand(tmp_bn, al) == NULL)
450916
-                goto err;
450916
-            tmp_bn->d[bl] = 0;
450916
-            bl++;
450916
-            i--;
450916
-        } else if (i == -1 && !BN_get_flags(a, BN_FLG_STATIC_DATA)) {
450916
-            BIGNUM *tmp_bn = (BIGNUM *)a;
450916
-            if (bn_wexpand(tmp_bn, bl) == NULL)
450916
-                goto err;
450916
-            tmp_bn->d[al] = 0;
450916
-            al++;
450916
-            i++;
450916
-        }
450916
-        if (i == 0) {
450916
-            /* symmetric and > 4 */
450916
-            /* 16 or larger */
450916
-            j = BN_num_bits_word((BN_ULONG)al);
450916
-            j = 1 << (j - 1);
450916
-            k = j + j;
450916
-            t = BN_CTX_get(ctx);
450916
-            if (al == j) {      /* exact multiple */
450916
-                if (bn_wexpand(t, k * 2) == NULL)
450916
-                    goto err;
450916
-                if (bn_wexpand(rr, k * 2) == NULL)
450916
-                    goto err;
450916
-                bn_mul_recursive(rr->d, a->d, b->d, al, t->d);
450916
-            } else {
450916
-                if (bn_wexpand(t, k * 4) == NULL)
450916
-                    goto err;
450916
-                if (bn_wexpand(rr, k * 4) == NULL)
450916
-                    goto err;
450916
-                bn_mul_part_recursive(rr->d, a->d, b->d, al - j, j, t->d);
450916
-            }
450916
-            rr->top = top;
450916
-            goto end;
450916
-        }
450916
-# endif
450916
     }
450916
 #endif                          /* BN_RECURSION */
450916
     if (bn_wexpand(rr, top) == NULL)
450916
@@ -1082,7 +1052,7 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, c
450916
 #if defined(BN_MUL_COMBA) || defined(BN_RECURSION)
450916
  end:
450916
 #endif
450916
-    bn_correct_top(rr);
450916
+    rr->flags |= BN_FLG_FIXED_TOP;
450916
     if (r != rr && BN_copy(r, rr) == NULL)
450916
         goto err;
450916
 
450916
diff -up openssl-1.0.2k/crypto/bn/bn_sqr.c.9-lives openssl-1.0.2k/crypto/bn/bn_sqr.c
450916
--- openssl-1.0.2k/crypto/bn/bn_sqr.c.9-lives	2019-04-05 10:50:56.125104581 +0200
450916
+++ openssl-1.0.2k/crypto/bn/bn_sqr.c	2019-04-05 10:50:56.137104370 +0200
450916
@@ -66,6 +66,16 @@
450916
  */
450916
 int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx)
450916
 {
450916
+    int ret = bn_sqr_fixed_top(r, a, ctx);
450916
+
450916
+    bn_correct_top(r);
450916
+    bn_check_top(r);
450916
+
450916
+    return ret;
450916
+}
450916
+
450916
+int bn_sqr_fixed_top(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx)
450916
+{
450916
     int max, al;
450916
     int ret = 0;
450916
     BIGNUM *tmp, *rr;
450916
@@ -136,7 +146,7 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, B
450916
 
450916
     rr->neg = 0;
450916
     rr->top = max;
450916
-    bn_correct_top(rr);
450916
+    rr->flags |= BN_FLG_FIXED_TOP;
450916
     if (r != rr && BN_copy(r, rr) == NULL)
450916
         goto err;
450916
 
450916
diff -up openssl-1.0.2k/crypto/bn_int.h.9-lives openssl-1.0.2k/crypto/bn_int.h
450916
--- openssl-1.0.2k/crypto/bn_int.h.9-lives	2019-04-05 10:50:56.125104581 +0200
450916
+++ openssl-1.0.2k/crypto/bn_int.h	2019-04-05 10:50:56.137104370 +0200
450916
@@ -7,7 +7,15 @@
450916
  */
450916
 int bn_mul_mont_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
450916
                           BN_MONT_CTX *mont, BN_CTX *ctx);
450916
+int bn_from_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont,
450916
+                           BN_CTX *ctx);
450916
 int bn_to_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont,
450916
                          BN_CTX *ctx);
450916
 int bn_mod_add_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
450916
                          const BIGNUM *m);
450916
+int bn_mod_sub_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
450916
+                         const BIGNUM *m);
450916
+int bn_mul_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
450916
+int bn_sqr_fixed_top(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx);
450916
+
450916
+int bn_bn2binpad(const BIGNUM *a, unsigned char *to, int tolen);
450916
diff -up openssl-1.0.2k/crypto/constant_time_locl.h.9-lives openssl-1.0.2k/crypto/constant_time_locl.h
450916
--- openssl-1.0.2k/crypto/constant_time_locl.h.9-lives	2019-04-05 10:50:55.545114779 +0200
450916
+++ openssl-1.0.2k/crypto/constant_time_locl.h	2019-04-05 10:50:56.137104370 +0200
450916
@@ -204,6 +204,12 @@ static inline int constant_time_select_i
450916
     return (int)(constant_time_select(mask, (unsigned)(a), (unsigned)(b)));
450916
 }
450916
 
450916
+/*
450916
+ * Expected usage pattern is to unconditionally set error and then
450916
+ * wipe it if there was no actual error. |clear| is 1 or 0.
450916
+ */
450916
+void err_clear_last_constant_time(int clear);
450916
+
450916
 #ifdef __cplusplus
450916
 }
450916
 #endif
450916
diff -up openssl-1.0.2k/crypto/err/err.c.9-lives openssl-1.0.2k/crypto/err/err.c
450916
--- openssl-1.0.2k/crypto/err/err.c.9-lives	2017-01-26 14:22:03.000000000 +0100
450916
+++ openssl-1.0.2k/crypto/err/err.c	2019-04-05 10:50:56.138104353 +0200
450916
@@ -118,6 +118,7 @@
450916
 #include <openssl/buffer.h>
450916
 #include <openssl/bio.h>
450916
 #include <openssl/err.h>
450916
+#include "constant_time_locl.h"
450916
 
450916
 DECLARE_LHASH_OF(ERR_STRING_DATA);
450916
 DECLARE_LHASH_OF(ERR_STATE);
450916
@@ -819,8 +820,24 @@ static unsigned long get_error_values(in
450916
         return ERR_R_INTERNAL_ERROR;
450916
     }
450916
 
450916
+    while (es->bottom != es->top) {
450916
+        if (es->err_flags[es->top] & ERR_FLAG_CLEAR) {
450916
+            err_clear(es, es->top);
450916
+            es->top = es->top > 0 ? es->top - 1 : ERR_NUM_ERRORS - 1;
450916
+            continue;
450916
+        }
450916
+        i = (es->bottom + 1) % ERR_NUM_ERRORS;
450916
+        if (es->err_flags[i] & ERR_FLAG_CLEAR) {
450916
+            es->bottom = i;
450916
+            err_clear(es, es->bottom);
450916
+            continue;
450916
+        }
450916
+        break;
450916
+    }
450916
+
450916
     if (es->bottom == es->top)
450916
         return 0;
450916
+
450916
     if (top)
450916
         i = es->top;            /* last error */
450916
     else
450916
@@ -1146,3 +1163,23 @@ int ERR_pop_to_mark(void)
450916
     es->err_flags[es->top] &= ~ERR_FLAG_MARK;
450916
     return 1;
450916
 }
450916
+
450916
+void err_clear_last_constant_time(int clear)
450916
+{
450916
+    ERR_STATE *es;
450916
+    int top;
450916
+
450916
+    es = ERR_get_state();
450916
+    if (es == NULL)
450916
+        return;
450916
+
450916
+    top = es->top;
450916
+
450916
+    /*
450916
+     * Flag error as cleared but remove it elsewhere to avoid two errors
450916
+     * accessing the same error stack location, revealing timing information.
450916
+     */
450916
+    clear = constant_time_select_int(constant_time_eq_int(clear, 0),
450916
+                                     0, ERR_FLAG_CLEAR);
450916
+    es->err_flags[top] |= clear;
450916
+}
450916
diff -up openssl-1.0.2k/crypto/err/err.h.9-lives openssl-1.0.2k/crypto/err/err.h
450916
--- openssl-1.0.2k/crypto/err/err.h.9-lives	2019-04-05 10:50:55.450116449 +0200
450916
+++ openssl-1.0.2k/crypto/err/err.h	2019-04-05 11:14:57.689757981 +0200
450916
@@ -143,6 +143,7 @@ extern "C" {
450916
 # define ERR_TXT_STRING          0x02
450916
 
450916
 # define ERR_FLAG_MARK           0x01
450916
+# define ERR_FLAG_CLEAR          0x02
450916
 
450916
 # define ERR_NUM_ERRORS  16
450916
 typedef struct err_state_st {
450916
diff -up openssl-1.0.2k/crypto/rsa/rsa_eay.c.9-lives openssl-1.0.2k/crypto/rsa/rsa_eay.c
450916
--- openssl-1.0.2k/crypto/rsa/rsa_eay.c.9-lives	2019-04-05 10:50:55.998106814 +0200
450916
+++ openssl-1.0.2k/crypto/rsa/rsa_eay.c	2019-04-05 10:50:56.138104353 +0200
450916
@@ -118,6 +118,8 @@
450916
 #ifdef OPENSSL_FIPS
450916
 # include <openssl/fips.h>
450916
 #endif
450916
+#include "bn_int.h"
450916
+#include "constant_time_locl.h"
450916
 
450916
 #ifndef RSA_NULL
450916
 
450916
@@ -160,7 +162,7 @@ static int RSA_eay_public_encrypt(int fl
450916
                                   unsigned char *to, RSA *rsa, int padding)
450916
 {
450916
     BIGNUM *f, *ret;
450916
-    int i, j, k, num = 0, r = -1;
450916
+    int i, num = 0, r = -1;
450916
     unsigned char *buf = NULL;
450916
     BN_CTX *ctx = NULL;
450916
 
450916
@@ -252,15 +254,10 @@ static int RSA_eay_public_encrypt(int fl
450916
         goto err;
450916
 
450916
     /*
450916
-     * put in leading 0 bytes if the number is less than the length of the
450916
-     * modulus
450916
+     * BN_bn2binpad puts in leading 0 bytes if the number is less than
450916
+     * the length of the modulus.
450916
      */
450916
-    j = BN_num_bytes(ret);
450916
-    i = BN_bn2bin(ret, &(to[num - j]));
450916
-    for (k = 0; k < (num - i); k++)
450916
-        to[k] = 0;
450916
-
450916
-    r = num;
450916
+    r = bn_bn2binpad(ret, to, num);
450916
  err:
450916
     if (ctx != NULL) {
450916
         BN_CTX_end(ctx);
450916
@@ -369,7 +366,7 @@ static int RSA_eay_private_encrypt(int f
450916
                                    unsigned char *to, RSA *rsa, int padding)
450916
 {
450916
     BIGNUM *f, *ret, *res;
450916
-    int i, j, k, num = 0, r = -1;
450916
+    int i, num = 0, r = -1;
450916
     unsigned char *buf = NULL;
450916
     BN_CTX *ctx = NULL;
450916
     int local_blinding = 0;
450916
@@ -437,6 +434,11 @@ static int RSA_eay_private_encrypt(int f
450916
         goto err;
450916
     }
450916
 
450916
+    if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
450916
+        if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, CRYPTO_LOCK_RSA,
450916
+                                    rsa->n, ctx))
450916
+            goto err;
450916
+
450916
     if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
450916
         blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
450916
         if (blinding == NULL) {
450916
@@ -471,11 +473,6 @@ static int RSA_eay_private_encrypt(int f
450916
         } else
450916
             d = rsa->d;
450916
 
450916
-        if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
450916
-            if (!BN_MONT_CTX_set_locked
450916
-                (&rsa->_method_mod_n, CRYPTO_LOCK_RSA, rsa->n, ctx))
450916
-                goto err;
450916
-
450916
         if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
450916
                                    rsa->_method_mod_n))
450916
             goto err;
450916
@@ -495,15 +492,10 @@ static int RSA_eay_private_encrypt(int f
450916
         res = ret;
450916
 
450916
     /*
450916
-     * put in leading 0 bytes if the number is less than the length of the
450916
-     * modulus
450916
+     * BN_bn2binpad puts in leading 0 bytes if the number is less than
450916
+     * the length of the modulus.
450916
      */
450916
-    j = BN_num_bytes(res);
450916
-    i = BN_bn2bin(res, &(to[num - j]));
450916
-    for (k = 0; k < (num - i); k++)
450916
-        to[k] = 0;
450916
-
450916
-    r = num;
450916
+    r = bn_bn2binpad(res, to, num);
450916
  err:
450916
     if (ctx != NULL) {
450916
         BN_CTX_end(ctx);
450916
@@ -521,7 +513,6 @@ static int RSA_eay_private_decrypt(int f
450916
 {
450916
     BIGNUM *f, *ret;
450916
     int j, num = 0, r = -1;
450916
-    unsigned char *p;
450916
     unsigned char *buf = NULL;
450916
     BN_CTX *ctx = NULL;
450916
     int local_blinding = 0;
450916
@@ -628,8 +619,7 @@ static int RSA_eay_private_decrypt(int f
450916
         if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
450916
             goto err;
450916
 
450916
-    p = buf;
450916
-    j = BN_bn2bin(ret, p);      /* j is only used with no-padding mode */
450916
+    j = bn_bn2binpad(ret, buf, num);
450916
 
450916
     switch (padding) {
450916
     case RSA_PKCS1_PADDING:
450916
@@ -644,14 +634,14 @@ static int RSA_eay_private_decrypt(int f
450916
         r = RSA_padding_check_SSLv23(to, num, buf, j, num);
450916
         break;
450916
     case RSA_NO_PADDING:
450916
-        r = RSA_padding_check_none(to, num, buf, j, num);
450916
+        memcpy(to, buf, (r = j));
450916
         break;
450916
     default:
450916
         RSAerr(RSA_F_RSA_EAY_PRIVATE_DECRYPT, RSA_R_UNKNOWN_PADDING_TYPE);
450916
         goto err;
450916
     }
450916
-    if (r < 0)
450916
-        RSAerr(RSA_F_RSA_EAY_PRIVATE_DECRYPT, RSA_R_PADDING_CHECK_FAILED);
450916
+    RSAerr(RSA_F_RSA_EAY_PRIVATE_DECRYPT, RSA_R_PADDING_CHECK_FAILED);
450916
+    err_clear_last_constant_time(1 & ~constant_time_msb(r));
450916
 
450916
  err:
450916
     if (ctx != NULL) {
450916
@@ -671,7 +661,6 @@ static int RSA_eay_public_decrypt(int fl
450916
 {
450916
     BIGNUM *f, *ret;
450916
     int i, num = 0, r = -1;
450916
-    unsigned char *p;
450916
     unsigned char *buf = NULL;
450916
     BN_CTX *ctx = NULL;
450916
 
450916
@@ -752,8 +741,7 @@ static int RSA_eay_public_decrypt(int fl
450916
         if (!BN_sub(ret, rsa->n, ret))
450916
             goto err;
450916
 
450916
-    p = buf;
450916
-    i = BN_bn2bin(ret, p);
450916
+    i = bn_bn2binpad(ret, buf, num);
450916
 
450916
     switch (padding) {
450916
     case RSA_PKCS1_PADDING:
450916
@@ -763,7 +751,7 @@ static int RSA_eay_public_decrypt(int fl
450916
         r = RSA_padding_check_X931(to, num, buf, i, num);
450916
         break;
450916
     case RSA_NO_PADDING:
450916
-        r = RSA_padding_check_none(to, num, buf, i, num);
450916
+        memcpy(to, buf, (r = i));
450916
         break;
450916
     default:
450916
         RSAerr(RSA_F_RSA_EAY_PUBLIC_DECRYPT, RSA_R_UNKNOWN_PADDING_TYPE);
450916
@@ -789,7 +777,7 @@ static int RSA_eay_mod_exp(BIGNUM *r0, c
450916
     BIGNUM *r1, *m1, *vrfy;
450916
     BIGNUM local_dmp1, local_dmq1, local_c, local_r1;
450916
     BIGNUM *dmp1, *dmq1, *c, *pr1;
450916
-    int ret = 0;
450916
+    int ret = 0, smooth = 0;
450916
 
450916
     BN_CTX_start(ctx);
450916
     r1 = BN_CTX_get(ctx);
450916
@@ -824,6 +812,9 @@ static int RSA_eay_mod_exp(BIGNUM *r0, c
450916
             if (!BN_MONT_CTX_set_locked
450916
                 (&rsa->_method_mod_q, CRYPTO_LOCK_RSA, q, ctx))
450916
                 goto err;
450916
+
450916
+            smooth = (rsa->meth->bn_mod_exp == BN_mod_exp_mont)
450916
+                     && (BN_num_bits(q) == BN_num_bits(p));
450916
         }
450916
     }
450916
 
450916
@@ -832,6 +823,47 @@ static int RSA_eay_mod_exp(BIGNUM *r0, c
450916
             (&rsa->_method_mod_n, CRYPTO_LOCK_RSA, rsa->n, ctx))
450916
             goto err;
450916
 
450916
+    if (smooth) {
450916
+        /*
450916
+         * Conversion from Montgomery domain, a.k.a. Montgomery reduction,
450916
+         * accepts values in [0-m*2^w) range. w is m's bit width rounded up
450916
+         * to limb width. So that at the very least if |I| is fully reduced,
450916
+         * i.e. less than p*q, we can count on from-to round to perform
450916
+         * below modulo operations on |I|. Unlike BN_mod it's constant time.
450916
+         */
450916
+        if (/* m1 = I mod q */
450916
+            !bn_from_mont_fixed_top(m1, I, rsa->_method_mod_q, ctx)
450916
+            || !bn_to_mont_fixed_top(m1, m1, rsa->_method_mod_q, ctx)
450916
+            /* m1 = m1^dmq1 mod q */
450916
+            || !BN_mod_exp_mont_consttime(m1, m1, rsa->dmq1, rsa->q, ctx,
450916
+                                          rsa->_method_mod_q)
450916
+            /* r1 = I mod p */
450916
+            || !bn_from_mont_fixed_top(r1, I, rsa->_method_mod_p, ctx)
450916
+            || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
450916
+            /* r1 = r1^dmp1 mod p */
450916
+            || !BN_mod_exp_mont_consttime(r1, r1, rsa->dmp1, rsa->p, ctx,
450916
+                                          rsa->_method_mod_p)
450916
+            /* r1 = (r1 - m1) mod p */
450916
+            /*
450916
+             * bn_mod_sub_fixed_top is not regular modular subtraction,
450916
+             * it can tolerate subtrahend to be larger than modulus, but
450916
+             * not bit-wise wider. This makes up for uncommon q>p case,
450916
+             * when |m1| can be larger than |rsa->p|.
450916
+             */
450916
+            || !bn_mod_sub_fixed_top(r1, r1, m1, rsa->p)
450916
+
450916
+            /* r1 = r1 * iqmp mod p */
450916
+            || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
450916
+            || !bn_mul_mont_fixed_top(r1, r1, rsa->iqmp, rsa->_method_mod_p,
450916
+                                      ctx)
450916
+            /* r0 = r1 * q + m1 */
450916
+            || !bn_mul_fixed_top(r0, r1, rsa->q, ctx)
450916
+            || !bn_mod_add_fixed_top(r0, r0, m1, rsa->n))
450916
+            goto err;
450916
+
450916
+        goto tail;
450916
+    }
450916
+
450916
     /* compute I mod q */
450916
     if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
450916
         c = &local_c;
450916
@@ -909,10 +941,18 @@ static int RSA_eay_mod_exp(BIGNUM *r0, c
450916
     if (!BN_add(r0, r1, m1))
450916
         goto err;
450916
 
450916
+ tail:
450916
     if (rsa->e && rsa->n) {
450916
-        if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx,
450916
-                                   rsa->_method_mod_n))
450916
-            goto err;
450916
+        if (rsa->meth->bn_mod_exp == BN_mod_exp_mont) {
450916
+            if (!BN_mod_exp_mont(vrfy, r0, rsa->e, rsa->n, ctx,
450916
+                                 rsa->_method_mod_n))
450916
+                goto err;
450916
+        } else {
450916
+            bn_correct_top(r0);
450916
+            if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx,
450916
+                                       rsa->_method_mod_n))
450916
+                goto err;
450916
+        }
450916
         /*
450916
          * If 'I' was greater than (or equal to) rsa->n, the operation will
450916
          * be equivalent to using 'I mod n'. However, the result of the
450916
@@ -921,6 +961,11 @@ static int RSA_eay_mod_exp(BIGNUM *r0, c
450916
          */
450916
         if (!BN_sub(vrfy, vrfy, I))
450916
             goto err;
450916
+        if (BN_is_zero(vrfy)) {
450916
+            bn_correct_top(r0);
450916
+            ret = 1;
450916
+            goto err;   /* not actually error */
450916
+        }
450916
         if (!BN_mod(vrfy, vrfy, rsa->n, ctx))
450916
             goto err;
450916
         if (BN_is_negative(vrfy))
450916
@@ -946,6 +991,15 @@ static int RSA_eay_mod_exp(BIGNUM *r0, c
450916
                 goto err;
450916
         }
450916
     }
450916
+    /*
450916
+     * It's unfortunate that we have to bn_correct_top(r0). What hopefully
450916
+     * saves the day is that correction is highly unlikely, and private key
450916
+     * operations are customarily performed on blinded message. Which means
450916
+     * that attacker won't observe correlation with chosen plaintext.
450916
+     * Secondly, remaining code would still handle it in same computational
450916
+     * time and even conceal memory access pattern around corrected top.
450916
+     */
450916
+    bn_correct_top(r0);
450916
     ret = 1;
450916
  err:
450916
     BN_CTX_end(ctx);
450916
diff -up openssl-1.0.2k/crypto/rsa/rsa_oaep.c.9-lives openssl-1.0.2k/crypto/rsa/rsa_oaep.c
450916
--- openssl-1.0.2k/crypto/rsa/rsa_oaep.c.9-lives	2017-01-26 14:22:03.000000000 +0100
450916
+++ openssl-1.0.2k/crypto/rsa/rsa_oaep.c	2019-04-05 10:50:56.138104353 +0200
450916
@@ -120,8 +120,8 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un
450916
                                       int plen, const EVP_MD *md,
450916
                                       const EVP_MD *mgf1md)
450916
 {
450916
-    int i, dblen, mlen = -1, one_index = 0, msg_index;
450916
-    unsigned int good, found_one_byte;
450916
+    int i, dblen = 0, mlen = -1, one_index = 0, msg_index;
450916
+    unsigned int good = 0, found_one_byte, mask;
450916
     const unsigned char *maskedseed, *maskeddb;
450916
     /*
450916
      * |em| is the encoded message, zero-padded to exactly |num| bytes: em =
450916
@@ -144,31 +144,42 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un
450916
      * |num| is the length of the modulus; |flen| is the length of the
450916
      * encoded message. Therefore, for any |from| that was obtained by
450916
      * decrypting a ciphertext, we must have |flen| <= |num|. Similarly,
450916
-     * num < 2 * mdlen + 2 must hold for the modulus irrespective of
450916
+     * |num| >= 2 * |mdlen| + 2 must hold for the modulus irrespective of
450916
      * the ciphertext, see PKCS #1 v2.2, section 7.1.2.
450916
      * This does not leak any side-channel information.
450916
      */
450916
-    if (num < flen || num < 2 * mdlen + 2)
450916
-        goto decoding_err;
450916
+    if (num < flen || num < 2 * mdlen + 2) {
450916
+        RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1,
450916
+               RSA_R_OAEP_DECODING_ERROR);
450916
+        return -1;
450916
+    }
450916
 
450916
     dblen = num - mdlen - 1;
450916
     db = OPENSSL_malloc(dblen);
450916
-    em = OPENSSL_malloc(num);
450916
-    if (db == NULL || em == NULL) {
450916
+    if (db == NULL) {
450916
         RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, ERR_R_MALLOC_FAILURE);
450916
         goto cleanup;
450916
     }
450916
 
450916
+    em = OPENSSL_malloc(num);
450916
+    if (em == NULL) {
450916
+        RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1,
450916
+               ERR_R_MALLOC_FAILURE);
450916
+        goto cleanup;
450916
+    }
450916
+
450916
     /*
450916
-     * Always do this zero-padding copy (even when num == flen) to avoid
450916
-     * leaking that information. The copy still leaks some side-channel
450916
-     * information, but it's impossible to have a fixed  memory access
450916
-     * pattern since we can't read out of the bounds of |from|.
450916
-     *
450916
-     * TODO(emilia): Consider porting BN_bn2bin_padded from BoringSSL.
450916
-     */
450916
-    memset(em, 0, num);
450916
-    memcpy(em + num - flen, from, flen);
450916
+     * Caller is encouraged to pass zero-padded message created with
450916
+     * BN_bn2binpad. Trouble is that since we can't read out of |from|'s
450916
+     * bounds, it's impossible to have an invariant memory access pattern
450916
+     * in case |from| was not zero-padded in advance.
450916
+     */
450916
+    for (from += flen, em += num, i = 0; i < num; i++) {
450916
+        mask = ~constant_time_is_zero(flen);
450916
+        flen -= 1 & mask;
450916
+        from -= 1 & mask;
450916
+        *--em = *from & mask;
450916
+    }
450916
 
450916
     /*
450916
      * The first byte must be zero, however we must not leak if this is
450916
@@ -215,33 +226,53 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(un
450916
      * so plaintext-awareness ensures timing side-channels are no longer a
450916
      * concern.
450916
      */
450916
-    if (!good)
450916
-        goto decoding_err;
450916
-
450916
     msg_index = one_index + 1;
450916
     mlen = dblen - msg_index;
450916
 
450916
-    if (tlen < mlen) {
450916
-        RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1, RSA_R_DATA_TOO_LARGE);
450916
-        mlen = -1;
450916
-    } else {
450916
-        memcpy(to, db + msg_index, mlen);
450916
-        goto cleanup;
450916
+    /*
450916
+     * For good measure, do this check in constant time as well.
450916
+     */
450916
+    good &= constant_time_ge(tlen, mlen);
450916
+
450916
+    /*
450916
+     * Move the result in-place by |dblen|-|mdlen|-1-|mlen| bytes to the left.
450916
+     * Then if |good| move |mlen| bytes from |db|+|mdlen|+1 to |to|.
450916
+     * Otherwise leave |to| unchanged.
450916
+     * Copy the memory back in a way that does not reveal the size of
450916
+     * the data being copied via a timing side channel. This requires copying
450916
+     * parts of the buffer multiple times based on the bits set in the real
450916
+     * length. Clear bits do a non-copy with identical access pattern.
450916
+     * The loop below has overall complexity of O(N*log(N)).
450916
+     */
450916
+    tlen = constant_time_select_int(constant_time_lt(dblen - mdlen - 1, tlen),
450916
+                                    dblen - mdlen - 1, tlen);
450916
+    for (msg_index = 1; msg_index < dblen - mdlen - 1; msg_index <<= 1) {
450916
+        mask = ~constant_time_eq(msg_index & (dblen - mdlen - 1 - mlen), 0);
450916
+        for (i = mdlen + 1; i < dblen - msg_index; i++)
450916
+            db[i] = constant_time_select_8(mask, db[i + msg_index], db[i]);
450916
+    }
450916
+    for (i = 0; i < tlen; i++) {
450916
+        mask = good & constant_time_lt(i, mlen);
450916
+        to[i] = constant_time_select_8(mask, db[i + mdlen + 1], to[i]);
450916
     }
450916
 
450916
- decoding_err:
450916
     /*
450916
      * To avoid chosen ciphertext attacks, the error message should not
450916
      * reveal which kind of decoding error happened.
450916
      */
450916
     RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1,
450916
            RSA_R_OAEP_DECODING_ERROR);
450916
+    err_clear_last_constant_time(1 & good);
450916
  cleanup:
450916
+    OPENSSL_cleanse(seed, sizeof(seed));
450916
     if (db != NULL)
450916
-        OPENSSL_free(db);
450916
+        OPENSSL_cleanse(db, dblen);
450916
+    OPENSSL_free(db);
450916
     if (em != NULL)
450916
-        OPENSSL_free(em);
450916
-    return mlen;
450916
+        OPENSSL_cleanse(em, num);
450916
+    OPENSSL_free(em);
450916
+
450916
+    return constant_time_select_int(good, mlen, -1);
450916
 }
450916
 
450916
 int PKCS1_MGF1(unsigned char *mask, long len,
450916
diff -up openssl-1.0.2k/crypto/rsa/rsa_pk1.c.9-lives openssl-1.0.2k/crypto/rsa/rsa_pk1.c
450916
--- openssl-1.0.2k/crypto/rsa/rsa_pk1.c.9-lives	2017-01-26 14:22:03.000000000 +0100
450916
+++ openssl-1.0.2k/crypto/rsa/rsa_pk1.c	2019-04-05 10:50:56.139104335 +0200
450916
@@ -98,6 +98,27 @@ int RSA_padding_check_PKCS1_type_1(unsig
450916
     const unsigned char *p;
450916
 
450916
     p = from;
450916
+
450916
+    /*
450916
+     * The format is
450916
+     * 00 || 01 || PS || 00 || D
450916
+     * PS - padding string, at least 8 bytes of FF
450916
+     * D  - data.
450916
+     */
450916
+
450916
+    if (num < 11)
450916
+        return -1;
450916
+
450916
+    /* Accept inputs with and without the leading 0-byte. */
450916
+    if (num == flen) {
450916
+        if ((*p++) != 0x00) {
450916
+            RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_1,
450916
+                   RSA_R_INVALID_PADDING);
450916
+            return -1;
450916
+        }
450916
+        flen--;
450916
+    }
450916
+
450916
     if ((num != (flen + 1)) || (*(p++) != 01)) {
450916
         RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_1,
450916
                RSA_R_BLOCK_TYPE_IS_NOT_01);
450916
@@ -186,7 +207,7 @@ int RSA_padding_check_PKCS1_type_2(unsig
450916
     int i;
450916
     /* |em| is the encoded message, zero-padded to exactly |num| bytes */
450916
     unsigned char *em = NULL;
450916
-    unsigned int good, found_zero_byte;
450916
+    unsigned int good, found_zero_byte, mask;
450916
     int zero_index = 0, msg_index, mlen = -1;
450916
 
450916
     if (tlen < 0 || flen < 0)
450916
@@ -197,37 +218,40 @@ int RSA_padding_check_PKCS1_type_2(unsig
450916
      * section 7.2.2.
450916
      */
450916
 
450916
-    if (flen > num)
450916
-        goto err;
450916
-
450916
-    if (num < 11)
450916
-        goto err;
450916
+    if (flen > num || num < 11) {
450916
+        RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2,
450916
+               RSA_R_PKCS_DECODING_ERROR);
450916
+        return -1;
450916
+    }
450916
 
450916
     em = OPENSSL_malloc(num);
450916
     if (em == NULL) {
450916
         RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, ERR_R_MALLOC_FAILURE);
450916
         return -1;
450916
     }
450916
-    memset(em, 0, num);
450916
     /*
450916
-     * Always do this zero-padding copy (even when num == flen) to avoid
450916
-     * leaking that information. The copy still leaks some side-channel
450916
-     * information, but it's impossible to have a fixed  memory access
450916
-     * pattern since we can't read out of the bounds of |from|.
450916
-     *
450916
-     * TODO(emilia): Consider porting BN_bn2bin_padded from BoringSSL.
450916
-     */
450916
-    memcpy(em + num - flen, from, flen);
450916
+     * Caller is encouraged to pass zero-padded message created with
450916
+     * BN_bn2binpad. Trouble is that since we can't read out of |from|'s
450916
+     * bounds, it's impossible to have an invariant memory access pattern
450916
+     * in case |from| was not zero-padded in advance.
450916
+     */
450916
+    for (from += flen, em += num, i = 0; i < num; i++) {
450916
+        mask = ~constant_time_is_zero(flen);
450916
+        flen -= 1 & mask;
450916
+        from -= 1 & mask;
450916
+        *--em = *from & mask;
450916
+    }
450916
 
450916
     good = constant_time_is_zero(em[0]);
450916
     good &= constant_time_eq(em[1], 2);
450916
 
450916
+    /* scan over padding data */
450916
     found_zero_byte = 0;
450916
     for (i = 2; i < num; i++) {
450916
         unsigned int equals0 = constant_time_is_zero(em[i]);
450916
-        zero_index =
450916
-            constant_time_select_int(~found_zero_byte & equals0, i,
450916
-                                     zero_index);
450916
+
450916
+        zero_index = constant_time_select_int(~found_zero_byte & equals0,
450916
+                                              i, zero_index);
450916
         found_zero_byte |= equals0;
450916
     }
450916
 
450916
@@ -236,7 +260,7 @@ int RSA_padding_check_PKCS1_type_2(unsig
450916
      * If we never found a 0-byte, then |zero_index| is 0 and the check
450916
      * also fails.
450916
      */
450916
-    good &= constant_time_ge((unsigned int)(zero_index), 2 + 8);
450916
+    good &= constant_time_ge(zero_index, 2 + 8);
450916
 
450916
     /*
450916
      * Skip the zero byte. This is incorrect if we never found a zero-byte
450916
@@ -246,30 +270,36 @@ int RSA_padding_check_PKCS1_type_2(unsig
450916
     mlen = num - msg_index;
450916
 
450916
     /*
450916
-     * For good measure, do this check in constant time as well; it could
450916
-     * leak something if |tlen| was assuming valid padding.
450916
+     * For good measure, do this check in constant time as well.
450916
      */
450916
-    good &= constant_time_ge((unsigned int)(tlen), (unsigned int)(mlen));
450916
+    good &= constant_time_ge(tlen, mlen);
450916
 
450916
     /*
450916
-     * We can't continue in constant-time because we need to copy the result
450916
-     * and we cannot fake its length. This unavoidably leaks timing
450916
-     * information at the API boundary.
450916
-     * TODO(emilia): this could be addressed at the call site,
450916
-     * see BoringSSL commit 0aa0767340baf925bda4804882aab0cb974b2d26.
450916
-     */
450916
-    if (!good) {
450916
-        mlen = -1;
450916
-        goto err;
450916
-    }
450916
+     * Move the result in-place by |num|-11-|mlen| bytes to the left.
450916
+     * Then if |good| move |mlen| bytes from |em|+11 to |to|.
450916
+     * Otherwise leave |to| unchanged.
450916
+     * Copy the memory back in a way that does not reveal the size of
450916
+     * the data being copied via a timing side channel. This requires copying
450916
+     * parts of the buffer multiple times based on the bits set in the real
450916
+     * length. Clear bits do a non-copy with identical access pattern.
450916
+     * The loop below has overall complexity of O(N*log(N)).
450916
+     */
450916
+    tlen = constant_time_select_int(constant_time_lt(num - 11, tlen),
450916
+                                    num - 11, tlen);
450916
+    for (msg_index = 1; msg_index < num - 11; msg_index <<= 1) {
450916
+        mask = ~constant_time_eq(msg_index & (num - 11 - mlen), 0);
450916
+        for (i = 11; i < num - msg_index; i++)
450916
+            em[i] = constant_time_select_8(mask, em[i + msg_index], em[i]);
450916
+    }
450916
+    for (i = 0; i < tlen; i++) {
450916
+        mask = good & constant_time_lt(i, mlen);
450916
+        to[i] = constant_time_select_8(mask, em[i + 11], to[i]);
450916
+    }
450916
+
450916
+    OPENSSL_cleanse(em, num);
450916
+    OPENSSL_free(em);
450916
+    RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, RSA_R_PKCS_DECODING_ERROR);
450916
+    err_clear_last_constant_time(1 & good);
450916
 
450916
-    memcpy(to, em + msg_index, mlen);
450916
-
450916
- err:
450916
-    if (em != NULL)
450916
-        OPENSSL_free(em);
450916
-    if (mlen == -1)
450916
-        RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2,
450916
-               RSA_R_PKCS_DECODING_ERROR);
450916
-    return mlen;
450916
+    return constant_time_select_int(good, mlen, -1);
450916
 }
450916
diff -up openssl-1.0.2k/crypto/rsa/rsa_ssl.c.9-lives openssl-1.0.2k/crypto/rsa/rsa_ssl.c
450916
--- openssl-1.0.2k/crypto/rsa/rsa_ssl.c.9-lives	2017-01-26 14:22:03.000000000 +0100
450916
+++ openssl-1.0.2k/crypto/rsa/rsa_ssl.c	2019-04-05 10:50:56.139104335 +0200
450916
@@ -61,6 +61,7 @@
450916
 #include <openssl/bn.h>
450916
 #include <openssl/rsa.h>
450916
 #include <openssl/rand.h>
450916
+#include "constant_time_locl.h"
450916
 
450916
 int RSA_padding_add_SSLv23(unsigned char *to, int tlen,
450916
                            const unsigned char *from, int flen)
450916
@@ -101,49 +102,119 @@ int RSA_padding_add_SSLv23(unsigned char
450916
     return (1);
450916
 }
450916
 
450916
+/*
450916
+ * Copy of RSA_padding_check_PKCS1_type_2 with a twist that rejects padding
450916
+ * if nul delimiter is not preceded by 8 consecutive 0x03 bytes. It also
450916
+ * preserves error code reporting for backward compatibility.
450916
+ */
450916
 int RSA_padding_check_SSLv23(unsigned char *to, int tlen,
450916
                              const unsigned char *from, int flen, int num)
450916
 {
450916
-    int i, j, k;
450916
-    const unsigned char *p;
450916
+    int i;
450916
+    /* |em| is the encoded message, zero-padded to exactly |num| bytes */
450916
+    unsigned char *em = NULL;
450916
+    unsigned int good, found_zero_byte, mask, threes_in_row;
450916
+    int zero_index = 0, msg_index, mlen = -1, err;
450916
 
450916
-    p = from;
450916
-    if (flen < 10) {
450916
+    if (tlen <= 0 || flen <= 0)
450916
+        return -1;
450916
+
450916
+    if (flen > num || num < 11) {
450916
         RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_DATA_TOO_SMALL);
450916
         return (-1);
450916
     }
450916
-    if ((num != (flen + 1)) || (*(p++) != 02)) {
450916
-        RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_BLOCK_TYPE_IS_NOT_02);
450916
-        return (-1);
450916
-    }
450916
 
450916
-    /* scan over padding data */
450916
-    j = flen - 1;               /* one for type */
450916
-    for (i = 0; i < j; i++)
450916
-        if (*(p++) == 0)
450916
-            break;
450916
-
450916
-    if ((i == j) || (i < 8)) {
450916
-        RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23,
450916
-               RSA_R_NULL_BEFORE_BLOCK_MISSING);
450916
-        return (-1);
450916
-    }
450916
-    for (k = -9; k < -1; k++) {
450916
-        if (p[k] != 0x03)
450916
-            break;
450916
-    }
450916
-    if (k == -1) {
450916
-        RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_SSLV3_ROLLBACK_ATTACK);
450916
-        return (-1);
450916
-    }
450916
+    em = OPENSSL_malloc(num);
450916
+    if (em == NULL) {
450916
+        RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, ERR_R_MALLOC_FAILURE);
450916
+        return -1;
450916
+    }
450916
+    /*
450916
+     * Caller is encouraged to pass zero-padded message created with
450916
+     * BN_bn2binpad. Trouble is that since we can't read out of |from|'s
450916
+     * bounds, it's impossible to have an invariant memory access pattern
450916
+     * in case |from| was not zero-padded in advance.
450916
+     */
450916
+    for (from += flen, em += num, i = 0; i < num; i++) {
450916
+        mask = ~constant_time_is_zero(flen);
450916
+        flen -= 1 & mask;
450916
+        from -= 1 & mask;
450916
+        *--em = *from & mask;
450916
+    }
450916
+
450916
+    good = constant_time_is_zero(em[0]);
450916
+    good &= constant_time_eq(em[1], 2);
450916
+    err = constant_time_select_int(good, 0, RSA_R_BLOCK_TYPE_IS_NOT_02);
450916
+    mask = ~good;
450916
 
450916
-    i++;                        /* Skip over the '\0' */
450916
-    j -= i;
450916
-    if (j > tlen) {
450916
-        RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, RSA_R_DATA_TOO_LARGE);
450916
-        return (-1);
450916
-    }
450916
-    memcpy(to, p, (unsigned int)j);
450916
+    /* scan over padding data */
450916
+    found_zero_byte = 0;
450916
+    threes_in_row = 0;
450916
+    for (i = 2; i < num; i++) {
450916
+        unsigned int equals0 = constant_time_is_zero(em[i]);
450916
+
450916
+        zero_index = constant_time_select_int(~found_zero_byte & equals0,
450916
+                                              i, zero_index);
450916
+        found_zero_byte |= equals0;
450916
+
450916
+        threes_in_row += 1 & ~found_zero_byte;
450916
+        threes_in_row &= found_zero_byte | constant_time_eq(em[i], 3);
450916
+    }
450916
+
450916
+    /*
450916
+     * PS must be at least 8 bytes long, and it starts two bytes into |em|.
450916
+     * If we never found a 0-byte, then |zero_index| is 0 and the check
450916
+     * also fails.
450916
+     */
450916
+    good &= constant_time_ge(zero_index, 2 + 8);
450916
+    err = constant_time_select_int(mask | good, err,
450916
+                                   RSA_R_NULL_BEFORE_BLOCK_MISSING);
450916
+    mask = ~good;
450916
+
450916
+    good &= constant_time_ge(threes_in_row, 8);
450916
+    err = constant_time_select_int(mask | good, err,
450916
+                                   RSA_R_SSLV3_ROLLBACK_ATTACK);
450916
+    mask = ~good;
450916
+
450916
+    /*
450916
+     * Skip the zero byte. This is incorrect if we never found a zero-byte
450916
+     * but in this case we also do not copy the message out.
450916
+     */
450916
+    msg_index = zero_index + 1;
450916
+    mlen = num - msg_index;
450916
+
450916
+    /*
450916
+     * For good measure, do this check in constant time as well.
450916
+     */
450916
+    good &= constant_time_ge(tlen, mlen);
450916
+    err = constant_time_select_int(mask | good, err, RSA_R_DATA_TOO_LARGE);
450916
+
450916
+    /*
450916
+     * Move the result in-place by |num|-11-|mlen| bytes to the left.
450916
+     * Then if |good| move |mlen| bytes from |em|+11 to |to|.
450916
+     * Otherwise leave |to| unchanged.
450916
+     * Copy the memory back in a way that does not reveal the size of
450916
+     * the data being copied via a timing side channel. This requires copying
450916
+     * parts of the buffer multiple times based on the bits set in the real
450916
+     * length. Clear bits do a non-copy with identical access pattern.
450916
+     * The loop below has overall complexity of O(N*log(N)).
450916
+     */
450916
+    tlen = constant_time_select_int(constant_time_lt(num - 11, tlen),
450916
+                                    num - 11, tlen);
450916
+    for (msg_index = 1; msg_index < num - 11; msg_index <<= 1) {
450916
+        mask = ~constant_time_eq(msg_index & (num - 11 - mlen), 0);
450916
+        for (i = 11; i < num - msg_index; i++)
450916
+            em[i] = constant_time_select_8(mask, em[i + msg_index], em[i]);
450916
+    }
450916
+    for (i = 0; i < tlen; i++) {
450916
+        mask = good & constant_time_lt(i, mlen);
450916
+        to[i] = constant_time_select_8(mask, em[i + 11], to[i]);
450916
+    }
450916
+
450916
+    OPENSSL_cleanse(em, num);
450916
+    OPENSSL_free(em);
450916
+    RSAerr(RSA_F_RSA_PADDING_CHECK_SSLV23, err);
450916
+    err_clear_last_constant_time(1 & good);
450916
 
450916
-    return (j);
450916
+    return constant_time_select_int(good, mlen, -1);
450916
 }
450916
diff -up openssl-1.0.2k/doc/crypto/RSA_padding_add_PKCS1_type_1.pod.9-lives openssl-1.0.2k/doc/crypto/RSA_padding_add_PKCS1_type_1.pod
450916
--- openssl-1.0.2k/doc/crypto/RSA_padding_add_PKCS1_type_1.pod.9-lives	2017-01-26 14:22:04.000000000 +0100
450916
+++ openssl-1.0.2k/doc/crypto/RSA_padding_add_PKCS1_type_1.pod	2019-04-05 10:50:56.139104335 +0200
450916
@@ -104,6 +104,18 @@ The RSA_padding_check_xxx() functions re
450916
 recovered data, -1 on error. Error codes can be obtained by calling
450916
 L<ERR_get_error(3)|ERR_get_error(3)>.
450916
 
450916
+=head1 WARNING
450916
+
450916
+The RSA_padding_check_PKCS1_type_2() padding check leaks timing
450916
+information which can potentially be used to mount a Bleichenbacher
450916
+padding oracle attack. This is an inherent weakness in the PKCS #1
450916
+v1.5 padding design. Prefer PKCS1_OAEP padding. Otherwise it can
450916
+be recommended to pass zero-padded B<f>, so that B<fl> equals to
450916
+B<rsa_len>, and if fixed by protocol, B<tlen> being set to the
450916
+expected length. In such case leakage would be minimal, it would
450916
+take attacker's ability to observe memory access pattern with byte
450916
+granularity as it occurs, post-factum timing analysis won't do.
450916
+
450916
 =head1 SEE ALSO
450916
 
450916
 L<RSA_public_encrypt(3)|RSA_public_encrypt(3)>,