Ensure canonical reduction of scalars before ECC point multiplication in the
bundled nettle code (ecc-gostdsa-verify.c, eddsa-hash.c, gostdsa-vko.c):
after ecc_mod_mul / m->mod, conditionally subtract the group order q so the
value passed to ecc->mul / ecc_mul_* is fully reduced.
diff -up ./doc/doxygen/Doxyfile.orig ./doc/doxygen/Doxyfile
diff -up ./lib/nettle/ecc/ecc-gostdsa-verify.c.orig ./lib/nettle/ecc/ecc-gostdsa-verify.c
--- ./lib/nettle/ecc/ecc-gostdsa-verify.c.orig	2020-06-03 15:05:27.000000000 +0200
+++ ./lib/nettle/ecc/ecc-gostdsa-verify.c	2021-04-01 11:24:42.820992320 +0200
@@ -63,6 +63,8 @@ ecc_gostdsa_verify (const struct ecc_cur
 		  const mp_limb_t *rp, const mp_limb_t *sp,
 		  mp_limb_t *scratch)
 {
+  mp_limb_t cy;
+
   /* Procedure, according to GOST R 34.10. q denotes the group
      order.
 
@@ -101,11 +103,17 @@ ecc_gostdsa_verify (const struct ecc_cur
   ecc->q.invert (&ecc->q, vp, hp, vp + 2*ecc->p.size);
 
   /* z1 = s / h, P1 = z1 * G */
-  ecc_mod_mul (&ecc->q, z1, sp, vp);
+  ecc_mod_mul (&ecc->q, z1 + ecc->q.size, sp, vp);
+  /* Ensure canonical reduction */
+  cy = mpn_sub_n (z1, z1 + ecc->q.size, ecc->q.m, ecc->q.size);
+  cnd_copy (cy, z1, z1 + ecc->q.size, ecc->q.size);
 
   /* z2 = - r / h, P2 = z2 * Y */
-  ecc_mod_mul (&ecc->q, z2, rp, vp);
-  mpn_sub_n (z2, ecc->q.m, z2, ecc->p.size);
+  mpn_sub_n (hp, ecc->q.m, rp, ecc->p.size);
+  ecc_mod_mul (&ecc->q, z2 + ecc->q.size, hp, vp);
+  /* Ensure canonical reduction */
+  cy = mpn_sub_n (z2, z2 + ecc->q.size, ecc->q.m, ecc->q.size);
+  cnd_copy (cy, z2, z2 + ecc->q.size, ecc->q.size);
 
    /* Total storage: 5*ecc->p.size + ecc->mul_itch */
   ecc->mul (ecc, P2, z2, pp, z2 + ecc->p.size);
diff -up ./lib/nettle/ecc/eddsa-hash.c.orig ./lib/nettle/ecc/eddsa-hash.c
--- ./lib/nettle/ecc/eddsa-hash.c.orig	2020-06-03 15:05:28.000000000 +0200
+++ ./lib/nettle/ecc/eddsa-hash.c	2021-04-01 11:24:42.821992314 +0200
@@ -43,13 +43,14 @@
 #include <nettle/ecc.h>
 #include "ecc-internal.h"
 
-/* Convert hash digest to integer, and reduce modulo q, to m->size
-   limbs. Needs space for 2*m->size + 1 at rp. */
+/* Convert hash digest to integer, and reduce canonically modulo q.
+   Needs space for 2*m->size + 1 at rp. */
 void
 _eddsa_hash (const struct ecc_modulo *m,
 	     mp_limb_t *rp, size_t digest_size, const uint8_t *digest)
 {
   mp_size_t nlimbs = (8*digest_size + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
+  mp_limb_t cy;
 
   mpn_set_base256_le (rp, nlimbs, digest, digest_size);
 
@@ -74,4 +75,8 @@ _eddsa_hash (const struct ecc_modulo *m,
       assert (hi == 0);
     }
   m->mod (m, rp);
+  /* Ensure canonical reduction. */
+  cy = mpn_sub_n (rp + m->size, rp, m->m, m->size);
+  cnd_copy (cy, rp + m->size, rp, m->size);
+  mpn_copyi (rp, rp + m->size, m->size);
 }
diff -up ./lib/nettle/ecc/gostdsa-vko.c.orig ./lib/nettle/ecc/gostdsa-vko.c
--- ./lib/nettle/ecc/gostdsa-vko.c.orig	2020-06-03 15:05:28.000000000 +0200
+++ ./lib/nettle/ecc/gostdsa-vko.c	2021-04-01 11:24:42.821992314 +0200
@@ -64,6 +64,7 @@ gostdsa_vko (const struct ecc_scalar *pr
   mp_size_t size = ecc->p.size;
   mp_size_t itch = 4*size + ecc->mul_itch;
   mp_limb_t *scratch;
+  mp_limb_t cy;
 
   if (itch < 5*size + ecc->h_to_a_itch)
       itch = 5*size + ecc->h_to_a_itch;
@@ -87,7 +88,11 @@ gostdsa_vko (const struct ecc_scalar *pr
   if (mpn_zero_p (UKM, size))
     UKM[0] = 1;
 
-  ecc_mod_mul (&ecc->q, TEMP, priv->p, UKM); /* TEMP = UKM * priv */
+  ecc_mod_mul (&ecc->q, TEMP + ecc->q.size, priv->p, UKM); /* TEMP = UKM * priv */
+  /* Ensure canonical reduction */
+  cy = mpn_sub_n (TEMP, TEMP + ecc->q.size, ecc->q.m, ecc->q.size);
+  cnd_copy (cy, TEMP, TEMP + ecc->q.size, ecc->q.size);
+
   ecc->mul (ecc, XYZ, TEMP, pub->p, scratch + 4*size); /* XYZ = UKM * priv * pub */
   ecc->h_to_a (ecc, 0, TEMP, XYZ, scratch + 5*size); /* TEMP = XYZ */
   mpn_get_base256_le (out, bsize, TEMP, size);