Ciao,

since someone is asking about secure powm... I'm replying to an old message :-)

On 2018-12-13 07:05, ni...@lysator.liu.se wrote:
> t...@gmplib.org (Torbjörn Granlund) writes:

>> I am looking into doing karatsuba multiplication at evaluation points 0,
>> +1, and infinity.  We usually evaluate in -1 instead of +1.

> It will be very interesting to see what thresholds we get with that.

At first glance, I'd say around a dozen limbs higher than the non-sec thresholds.

You can try it by replacing the two files mpn/generic/toom2{2_mul,_sqr}.c with the attached ones, then running make check and make tune to see what happens.

Of course the two functions should not replace the current ones, but that was the easiest way to test the new files :-)

>> The main advantage of evaluating in +1 is that it makes it more
>> straightforward to avoid side channel leakage. (Currently, we completely
>> avoid all o(n^2) multiplication algorithms in side channel sensitive
>> contexts.)

> I don't think we should rule out using -1. It needs side-channel silent
> conditional negation, but that's not so hard. sec_invert.c implements

The attached code for squaring uses -1, i.e. it is strictly equivalent to
the non-sec version. I only replaced the strategy used to obtain the
absolute value of the difference, and the carry propagation. It also has
exactly the same memory usage.

> mpn_cnd_neg in terms of mpn_lshift and mpn_cnd_sub_n, one could also

I'm recycling your code here, in mpn_sec_absub.
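
Just for reference, the trick from sec_invert.c boils down to something
like the following minimal sketch (cnd_neg_sketch is a made-up name, not
the attached code; cnd would be the borrow/condition flag):

#include <gmp.h>

/* Conditionally negate {rp,n} (mod B^n) when cnd is 1, with
   data-independent control flow; tp is n limbs of scratch.  */
static void
cnd_neg_sketch (mp_limb_t cnd, mp_ptr rp, mp_size_t n, mp_ptr tp)
{
  mpn_lshift (tp, rp, n, 1);           /* tp = 2*rp (mod B^n)                */
  mpn_cnd_sub_n (cnd, rp, rp, tp, n);  /* rp - 2*rp = -rp, applied iff cnd   */
}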

> What's most efficient is not at all clear to me: negation costs O(n),
> but so does handling of the extra top bits one gets with evaluation in
> +1.

That's true only for the last recursion level, when you fall into _basecase...

I know that toom2_sqr kicks in later than toom22_mul. But in the _sec_ area, to compute the square of a number we use sqr_basecase if the number is small enough, and mul_basecase for larger integers... so a Karatsuba function may be more important for squaring than for the product...

Ĝis,
m
/* mpn_sec_toom2_sqr -- Square {ap,an}.

   Contributed to the GNU project by Torbjorn Granlund.
   Modified by Marco BODRATO to obtain a side-channel silent version.

Copyright 2006-2010, 2012, 2014, 2018, 2020 Free Software Foundation, Inc.

This code is free software; you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

You should have received a copy of the GNU Affero General Public
License.  If not, see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"

static mp_limb_t
mpn_sec_add (mp_ptr rp,
	     mp_srcptr ap, mp_size_t an,
	     mp_srcptr bp, mp_size_t bn,
	     mp_ptr tp)
{
  mp_limb_t cy;

  ASSERT (an >= bn);

  cy = mpn_add_n (rp, ap, bp, bn);
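  /* The branch below depends only on the operand sizes, which are
     public even in side-channel sensitive contexts.  */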
  if (an != bn)
    cy = mpn_sec_add_1 (rp + bn, ap + bn, an - bn, cy, tp);
  return cy;
}

static mp_size_t
mpn_sec_add_itch (mp_size_t an, mp_size_t bn)
{
  ASSERT (an >= bn);
  return mpn_sec_add_1_itch (an - bn);
}

static mp_limb_t
mpn_sec_sub (mp_ptr rp,
	     mp_srcptr ap, mp_size_t an,
	     mp_srcptr bp, mp_size_t bn,
	     mp_ptr tp)
{
  mp_limb_t bw;

  ASSERT (an >= bn);

  bw = mpn_sub_n (rp, ap, bp, bn);
  if (an != bn)
    bw = mpn_sec_sub_1 (rp + bn, ap + bn, an - bn, bw, tp);
  return bw;
}

static mp_size_t
mpn_sec_sub_itch (mp_size_t an, mp_size_t bn)
{
  ASSERT (an >= bn);
  return mpn_sec_sub_1_itch (an - bn);
}

static mp_limb_t
mpn_sec_absub (mp_ptr rp,
	       mp_srcptr ap, mp_size_t an,
	       mp_srcptr bp, mp_size_t bn,
	       mp_ptr tp)
{
  mp_limb_t bw;

  ASSERT (an >= bn);

  bw = mpn_sec_sub (rp, ap, an, bp, bn, tp);

  /* mpn_cnd_neg_ip (bw, rp, an, tp); */
#if 0
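  /* Variant: rp -= bw, then XOR with an all-ones mask when bw == 1,
     i.e. -x = ~(x - 1) computed without branches.  */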
  mpn_sec_sub_1 (rp, rp, an, bw, tp);
  MPN_FILL (tp, an, -bw);
  mpn_xor_n (rp, rp, tp, an);
#else
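  /* tp = 2*rp; conditionally subtracting it gives rp - 2*rp = -rp
     (two's complement) exactly when bw == 1.  */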
  mpn_lshift (tp, rp, an, 1);
  mpn_cnd_sub_n (bw, rp, rp, tp, an);
#endif

  return bw;
}

static mp_size_t
mpn_sec_absub_itch (mp_size_t an, mp_size_t bn)
{
  ASSERT (an >= mpn_sec_sub_itch (an, bn));
  ASSERT (an >= mpn_sec_sub_1_itch (an));
  return an;
}

/* Evaluate in: -1, 0, +inf

  <-s--><--n-->
   ____ ______
  |_a1_|___a0_|

  v0  =  a0     ^2  #   A(0)^2
  vm1 = (a0- a1)^2  #  A(-1)^2
  vinf=      a1 ^2  # A(inf)^2
*/

#if TUNE_PROGRAM_BUILD || WANT_FAT_BINARY
#define MAYBE_sqr_toom2   1
#else
#define MAYBE_sqr_toom2							\
  (SQR_TOOM3_THRESHOLD >= 2 * SQR_TOOM2_THRESHOLD)
#endif

#define TOOM2_SQR_REC(p, a, n, ws)					\
  do {									\
    if (! MAYBE_sqr_toom2						\
	|| BELOW_THRESHOLD (n, SQR_TOOM2_THRESHOLD))			\
      mpn_sqr_basecase (p, a, n);					\
    else								\
      mpn_toom2_sqr (p, a, n, ws);					\
  } while (0)

void
mpn_toom2_sqr (mp_ptr pp,
	       mp_srcptr ap, mp_size_t an,
	       mp_ptr scratch)
{
  const int __gmpn_cpuvec_initialized = 1;
  mp_size_t n, s;
  mp_limb_t cy, cy2;
  mp_ptr asm1;

#define a0  ap
#define a1  (ap + n)

  s = an >> 1;
  n = an - s;

  ASSERT (0 < s && s <= n && s >= n - 1);

  asm1 = pp;

  /* Compute asm1 = |a0 - a1| into the low n limbs of pp.  */
  ASSERT (mpn_toom2_sqr_itch(an) >= mpn_sec_absub_itch (n, s));
  mpn_sec_absub (asm1, a0, n, a1, s, scratch);

#define v0	pp				/* 2n */
#define vinf	(pp + 2 * n)			/* s+s */
#define vm1	scratch				/* 2n */
#define scratch_out	scratch + 2 * n

  /* vm1, 2n limbs */
  TOOM2_SQR_REC (vm1, asm1, n, scratch_out);

  /* vinf, s+s limbs */
  TOOM2_SQR_REC (vinf, a1, s, scratch_out);

  /* v0, 2n limbs */
  TOOM2_SQR_REC (v0, ap, n, scratch_out);
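
  /* Interpolation: the middle term v0 + vinf - vm1 equals 2*a0*a1,
     since vm1 = (a0 - a1)^2; it gets added in at limb offset n.  */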

  /* H(v0) + L(vinf) */
  cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

  /* L(v0) + H(v0) */
  cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

  /* L(vinf) + H(vinf) */
  ASSERT (mpn_toom2_sqr_itch(an) >= mpn_sec_add_itch (n, s + s - n) + 2 * n);
  cy += mpn_sec_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + s - n, scratch_out);

  cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

  ASSERT (cy + 1 <= 3);
  ASSERT (cy2 <= 2);

  cy += mpn_sec_add_1 (pp + 2 * n, pp + 2 * n, n, cy2, scratch);
  ASSERT (cy <= 3);
  ASSERT_NOCARRY (mpn_sec_add_1 (pp + 3 * n, pp + 3 * n, s + s - n, cy, scratch));
}

/* mpn_sec_toom22_mul. Multiply {ap,an} and {bp,bn} where an >= bn.
   Or more accurately, bn <= an < 2bn.

   Contributed to the GNU project by Marco Bodrato.
   This code is dedicated to Buvik.

Copyright 2014, 2020 Free Software Foundation, Inc.

This code is free software; you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

You should have received a copy of the GNU Affero General Public
License.  If not, see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"

static mp_limb_t
mpn_sec_add (mp_ptr rp,
	     mp_srcptr ap, mp_size_t an,
	     mp_srcptr bp, mp_size_t bn,
	     mp_ptr tp)
{
  mp_limb_t cy;

  ASSERT (an >= bn);

  cy = mpn_add_n (rp, ap, bp, bn);
  if (an != bn)
    cy = mpn_sec_add_1 (rp + bn, ap + bn, an - bn, cy, tp);
  return cy;
}

static mp_size_t
mpn_sec_add_itch (mp_size_t an, mp_size_t bn)
{
  ASSERT (an >= bn);
  return mpn_sec_add_1_itch (an - bn);
}

static mp_limb_t
mpn_sec_sub (mp_ptr rp,
	     mp_srcptr ap, mp_size_t an,
	     mp_srcptr bp, mp_size_t bn,
	     mp_ptr tp)
{
  mp_limb_t bw;

  ASSERT (an >= bn);

  bw = mpn_sub_n (rp, ap, bp, bn);
  if (an != bn)
    bw = mpn_sec_sub_1 (rp + bn, ap + bn, an - bn, bw, tp);
  return bw;
}

static mp_size_t
mpn_sec_sub_itch (mp_size_t an, mp_size_t bn)
{
  ASSERT (an >= bn);
  return mpn_sec_sub_1_itch (an - bn);
}

mp_size_t
mpn_sec_toom22_mul_itch (mp_size_t an, mp_size_t bn)
{
  return an * 2 + 6;
}
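
/* Evaluate in: 0, +1, +inf

  <-s--><--n-->
   ____ ______
  |_a1_|___a0_|
   |b1_|___b0_|
   <-t-><--n-->

  c0  =  a0     * b0       #   A(0)*B(0)
  cp1 = (a0+ a1)*(b0+ b1)  #  A(+1)*B(+1)
  cinf=      a1 *      b1  # A(inf)*B(inf)
*/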

void
mpn_sec_toom22_mul (mp_ptr rp,
	     mp_srcptr ap, mp_size_t an,
	     mp_srcptr bp, mp_size_t bn,
	     mp_ptr tp)
{
  mp_size_t n, s, t;

#define cp1p tp
#define c0p rp
#define cinfp (rp + 2 * n)

  ASSERT (an >= bn);

  s = an >> 1;
  n = an - s;
  t = bn - n;
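
  /* Degenerate case: the high parts a1 and b1 together span at most n
     limbs, so skip the split and use the quadratic basecase directly.  */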

  if (UNLIKELY (s + t <= n))
    {
      mpn_mul_basecase (rp, ap, an, bp, bn);
      return;
    }

  ASSERT (t > 0);
  ASSERT (mpn_sec_toom22_mul_itch (an, bn) >= 4*n + 4);

  {
    mp_ptr ap1p, bp1p;

    ap1p = tp + 2 * n + 2;
    bp1p = tp + 3 * n + 3;

    ASSERT (mpn_sec_add_itch (n, s) <= an + bn);
    ap1p [n] = mpn_sec_add (ap1p, ap, n, ap + n, s, rp);
    ASSERT (mpn_sec_add_itch (n, t) <= an + bn);
    bp1p [n] = mpn_sec_add (bp1p, bp, n, bp + n, t, rp);

    ASSERT (mpn_sec_mul_itch (n + 1, n + 1) <= an + bn);
    mpn_sec_mul (cp1p, ap1p, n + 1, bp1p, n + 1, rp);
  }

  {
    mp_limb_t cy;
    ASSERT (cp1p [2 * n + 1] == 0);
    cy = cp1p [2 * n];
    ASSERT (cy < 4);

    ASSERT (mpn_sec_mul_itch (n, n) <= mpn_sec_toom22_mul_itch (an, bn) - 2*n);
    mpn_sec_mul (c0p, ap, n, bp, n, tp + 2 * n);

    ASSERT (mpn_sec_mul_itch (s, t) <= mpn_sec_toom22_mul_itch (an, bn) - 2*n);
    mpn_sec_mul (cinfp, ap + n, s, bp + n, t, tp + 2 * n );
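
    /* Recomposition: the middle term cp1 - c0 - cinf equals
       a0*b1 + a1*b0; it is added in at limb offset n, with c0 at
       offset 0 and cinf at offset 2*n.  */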

    cy -= mpn_sub_n (cp1p, cp1p, c0p, 2 * n);
    ASSERT (cy < 4);

    ASSERT (mpn_sec_sub_itch (2*n, s+t) <= mpn_sec_toom22_mul_itch (an, bn) - 2*n);
    cy -= mpn_sec_sub (cp1p, cp1p, 2 * n, cinfp, s + t, tp + 2 * n);
    ASSERT (cy < 4);

#if HAVE_NATIVE_mpn_add_nc
    cy += mpn_add_nc (rp + 2 * n, cinfp, cp1p + n, n,
		      mpn_add_n (rp + n, c0p + n, cp1p, n));
#else
    ASSERT (mpn_sec_add_1_itch (n) <= mpn_sec_toom22_mul_itch (an, bn) - 2*n);
    cy += mpn_sec_add_1 (cinfp, cinfp, n,
			 mpn_add_n (rp + n, c0p + n, cp1p, n), tp + 2 * n);

    cy += mpn_add_n (rp + 2 * n, cinfp, cp1p + n, n);
#endif

    ASSERT (mpn_sec_add_1_itch (s+t-n) <= mpn_sec_toom22_mul_itch (an, bn));
    ASSERT_NOCARRY (mpn_sec_add_1 (rp + 3 * n, rp + 3 * n, s + t - n, cy, tp));
  }
}

void
mpn_toom22_mul (mp_ptr rp,
	     mp_srcptr ap, mp_size_t an,
	     mp_srcptr bp, mp_size_t bn,
	     mp_ptr tp)
{
  ASSERT (mpn_toom22_mul_itch (an, bn) >= mpn_sec_toom22_mul_itch (an, bn));
  mpn_sec_toom22_mul (rp, ap, an, bp, bn, tp);
}