This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 3f0cc5f09c8ced3abe2452f6feb0b901ec7aceeb
Author: makejian <[email protected]>
AuthorDate: Thu Aug 7 14:04:39 2025 +0800

    crypto: export algorithm about ecc
    
    Transplanting the ECC algorithm from https://github.com/jestan/easy-ecc
    
    which is BSD licensed
    
    Signed-off-by: makejian <[email protected]>
---
 crypto/CMakeLists.txt |    1 +
 crypto/Makefile       |    1 +
 crypto/ecc.c          | 1731 +++++++++++++++++++++++++++++++++++++++++++++++++
 include/crypto/ecc.h  |  143 ++++
 4 files changed, 1876 insertions(+)

diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt
index dbe5098fbac..ea47feefc10 100644
--- a/crypto/CMakeLists.txt
+++ b/crypto/CMakeLists.txt
@@ -45,6 +45,7 @@ if(CONFIG_CRYPTO)
   list(APPEND SRCS chachapoly.c)
   list(APPEND SRCS ecb_enc.c)
   list(APPEND SRCS ecb3_enc.c)
+  list(APPEND SRCS ecc.c)
   list(APPEND SRCS set_key.c)
   list(APPEND SRCS md5.c)
   list(APPEND SRCS poly1305.c)
diff --git a/crypto/Makefile b/crypto/Makefile
index f315dd81f58..3c879e6c504 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -49,6 +49,7 @@ CRYPTO_CSRCS += cast.c
 CRYPTO_CSRCS += chachapoly.c
 CRYPTO_CSRCS += ecb_enc.c
 CRYPTO_CSRCS += ecb3_enc.c
+CRYPTO_CSRCS += ecc.c
 CRYPTO_CSRCS += set_key.c
 CRYPTO_CSRCS += md5.c
 CRYPTO_CSRCS += poly1305.c
diff --git a/crypto/ecc.c b/crypto/ecc.c
new file mode 100644
index 00000000000..0a94621aa6f
--- /dev/null
+++ b/crypto/ecc.c
@@ -0,0 +1,1731 @@
+/****************************************************************************
+ * crypto/ecc.c
+ *
+ * Copyright (c) 2013, Kenneth MacKay All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL,
 + * INCIDENTAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include <crypto/ecc.h>
+#include <nuttx/macro.h>
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define NUM_ECC_DIGITS (ECC_BYTES / 8)
+#define MAX_TRIES 16
+
+#define EVEN(vli) (!(vli[0] & 1))
+
+/* The curve constants below are arrays of 64-bit words in little-endian
+ * word order (index 0 holds the least significant word): p is the field
+ * prime, b the curve coefficient, g the base point and n the group order.
+ * The suffix (_16/_24/_32/_48) is ECC_BYTES for the selected curve.
+ */
+
+#define curve_p_16 { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFDFFFFFFFF }
+#define curve_p_24 { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull, \
+                     0xFFFFFFFFFFFFFFFFull }
+#define curve_p_32 { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull, \
+                     0x0000000000000000ull, 0xFFFFFFFF00000001ull }
+#define curve_p_48 { 0x00000000FFFFFFFF, 0xFFFFFFFF00000000, \
+                     0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, \
+                     0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF }
+
+#define curve_b_16 { 0xD824993C2CEE5ED3, 0xE87579C11079F43D }
+#define curve_b_24 { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull, \
+                     0x64210519E59C80E7ull }
+#define curve_b_32 { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull, \
+                     0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull }
+#define curve_b_48 { 0x2A85C8EDD3EC2AEF, 0xC656398D8A2ED19D, \
+                     0x0314088F5013875A, 0x181D9C6EFE814112, \
+                     0x988E056BE3F82D19, 0xB3312FA7E23EE7E4 }
+
+#define curve_g_16 {                          \
+  { 0x0C28607CA52C5B86, 0x161FF7528B899B2D }, \
+  { 0xC02DA292DDED7A83, 0xCF5AC8395BAFEB13 }}
+
+#define curve_g_24 {                                                       \
+  { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull, 0x188DA80EB03090F6ull }, \
+  { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull, 0x07192B95FFC8DA78ull }}
+
+#define curve_g_32 {                                \
+  { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,   \
+    0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull }, \
+  { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,   \
+    0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull }}
+
+#define curve_g_48 {                                             \
+  { 0x3A545E3872760AB7, 0x5502F25DBF55296C, 0x59F741E082542A38,  \
+    0x6E1D3B628BA79B98, 0x8EB1C71EF320AD74, 0xAA87CA22BE8B0537}, \
+  { 0x7A431D7C90EA0E5F, 0x0A60B1CE1D7E819D, 0xE9DA3113B5F0B8C0,  \
+    0xF8F41DBD289A147C, 0x5D9E98BF9292DC29, 0x3617DE4A96262C6F }}
+
+#define curve_n_16 { 0x75A30D1B9038A115, 0xFFFFFFFE00000000 }
+#define curve_n_24 { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull, \
+                     0xFFFFFFFFFFFFFFFFull }
+#define curve_n_32 { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull, \
+                     0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull }
+#define curve_n_48 { 0xECEC196ACCC52973, 0x581A0DB248B0A77A, \
+                     0xC7634D81F4372DDF, 0xFFFFFFFFFFFFFFFF, \
+                     0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF }
+
+/* Use the compiler's native 128-bit integer type when available. */
+
+#if defined(__SIZEOF_INT128__)
+#  define SUPPORTS_INT128 1
+#else
+#  define SUPPORTS_INT128 0
+#endif
+
+/****************************************************************************
+ * Private Type Definitions
+ ****************************************************************************/
+
+typedef unsigned int uint;
+
+#if SUPPORTS_INT128
+typedef unsigned __int128 uint128_t;
+#else
+/* Portable 128-bit value carried as two 64-bit halves when the compiler
+ * has no native __int128.
+ */
+
+typedef struct
+{
+  uint64_t m_low;
+  uint64_t m_high;
+} uint128_t;
+#endif
+
+/* Affine curve point; each coordinate is NUM_ECC_DIGITS 64-bit words,
+ * little-endian word order.
+ */
+
+typedef struct
+{
+  uint64_t x[NUM_ECC_DIGITS];
+  uint64_t y[NUM_ECC_DIGITS];
+} eccpoint_t;
+
+/****************************************************************************
+ * Private Data
+ ****************************************************************************/
+
+/* Domain parameters for the curve selected by ECC_CURVE (see ecc.h). */
+
+static uint64_t g_curve_p[NUM_ECC_DIGITS] = CONCATENATE(curve_p_, ECC_CURVE);
+static uint64_t g_curve_b[NUM_ECC_DIGITS] = CONCATENATE(curve_b_, ECC_CURVE);
+static uint64_t g_curve_n[NUM_ECC_DIGITS] = CONCATENATE(curve_n_, ECC_CURVE);
+static eccpoint_t g_curve_g = CONCATENATE(curve_g_, ECC_CURVE);
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/* Sets every 64-bit word of vli to zero. */
+
+static void vli_clear(FAR uint64_t *vli)
+{
+  uint i;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      vli[i] = 0;
+    }
+}
+
+/* Returns 1 if vli == 0, 0 otherwise.  Exits early on the first non-zero
+ * word (not constant-time).
+ */
+
+static int vli_iszero(FAR uint64_t *vli)
+{
+  uint i;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      if (vli[i])
+        {
+          return 0;
+        }
+    }
+
+  return 1;
+}
+
+/* Returns nonzero if bit 'bit' of vli is set (bit 0 is the LSB of
+ * vli[0]).  The caller must ensure bit < 64 * NUM_ECC_DIGITS.
+ */
+
+static uint64_t vli_testbit(FAR uint64_t *vli, uint bit)
+{
+  return vli[bit / 64] & ((uint64_t)1 << (bit % 64));
+}
+
+/* Counts the number of 64-bit "digits" in vli, i.e. the index of the
+ * most significant non-zero word plus one (0 if vli == 0).
+ */
+
+static uint vli_numdigits(FAR uint64_t *vli)
+{
+  int i;
+
+  /* Search from the end until we find a non-zero digit.
+   * We do it in reverse because we expect that most digits
+   * will be nonzero.
+   */
+
+  for (i = NUM_ECC_DIGITS - 1; i >= 0 && vli[i] == 0; --i)
+    {
+    }
+
+  return i + 1;
+}
+
+/* Counts the number of bits required to represent vli (0 if vli == 0). */
+
+static uint vli_numbits(FAR uint64_t *vli)
+{
+  uint64_t l_digit;
+  uint l_numdigits = vli_numdigits(vli);
+  uint i;
+
+  if (l_numdigits == 0)
+    {
+      return 0;
+    }
+
+  /* Count the bits of the top non-zero word by shifting it away. */
+
+  l_digit = vli[l_numdigits - 1];
+  for (i = 0; l_digit; ++i)
+    {
+      l_digit >>= 1;
+    }
+
+  return (l_numdigits - 1) * 64 + i;
+}
+
+/* Sets dest = src (copies NUM_ECC_DIGITS words). */
+
+static void vli_set(FAR uint64_t *dest, FAR uint64_t *src)
+{
+  uint i;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      dest[i] = src[i];
+    }
+}
+
+/* Returns the sign of left - right: 1, -1 or 0.  Compares from the most
+ * significant word downward and returns at the first difference (not
+ * constant-time).
+ */
+
+static int vli_cmp(FAR uint64_t *left, FAR uint64_t *right)
+{
+  int i;
+
+  for (i = NUM_ECC_DIGITS - 1; i >= 0; --i)
+    {
+      if (left[i] > right[i])
+        {
+          return 1;
+        }
+      else if (left[i] < right[i])
+        {
+          return -1;
+        }
+    }
+
+  return 0;
+}
+
+/* Computes result = in << shift, returning the bits shifted out of the
+ * top word as the carry.  Can modify in place (if result == in).
+ * Precondition: 0 < shift < 64 (shift == 0 would make the "64 - shift"
+ * expression below an invalid 64-bit shift count).
+ */
+
+static uint64_t vli_lshift(FAR uint64_t *result, FAR uint64_t *in,
+                           uint shift)
+{
+  uint64_t l_carry = 0;
+  uint64_t l_temp;
+  uint i;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      l_temp = in[i];
+      result[i] = (l_temp << shift) | l_carry;
+      l_carry = l_temp >> (64 - shift);
+    }
+
+  return l_carry;
+}
+
+/* Computes vli = vli >> 1 in place; the bit shifted out of each word is
+ * carried into the top of the next lower word.
+ */
+
+static void vli_rshift1(FAR uint64_t *vli)
+{
+  FAR uint64_t *l_end = vli;
+  uint64_t l_carry = 0;
+  uint64_t l_temp;
+
+  /* Walk from the most significant word down to vli[0]. */
+
+  vli += NUM_ECC_DIGITS;
+  while (vli-- > l_end)
+    {
+      l_temp = *vli;
+      *vli = (l_temp >> 1) | l_carry;
+      l_carry = l_temp << 63;
+    }
+}
+
+/* Computes result = left + right, returning carry. Can modify in place. */
+
+static uint64_t vli_add(FAR uint64_t *result, FAR uint64_t *left,
+                        FAR uint64_t *right)
+{
+  uint64_t l_carry = 0;
+  uint64_t l_sum;
+  uint i;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      l_sum = left[i] + right[i] + l_carry;
+
+      /* If l_sum == left[i] then right[i] + l_carry wrapped to exactly 0
+       * mod 2^64, which only happens for right[i] == 0 with carry 0 or
+       * right[i] == ~0 with carry 1 -- in both cases l_carry is already
+       * correct, so it is only updated when the sum differs.
+       */
+
+      if (l_sum != left[i])
+        {
+          l_carry = (l_sum < left[i]);
+        }
+
+      result[i] = l_sum;
+    }
+
+  return l_carry;
+}
+
+/* Computes result = left - right, returning borrow. Can modify in place. */
+
+static uint64_t vli_sub(FAR uint64_t *result, FAR uint64_t *left,
+                        FAR uint64_t *right)
+{
+  uint64_t l_borrow = 0;
+  uint64_t l_diff;
+  uint i;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      l_diff = left[i] - right[i] - l_borrow;
+
+      /* As in vli_add: when l_diff == left[i] the subtrahend was 0 mod
+       * 2^64 and the existing borrow is already correct.
+       */
+
+      if (l_diff != left[i])
+        {
+          l_borrow = (l_diff > left[i]);
+        }
+
+      result[i] = l_diff;
+    }
+
+  return l_borrow;
+}
+
+#if SUPPORTS_INT128
+
+/* Computes result = left * right.  result must hold 2 * NUM_ECC_DIGITS
+ * words.  Column-wise schoolbook multiplication: (r2:r01) is a 192-bit
+ * accumulator for each output column k.
+ */
+
+static void vli_mult(FAR uint64_t *result, FAR uint64_t *left,
+                     FAR uint64_t *right)
+{
+  uint128_t l_product;
+  uint128_t r01 = 0;
+  uint64_t r2 = 0;
+  uint l_min;
+  uint i;
+  uint k;
+
+  /* Compute each digit of result in sequence, maintaining the carries. */
+
+  for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k)
+    {
+      l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
+      for (i = l_min; i <= k && i < NUM_ECC_DIGITS; ++i)
+        {
+          l_product = (uint128_t)left[i] * right[k - i];
+          r01 += l_product;
+          r2 += (r01 < l_product);
+        }
+
+      result[k] = (uint64_t)r01;
+      r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
+      r2 = 0;
+    }
+
+  result[NUM_ECC_DIGITS * 2 - 1] = (uint64_t)r01;
+}
+
+/* Computes result = left^2.  Like vli_mult but exploits symmetry: each
+ * cross term left[i]*left[k-i] with i < k-i is doubled instead of being
+ * computed twice; the bit lost by the doubling is added into r2.
+ */
+
+static void vli_square(FAR uint64_t *result, FAR uint64_t *left)
+{
+  uint128_t l_product;
+  uint128_t r01 = 0;
+  uint64_t r2 = 0;
+  uint l_min;
+  uint i;
+  uint k;
+
+  for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k)
+    {
+      l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
+      for (i = l_min; i <= k && i <= k - i; ++i)
+        {
+          l_product = (uint128_t)left[i] * left[k - i];
+          if (i < k - i)
+            {
+              r2 += l_product >> 127;
+              l_product *= 2;
+            }
+
+          r01 += l_product;
+          r2 += (r01 < l_product);
+        }
+
+      result[k] = (uint64_t)r01;
+      r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
+      r2 = 0;
+    }
+
+  result[NUM_ECC_DIGITS * 2 - 1] = (uint64_t)r01;
+}
+
+#else /* #if SUPPORTS_INT128 */
+
+/* 64 x 64 -> 128 bit multiply built from four 32-bit partial products,
+ * for compilers without __int128.
+ */
+
+static uint128_t mul_64_64(uint64_t left, uint64_t right)
+{
+  uint128_t l_result;
+  uint64_t a0 = left & 0xffffffffull;
+  uint64_t a1 = left >> 32;
+  uint64_t b0 = right & 0xffffffffull;
+  uint64_t b1 = right >> 32;
+  uint64_t m0 = a0 * b0;
+  uint64_t m1 = a0 * b1;
+  uint64_t m2 = a1 * b0;
+  uint64_t m3 = a1 * b1;
+
+  m2 += (m0 >> 32);
+  m2 += m1;
+
+  /* Overflow of the middle accumulator carries into bit 32 of m3. */
+
+  if (m2 < m1)
+    {
+      m3 += 0x100000000ull;
+    }
+
+  l_result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
+  l_result.m_high = m3 + (m2 >> 32);
+
+  return l_result;
+}
+
+/* 128-bit addition a + b for the struct-based uint128_t; the carry out
+ * of the low half is folded into the high half (overflow of the high
+ * half is discarded).
+ */
+
+static uint128_t add_128_128(uint128_t a, uint128_t b)
+{
+  uint128_t l_result;
+
+  l_result.m_low = a.m_low + b.m_low;
+  l_result.m_high = a.m_high + b.m_high + (l_result.m_low < a.m_low);
+  return l_result;
+}
+
+/* Computes result = left * right (2 * NUM_ECC_DIGITS output words) using
+ * the struct-based 128-bit helpers; same column-wise algorithm as the
+ * __int128 variant above.
+ */
+
+static void vli_mult(FAR uint64_t *result, FAR uint64_t *left,
+                     FAR uint64_t *right)
+{
+  uint64_t r2 = 0;
+  uint i;
+  uint k;
+  uint128_t l_product;
+  uint128_t r01 =
+    {
+      0, 0
+    };
+
+  /* Compute each digit of result in sequence, maintaining the carries. */
+
+  for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k)
+    {
+      uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
+      for (i = l_min; i <= k && i < NUM_ECC_DIGITS; ++i)
+        {
+          l_product = mul_64_64(left[i], right[k - i]);
+          r01 = add_128_128(r01, l_product);
+          r2 += (r01.m_high < l_product.m_high);
+        }
+
+      result[k] = r01.m_low;
+      r01.m_low = r01.m_high;
+      r01.m_high = r2;
+      r2 = 0;
+    }
+
+  result[NUM_ECC_DIGITS * 2  - 1] = r01.m_low;
+}
+
+/* Computes result = left^2 using the struct-based 128-bit helpers;
+ * cross terms with i < k - i are doubled (the shifted-out top bit is
+ * accumulated in r2), mirroring the __int128 variant above.
+ */
+
+static void vli_square(FAR uint64_t *result, FAR uint64_t *left)
+{
+  uint64_t r2 = 0;
+  uint l_min;
+  uint i;
+  uint k;
+  uint128_t l_product;
+  uint128_t r01 =
+    {
+      0, 0
+    };
+
+  for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k)
+    {
+      l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
+      for (i = l_min; i <= k && i <= k - i; ++i)
+        {
+          l_product = mul_64_64(left[i], left[k - i]);
+          if (i < k - i)
+            {
+              r2 += l_product.m_high >> 63;
+              l_product.m_high = (l_product.m_high << 1) |
+                                 (l_product.m_low >> 63);
+              l_product.m_low <<= 1;
+            }
+
+          r01 = add_128_128(r01, l_product);
+          r2 += (r01.m_high < l_product.m_high);
+        }
+
+      result[k] = r01.m_low;
+      r01.m_low = r01.m_high;
+      r01.m_high = r2;
+      r2 = 0;
+    }
+
+  result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low;
+}
+
+#endif /* SUPPORTS_INT128 */
+
+/* Computes result = (left + right) % mod.
+ * Assumes that left < mod and right < mod, result != mod, so at most one
+ * subtraction of mod is ever needed.
+ */
+
+static void vli_modadd(FAR uint64_t *result, FAR uint64_t *left,
+                       FAR uint64_t *right, FAR uint64_t *mod)
+{
+  uint64_t l_carry = vli_add(result, left, right);
+
+  if (l_carry || vli_cmp(result, mod) >= 0)
+    {
+      /* result > mod (result = mod + remainder),
+       * so subtract mod to get remainder.
+       */
+
+      vli_sub(result, result, mod);
+    }
+}
+
+/* Computes result = (left - right) % mod.
+ * Assumes that left < mod and right < mod, result != mod, so at most one
+ * addition of mod is ever needed.
+ */
+
+static void vli_modsub(FAR uint64_t *result, FAR uint64_t *left,
+                       FAR uint64_t *right, FAR uint64_t *mod)
+{
+  uint64_t l_borrow = vli_sub(result, left, right);
+
+  if (l_borrow)
+    {
+      /* In this case, result == -diff == (max int) - diff.
+       * Since -x % d == d - x, we can get the correct result from
+       * result + mod (with overflow).
+       */
+
+      vli_add(result, result, mod);
+    }
+}
+
+/* NOTE(review): these #if branches compare ECC_CURVE against macro names;
+ * this assumes ecc.h defines secp128r1/secp192r1/secp256r1/secp384r1 as
+ * distinct integer macros -- confirm, since in #if an undefined
+ * identifier silently evaluates to 0.
+ */
+
+#if ECC_CURVE == secp128r1
+
+/* Computes result = product % g_curve_p for secp128r1, where product is
+ * 2 * NUM_ECC_DIGITS words.  Fast reduction by folding the high half
+ * back into the low half using the special form of the prime.
+ * See algorithm 5 and 6 from
+ * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
+ */
+
+static void vli_mmod_fast(FAR uint64_t *result,
+                          FAR uint64_t *product)
+{
+  uint64_t l_tmp[NUM_ECC_DIGITS];
+  int l_carry;
+
+  vli_set(result, product);
+
+  l_tmp[0] = product[2];
+  l_tmp[1] = (product[3] & 0x1ffffffffull) | (product[2] << 33);
+  l_carry = vli_add(result, result, l_tmp);
+
+  l_tmp[0] = (product[2] >> 31) | (product[3] << 33);
+  l_tmp[1] = (product[3] >> 31) |
+             ((product[2] & 0xffffffff80000000ull) << 2);
+  l_carry += vli_add(result, result, l_tmp);
+
+  l_tmp[0] = (product[2] >> 62) | (product[3] << 2);
+  l_tmp[1] = (product[3] >> 62) |
+             ((product[2] & 0xc000000000000000ull) >> 29) |
+             (product[3] << 35);
+  l_carry += vli_add(result, result, l_tmp);
+
+  l_tmp[0] = (product[3] >> 29);
+  l_tmp[1] = ((product[3] & 0xffffffffe0000000ull) << 4);
+  l_carry += vli_add(result, result, l_tmp);
+
+  l_tmp[0] = (product[3] >> 60);
+  l_tmp[1] = (product[3] & 0xfffffffe00000000ull);
+  l_carry += vli_add(result, result, l_tmp);
+
+  l_tmp[0] = 0;
+  l_tmp[1] = ((product[3] & 0xf000000000000000ull) >> 27);
+  l_carry += vli_add(result, result, l_tmp);
+
+  /* Final conditional subtractions bring the result below g_curve_p. */
+
+  while (l_carry || vli_cmp(g_curve_p, result) != 1)
+    {
+      l_carry -= vli_sub(result, result, g_curve_p);
+    }
+}
+
+#elif ECC_CURVE == secp192r1
+
+/* Computes result = product % g_curve_p for secp192r1, where product is
+ * 2 * NUM_ECC_DIGITS words.
+ * See algorithm 5 and 6 from
+ * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
+ */
+
+static void vli_mmod_fast(FAR uint64_t *result,
+                          FAR uint64_t *product)
+{
+  uint64_t l_tmp[NUM_ECC_DIGITS];
+  int l_carry;
+
+  vli_set(result, product);
+
+  /* Fold the upper three words of product back into the lower three. */
+
+  vli_set(l_tmp, &product[3]);
+  l_carry = vli_add(result, result, l_tmp);
+
+  l_tmp[0] = 0;
+  l_tmp[1] = product[3];
+  l_tmp[2] = product[4];
+  l_carry += vli_add(result, result, l_tmp);
+
+  l_tmp[0] = l_tmp[1] = product[5];
+  l_tmp[2] = 0;
+  l_carry += vli_add(result, result, l_tmp);
+
+  while (l_carry || vli_cmp(g_curve_p, result) != 1)
+    {
+      l_carry -= vli_sub(result, result, g_curve_p);
+    }
+}
+
+#elif ECC_CURVE == secp256r1
+
+/* Computes result = product % g_curve_p for secp256r1, where product is
+ * 2 * NUM_ECC_DIGITS words.  Implements the s1..s4 additions and d1..d4
+ * subtractions of the NIST fast-reduction routine
+ * from http://www.nsa.gov/ia/_files/nist-routines.pdf
+ */
+
+static void vli_mmod_fast(FAR uint64_t *result,
+                          FAR uint64_t *product)
+{
+  uint64_t l_tmp[NUM_ECC_DIGITS];
+  int l_carry;
+
+  /* t */
+
+  vli_set(result, product);
+
+  /* s1 */
+
+  l_tmp[0] = 0;
+  l_tmp[1] = product[5] & 0xffffffff00000000ull;
+  l_tmp[2] = product[6];
+  l_tmp[3] = product[7];
+  l_carry = vli_lshift(l_tmp, l_tmp, 1);
+  l_carry += vli_add(result, result, l_tmp);
+
+  /* s2 */
+
+  l_tmp[1] = product[6] << 32;
+  l_tmp[2] = (product[6] >> 32) | (product[7] << 32);
+  l_tmp[3] = product[7] >> 32;
+  l_carry += vli_lshift(l_tmp, l_tmp, 1);
+  l_carry += vli_add(result, result, l_tmp);
+
+  /* s3 */
+
+  l_tmp[0] = product[4];
+  l_tmp[1] = product[5] & 0xffffffff;
+  l_tmp[2] = 0;
+  l_tmp[3] = product[7];
+  l_carry += vli_add(result, result, l_tmp);
+
+  /* s4 */
+
+  l_tmp[0] = (product[4] >> 32) | (product[5] << 32);
+  l_tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
+  l_tmp[2] = product[7];
+  l_tmp[3] = (product[6] >> 32) | (product[4] << 32);
+  l_carry += vli_add(result, result, l_tmp);
+
+  /* d1 */
+
+  l_tmp[0] = (product[5] >> 32) | (product[6] << 32);
+  l_tmp[1] = (product[6] >> 32);
+  l_tmp[2] = 0;
+  l_tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
+  l_carry -= vli_sub(result, result, l_tmp);
+
+  /* d2 */
+
+  l_tmp[0] = product[6];
+  l_tmp[1] = product[7];
+  l_tmp[2] = 0;
+  l_tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
+  l_carry -= vli_sub(result, result, l_tmp);
+
+  /* d3 */
+
+  l_tmp[0] = (product[6] >> 32) | (product[7] << 32);
+  l_tmp[1] = (product[7] >> 32) | (product[4] << 32);
+  l_tmp[2] = (product[4] >> 32) | (product[5] << 32);
+  l_tmp[3] = (product[6] << 32);
+  l_carry -= vli_sub(result, result, l_tmp);
+
+  /* d4 */
+
+  l_tmp[0] = product[7];
+  l_tmp[1] = product[4] & 0xffffffff00000000ull;
+  l_tmp[2] = product[5];
+  l_tmp[3] = product[6] & 0xffffffff00000000ull;
+  l_carry -= vli_sub(result, result, l_tmp);
+
+  /* l_carry may now be negative (net subtraction) or positive (net
+   * addition); correct in whichever direction is needed.
+   */
+
+  if (l_carry < 0)
+    {
+      do
+        {
+          l_carry += vli_add(result, result, g_curve_p);
+        }
+      while (l_carry < 0);
+    }
+  else
+    {
+      while (l_carry || vli_cmp(g_curve_p, result) != 1)
+        {
+          l_carry -= vli_sub(result, result, g_curve_p);
+        }
+    }
+}
+
+#elif ECC_CURVE == secp384r1
+
+/* Multiplies right (NUM_ECC_DIGITS words) by the secp384r1 reduction
+ * constant omega = 2^128 + 2^96 - 2^32 + 1, writing the widened product
+ * into result (which must hold NUM_ECC_DIGITS + 3 words).  Used by
+ * vli_mmod_fast below.
+ *
+ * This repairs the statement that was corrupted by e-mail line wrapping
+ * (the "l_tmp);" continuation had lost its diff prefix and split the
+ * vli_add() call mid-argument); the statement is now wrapped within the
+ * line-length limit.
+ */
+
+static void omega_mult(uint64_t *result, uint64_t *right)
+{
+  uint64_t l_tmp[NUM_ECC_DIGITS];
+  uint64_t l_carry;
+  uint64_t l_diff;
+  uint i;
+
+  /* Multiply by (2^128 + 2^96 - 2^32 + 1). */
+
+  vli_set(result, right); /* 1 */
+  l_carry = vli_lshift(l_tmp, right, 32);
+
+  /* 2^96 + 1 */
+
+  result[1 + NUM_ECC_DIGITS] = l_carry +
+                               vli_add(result + 1, result + 1, l_tmp);
+
+  /* 2^128 + 2^96 + 1 */
+
+  result[2 + NUM_ECC_DIGITS] = vli_add(result + 2, result + 2, right);
+
+  /* 2^128 + 2^96 - 2^32 + 1 */
+
+  l_carry += vli_sub(result, result, l_tmp);
+  l_diff = result[NUM_ECC_DIGITS] - l_carry;
+  if (l_diff > result[NUM_ECC_DIGITS])
+    {
+      /* Propagate borrow if necessary. */
+
+      for (i = 1 + NUM_ECC_DIGITS; ; ++i)
+        {
+          --result[i];
+          if (result[i] != (uint64_t)-1)
+            {
+              break;
+            }
+        }
+    }
+
+  result[NUM_ECC_DIGITS] = l_diff;
+}
+
+/* Computes result = product % g_curve_p for secp384r1, where product is
+ * 2 * NUM_ECC_DIGITS words.  Repeatedly folds the high half c1 into the
+ * low half c0 via omega_mult() until the high half is zero, then reduces
+ * below the prime by subtraction.
+ * see PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs"
+ * section "Curve-Specific Optimizations"
+ */
+
+static void vli_mmod_fast(uint64_t *result, uint64_t *product)
+{
+  uint64_t l_tmp[2 * NUM_ECC_DIGITS];
+  uint64_t l_carry;
+  uint64_t l_sum;
+  uint i;
+
+  while (!vli_iszero(product + NUM_ECC_DIGITS)) /* While c1 != 0 */
+    {
+      l_carry = 0;
+
+      vli_clear(l_tmp);
+      vli_clear(l_tmp + NUM_ECC_DIGITS);
+      omega_mult(l_tmp, product + NUM_ECC_DIGITS); /* tmp = w * c1 */
+
+      /* p = c0 */
+
+      vli_clear(product + NUM_ECC_DIGITS);
+
+      /* (c1, c0) = c0 + w * c1 -- the sum spans NUM_ECC_DIGITS + 3 words
+       * because omega_mult widens its result by three words.
+       */
+
+      for (i = 0; i < NUM_ECC_DIGITS + 3; ++i)
+        {
+          l_sum = product[i] + l_tmp[i] + l_carry;
+          if (l_sum != product[i])
+            {
+              l_carry = (l_sum < product[i]);
+            }
+
+          product[i] = l_sum;
+        }
+    }
+
+  while (vli_cmp(product, g_curve_p) > 0)
+    {
+      vli_sub(product, product, g_curve_p);
+    }
+
+  vli_set(result, product);
+}
+
+#endif
+
+/* Computes result = (left * right) % g_curve_p using the curve-specific
+ * fast reduction.  Uses a double-width stack temporary for the product.
+ */
+
+static void vli_modmult_fast(FAR uint64_t *result, FAR uint64_t *left,
+                             FAR uint64_t *right)
+{
+  uint64_t l_product[2 * NUM_ECC_DIGITS];
+
+  vli_mult(l_product, left, right);
+  vli_mmod_fast(result, l_product);
+}
+
+/* Computes result = left^2 % g_curve_p using the curve-specific fast
+ * reduction.  Uses a double-width stack temporary for the square.
+ */
+
+static void vli_modsquare_fast(FAR uint64_t *result,
+                               FAR uint64_t *left)
+{
+  uint64_t l_product[2 * NUM_ECC_DIGITS];
+
+  vli_square(l_product, left);
+  vli_mmod_fast(result, l_product);
+}
+
+/* Computes result = (1 / input) % mod using the binary extended
+ * Euclidean algorithm.  All VLIs are the same size.  If input == 0 the
+ * result is set to 0.  Invariants: u corresponds to a and v to b; each
+ * halving of u or v first adds mod if the value is odd so the division
+ * by 2 is exact modulo mod.
+ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
+ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
+ */
+
+static void vli_modinv(FAR uint64_t *result, FAR uint64_t *input,
+                       FAR uint64_t *mod)
+{
+  uint64_t a[NUM_ECC_DIGITS];
+  uint64_t b[NUM_ECC_DIGITS];
+  uint64_t u[NUM_ECC_DIGITS];
+  uint64_t v[NUM_ECC_DIGITS];
+  uint64_t l_carry;
+  int l_cmpresult;
+
+  if (vli_iszero(input))
+    {
+      vli_clear(result);
+      return;
+    }
+
+  vli_set(a, input);
+  vli_set(b, mod);
+  vli_clear(u);
+  u[0] = 1;
+  vli_clear(v);
+
+  while ((l_cmpresult = vli_cmp(a, b)) != 0)
+    {
+      l_carry = 0;
+      if (EVEN(a))
+        {
+          /* a even: a /= 2, u /= 2 (mod-corrected). */
+
+          vli_rshift1(a);
+          if (!EVEN(u))
+            {
+              l_carry = vli_add(u, u, mod);
+            }
+
+          vli_rshift1(u);
+          if (l_carry)
+            {
+              /* Restore the bit shifted out by the carry of u + mod. */
+
+              u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+            }
+        }
+      else if (EVEN(b))
+        {
+          /* b even: b /= 2, v /= 2 (mod-corrected). */
+
+          vli_rshift1(b);
+          if (!EVEN(v))
+            {
+              l_carry = vli_add(v, v, mod);
+            }
+
+          vli_rshift1(v);
+          if (l_carry)
+            {
+              v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+            }
+        }
+      else if (l_cmpresult > 0)
+        {
+          /* Both odd, a > b: a = (a - b) / 2, u = (u - v) / 2 mod. */
+
+          vli_sub(a, a, b);
+          vli_rshift1(a);
+          if (vli_cmp(u, v) < 0)
+            {
+              vli_add(u, u, mod);
+            }
+
+          vli_sub(u, u, v);
+          if (!EVEN(u))
+            {
+              l_carry = vli_add(u, u, mod);
+            }
+
+          vli_rshift1(u);
+          if (l_carry)
+            {
+              u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+            }
+        }
+      else
+        {
+          /* Both odd, b > a: b = (b - a) / 2, v = (v - u) / 2 mod. */
+
+          vli_sub(b, b, a);
+          vli_rshift1(b);
+          if (vli_cmp(v, u) < 0)
+            {
+              vli_add(v, v, mod);
+            }
+
+          vli_sub(v, v, u);
+          if (!EVEN(v))
+            {
+              l_carry = vli_add(v, v, mod);
+            }
+
+          vli_rshift1(v);
+          if (l_carry)
+            {
+              v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+            }
+        }
+    }
+
+  vli_set(result, u);
+}
+
+/* ------ Point operations ------ */
+
+/* Returns 1 if point is the point at infinity (x == y == 0), 0
+ * otherwise.
+ */
+
+static int eccpoint_iszero(FAR eccpoint_t *point)
+{
+  return vli_iszero(point->x) && vli_iszero(point->y);
+}
+
+/* Point multiplication algorithm using Montgomery's ladder with
+ * co-Z coordinates. From http://eprint.iacr.org/2011/338.pdf
+ */
+
+/* Double in place */
+
+/* Doubles the Jacobian point (X1, Y1, Z1) in place.  No-op when Z1 == 0
+ * (point at infinity).  The step comments use the t1..t5 register names
+ * from the co-Z paper cited above.
+ */
+
+static void eccpoint_double_jacobian(FAR uint64_t *X1,
+                                     FAR uint64_t *Y1,
+                                     FAR uint64_t *Z1)
+{
+  /* t1 = X, t2 = Y, t3 = Z */
+
+  uint64_t t4[NUM_ECC_DIGITS];
+  uint64_t t5[NUM_ECC_DIGITS];
+  uint64_t l_carry;
+
+  if (vli_iszero(Z1))
+    {
+      return;
+    }
+
+  vli_modsquare_fast(t4, Y1);   /* t4 = y1^2 */
+  vli_modmult_fast(t5, X1, t4); /* t5 = x1*y1^2 = A */
+  vli_modsquare_fast(t4, t4);   /* t4 = y1^4 */
+  vli_modmult_fast(Y1, Y1, Z1); /* t2 = y1*z1 = z3 */
+  vli_modsquare_fast(Z1, Z1);   /* t3 = z1^2 */
+
+  vli_modadd(X1, X1, Z1, g_curve_p); /* t1 = x1 + z1^2 */
+  vli_modadd(Z1, Z1, Z1, g_curve_p); /* t3 = 2*z1^2 */
+  vli_modsub(Z1, X1, Z1, g_curve_p); /* t3 = x1 - z1^2 */
+
+  /* t1 = x1^2 - z1^4 */
+
+  vli_modmult_fast(X1, X1, Z1);
+
+  vli_modadd(Z1, X1, X1, g_curve_p); /* t3 = 2*(x1^2 - z1^4) */
+  vli_modadd(X1, X1, Z1, g_curve_p); /* t1 = 3*(x1^2 - z1^4) */
+
+  /* Exact halving mod p: if X1 is odd, adding p first makes the shifted
+   * value the true (3/2)*(x1^2 - z1^4); the add's carry becomes the top
+   * bit after the shift.
+   */
+
+  if (vli_testbit(X1, 0))
+    {
+      l_carry = vli_add(X1, X1, g_curve_p);
+      vli_rshift1(X1);
+      X1[NUM_ECC_DIGITS - 1] |= l_carry << 63;
+    }
+  else
+    {
+      vli_rshift1(X1);
+    }
+
+  /* t1 = 3/2*(x1^2 - z1^4) = B */
+
+  /* t3 = B^2 */
+
+  vli_modsquare_fast(Z1, X1);
+
+  /* t3 = B^2 - A */
+
+  vli_modsub(Z1, Z1, t5, g_curve_p);
+
+  /* t3 = B^2 - 2A = x3 */
+
+  vli_modsub(Z1, Z1, t5, g_curve_p);
+
+  /* t5 = A - x3 */
+
+  vli_modsub(t5, t5, Z1, g_curve_p);
+
+  /* t1 = B * (A - x3) */
+
+  vli_modmult_fast(X1, X1, t5);
+
+  /* t4 = B * (A - x3) - y1^4 = y3 */
+
+  vli_modsub(t4, X1, t4, g_curve_p);
+
+  /* Rotate the registers into their output positions. */
+
+  vli_set(X1, Z1);
+  vli_set(Z1, Y1);
+  vli_set(Y1, t4);
+}
+
+/* Modify (x1, y1) => (x1 * z^2, y1 * z^3), i.e. scale an affine point
+ * into the Jacobian coordinate system with the given Z.
+ */
+
+static void apply_z(FAR uint64_t *X1, FAR uint64_t *Y1,
+                    FAR uint64_t *Z)
+{
+  uint64_t t1[NUM_ECC_DIGITS];
+
+  /* z^2 */
+
+  vli_modsquare_fast(t1, Z);
+
+  /* x1 * z^2 */
+
+  vli_modmult_fast(X1, X1, t1);
+
+  /* z^3 */
+
+  vli_modmult_fast(t1, t1, Z);
+
+  /* y1 * z^3 */
+
+  vli_modmult_fast(Y1, Y1, t1);
+}
+
+/* P = (x1, y1) => 2P, (x2, y2) => P'.  Prepares the co-Z pair used by
+ * the Montgomery ladder: (X1, Y1) becomes 2P and (X2, Y2) becomes P,
+ * both sharing the same (implicit) Z.  When initialz is NULL, Z starts
+ * at 1 (affine input); otherwise the supplied value is used.
+ */
+
+static void xycz_initial_double(FAR uint64_t *X1, FAR uint64_t *Y1,
+                                FAR uint64_t *X2, FAR uint64_t *Y2,
+                                FAR uint64_t *initialz)
+{
+  uint64_t z[NUM_ECC_DIGITS];
+
+  vli_set(X2, X1);
+  vli_set(Y2, Y1);
+
+  vli_clear(z);
+  z[0] = 1;
+  if (initialz)
+    {
+      vli_set(z, initialz);
+    }
+
+  apply_z(X1, Y1, z);
+
+  eccpoint_double_jacobian(X1, Y1, z);
+
+  apply_z(X2, Y2, z);
+}
+
+/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
+ * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
+ *  or P => P', Q => P + Q
+ */
+
+/* Co-Z point addition (see the header comment above): on return
+ * (X2, Y2) holds P + Q and (X1, Y1) holds the co-Z updated P'.  All
+ * arithmetic is modulo g_curve_p; t5 is the only extra temporary.
+ */
+
+static void xycz_add(FAR uint64_t *X1, FAR uint64_t *Y1,
+                     FAR uint64_t *X2, FAR uint64_t *Y2)
+{
+  /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
+
+  uint64_t t5[NUM_ECC_DIGITS];
+
+  /* t5 = x2 - x1 */
+
+  vli_modsub(t5, X2, X1, g_curve_p);
+
+  /* t5 = (x2 - x1)^2 = A */
+
+  vli_modsquare_fast(t5, t5);
+
+  /* t1 = x1*A = B */
+
+  vli_modmult_fast(X1, X1, t5);
+
+  /* t3 = x2*A = C */
+
+  vli_modmult_fast(X2, X2, t5);
+
+  /* t4 = y2 - y1 */
+
+  vli_modsub(Y2, Y2, Y1, g_curve_p);
+
+  /* t5 = (y2 - y1)^2 = D */
+
+  vli_modsquare_fast(t5, Y2);
+
+  /* t5 = D - B */
+
+  vli_modsub(t5, t5, X1, g_curve_p);
+
+  /* t5 = D - B - C = x3 */
+
+  vli_modsub(t5, t5, X2, g_curve_p);
+
+  /* t3 = C - B */
+
+  vli_modsub(X2, X2, X1, g_curve_p);
+
+  /* t2 = y1*(C - B) */
+
+  vli_modmult_fast(Y1, Y1, X2);
+
+  /* t3 = B - x3 */
+
+  vli_modsub(X2, X1, t5, g_curve_p);
+
+  /* t4 = (y2 - y1)*(B - x3) */
+
+  vli_modmult_fast(Y2, Y2, X2);
+
+  /* t4 = y3 */
+
+  vli_modsub(Y2, Y2, Y1, g_curve_p);
+  vli_set(X2, t5);
+}
+
+/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
+ * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
+ *  or P => P - Q, Q => P + Q
+ */
+
+/* Conjugate co-Z point addition (see the header comment above): on
+ * return (X2, Y2) holds P + Q and (X1, Y1) holds P - Q, sharing a
+ * common Z.  All arithmetic is modulo g_curve_p.
+ */
+
+static void xycz_addc(FAR uint64_t *X1, FAR uint64_t *Y1,
+                      FAR uint64_t *X2, FAR uint64_t *Y2)
+{
+  /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
+
+  uint64_t t5[NUM_ECC_DIGITS];
+  uint64_t t6[NUM_ECC_DIGITS];
+  uint64_t t7[NUM_ECC_DIGITS];
+
+  /* t5 = x2 - x1 */
+
+  vli_modsub(t5, X2, X1, g_curve_p);
+
+  /* t5 = (x2 - x1)^2 = A */
+
+  vli_modsquare_fast(t5, t5);
+
+  /* t1 = x1*A = B */
+
+  vli_modmult_fast(X1, X1, t5);
+
+  /* t3 = x2*A = C */
+
+  vli_modmult_fast(X2, X2, t5);
+
+  /* t5 = y2 + y1 */
+
+  vli_modadd(t5, Y2, Y1, g_curve_p);
+
+  /* t4 = y2 - y1 */
+
+  vli_modsub(Y2, Y2, Y1, g_curve_p);
+
+  /* t6 = C - B */
+
+  vli_modsub(t6, X2, X1, g_curve_p);
+
+  /* t2 = y1 * (C - B) */
+
+  vli_modmult_fast(Y1, Y1, t6);
+
+  /* t6 = B + C */
+
+  vli_modadd(t6, X1, X2, g_curve_p);
+
+  /* t3 = (y2 - y1)^2 */
+
+  vli_modsquare_fast(X2, Y2);
+
+  /* t3 = x3 */
+
+  vli_modsub(X2, X2, t6, g_curve_p);
+
+  /* t7 = B - x3 */
+
+  vli_modsub(t7, X1, X2, g_curve_p);
+
+  /* t4 = (y2 - y1)*(B - x3) */
+
+  vli_modmult_fast(Y2, Y2, t7);
+
+  /* t4 = y3 */
+
+  vli_modsub(Y2, Y2, Y1, g_curve_p);
+
+  /* t7 = (y2 + y1)^2 = F */
+
+  vli_modsquare_fast(t7, t5);
+
+  /* t7 = x3' */
+
+  vli_modsub(t7, t7, t6, g_curve_p);
+
+  /* t6 = x3' - B */
+
+  vli_modsub(t6, t7, X1, g_curve_p);
+
+  /* t6 = (y2 + y1)*(x3' - B) */
+
+  vli_modmult_fast(t6, t6, t5);
+
+  /* t2 = y3' */
+
+  vli_modsub(Y1, t6, Y1, g_curve_p);
+
+  vli_set(X1, t7);
+}
+
+/* Compute result = scalar * point using a co-Z Montgomery ladder:
+ * each scalar bit performs one conjugate addition (xycz_addc) followed
+ * by one addition (xycz_add), independent of the bit's value.
+ * initialz, if non-NULL, is presumably an initial Z coordinate used to
+ * randomize the projective representation (NULL => Z = 1) — confirm
+ * against xycz_initial_double, which is defined elsewhere in this file.
+ */
+
+static void eccpoint_mult(FAR eccpoint_t *result, FAR eccpoint_t *point,
+                          FAR uint64_t *scalar, FAR uint64_t *initialz)
+{
+  /* R0 and R1 */
+
+  uint64_t rx[2][NUM_ECC_DIGITS];
+  uint64_t ry[2][NUM_ECC_DIGITS];
+  uint64_t z[NUM_ECC_DIGITS];
+  int nb;
+  int i;
+
+  vli_set(rx[1], point->x);
+  vli_set(ry[1], point->y);
+
+  xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initialz);
+
+  /* Ladder over the scalar bits below the (implicit) top bit. */
+
+  for (i = vli_numbits(scalar) - 2; i > 0; --i)
+    {
+      nb = !vli_testbit(scalar, i);
+      xycz_addc(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]);
+      xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]);
+    }
+
+  nb = !vli_testbit(scalar, 0);
+  xycz_addc(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]);
+
+  /* Find final 1/Z value. */
+
+  /* X1 - X0 */
+
+  vli_modsub(z, rx[1], rx[0], g_curve_p);
+
+  /* Yb * (X1 - X0) */
+
+  vli_modmult_fast(z, z, ry[1 - nb]);
+
+  /* xP * Yb * (X1 - X0) */
+
+  vli_modmult_fast(z, z, point->x);
+
+  /* 1 / (xP * Yb * (X1 - X0)) */
+
+  vli_modinv(z, z, g_curve_p);
+
+  /* yP / (xP * Yb * (X1 - X0)) */
+
+  vli_modmult_fast(z, z, point->y);
+
+  /* Xb * yP / (xP * Yb * (X1 - X0)) */
+
+  vli_modmult_fast(z, z, rx[1 - nb]);
+
+  /* End 1/Z calculation */
+
+  xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]);
+
+  /* Convert back to affine coordinates. */
+
+  apply_z(rx[0], ry[0], z);
+
+  vli_set(result->x, rx[0]);
+  vli_set(result->y, ry[0]);
+}
+
+/* Convert a big-endian byte string into the native little-endian
+ * array of 64-bit digits (digit 0 holds the least significant word).
+ */
+
+static void ecc_bytes2native(uint64_t native[NUM_ECC_DIGITS],
+                             const uint8_t bytes[ECC_BYTES])
+{
+  FAR const uint8_t *src;
+  uint64_t word;
+  unsigned i;
+  unsigned j;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      /* Digit i comes from the i-th 8-byte group counted from the end. */
+
+      src = bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
+      word = 0;
+      for (j = 0; j < 8; ++j)
+        {
+          word = (word << 8) | (uint64_t)src[j];
+        }
+
+      native[i] = word;
+    }
+}
+
+/* Convert the native little-endian array of 64-bit digits into a
+ * big-endian byte string (inverse of ecc_bytes2native).
+ */
+
+static void ecc_native2bytes(uint8_t bytes[ECC_BYTES],
+                             const uint64_t native[NUM_ECC_DIGITS])
+{
+  FAR uint8_t *dst;
+  unsigned i;
+  unsigned j;
+
+  for (i = 0; i < NUM_ECC_DIGITS; ++i)
+    {
+      /* Digit i is written to the i-th 8-byte group from the end. */
+
+      dst = bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
+      for (j = 0; j < 8; ++j)
+        {
+          dst[j] = (uint8_t)(native[i] >> (56 - 8 * j));
+        }
+    }
+}
+
+/* Compute a = sqrt(a) (mod g_curve_p) by square-and-multiply
+ * exponentiation.  Returns one of the two square roots; the other is
+ * g_curve_p - a (see the parity fix-up in ecc_point_decompress).
+ */
+
+static void mod_sqrt(uint64_t a[NUM_ECC_DIGITS])
+{
+  unsigned i;
+  uint64_t l_result[NUM_ECC_DIGITS] =
+    {
+      1
+    };
+
+  uint64_t p1[NUM_ECC_DIGITS] =
+    {
+      1
+    };
+
+  /* Since g_curve_p == 3 (mod 4) for all supported curves, we can
+   * compute sqrt(a) = a^((g_curve_p + 1) / 4) (mod g_curve_p).
+   */
+
+  vli_add(p1, g_curve_p, p1); /* p1 = g_curve_p + 1 */
+
+  /* Exponentiate by p1 >> 2: the loop stops at i > 1, skipping the
+   * two lowest bits of p1, which realizes the division by 4.
+   */
+
+  for (i = vli_numbits(p1) - 1; i > 1; --i)
+    {
+      vli_modsquare_fast(l_result, l_result);
+      if (vli_testbit(p1, i))
+        {
+          vli_modmult_fast(l_result, l_result, a);
+        }
+    }
+
+  vli_set(a, l_result);
+}
+
+/* Decompress a public point.  compressed[0] is 2 or 3 and encodes the
+ * parity of y (see ecc_make_key); compressed[1..] is the big-endian x
+ * coordinate.  y is recovered from the curve equation
+ * y^2 = x^3 - 3x + b (mod g_curve_p).
+ */
+
+static void
+ecc_point_decompress(FAR eccpoint_t *point,
+                     const uint8_t compressed[ECC_BYTES + 1])
+{
+  /* -a = 3 (the supported curves use a = -3) */
+
+  uint64_t _3[NUM_ECC_DIGITS] =
+    {
+      3
+    };
+
+  ecc_bytes2native(point->x, compressed + 1);
+
+  /* y = x^2 */
+
+  vli_modsquare_fast(point->y, point->x);
+
+  /* y = x^2 - 3 */
+
+  vli_modsub(point->y, point->y, _3, g_curve_p);
+
+  /* y = x^3 - 3x */
+
+  vli_modmult_fast(point->y, point->y, point->x);
+
+  /* y = x^3 - 3x + b */
+
+  vli_modadd(point->y, point->y, g_curve_b, g_curve_p);
+
+  mod_sqrt(point->y);
+
+  /* Pick the root whose parity matches the compression byte. */
+
+  if ((point->y[0] & 0x01) != (compressed[0] & 0x01))
+    {
+      vli_sub(point->y, g_curve_p, point->y);
+    }
+}
+
+/* -------- ECDSA code -------- */
+
+/* Computes result = (left * right) % mod.
+ * Generic shift-and-subtract reduction: the double-width product is
+ * reduced by repeatedly subtracting mod, shifted left as far as fits,
+ * then halving the shift each iteration.  Slower than the
+ * curve-specific vli_modmult_fast; callers in this file use it with
+ * mod = g_curve_n.
+ */
+
+static void vli_modmult(FAR uint64_t *result, FAR uint64_t *left,
+                        FAR uint64_t *right, FAR uint64_t *mod)
+{
+  uint64_t l_product[2 * NUM_ECC_DIGITS];
+  uint64_t l_modmultiple[2 * NUM_ECC_DIGITS];
+  uint64_t l_carry;
+  uint l_modbits = vli_numbits(mod);
+  uint l_productbits;
+  uint l_digitshift;
+  uint l_bitshift;
+  int l_cmp;
+
+  vli_mult(l_product, left, right);
+
+  /* Total bit length of the double-width product. */
+
+  l_productbits = vli_numbits(l_product + NUM_ECC_DIGITS);
+  if (l_productbits)
+    {
+      l_productbits += NUM_ECC_DIGITS * 64;
+    }
+  else
+    {
+      l_productbits = vli_numbits(l_product);
+    }
+
+  if (l_productbits < l_modbits)
+    {
+      /* l_product < mod. */
+
+      vli_set(result, l_product);
+      return;
+    }
+
+  /* Shift mod by (l_leftBits - l_modbits).
+   * This multiplies mod by the largest power of two possible
+   * while still resulting in a number less than left.
+   */
+
+  vli_clear(l_modmultiple);
+  vli_clear(l_modmultiple + NUM_ECC_DIGITS);
+  l_digitshift = (l_productbits - l_modbits) / 64;
+  l_bitshift = (l_productbits - l_modbits) % 64;
+  if (l_bitshift)
+    {
+      l_modmultiple[l_digitshift + NUM_ECC_DIGITS] =
+        vli_lshift(l_modmultiple + l_digitshift, mod, l_bitshift);
+    }
+  else
+    {
+      vli_set(l_modmultiple + l_digitshift, mod);
+    }
+
+  /* Subtract all multiples of mod to get the remainder. */
+
+  vli_clear(result);
+
+  /* Use result as a temp var to store 1 (for subtraction) */
+
+  result[0] = 1;
+  while (l_productbits > NUM_ECC_DIGITS * 64 ||
+         vli_cmp(l_modmultiple, mod) >= 0)
+    {
+      /* Subtract the shifted modulus whenever it does not exceed the
+       * remaining product (compare high halves first, then low).
+       */
+
+      l_cmp = vli_cmp(l_modmultiple + NUM_ECC_DIGITS,
+                      l_product + NUM_ECC_DIGITS);
+      if (l_cmp < 0 ||
+         (l_cmp == 0 && vli_cmp(l_modmultiple, l_product) <= 0))
+        {
+          if (vli_sub(l_product, l_product, l_modmultiple))
+            {
+              /* borrow */
+
+              vli_sub(l_product + NUM_ECC_DIGITS,
+                      l_product + NUM_ECC_DIGITS, result);
+            }
+
+          vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS,
+                  l_modmultiple + NUM_ECC_DIGITS);
+        }
+
+      /* Halve the shifted modulus across the digit boundary. */
+
+      l_carry = (l_modmultiple[NUM_ECC_DIGITS] & 0x01) << 63;
+      vli_rshift1(l_modmultiple + NUM_ECC_DIGITS);
+      vli_rshift1(l_modmultiple);
+      l_modmultiple[NUM_ECC_DIGITS - 1] |= l_carry;
+      --l_productbits;
+    }
+
+  vli_set(result, l_product);
+}
+
+/* Return the larger of two unsigned values. */
+
+static uint umax(uint a, uint b)
+{
+  if (a > b)
+    {
+      return a;
+    }
+
+  return b;
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/* Create a public/private key pair.
+ * publickey receives the compressed public point: one parity byte
+ * (2 + (y & 1)) followed by the big-endian x coordinate; privatekey
+ * receives the big-endian private scalar.
+ * Returns 1 on success, 0 if no valid key was found in MAX_TRIES.
+ */
+
+int ecc_make_key(uint8_t publickey[ECC_BYTES + 1],
+                 uint8_t privatekey[ECC_BYTES])
+{
+  uint64_t l_private[NUM_ECC_DIGITS];
+  eccpoint_t l_public;
+  unsigned l_tries = 0;
+
+  do
+    {
+      if (l_tries++ >= MAX_TRIES)
+        {
+          return 0;
+        }
+
+      /* arc4random_buf() takes a length in BYTES: use sizeof(l_private)
+       * (ECC_BYTES bytes) so the whole scalar is random.  Passing
+       * NUM_ECC_DIGITS would randomize only the first few bytes and
+       * leave the rest of the key uninitialized.
+       */
+
+      arc4random_buf(l_private, sizeof(l_private));
+
+      if (vli_iszero(l_private))
+        {
+          continue;
+        }
+
+      /* Make sure the private key is in the range [1, n-1].
+       * For the supported curves, n is always large enough that we only
+       * need to subtract once at most.
+       */
+
+      if (vli_cmp(g_curve_n, l_private) != 1)
+        {
+          vli_sub(l_private, l_private, g_curve_n);
+        }
+
+      eccpoint_mult(&l_public, &g_curve_g, l_private, NULL);
+    }
+  while (eccpoint_iszero(&l_public));
+
+  ecc_native2bytes(privatekey, l_private);
+  ecc_native2bytes(publickey + 1, l_public.x);
+
+  /* Compression byte: 2 for even y, 3 for odd y. */
+
+  publickey[0] = 2 + (l_public.y[0] & 0x01);
+  return 1;
+}
+
+/* Compute an ECDH shared secret from our private key and the peer's
+ * compressed public key.  secret receives the big-endian x coordinate
+ * of the product point.  Returns 1 on success, 0 if the product is the
+ * point at infinity (degenerate input).
+ */
+
+int ecdh_shared_secret(const uint8_t publickey[ECC_BYTES + 1],
+                       const uint8_t privatekey[ECC_BYTES],
+                       uint8_t secret[ECC_BYTES])
+{
+  eccpoint_t l_product;
+  eccpoint_t l_public;
+  uint64_t l_private[NUM_ECC_DIGITS];
+  uint64_t l_random[NUM_ECC_DIGITS];
+
+  /* Random initial Z used to blind the projective coordinates.
+   * arc4random_buf() takes a length in BYTES: use sizeof(l_random)
+   * (ECC_BYTES bytes) so the whole buffer is filled.  Passing
+   * NUM_ECC_DIGITS would randomize only the first few bytes and leave
+   * the rest uninitialized.
+   */
+
+  arc4random_buf(l_random, sizeof(l_random));
+  ecc_point_decompress(&l_public, publickey);
+  ecc_bytes2native(l_private, privatekey);
+
+  eccpoint_mult(&l_product, &l_public, l_private, l_random);
+
+  ecc_native2bytes(secret, l_product.x);
+
+  return !eccpoint_iszero(&l_product);
+}
+
+/* Generate an ECDSA signature (r || s, both big-endian) over hash with
+ * the given private key.  Returns 1 on success, 0 if no valid nonce
+ * was found in MAX_TRIES.
+ */
+
+int ecdsa_sign(const uint8_t privatekey[ECC_BYTES],
+               const uint8_t hash[ECC_BYTES],
+               uint8_t signature[ECC_BYTES * 2])
+{
+  uint64_t k[NUM_ECC_DIGITS];
+  uint64_t l_tmp[NUM_ECC_DIGITS];
+  uint64_t l_s[NUM_ECC_DIGITS];
+  unsigned l_tries = 0;
+  eccpoint_t p;
+
+  do
+    {
+      if (l_tries++ >= MAX_TRIES)
+        {
+          return 0;
+        }
+
+      /* Per-signature nonce.  arc4random_buf() takes a length in
+       * BYTES: use sizeof(k) (ECC_BYTES bytes) so k is fully random.
+       * Passing NUM_ECC_DIGITS would randomize only the first few
+       * bytes; a low-entropy nonce lets an attacker recover the
+       * private key from signatures.
+       */
+
+      arc4random_buf(k, sizeof(k));
+
+      if (vli_iszero(k))
+        {
+          continue;
+        }
+
+      if (vli_cmp(g_curve_n, k) != 1)
+        {
+          vli_sub(k, k, g_curve_n);
+        }
+
+      /* tmp = k * G */
+
+      eccpoint_mult(&p, &g_curve_g, k, NULL);
+
+      /* r = x1 (mod n) */
+
+      if (vli_cmp(g_curve_n, p.x) != 1)
+        {
+          vli_sub(p.x, p.x, g_curve_n);
+        }
+    }
+  while (vli_iszero(p.x));
+
+  ecc_native2bytes(signature, p.x);
+
+  ecc_bytes2native(l_tmp, privatekey);
+  vli_modmult(l_s, p.x, l_tmp, g_curve_n); /* s = r*d */
+  ecc_bytes2native(l_tmp, hash);
+  vli_modadd(l_s, l_tmp, l_s, g_curve_n); /* s = e + r*d */
+
+  /* k = 1 / k */
+
+  vli_modinv(k, k, g_curve_n);
+
+  /* s = (e + r*d) / k */
+
+  vli_modmult(l_s, l_s, k, g_curve_n);
+  ecc_native2bytes(signature + ECC_BYTES, l_s);
+
+  return 1;
+}
+
+/* Verify an ECDSA signature (r || s, big-endian) over hash with the
+ * compressed public key.  Returns 1 if the signature is valid, 0
+ * otherwise.  Uses Shamir's trick: u1*G + u2*Q is computed in a single
+ * double-and-add loop over the combined bits of u1 and u2, indexing a
+ * precomputed table {0, G, Q, G + Q} with the current bit pair.
+ */
+
+int ecdsa_verify(const uint8_t publickey[ECC_BYTES + 1],
+                 const uint8_t hash[ECC_BYTES],
+                 const uint8_t signature[ECC_BYTES * 2])
+{
+  uint64_t u1[NUM_ECC_DIGITS];
+  uint64_t u2[NUM_ECC_DIGITS];
+  uint64_t z[NUM_ECC_DIGITS];
+  uint64_t rx[NUM_ECC_DIGITS];
+  uint64_t ry[NUM_ECC_DIGITS];
+  uint64_t tx[NUM_ECC_DIGITS];
+  uint64_t ty[NUM_ECC_DIGITS];
+  uint64_t tz[NUM_ECC_DIGITS];
+  uint64_t l_r[NUM_ECC_DIGITS];
+  uint64_t l_s[NUM_ECC_DIGITS];
+  uint l_numbits;
+  eccpoint_t *l_point;
+  eccpoint_t l_public;
+  eccpoint_t l_sum;
+  int l_index;
+  int i;
+
+  /* Use Shamir's trick to calculate u1*G + u2*Q */
+
+  eccpoint_t *l_points[4] =
+    {
+        NULL, &g_curve_g, &l_public, &l_sum
+    };
+
+  ecc_point_decompress(&l_public, publickey);
+  ecc_bytes2native(l_r, signature);
+  ecc_bytes2native(l_s, signature + ECC_BYTES);
+
+  if (vli_iszero(l_r) || vli_iszero(l_s))
+    {
+      /* r, s must not be 0. */
+
+      return 0;
+    }
+
+  if (vli_cmp(g_curve_n, l_r) != 1 || vli_cmp(g_curve_n, l_s) != 1)
+    {
+      /* r, s must be < n. */
+
+      return 0;
+    }
+
+  /* Calculate u1 and u2. */
+
+  vli_modinv(z, l_s, g_curve_n); /* Z = s^-1 */
+  ecc_bytes2native(u1, hash);
+  vli_modmult(u1, u1, z, g_curve_n); /* u1 = e/s */
+
+  /* u2 = r/s */
+
+  vli_modmult(u2, l_r, z, g_curve_n);
+
+  /* Calculate l_sum = G + Q. */
+
+  vli_set(l_sum.x, l_public.x);
+  vli_set(l_sum.y, l_public.y);
+  vli_set(tx, g_curve_g.x);
+  vli_set(ty, g_curve_g.y);
+  vli_modsub(z, l_sum.x, tx, g_curve_p); /* Z = x2 - x1 */
+  xycz_add(tx, ty, l_sum.x, l_sum.y);
+  vli_modinv(z, z, g_curve_p); /* Z = 1/Z */
+  apply_z(l_sum.x, l_sum.y, z);
+
+  /* Walk the bits of u1 and u2 together, starting from the highest
+   * set bit of either.
+   */
+
+  l_numbits = umax(vli_numbits(u1), vli_numbits(u2));
+
+  l_point = l_points[(!!vli_testbit(u1, l_numbits - 1)) |
+                    ((!!vli_testbit(u2, l_numbits - 1)) << 1)];
+  vli_set(rx, l_point->x);
+  vli_set(ry, l_point->y);
+  vli_clear(z);
+  z[0] = 1;
+
+  for (i = l_numbits - 2; i >= 0; --i)
+    {
+      eccpoint_double_jacobian(rx, ry, z);
+
+      /* Bit pair selects 0, G, Q, or G + Q to add this round. */
+
+      l_index = (!!vli_testbit(u1, i)) | ((!!vli_testbit(u2, i)) << 1);
+      l_point = l_points[l_index];
+      if (l_point)
+        {
+          vli_set(tx, l_point->x);
+          vli_set(ty, l_point->y);
+          apply_z(tx, ty, z);
+          vli_modsub(tz, rx, tx, g_curve_p); /* Z = x2 - x1 */
+          xycz_add(tx, ty, rx, ry);
+          vli_modmult_fast(z, z, tz);
+        }
+    }
+
+  vli_modinv(z, z, g_curve_p); /* Z = 1/Z */
+  apply_z(rx, ry, z);
+
+  /* v = x1 (mod n) */
+
+  if (vli_cmp(g_curve_n, rx) != 1)
+    {
+      vli_sub(rx, rx, g_curve_n);
+    }
+
+  /* Accept only if v == r. */
+
+  return vli_cmp(rx, l_r) == 0;
+}
diff --git a/include/crypto/ecc.h b/include/crypto/ecc.h
new file mode 100644
index 00000000000..eb3898eaabd
--- /dev/null
+++ b/include/crypto/ecc.h
@@ -0,0 +1,143 @@
+/****************************************************************************
+ * include/crypto/ecc.h
+ *
+ * Copyright (c) 2013, Kenneth MacKay All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ****************************************************************************/
+
+#ifndef __INCLUDE_CRYPTO_ECC_H
+#define __INCLUDE_CRYPTO_ECC_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <stdint.h>
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* Curve selection options. */
+
+#define secp128r1 16
+#define secp192r1 24
+#define secp256r1 32
+#define secp384r1 48
+
+#ifndef ECC_CURVE
+#  define ECC_CURVE secp256r1
+#endif
+
+#if (ECC_CURVE != secp128r1 && \
+     ECC_CURVE != secp192r1 && \
+     ECC_CURVE != secp256r1 && \
+     ECC_CURVE != secp384r1)
+#  error "Must define ECC_CURVE to one of the available curves"
+#endif
+
+#define ECC_BYTES ECC_CURVE
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* ecc_make_key() function.
+ * Create a public/private key pair.
+ *
+ * Outputs:
+ *   publickey  - Will be filled in with the public key.
+ *   privatekey - Will be filled in with the private key.
+ *
+ * Returns 1 if the key pair was generated successfully,
+ * 0 if an error occurred.
+ */
+
+int ecc_make_key(uint8_t publickey[ECC_BYTES + 1],
+                 uint8_t privatekey[ECC_BYTES]);
+
+/* ecdh_shared_secret() function.
+ * Compute a shared secret given your secret key and someone else's
+ * public key.
+ * Note: It is recommended that you hash the result of ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Inputs:
+ *    publickey  - The public key of the remote party.
+ *    privatekey - Your private key.
+ *
+ * Outputs:
+ *    secret - Will be filled in with the shared secret value.
+ *
+ * Returns 1 if the shared secret was generated successfully,
+ * 0 if an error occurred.
+ */
+
+int ecdh_shared_secret(const uint8_t publickey[ECC_BYTES + 1],
+                       const uint8_t privatekey[ECC_BYTES],
+                       uint8_t secret[ECC_BYTES]);
+
+/* ecdsa_sign() function.
+ * Generate an ECDSA signature for a given hash value.
+ *
+ * Usage: Compute a hash of the data you wish to sign (SHA-2 is recommended)
+ * and pass it in to this function along with your private key.
+ *
+ * Inputs:
+ *    privatekey - Your private key.
+ *    hash       - The message hash to sign.
+ *
+ * Outputs:
+ *    signature  - Will be filled in with the signature value.
+ *
+ * Returns 1 if the signature generated successfully, 0 if an error occurred.
+ */
+
+int ecdsa_sign(const uint8_t privatekey[ECC_BYTES],
+               const uint8_t hash[ECC_BYTES],
+               uint8_t signature[ECC_BYTES * 2]);
+
+/* ecdsa_verify() function.
+ * Verify an ECDSA signature.
+ *
+ * Usage: Compute the hash of the signed data using the same hash as
+ * the signer and pass it to this function along with the signer's
+ * public key and the signature values (r and s).
+ *
+ * Inputs:
+ *   publickey - The signer's public key
+ *   hash      - The hash of the signed data.
+ *   signature - The signature value.
+ *
+ * Returns 1 if the signature is valid, 0 if it is invalid.
+ */
+
+int ecdsa_verify(const uint8_t publickey[ECC_BYTES + 1],
+                 const uint8_t hash[ECC_BYTES],
+                 const uint8_t signature[ECC_BYTES * 2]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_CRYPTO_ECC_H */

Reply via email to