Ben Laurie wrote:
> Comments?
>
> Note that this supplies the underlying PRNG - I anticipate wrapping it
> up in a daemon for normal use. As discussed with some members of the
> team, we think that should be a sub-project of APR, apr-prngd.
>
> Note that for some applications, direct access to the PRNG makes sense.
> Also note that it isn't currently thread-safe.
Doh! Patch attached...
I should also note that the core PRNG is a work in progress; I may still
change it, but the API shouldn't change.
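
For illustration, here is a minimal usage sketch of the API declared in
apr_random.h below. It is not part of the patch; the demo() wrapper and the
entropy source are placeholders, and error handling is omitted:

    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_random.h"

    /* Illustrative only - not part of the patch. */
    static int demo(void)
    {
        apr_pool_t *pool;
        apr_random_t *rng;
        unsigned char seed[256];  /* placeholder: fill from a real entropy source */
        unsigned char key[32];

        apr_initialize();
        apr_pool_create(&pool, NULL);
        rng = apr_random_standard_new(pool);

        /* Keep feeding entropy until the generator is ready for secure output */
        do {
            /* ... fill seed[] from a genuine entropy source here ... */
            apr_random_add_entropy(rng, seed, sizeof seed);
        } while (apr_random_secure_ready(rng) != APR_SUCCESS);

        if (apr_random_secure_bytes(rng, key, sizeof key) != APR_SUCCESS)
            return 1;

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }
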
Cheers,
Ben.
--
http://www.apache-ssl.org/ben.html http://www.thebunker.net/
"There is no limit to what a man can do or how far he can go if he
doesn't mind who gets the credit." - Robert Woodruff
Index: Makefile.in
===================================================================
RCS file: /home/cvs/apr/Makefile.in,v
retrieving revision 1.87
diff -u -r1.87 Makefile.in
--- Makefile.in 30 Apr 2003 17:28:25 -0000 1.87
+++ Makefile.in 30 Oct 2003 13:04:11 -0000
@@ -133,6 +133,9 @@
check: $(TARGET_LIB)
(cd test && $(MAKE) check)
+etags:
+ etags `find . -name '*.[ch]'`
+
# DO NOT REMOVE
docs: $(INCDIR)/*.h
Index: configure.in
===================================================================
RCS file: /home/cvs/apr/configure.in,v
retrieving revision 1.535
diff -u -r1.535 configure.in
--- configure.in 2 Sep 2003 08:42:54 -0000 1.535
+++ configure.in 30 Oct 2003 13:04:12 -0000
@@ -100,7 +100,7 @@
DEFAULT_OSDIR="unix"
echo "(Default will be ${DEFAULT_OSDIR})"
-apr_modules="file_io network_io threadproc misc locks time mmap shmem user
memory atomic poll support"
+apr_modules="file_io network_io threadproc misc locks time mmap shmem user
memory atomic poll support random"
dnl Checks for programs.
AC_PROG_MAKE_SET
Index: include/apr_atomic.h
===================================================================
RCS file: /home/cvs/apr/include/apr_atomic.h,v
retrieving revision 1.56
diff -u -r1.56 apr_atomic.h
--- include/apr_atomic.h 26 Sep 2003 02:34:10 -0000 1.56
+++ include/apr_atomic.h 30 Oct 2003 13:04:13 -0000
@@ -285,7 +285,7 @@
#define apr_atomic_add32(mem, val) apr_atomic_add(mem, val)
#define apr_atomic_dec32(mem) apr_atomic_dec(mem)
#define apr_atomic_inc32(mem) apr_atomic_inc(mem)
-#define apr_atomic_set32(mem) apr_atomic_set(mem)
+#define apr_atomic_set32(mem,val) apr_atomic_set(mem,val)
#define apr_atomic_read32(mem) apr_atomic_read(mem)
#elif (defined(__linux__) || defined(__EMX__)) && defined(__i386__) && !APR_FORCE_ATOMIC_GENERIC
Index: include/apr_errno.h
===================================================================
RCS file: /home/cvs/apr/include/apr_errno.h,v
retrieving revision 1.113
diff -u -r1.113 apr_errno.h
--- include/apr_errno.h 23 Sep 2003 22:28:52 -0000 1.113
+++ include/apr_errno.h 30 Oct 2003 13:04:15 -0000
@@ -307,6 +307,8 @@
#define APR_ESYMNOTFOUND (APR_OS_START_ERROR + 26)
/** @see APR_STATUS_IS_EPROC_UNKNOWN */
#define APR_EPROC_UNKNOWN (APR_OS_START_ERROR + 27)
+
+#define APR_ENOTENOUGHENTROPY (APR_OS_START_ERROR + 28)
/** @} */
/**
Index: include/apr_random.h
===================================================================
RCS file: include/apr_random.h
diff -N include/apr_random.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ include/apr_random.h 30 Oct 2003 13:04:15 -0000
@@ -0,0 +1,96 @@
+/* ====================================================================
+ * The Apache Software License, Version 1.1
+ *
+ * Copyright (c) 2000-2003 The Apache Software Foundation. All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. The end-user documentation included with the redistribution,
+ * if any, must include the following acknowledgment:
+ * "This product includes software developed by the
+ * Apache Software Foundation (http://www.apache.org/)."
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ *
+ * 4. The names "Apache" and "Apache Software Foundation" must
+ * not be used to endorse or promote products derived from this
+ * software without prior written permission. For written
+ * permission, please contact [EMAIL PROTECTED]
+ *
+ * 5. Products derived from this software may not be called "Apache",
+ * nor may "Apache" appear in their name, without prior written
+ * permission of the Apache Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation. For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ */
+
+#ifndef APR_RANDOM_H
+#define APR_RANDOM_H
+
+#include <apr_pools.h>
+
+typedef struct apr_crypto_hash_t apr_crypto_hash_t;
+
+typedef void apr_crypto_hash_init_t(apr_crypto_hash_t *hash);
+typedef void apr_crypto_hash_add_t(apr_crypto_hash_t *hash,const void *data,
+ apr_size_t bytes);
+typedef void apr_crypto_hash_finish_t(apr_crypto_hash_t *hash,
+ unsigned char *result);
+
+// FIXME: make this opaque
+struct apr_crypto_hash_t
+ {
+ apr_crypto_hash_init_t *init;
+ apr_crypto_hash_add_t *add;
+ apr_crypto_hash_finish_t *finish;
+ apr_size_t size;
+ void *data;
+ };
+
+apr_crypto_hash_t *apr_crypto_sha256_new(apr_pool_t *p);
+
+typedef struct apr_random_t apr_random_t;
+
+void apr_random_init(apr_random_t *g,apr_pool_t *p,
+ apr_crypto_hash_t *pool_hash,apr_crypto_hash_t *key_hash,
+ apr_crypto_hash_t *prng_hash);
+apr_random_t *apr_random_standard_new(apr_pool_t *p);
+void apr_random_add_entropy(apr_random_t *g,const void *entropy_,
+ apr_size_t bytes);
+apr_status_t apr_random_insecure_bytes(apr_random_t *g,void *random,
+ apr_size_t bytes);
+apr_status_t apr_random_secure_bytes(apr_random_t *g,void *random,
+ apr_size_t bytes);
+void apr_random_barrier(apr_random_t *g);
+apr_status_t apr_random_secure_ready(apr_random_t *r);
+apr_status_t apr_random_insecure_ready(apr_random_t *r);
+
+#endif /* ndef APR_RANDOM_H */
Index: random/unix/Makefile.in
===================================================================
RCS file: random/unix/Makefile.in
diff -N random/unix/Makefile.in
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ random/unix/Makefile.in 30 Oct 2003 13:04:16 -0000
@@ -0,0 +1,18 @@
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+TARGETS = \
+ apr_random.lo \
+ sha2.lo \
+ sha2_glue.lo
+
+
+# bring in rules.mk for standard functionality
+@INCLUDE_RULES@
+
+INCDIR=../../include
+OSDIR=$(INCDIR)/arch/@OSDIR@
+DEFOSDIR=$(INCDIR)/arch/@DEFAULT_OSDIR@
+INCLUDES=-I$(INCDIR) -I$(OSDIR) -I$(DEFOSDIR)
+
+# DO NOT REMOVE
Index: random/unix/apr_random.c
===================================================================
RCS file: random/unix/apr_random.c
diff -N random/unix/apr_random.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ random/unix/apr_random.c 30 Oct 2003 13:04:17 -0000
@@ -0,0 +1,294 @@
+/* ====================================================================
+ * The Apache Software License, Version 1.1
+ *
+ * Copyright (c) 2003 The Apache Software Foundation. All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. The end-user documentation included with the redistribution,
+ * if any, must include the following acknowledgment:
+ * "This product includes software developed by the
+ * Apache Software Foundation (http://www.apache.org/)."
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ *
+ * 4. The names "Apache" and "Apache Software Foundation" must
+ * not be used to endorse or promote products derived from this
+ * software without prior written permission. For written
+ * permission, please contact [EMAIL PROTECTED]
+ *
+ * 5. Products derived from this software may not be called "Apache",
+ * nor may "Apache" appear in their name, without prior written
+ * permission of the Apache Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation. For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ */
+/*
+ * See the paper "???" by Ben Laurie for an explanation of this PRNG.
+ */
+
+#include "apr.h"
+#include "apr_pools.h"
+#include "apr_random.h"
+#include <assert.h>
+
+#define min(a,b) ((a) < (b) ? (a) : (b))
+
+#define APR_RANDOM_DEFAULT_POOLS 32
+#define APR_RANDOM_DEFAULT_REHASH_SIZE 1024
+#define APR_RANDOM_DEFAULT_RESEED_SIZE 32
+#define APR_RANDOM_DEFAULT_HASH_SECRET_SIZE 32
+#define APR_RANDOM_DEFAULT_G_FOR_INSECURE 32
+#define APR_RANDOM_DEFAULT_G_FOR_SECURE 320
+
+typedef struct apr_random_pool_t
+ {
+ unsigned char *pool;
+ int bytes;
+ int pool_size;
+ } apr_random_pool_t;
+
+#define hash_init(h) (h)->init(h)
+#define hash_add(h,b,n) (h)->add(h,b,n)
+#define hash_finish(h,r) (h)->finish(h,r)
+
+#define hash(h,r,b,n) hash_init(h),hash_add(h,b,n),hash_finish(h,r)
+
+#define crypt_setkey(c,k) (c)->set_key((c)->data,k)
+#define crypt_crypt(c,out,in) (c)->crypt((c)->data,out,in)
+
+struct apr_random_t
+ {
+ apr_pool_t *apr_pool;
+ apr_crypto_hash_t *pool_hash;
+ unsigned int npools;
+ apr_random_pool_t *pools;
+ unsigned int next_pool;
+ unsigned int generation;
+ apr_size_t rehash_size;
+ apr_size_t reseed_size;
+ apr_crypto_hash_t *key_hash;
+#define K_size(g) ((g)->key_hash->size)
+ apr_crypto_hash_t *prng_hash;
+#define B_size(g) ((g)->prng_hash->size)
+ unsigned char *H;
+ unsigned char *H_waiting;
+#define H_size(g) (B_size(g)+K_size(g))
+ unsigned char *randomness;
+ apr_size_t random_bytes;
+ unsigned int g_for_insecure;
+ unsigned int g_for_secure;
+ unsigned int secure_base;
+ unsigned char insecure_started:1;
+ unsigned char secure_started:1;
+ };
+
+void apr_random_init(apr_random_t *g,apr_pool_t *p,
+ apr_crypto_hash_t *pool_hash,apr_crypto_hash_t *key_hash,
+ apr_crypto_hash_t *prng_hash)
+ {
+ int n;
+
+ g->apr_pool=p;
+ g->pool_hash=pool_hash;
+ g->key_hash=key_hash;
+ g->prng_hash=prng_hash;
+ g->npools=APR_RANDOM_DEFAULT_POOLS;
+ g->pools=apr_palloc(p,g->npools*sizeof *g->pools);
+ for(n=0 ; n < g->npools ; ++n)
+ {
+ g->pools[n].bytes=g->pools[n].pool_size=0;
+ g->pools[n].pool=NULL;
+ }
+ g->next_pool=0;
+ g->generation=0;
+ g->rehash_size=APR_RANDOM_DEFAULT_REHASH_SIZE;
+    /* Round the rehash size up to a multiple of twice the pool hash size */
+ g->rehash_size=((g->rehash_size+2*g->pool_hash->size-1)/g->pool_hash->size
+ /2)*g->pool_hash->size*2;
+ g->reseed_size=APR_RANDOM_DEFAULT_RESEED_SIZE;
+ g->prng_hash=prng_hash;
+ g->H=apr_palloc(p,H_size(g));
+ g->H_waiting=apr_palloc(p,H_size(g));
+ g->randomness=apr_palloc(p,B_size(g));
+ g->random_bytes=0;
+
+ g->g_for_insecure=APR_RANDOM_DEFAULT_G_FOR_INSECURE;
+ g->secure_base=0;
+ g->g_for_secure=APR_RANDOM_DEFAULT_G_FOR_SECURE;
+ g->secure_started=g->insecure_started=0;
+ }
+
+apr_random_t *apr_random_standard_new(apr_pool_t *p)
+ {
+ apr_random_t *r=apr_palloc(p,sizeof *r);
+
+ apr_random_init(r,p,apr_crypto_sha256_new(p),apr_crypto_sha256_new(p),
+ apr_crypto_sha256_new(p));
+ return r;
+ }
+
+static void rekey(apr_random_t *g)
+ {
+ int n;
+ unsigned char *H=(g->insecure_started && !g->secure_started) ? g->H_waiting
+ : g->H;
+
+ hash_init(g->key_hash);
+ hash_add(g->key_hash,H,H_size(g));
+ for(n=0 ; n < g->npools && (n == 0 || g->generation&(1 << (n-1)))
+ ; ++n)
+ {
+ hash_add(g->key_hash,g->pools[n].pool,g->pools[n].bytes);
+ g->pools[n].bytes=0;
+ }
+ hash_finish(g->key_hash,H+B_size(g));
+ ++g->generation;
+ if(!g->insecure_started && g->generation > g->g_for_insecure)
+ {
+ g->insecure_started=1;
+ if(!g->secure_started)
+ {
+ memcpy(g->H_waiting,g->H,H_size(g));
+ g->secure_base=g->generation;
+ }
+ }
+ if(!g->secure_started && g->generation > g->secure_base+g->g_for_secure)
+ {
+ g->secure_started=1;
+ memcpy(g->H,g->H_waiting,H_size(g));
+ }
+ }
+
+void apr_random_add_entropy(apr_random_t *g,const void *entropy_,
+ apr_size_t bytes)
+ {
+ int n;
+ const unsigned char *entropy=entropy_;
+
+ for(n=0 ; n < bytes ; ++n)
+ {
+ apr_random_pool_t *p=&g->pools[g->next_pool];
+
+ if(++g->next_pool == g->npools)
+ g->next_pool=0;
+
+ if(p->pool_size < p->bytes+1)
+ {
+ unsigned char *np=apr_palloc(g->apr_pool,(p->bytes+1)*2);
+
+ memcpy(np,p->pool,p->bytes);
+ p->pool=np;
+ p->pool_size=(p->bytes+1)*2;
+ }
+ p->pool[p->bytes++]=entropy[n];
+
+ if(p->bytes == g->rehash_size)
+ {
+ int r;
+
+ for(r=0 ; r < p->bytes/2 ; r+=g->pool_hash->size)
+ hash(g->pool_hash,p->pool+r,p->pool+r*2,g->pool_hash->size*2);
+ p->bytes/=2;
+ }
+ assert(p->bytes < g->rehash_size);
+ }
+
+ if(g->pools[0].bytes >= g->reseed_size)
+ rekey(g);
+ }
+
+// This will give g->B_size bytes of randomness
+static void apr_random_block(apr_random_t *g,unsigned char *random)
+ {
+ // FIXME: in principle, these are different hashes
+ hash(g->prng_hash,g->H,g->H,H_size(g));
+ hash(g->prng_hash,random,g->H,B_size(g));
+ }
+
+static void apr_random_bytes(apr_random_t *g,unsigned char *random,
+ apr_size_t bytes)
+ {
+ apr_size_t n;
+
+ for(n=0 ; n < bytes ; )
+ {
+ int l;
+
+ if(g->random_bytes == 0)
+ {
+ apr_random_block(g,g->randomness);
+ g->random_bytes=B_size(g);
+ }
+ l=min(bytes-n,g->random_bytes);
+ memcpy(&random[n],g->randomness+B_size(g)-g->random_bytes,l);
+ g->random_bytes-=l;
+ n+=l;
+ }
+ }
+
+apr_status_t apr_random_secure_bytes(apr_random_t *g,void *random,
+ apr_size_t bytes)
+ {
+ if(!g->secure_started)
+ return APR_ENOTENOUGHENTROPY;
+ apr_random_bytes(g,random,bytes);
+ return APR_SUCCESS;
+ }
+
+apr_status_t apr_random_insecure_bytes(apr_random_t *g,void *random,
+ apr_size_t bytes)
+ {
+ if(!g->insecure_started)
+ return APR_ENOTENOUGHENTROPY;
+ apr_random_bytes(g,random,bytes);
+ return APR_SUCCESS;
+ }
+
+void apr_random_barrier(apr_random_t *g)
+ {
+ g->secure_started=0;
+ g->secure_base=g->generation;
+ }
+
+apr_status_t apr_random_secure_ready(apr_random_t *r)
+ {
+ if(!r->secure_started)
+ return APR_ENOTENOUGHENTROPY;
+ return APR_SUCCESS;
+ }
+
+apr_status_t apr_random_insecure_ready(apr_random_t *r)
+ {
+ if(!r->insecure_started)
+ return APR_ENOTENOUGHENTROPY;
+ return APR_SUCCESS;
+ }
Index: random/unix/sha2.c
===================================================================
RCS file: random/unix/sha2.c
diff -N random/unix/sha2.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ random/unix/sha2.c 30 Oct 2003 13:04:17 -0000
@@ -0,0 +1,1065 @@
+/*
+ * FILE: sha2.c
+ * AUTHOR: Aaron D. Gifford <[EMAIL PROTECTED]>
+ *
+ * Copyright (c) 2000-2001, Aaron D. Gifford
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: sha2.c,v 1.1 2001/11/08 00:01:51 adg Exp adg $
+ */
+
+#include <string.h> /* memcpy()/memset() or bcopy()/bzero() */
+#include <assert.h> /* assert() */
+#include "sha2.h"
+
+/*
+ * ASSERT NOTE:
+ * Some sanity checking code is included using assert(). On my FreeBSD
+ * system, this additional code can be removed by compiling with NDEBUG
+ * defined. Check your own system's manpage on assert() to see how to
+ * compile WITHOUT the sanity checking code on your system.
+ *
+ * UNROLLED TRANSFORM LOOP NOTE:
+ * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
+ * loop version for the hash transform rounds (defined using macros
+ * later in this file). Either define on the command line, for example:
+ *
+ * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
+ *
+ * or define below:
+ *
+ * #define SHA2_UNROLL_TRANSFORM
+ *
+ */
+
+
+/*** SHA-256/384/512 Machine Architecture Definitions *****************/
+/*
+ * BYTE_ORDER NOTE:
+ *
+ * Please make sure that your system defines BYTE_ORDER. If your
+ * architecture is little-endian, make sure it also defines
+ * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
+ * equivalent.
+ *
+ * If your system does not define the above, then you can do so by
+ * hand like this:
+ *
+ * #define LITTLE_ENDIAN 1234
+ * #define BIG_ENDIAN 4321
+ *
+ * And for little-endian machines, add:
+ *
+ * #define BYTE_ORDER LITTLE_ENDIAN
+ *
+ * Or for big-endian machines:
+ *
+ * #define BYTE_ORDER BIG_ENDIAN
+ *
+ * The FreeBSD machine this was written on defines BYTE_ORDER
+ * appropriately by including <sys/types.h> (which in turn includes
+ * <machine/endian.h> where the appropriate definitions are actually
+ * made).
+ */
+#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
+#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN
+#endif
+
+/*
+ * Define the following sha2_* types to types of the correct length on
+ * the native architecture. Most BSD systems and Linux define u_intXX_t
+ * types. Machines with very recent ANSI C headers can use the
+ * uintXX_t definitions from inttypes.h by defining SHA2_USE_INTTYPES_H
+ * during compile or in the sha.h header file.
+ *
+ * Machines that support neither u_intXX_t nor inttypes.h's uintXX_t
+ * will need to define these three typedefs below (and the appropriate
+ * ones in sha.h too) by hand according to their system architecture.
+ *
+ * Thank you, Jun-ichiro itojun Hagino, for suggesting using u_intXX_t
+ * types and pointing out recent ANSI C support for uintXX_t in inttypes.h.
+ */
+#ifdef SHA2_USE_INTTYPES_H
+
+typedef uint8_t sha2_byte; /* Exactly 1 byte */
+typedef uint32_t sha2_word32; /* Exactly 4 bytes */
+typedef uint64_t sha2_word64; /* Exactly 8 bytes */
+
+#else /* SHA2_USE_INTTYPES_H */
+
+typedef u_int8_t sha2_byte; /* Exactly 1 byte */
+typedef u_int32_t sha2_word32; /* Exactly 4 bytes */
+typedef u_int64_t sha2_word64; /* Exactly 8 bytes */
+
+#endif /* SHA2_USE_INTTYPES_H */
+
+
+/*** SHA-256/384/512 Various Length Definitions ***********************/
+/* NOTE: Most of these are in sha2.h */
+#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8)
+#define SHA384_SHORT_BLOCK_LENGTH (SHA384_BLOCK_LENGTH - 16)
+#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16)
+
+
+/*** ENDIAN REVERSAL MACROS *******************************************/
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define REVERSE32(w,x) { \
+ sha2_word32 tmp = (w); \
+ tmp = (tmp >> 16) | (tmp << 16); \
+ (x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \
+}
+#define REVERSE64(w,x) { \
+ sha2_word64 tmp = (w); \
+ tmp = (tmp >> 32) | (tmp << 32); \
+ tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
+ ((tmp & 0x00ff00ff00ff00ffULL) << 8); \
+ (x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
+ ((tmp & 0x0000ffff0000ffffULL) << 16); \
+}
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+/*
+ * Macro for incrementally adding the unsigned 64-bit integer n to the
+ * unsigned 128-bit integer (represented using a two-element array of
+ * 64-bit words):
+ */
+#define ADDINC128(w,n) { \
+ (w)[0] += (sha2_word64)(n); \
+ if ((w)[0] < (n)) { \
+ (w)[1]++; \
+ } \
+}
+
+/*
+ * Macros for copying blocks of memory and for zeroing out ranges
+ * of memory. Using these macros makes it easy to switch from
+ * using memset()/memcpy() and using bzero()/bcopy().
+ *
+ * Please define either SHA2_USE_MEMSET_MEMCPY or define
+ * SHA2_USE_BZERO_BCOPY depending on which function set you
+ * choose to use:
+ */
+#if !defined(SHA2_USE_MEMSET_MEMCPY) && !defined(SHA2_USE_BZERO_BCOPY)
+/* Default to memset()/memcpy() if no option is specified */
+#define SHA2_USE_MEMSET_MEMCPY 1
+#endif
+#if defined(SHA2_USE_MEMSET_MEMCPY) && defined(SHA2_USE_BZERO_BCOPY)
+/* Abort with an error if BOTH options are defined */
+#error Define either SHA2_USE_MEMSET_MEMCPY or SHA2_USE_BZERO_BCOPY, not both!
+#endif
+
+#ifdef SHA2_USE_MEMSET_MEMCPY
+#define MEMSET_BZERO(p,l) memset((p), 0, (l))
+#define MEMCPY_BCOPY(d,s,l) memcpy((d), (s), (l))
+#endif
+#ifdef SHA2_USE_BZERO_BCOPY
+#define MEMSET_BZERO(p,l) bzero((p), (l))
+#define MEMCPY_BCOPY(d,s,l) bcopy((s), (d), (l))
+#endif
+
+
+/*** THE SIX LOGICAL FUNCTIONS ****************************************/
+/*
+ * Bit shifting and rotation (used by the six SHA-XYZ logical functions:
+ *
+ * NOTE: The naming of R and S appears backwards here (R is a SHIFT and
+ * S is a ROTATION) because the SHA-256/384/512 description document
+ * (see http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf) uses this
+ * same "backwards" definition.
+ */
+/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
+#define R(b,x) ((x) >> (b))
+/* 32-bit Rotate-right (used in SHA-256): */
+#define S32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
+/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
+#define S64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
+
+/* Two of six logical functions used in SHA-256, SHA-384, and SHA-512: */
+#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+/* Four of six logical functions used in SHA-256: */
+#define Sigma0_256(x) (S32(2, (x)) ^ S32(13, (x)) ^ S32(22, (x)))
+#define Sigma1_256(x) (S32(6, (x)) ^ S32(11, (x)) ^ S32(25, (x)))
+#define sigma0_256(x) (S32(7, (x)) ^ S32(18, (x)) ^ R(3 , (x)))
+#define sigma1_256(x) (S32(17, (x)) ^ S32(19, (x)) ^ R(10, (x)))
+
+/* Four of six logical functions used in SHA-384 and SHA-512: */
+#define Sigma0_512(x) (S64(28, (x)) ^ S64(34, (x)) ^ S64(39, (x)))
+#define Sigma1_512(x) (S64(14, (x)) ^ S64(18, (x)) ^ S64(41, (x)))
+#define sigma0_512(x) (S64( 1, (x)) ^ S64( 8, (x)) ^ R( 7, (x)))
+#define sigma1_512(x) (S64(19, (x)) ^ S64(61, (x)) ^ R( 6, (x)))
+
+/*** INTERNAL FUNCTION PROTOTYPES *************************************/
+/* NOTE: These should not be accessed directly from outside this
+ * library -- they are intended for private internal visibility/use
+ * only.
+ */
+void SHA512_Last(SHA512_CTX*);
+void SHA256_Transform(SHA256_CTX*, const sha2_word32*);
+void SHA512_Transform(SHA512_CTX*, const sha2_word64*);
+
+
+/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/
+/* Hash constant words K for SHA-256: */
+const static sha2_word32 K256[64] = {
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
+ 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
+ 0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
+ 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
+ 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
+ 0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
+ 0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
+ 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
+ 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
+ 0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
+ 0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
+ 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
+ 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
+
+/* Initial hash value H for SHA-256: */
+const static sha2_word32 sha256_initial_hash_value[8] = {
+ 0x6a09e667UL,
+ 0xbb67ae85UL,
+ 0x3c6ef372UL,
+ 0xa54ff53aUL,
+ 0x510e527fUL,
+ 0x9b05688cUL,
+ 0x1f83d9abUL,
+ 0x5be0cd19UL
+};
+
+/* Hash constant words K for SHA-384 and SHA-512: */
+const static sha2_word64 K512[80] = {
+ 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
+ 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+ 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
+ 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+ 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
+ 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+ 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
+ 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+ 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
+ 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+ 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
+ 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+ 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
+ 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+ 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
+ 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+ 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
+ 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+ 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
+ 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+ 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
+ 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+ 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
+ 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+ 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
+ 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+ 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
+ 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+ 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
+ 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+ 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
+ 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+ 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
+ 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+ 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
+ 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+ 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
+ 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+ 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
+ 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
+};
+
+/* Initial hash value H for SHA-384 */
+const static sha2_word64 sha384_initial_hash_value[8] = {
+ 0xcbbb9d5dc1059ed8ULL,
+ 0x629a292a367cd507ULL,
+ 0x9159015a3070dd17ULL,
+ 0x152fecd8f70e5939ULL,
+ 0x67332667ffc00b31ULL,
+ 0x8eb44a8768581511ULL,
+ 0xdb0c2e0d64f98fa7ULL,
+ 0x47b5481dbefa4fa4ULL
+};
+
+/* Initial hash value H for SHA-512 */
+const static sha2_word64 sha512_initial_hash_value[8] = {
+ 0x6a09e667f3bcc908ULL,
+ 0xbb67ae8584caa73bULL,
+ 0x3c6ef372fe94f82bULL,
+ 0xa54ff53a5f1d36f1ULL,
+ 0x510e527fade682d1ULL,
+ 0x9b05688c2b3e6c1fULL,
+ 0x1f83d9abfb41bd6bULL,
+ 0x5be0cd19137e2179ULL
+};
+
+/*
+ * Constant used by SHA256/384/512_End() functions for converting the
+ * digest to a readable hexadecimal character string:
+ */
+static const char *sha2_hex_digits = "0123456789abcdef";
+
+
+/*** SHA-256: *********************************************************/
+void SHA256_Init(SHA256_CTX* context) {
+ if (context == (SHA256_CTX*)0) {
+ return;
+ }
+        MEMCPY_BCOPY(context->state, sha256_initial_hash_value, SHA256_DIGEST_LENGTH);
+ MEMSET_BZERO(context->buffer, SHA256_BLOCK_LENGTH);
+ context->bitcount = 0;
+}
+
+#ifdef SHA2_UNROLL_TRANSFORM
+
+/* Unrolled SHA-256 round macros: */
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
+ REVERSE32(*data++, W256[j]); \
+ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
+ K256[j] + W256[j]; \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
+ j++
+
+
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
+ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
+ K256[j] + (W256[j] = *data++); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
+ j++
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND256(a,b,c,d,e,f,g,h) \
+ s0 = W256[(j+1)&0x0f]; \
+ s0 = sigma0_256(s0); \
+ s1 = W256[(j+14)&0x0f]; \
+ s1 = sigma1_256(s1); \
+ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
+ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
+ j++
+
+void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
+ sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word32 T1, *W256;
+ int j;
+
+ W256 = (sha2_word32*)context->buffer;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+ /* Rounds 0 to 15 (unrolled): */
+ ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
+ ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
+ ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
+ ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
+ ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
+ ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
+ ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
+ ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
+ } while (j < 16);
+
+ /* Now for the remaining rounds to 64: */
+ do {
+ ROUND256(a,b,c,d,e,f,g,h);
+ ROUND256(h,a,b,c,d,e,f,g);
+ ROUND256(g,h,a,b,c,d,e,f);
+ ROUND256(f,g,h,a,b,c,d,e);
+ ROUND256(e,f,g,h,a,b,c,d);
+ ROUND256(d,e,f,g,h,a,b,c);
+ ROUND256(c,d,e,f,g,h,a,b);
+ ROUND256(b,c,d,e,f,g,h,a);
+ } while (j < 64);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = 0;
+}
+
+#else /* SHA2_UNROLL_TRANSFORM */
+
+void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
+ sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word32 T1, T2, *W256;
+ int j;
+
+ W256 = (sha2_word32*)context->buffer;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Copy data while converting to host byte order */
+ REVERSE32(*data++,W256[j]);
+ /* Apply the SHA-256 compression function to update a..h */
+ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+                /* Apply the SHA-256 compression function to update a..h with copy */
+                T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+ T2 = Sigma0_256(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 16);
+
+ do {
+ /* Part of the message block expansion: */
+ s0 = W256[(j+1)&0x0f];
+ s0 = sigma0_256(s0);
+ s1 = W256[(j+14)&0x0f];
+ s1 = sigma1_256(s1);
+
+ /* Apply the SHA-256 compression function to update a..h */
+ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
+ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
+ T2 = Sigma0_256(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 64);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = T2 = 0;
+}
+
+#endif /* SHA2_UNROLL_TRANSFORM */
+
+void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
+ unsigned int freespace, usedspace;
+
+ if (len == 0) {
+ /* Calling with no data is valid - we do nothing */
+ return;
+ }
+
+ /* Sanity check: */
+ assert(context != (SHA256_CTX*)0 && data != (sha2_byte*)0);
+
+ usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
+ if (usedspace > 0) {
+ /* Calculate how much free space is available in the buffer */
+ freespace = SHA256_BLOCK_LENGTH - usedspace;
+
+ if (len >= freespace) {
+ /* Fill the buffer completely and process it */
+                        MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace);
+ context->bitcount += freespace << 3;
+ len -= freespace;
+ data += freespace;
+                        SHA256_Transform(context, (sha2_word32*)context->buffer);
+ } else {
+ /* The buffer is not yet full */
+ MEMCPY_BCOPY(&context->buffer[usedspace], data, len);
+ context->bitcount += len << 3;
+ /* Clean up: */
+ usedspace = freespace = 0;
+ return;
+ }
+ }
+ while (len >= SHA256_BLOCK_LENGTH) {
+ /* Process as many complete blocks as we can */
+ SHA256_Transform(context, (sha2_word32*)data);
+ context->bitcount += SHA256_BLOCK_LENGTH << 3;
+ len -= SHA256_BLOCK_LENGTH;
+ data += SHA256_BLOCK_LENGTH;
+ }
+ if (len > 0) {
+ /* There's left-overs, so save 'em */
+ MEMCPY_BCOPY(context->buffer, data, len);
+ context->bitcount += len << 3;
+ }
+ /* Clean up: */
+ usedspace = freespace = 0;
+}
+
+void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
+ sha2_word32 *d = (sha2_word32*)digest;
+ unsigned int usedspace;
+
+ /* Sanity check: */
+ assert(context != (SHA256_CTX*)0);
+
+ /* If no digest buffer is passed, we don't bother doing this: */
+ if (digest != (sha2_byte*)0) {
+ usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Convert FROM host byte order */
+ REVERSE64(context->bitcount,context->bitcount);
+#endif
+ if (usedspace > 0) {
+ /* Begin padding with a 1 bit: */
+ context->buffer[usedspace++] = 0x80;
+
+ if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) {
+ /* Set-up for the last transform: */
+                                MEMSET_BZERO(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace);
+ } else {
+ if (usedspace < SHA256_BLOCK_LENGTH) {
+                                        MEMSET_BZERO(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace);
+ }
+ /* Do second-to-last transform: */
+                                SHA256_Transform(context, (sha2_word32*)context->buffer);
+
+ /* And set-up for the last transform: */
+                                MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
+ }
+ } else {
+ /* Set-up for the last transform: */
+                        MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
+
+ /* Begin padding with a 1 bit: */
+ *context->buffer = 0x80;
+ }
+ /* Set the bit count: */
+                *(sha2_word64*)&context->buffer[SHA256_SHORT_BLOCK_LENGTH] = context->bitcount;
+
+ /* Final transform: */
+ SHA256_Transform(context, (sha2_word32*)context->buffer);
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ {
+ /* Convert TO host byte order */
+ int j;
+ for (j = 0; j < 8; j++) {
+ REVERSE32(context->state[j],context->state[j]);
+ *d++ = context->state[j];
+ }
+ }
+#else
+ MEMCPY_BCOPY(d, context->state, SHA256_DIGEST_LENGTH);
+#endif
+ }
+
+ /* Clean up state data: */
+        MEMSET_BZERO(context, sizeof(*context));
+ usedspace = 0;
+}
+
+char *SHA256_End(SHA256_CTX* context, char buffer[]) {
+ sha2_byte digest[SHA256_DIGEST_LENGTH], *d = digest;
+ int i;
+
+ /* Sanity check: */
+ assert(context != (SHA256_CTX*)0);
+
+ if (buffer != (char*)0) {
+ SHA256_Final(digest, context);
+
+ for (i = 0; i < SHA256_DIGEST_LENGTH; i++) {
+ *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
+ *buffer++ = sha2_hex_digits[*d & 0x0f];
+ d++;
+ }
+ *buffer = (char)0;
+ } else {
+                MEMSET_BZERO(context, sizeof(*context));
+ }
+ MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
+ return buffer;
+}
+
+char* SHA256_Data(const sha2_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) {
+ SHA256_CTX context;
+
+ SHA256_Init(&context);
+ SHA256_Update(&context, data, len);
+ return SHA256_End(&context, digest);
+}
+
+
+/*** SHA-512: *********************************************************/
+void SHA512_Init(SHA512_CTX* context) {
+ if (context == (SHA512_CTX*)0) {
+ return;
+ }
+        MEMCPY_BCOPY(context->state, sha512_initial_hash_value, SHA512_DIGEST_LENGTH);
+ MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH);
+ context->bitcount[0] = context->bitcount[1] = 0;
+}
+
+#ifdef SHA2_UNROLL_TRANSFORM
+
+/* Unrolled SHA-512 round macros: */
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
+ REVERSE64(*data++, W512[j]); \
+ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
+ K512[j] + W512[j]; \
+ (d) += T1, \
+ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
+ j++
+
+
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
+ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
+ K512[j] + (W512[j] = *data++); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
+ j++
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND512(a,b,c,d,e,f,g,h) \
+ s0 = W512[(j+1)&0x0f]; \
+ s0 = sigma0_512(s0); \
+ s1 = W512[(j+14)&0x0f]; \
+ s1 = sigma1_512(s1); \
+ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
+ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
+ j++
+
+void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
+ sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word64 T1, *W512 = (sha2_word64*)context->buffer;
+ int j;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+ ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
+ ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
+ ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
+ ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
+ ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
+ ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
+ ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
+ ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
+ } while (j < 16);
+
+ /* Now for the remaining rounds up to 79: */
+ do {
+ ROUND512(a,b,c,d,e,f,g,h);
+ ROUND512(h,a,b,c,d,e,f,g);
+ ROUND512(g,h,a,b,c,d,e,f);
+ ROUND512(f,g,h,a,b,c,d,e);
+ ROUND512(e,f,g,h,a,b,c,d);
+ ROUND512(d,e,f,g,h,a,b,c);
+ ROUND512(c,d,e,f,g,h,a,b);
+ ROUND512(b,c,d,e,f,g,h,a);
+ } while (j < 80);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = 0;
+}
+
+#else /* SHA2_UNROLL_TRANSFORM */
+
+void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
+ sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word64 T1, T2, *W512 = (sha2_word64*)context->buffer;
+ int j;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Convert TO host byte order */
+ REVERSE64(*data++, W512[j]);
+ /* Apply the SHA-512 compression function to update a..h */
+ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+                /* Apply the SHA-512 compression function to update a..h with copy */
+                T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+ T2 = Sigma0_512(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 16);
+
+ do {
+ /* Part of the message block expansion: */
+ s0 = W512[(j+1)&0x0f];
+ s0 = sigma0_512(s0);
+ s1 = W512[(j+14)&0x0f];
+ s1 = sigma1_512(s1);
+
+ /* Apply the SHA-512 compression function to update a..h */
+ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
+ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
+ T2 = Sigma0_512(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 80);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = T2 = 0;
+}
+
+#endif /* SHA2_UNROLL_TRANSFORM */
+
+void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
+ unsigned int freespace, usedspace;
+
+ if (len == 0) {
+ /* Calling with no data is valid - we do nothing */
+ return;
+ }
+
+ /* Sanity check: */
+ assert(context != (SHA512_CTX*)0 && data != (sha2_byte*)0);
+
+ usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
+ if (usedspace > 0) {
+ /* Calculate how much free space is available in the buffer */
+ freespace = SHA512_BLOCK_LENGTH - usedspace;
+
+ if (len >= freespace) {
+ /* Fill the buffer completely and process it */
+                        MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace);
+ ADDINC128(context->bitcount, freespace << 3);
+ len -= freespace;
+ data += freespace;
+                        SHA512_Transform(context, (sha2_word64*)context->buffer);
+ } else {
+ /* The buffer is not yet full */
+ MEMCPY_BCOPY(&context->buffer[usedspace], data, len);
+ ADDINC128(context->bitcount, len << 3);
+ /* Clean up: */
+ usedspace = freespace = 0;
+ return;
+ }
+ }
+ while (len >= SHA512_BLOCK_LENGTH) {
+ /* Process as many complete blocks as we can */
+ SHA512_Transform(context, (sha2_word64*)data);
+ ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
+ len -= SHA512_BLOCK_LENGTH;
+ data += SHA512_BLOCK_LENGTH;
+ }
+ if (len > 0) {
+ /* There's left-overs, so save 'em */
+ MEMCPY_BCOPY(context->buffer, data, len);
+ ADDINC128(context->bitcount, len << 3);
+ }
+ /* Clean up: */
+ usedspace = freespace = 0;
+}
+
+void SHA512_Last(SHA512_CTX* context) {
+ unsigned int usedspace;
+
+ usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Convert FROM host byte order */
+ REVERSE64(context->bitcount[0],context->bitcount[0]);
+ REVERSE64(context->bitcount[1],context->bitcount[1]);
+#endif
+ if (usedspace > 0) {
+ /* Begin padding with a 1 bit: */
+ context->buffer[usedspace++] = 0x80;
+
+ if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) {
+ /* Set-up for the last transform: */
+                        MEMSET_BZERO(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace);
+ } else {
+ if (usedspace < SHA512_BLOCK_LENGTH) {
+                                MEMSET_BZERO(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace);
+ }
+ /* Do second-to-last transform: */
+                        SHA512_Transform(context, (sha2_word64*)context->buffer);
+
+ /* And set-up for the last transform: */
+ MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH - 2);
+ }
+ } else {
+ /* Prepare for final transform: */
+ MEMSET_BZERO(context->buffer, SHA512_SHORT_BLOCK_LENGTH);
+
+ /* Begin padding with a 1 bit: */
+ *context->buffer = 0x80;
+ }
+ /* Store the length of input data (in bits): */
+        *(sha2_word64*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH] = context->bitcount[1];
+        *(sha2_word64*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8] = context->bitcount[0];
+
+ /* Final transform: */
+ SHA512_Transform(context, (sha2_word64*)context->buffer);
+}
+
+void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
+ sha2_word64 *d = (sha2_word64*)digest;
+
+ /* Sanity check: */
+ assert(context != (SHA512_CTX*)0);
+
+ /* If no digest buffer is passed, we don't bother doing this: */
+ if (digest != (sha2_byte*)0) {
+ SHA512_Last(context);
+
+ /* Save the hash data for output: */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ {
+ /* Convert TO host byte order */
+ int j;
+ for (j = 0; j < 8; j++) {
+ REVERSE64(context->state[j],context->state[j]);
+ *d++ = context->state[j];
+ }
+ }
+#else
+ MEMCPY_BCOPY(d, context->state, SHA512_DIGEST_LENGTH);
+#endif
+ }
+
+ /* Zero out state data */
+        MEMSET_BZERO(context, sizeof(*context));
+}
+
+char *SHA512_End(SHA512_CTX* context, char buffer[]) {
+ sha2_byte digest[SHA512_DIGEST_LENGTH], *d = digest;
+ int i;
+
+ /* Sanity check: */
+ assert(context != (SHA512_CTX*)0);
+
+ if (buffer != (char*)0) {
+ SHA512_Final(digest, context);
+
+ for (i = 0; i < SHA512_DIGEST_LENGTH; i++) {
+ *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
+ *buffer++ = sha2_hex_digits[*d & 0x0f];
+ d++;
+ }
+ *buffer = (char)0;
+ } else {
+                MEMSET_BZERO(context, sizeof(*context));
+ }
+ MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
+ return buffer;
+}
+
+char* SHA512_Data(const sha2_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) {
+ SHA512_CTX context;
+
+ SHA512_Init(&context);
+ SHA512_Update(&context, data, len);
+ return SHA512_End(&context, digest);
+}
+
+
+/*** SHA-384: *********************************************************/
+void SHA384_Init(SHA384_CTX* context) {
+ if (context == (SHA384_CTX*)0) {
+ return;
+ }
+        MEMCPY_BCOPY(context->state, sha384_initial_hash_value, SHA512_DIGEST_LENGTH);
+ MEMSET_BZERO(context->buffer, SHA384_BLOCK_LENGTH);
+ context->bitcount[0] = context->bitcount[1] = 0;
+}
+
+void SHA384_Update(SHA384_CTX* context, const sha2_byte* data, size_t len) {
+ SHA512_Update((SHA512_CTX*)context, data, len);
+}
+
+void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
+ sha2_word64 *d = (sha2_word64*)digest;
+
+ /* Sanity check: */
+ assert(context != (SHA384_CTX*)0);
+
+ /* If no digest buffer is passed, we don't bother doing this: */
+ if (digest != (sha2_byte*)0) {
+ SHA512_Last((SHA512_CTX*)context);
+
+ /* Save the hash data for output: */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ {
+ /* Convert TO host byte order */
+ int j;
+ for (j = 0; j < 6; j++) {
+ REVERSE64(context->state[j],context->state[j]);
+ *d++ = context->state[j];
+ }
+ }
+#else
+ MEMCPY_BCOPY(d, context->state, SHA384_DIGEST_LENGTH);
+#endif
+ }
+
+ /* Zero out state data */
+        MEMSET_BZERO(context, sizeof(*context));
+}
+
+char *SHA384_End(SHA384_CTX* context, char buffer[]) {
+ sha2_byte digest[SHA384_DIGEST_LENGTH], *d = digest;
+ int i;
+
+ /* Sanity check: */
+ assert(context != (SHA384_CTX*)0);
+
+ if (buffer != (char*)0) {
+ SHA384_Final(digest, context);
+
+ for (i = 0; i < SHA384_DIGEST_LENGTH; i++) {
+ *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
+ *buffer++ = sha2_hex_digits[*d & 0x0f];
+ d++;
+ }
+ *buffer = (char)0;
+ } else {
+                MEMSET_BZERO(context, sizeof(*context));
+ }
+ MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
+ return buffer;
+}
+
+char* SHA384_Data(const sha2_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) {
+ SHA384_CTX context;
+
+ SHA384_Init(&context);
+ SHA384_Update(&context, data, len);
+ return SHA384_End(&context, digest);
+}
+
Index: random/unix/sha2.h
===================================================================
RCS file: random/unix/sha2.h
diff -N random/unix/sha2.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ random/unix/sha2.h 30 Oct 2003 13:04:17 -0000
@@ -0,0 +1,197 @@
+/*
+ * FILE: sha2.h
+ * AUTHOR: Aaron D. Gifford <[EMAIL PROTECTED]>
+ *
+ * Copyright (c) 2000-2001, Aaron D. Gifford
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: sha2.h,v 1.1 2001/11/08 00:02:01 adg Exp adg $
+ */
+
+#ifndef __SHA2_H__
+#define __SHA2_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Import u_intXX_t size_t type definitions from system headers. You
+ * may need to change this, or define these things yourself in this
+ * file.
+ */
+#include <sys/types.h>
+
+#ifdef SHA2_USE_INTTYPES_H
+
+#include <inttypes.h>
+
+#endif /* SHA2_USE_INTTYPES_H */
+
+
+/*** SHA-256/384/512 Various Length Definitions ***********************/
+#define SHA256_BLOCK_LENGTH 64
+#define SHA256_DIGEST_LENGTH 32
+#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1)
+#define SHA384_BLOCK_LENGTH 128
+#define SHA384_DIGEST_LENGTH 48
+#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1)
+#define SHA512_BLOCK_LENGTH 128
+#define SHA512_DIGEST_LENGTH 64
+#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1)
+
+
+/*** SHA-256/384/512 Context Structures *******************************/
+/* NOTE: If your architecture does not define either u_intXX_t types or
+ * uintXX_t (from inttypes.h), you may need to define things by hand
+ * for your system:
+ */
+#if 0
+typedef unsigned char u_int8_t; /* 1-byte (8-bits) */
+typedef unsigned int u_int32_t; /* 4-bytes (32-bits) */
+typedef unsigned long long u_int64_t; /* 8-bytes (64-bits) */
+#endif
+/*
+ * Most BSD systems already define u_intXX_t types, as does Linux.
+ * Some systems, however, like Compaq's Tru64 Unix instead can use
+ * uintXX_t types defined by very recent ANSI C standards and included
+ * in the file:
+ *
+ * #include <inttypes.h>
+ *
+ * If you choose to use <inttypes.h> then please define:
+ *
+ * #define SHA2_USE_INTTYPES_H
+ *
+ * Or on the command line during compile:
+ *
+ * cc -DSHA2_USE_INTTYPES_H ...
+ */
+#ifdef SHA2_USE_INTTYPES_H
+
+typedef struct _SHA256_CTX {
+ uint32_t state[8];
+ uint64_t bitcount;
+ uint8_t buffer[SHA256_BLOCK_LENGTH];
+} SHA256_CTX;
+typedef struct _SHA512_CTX {
+ uint64_t state[8];
+ uint64_t bitcount[2];
+ uint8_t buffer[SHA512_BLOCK_LENGTH];
+} SHA512_CTX;
+
+#else /* SHA2_USE_INTTYPES_H */
+
+typedef struct _SHA256_CTX {
+ u_int32_t state[8];
+ u_int64_t bitcount;
+ u_int8_t buffer[SHA256_BLOCK_LENGTH];
+} SHA256_CTX;
+typedef struct _SHA512_CTX {
+ u_int64_t state[8];
+ u_int64_t bitcount[2];
+ u_int8_t buffer[SHA512_BLOCK_LENGTH];
+} SHA512_CTX;
+
+#endif /* SHA2_USE_INTTYPES_H */
+
+typedef SHA512_CTX SHA384_CTX;
+
+
+/*** SHA-256/384/512 Function Prototypes ******************************/
+#ifndef NOPROTO
+#ifdef SHA2_USE_INTTYPES_H
+
+void SHA256_Init(SHA256_CTX *);
+void SHA256_Update(SHA256_CTX*, const uint8_t*, size_t);
+void SHA256_Final(uint8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*);
+char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
+char* SHA256_Data(const uint8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
+
+void SHA384_Init(SHA384_CTX*);
+void SHA384_Update(SHA384_CTX*, const uint8_t*, size_t);
+void SHA384_Final(uint8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*);
+char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]);
+char* SHA384_Data(const uint8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]);
+
+void SHA512_Init(SHA512_CTX*);
+void SHA512_Update(SHA512_CTX*, const uint8_t*, size_t);
+void SHA512_Final(uint8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*);
+char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]);
+char* SHA512_Data(const uint8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]);
+
+#else /* SHA2_USE_INTTYPES_H */
+
+void SHA256_Init(SHA256_CTX *);
+void SHA256_Update(SHA256_CTX*, const u_int8_t*, size_t);
+void SHA256_Final(u_int8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*);
+char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
+char* SHA256_Data(const u_int8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
+
+void SHA384_Init(SHA384_CTX*);
+void SHA384_Update(SHA384_CTX*, const u_int8_t*, size_t);
+void SHA384_Final(u_int8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*);
+char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]);
+char* SHA384_Data(const u_int8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]);
+
+void SHA512_Init(SHA512_CTX*);
+void SHA512_Update(SHA512_CTX*, const u_int8_t*, size_t);
+void SHA512_Final(u_int8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*);
+char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]);
+char* SHA512_Data(const u_int8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]);
+
+#endif /* SHA2_USE_INTTYPES_H */
+
+#else /* NOPROTO */
+
+void SHA256_Init();
+void SHA256_Update();
+void SHA256_Final();
+char* SHA256_End();
+char* SHA256_Data();
+
+void SHA384_Init();
+void SHA384_Update();
+void SHA384_Final();
+char* SHA384_End();
+char* SHA384_Data();
+
+void SHA512_Init();
+void SHA512_Update();
+void SHA512_Final();
+char* SHA512_End();
+char* SHA512_Data();
+
+#endif /* NOPROTO */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __SHA2_H__ */
+
Index: random/unix/sha2_glue.c
===================================================================
RCS file: random/unix/sha2_glue.c
diff -N random/unix/sha2_glue.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ random/unix/sha2_glue.c 30 Oct 2003 13:04:17 -0000
@@ -0,0 +1,33 @@
+#include <apr.h>
+#include <apr_random.h>
+#include <apr_pools.h>
+#include "sha2.h"
+
+static void sha256_init(apr_crypto_hash_t *h)
+ {
+ SHA256_Init(h->data);
+ }
+
+static void sha256_add(apr_crypto_hash_t *h,const void *data,
+ apr_size_t bytes)
+ {
+ SHA256_Update(h->data,data,bytes);
+ }
+
+static void sha256_finish(apr_crypto_hash_t *h,unsigned char *result)
+ {
+ SHA256_Final(result,h->data);
+ }
+
+apr_crypto_hash_t *apr_crypto_sha256_new(apr_pool_t *p)
+ {
+ apr_crypto_hash_t *h=apr_palloc(p,sizeof *h);
+
+ h->data=apr_palloc(p,sizeof(SHA256_CTX));
+ h->init=sha256_init;
+ h->add=sha256_add;
+ h->finish=sha256_finish;
+ h->size=256/8;
+
+ return h;
+ }
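The glue above is all apr_random needs from sha2.c: it hides the digest behind
the generic apr_crypto_hash_t vtable. A minimal sketch of driving that vtable
by hand, assuming only the init/add/finish/size members used above and an
already-created pool - again illustration, not part of the patch:

#include "apr_pools.h"
#include "apr_random.h"

/* Hash an arbitrary buffer with whatever digest the vtable wraps;
 * 'digest' must be at least hash->size bytes. */
static void hash_buffer(apr_pool_t *pool, const void *buf, apr_size_t len,
                        unsigned char *digest)
{
    apr_crypto_hash_t *hash = apr_crypto_sha256_new(pool);

    hash->init(hash);
    hash->add(hash, buf, len);
    hash->finish(hash, digest);
}

Switching to a different digest should only ever mean supplying another
apr_crypto_*_new() constructor; callers never see the SHA-2 types.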
Index: test/test_apr.h
===================================================================
RCS file: /home/cvs/apr/test/test_apr.h,v
retrieving revision 1.42
diff -u -r1.42 test_apr.h
--- test/test_apr.h 2 Jul 2003 12:12:28 -0000 1.42
+++ test/test_apr.h 30 Oct 2003 13:04:18 -0000
@@ -81,6 +81,7 @@
CuSuite *testdir(void);
CuSuite *testfileinfo(void);
CuSuite *testrand(void);
+CuSuite *testrand2(void);
CuSuite *testdso(void);
CuSuite *testoc(void);
CuSuite *testdup(void);
Index: test/testall.c
===================================================================
RCS file: /home/cvs/apr/test/testall.c,v
retrieving revision 1.44
diff -u -r1.44 testall.c
--- test/testall.c 2 Jul 2003 12:12:30 -0000 1.44
+++ test/testall.c 30 Oct 2003 13:04:18 -0000
@@ -95,6 +95,7 @@
{"testdup", testdup},
{"testdir", testdir},
{"testrand", testrand},
+ {"testrand2", testrand2},
{"testdso", testdso},
{"testoc", testoc},
{"testsockets", testsockets},
Index: test/testrand2.c
===================================================================
RCS file: test/testrand2.c
diff -N test/testrand2.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ test/testrand2.c 30 Oct 2003 13:04:18 -0000
@@ -0,0 +1,250 @@
+/* ====================================================================
+ * The Apache Software License, Version 1.1
+ *
+ * Copyright (c) 2000-2003 The Apache Software Foundation. All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. The end-user documentation included with the redistribution,
+ * if any, must include the following acknowledgment:
+ * "This product includes software developed by the
+ * Apache Software Foundation (http://www.apache.org/)."
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ *
+ * 4. The names "Apache" and "Apache Software Foundation" must
+ * not be used to endorse or promote products derived from this
+ * software without prior written permission. For written
+ * permission, please contact [EMAIL PROTECTED]
+ *
+ * 5. Products derived from this software may not be called "Apache",
+ * nor may "Apache" appear in their name, without prior written
+ * permission of the Apache Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This software consists of voluntary contributions made by many
+ * individuals on behalf of the Apache Software Foundation. For more
+ * information on the Apache Software Foundation, please see
+ * <http://www.apache.org/>.
+ */
+
+#include "apr_general.h"
+#include "apr_random.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h> /* memcmp() in rand_run_kat() */
+#include "test_apr.h"
+
+static void hexdump(const unsigned char *b,int n)
+ {
+ int i;
+
+ for(i=0 ; i < n ; ++i)
+ {
+#if 0
+ if((i&0xf) == 0)
+ printf("%04x",i);
+ printf(" %02x",b[i]);
+ if((i&0xf) == 0xf)
+ printf("\n");
+#else
+ printf("0x%02x,",b[i]);
+ if((i&7) == 7)
+ printf("\n");
+#endif
+ }
+ printf("\n");
+ }
+
+static apr_random_t *r;
+
+typedef apr_status_t rnd_fn(apr_random_t *r,void *b,apr_size_t n);
+
+static void rand_run_kat(CuTest *tc,rnd_fn *f,apr_random_t *r,
+ const unsigned char expected[128])
+ {
+ unsigned char c[128];
+ apr_status_t rv;
+
+ rv=f(r,c,128);
+ CuAssertIntEquals(tc,0,rv);
+ if(rv)
+ return;
+ if(memcmp(c,expected,128))
+ {
+ hexdump(c,128);
+ hexdump(expected,128);
+ CuFail(tc,"Randomness mismatch");
+ }
+ }
+
+static void rand_add_zeroes(apr_random_t *r)
+ {
+ static unsigned char c[2048];
+
+ apr_random_add_entropy(r,c,sizeof c);
+ }
+
+static void rand_run_seed_short(CuTest *tc,rnd_fn *f,apr_random_t *r,
+ int count)
+ {
+ int i;
+ apr_status_t rv;
+ char c[1];
+
+ for(i=0 ; i < count ; ++i)
+ rand_add_zeroes(r);
+ rv=f(r,c,1);
+ CuAssertIntEquals(tc,APR_ENOTENOUGHENTROPY,rv);
+ }
+
+static void rand_seed_short(CuTest *tc)
+ {
+ r=apr_random_standard_new(p);
+ rand_run_seed_short(tc,apr_random_insecure_bytes,r,32);
+ }
+
+static void rand_kat(CuTest *tc)
+ {
+ unsigned char expected[128]=
+ { 0x82,0x04,0xad,0xd2,0x0b,0xd5,0xac,0xda,
+ 0x3d,0x85,0x58,0x38,0x54,0x6b,0x69,0x45,
+ 0x37,0x4c,0xc7,0xd7,0x87,0xeb,0xbf,0xd9,
+ 0xb1,0xb8,0xb8,0x2d,0x9b,0x33,0x6e,0x97,
+ 0x04,0x1d,0x4c,0xb0,0xd1,0xdf,0x3d,0xac,
+ 0xd2,0xaa,0xfa,0xcd,0x96,0xb7,0xcf,0xb1,
+ 0x8e,0x3d,0xb3,0xe5,0x37,0xa9,0x95,0xb4,
+ 0xaa,0x3d,0x11,0x1a,0x08,0x20,0x21,0x9f,
+ 0xdb,0x08,0x3a,0xb9,0x57,0x9f,0xf2,0x1f,
+ 0x27,0xdc,0xb6,0xc0,0x85,0x08,0x05,0xbb,
+ 0x13,0xbe,0xb1,0xe9,0x63,0x2a,0xe2,0xa4,
+ 0x23,0x15,0x2a,0x10,0xbf,0xdf,0x09,0xb3,
+ 0xc7,0xfb,0x2d,0x87,0x48,0x19,0xfb,0xc0,
+ 0x15,0x8c,0xcb,0xc6,0xbd,0x89,0x38,0x69,
+ 0xa3,0xae,0xa3,0x21,0x58,0x50,0xe7,0xc4,
+ 0x87,0xec,0x2e,0xb1,0x2d,0x6a,0xbd,0x46 };
+
+ rand_add_zeroes(r);
+ rand_run_kat(tc,apr_random_insecure_bytes,r,expected);
+ }
+
+static void rand_seed_short2(CuTest *tc)
+ {
+ rand_run_seed_short(tc,apr_random_secure_bytes,r,320);
+ }
+
+static void rand_kat2(CuTest *tc)
+ {
+ unsigned char expected[128]=
+ { 0x38,0x8f,0x01,0x29,0x5a,0x5c,0x1f,0xa8,
+ 0x00,0xde,0x16,0x4c,0xe5,0xf7,0x1f,0x58,
+ 0xc0,0x67,0xe2,0x98,0x3d,0xde,0x4a,0x75,
+ 0x61,0x3f,0x23,0xd8,0x45,0x7a,0x10,0x60,
+ 0x59,0x9b,0xd6,0xaf,0xcb,0x0a,0x2e,0x34,
+ 0x9c,0x39,0x5b,0xd0,0xbc,0x9a,0xf0,0x7b,
+ 0x7f,0x40,0x8b,0x33,0xc0,0x0e,0x2a,0x56,
+ 0xfc,0xe5,0xab,0xde,0x7b,0x13,0xf5,0xec,
+ 0x15,0x68,0xb8,0x09,0xbc,0x2c,0x15,0xf0,
+ 0x7b,0xef,0x2a,0x97,0x19,0xa8,0x69,0x51,
+ 0xdf,0xb0,0x5f,0x1a,0x4e,0xdf,0x42,0x02,
+ 0x71,0x36,0xa7,0x25,0x64,0x85,0xe2,0x72,
+ 0xc7,0x87,0x4d,0x7d,0x15,0xbb,0x15,0xd1,
+ 0xb1,0x62,0x0b,0x25,0xd9,0xd3,0xd9,0x5a,
+ 0xe3,0x47,0x1e,0xae,0x67,0xb4,0x19,0x9e,
+ 0xed,0xd2,0xde,0xce,0x18,0x70,0x57,0x12 };
+
+ rand_add_zeroes(r);
+ rand_run_kat(tc,apr_random_secure_bytes,r,expected);
+ }
+
+static void rand_barrier(CuTest *tc)
+ {
+ apr_random_barrier(r);
+ rand_run_seed_short(tc,apr_random_secure_bytes,r,320);
+ }
+
+static void rand_kat3(CuTest *tc)
+ {
+ unsigned char expected[128]=
+ { 0xe8,0xe7,0xc9,0x45,0xe2,0x2a,0x54,0xb2,
+ 0xdd,0xe0,0xf9,0xbc,0x3d,0xf9,0xce,0x3c,
+ 0x4c,0xbd,0xc9,0xe2,0x20,0x4a,0x35,0x1c,
+ 0x04,0x52,0x7f,0xb8,0x0f,0x60,0x89,0x63,
+ 0x8a,0xbe,0x0a,0x44,0xac,0x5d,0xd8,0xeb,
+ 0x24,0x7d,0xd1,0xda,0x4d,0x86,0x9b,0x94,
+ 0x26,0x56,0x4a,0x5e,0x30,0xea,0xd4,0xa9,
+ 0x9a,0xdf,0xdd,0xb6,0xb1,0x15,0xe0,0xfa,
+ 0x28,0xa4,0xd6,0x95,0xa4,0xf1,0xd8,0x6e,
+ 0xeb,0x8c,0xa4,0xac,0x34,0xfe,0x06,0x92,
+ 0xc5,0x09,0x99,0x86,0xdc,0x5a,0x3c,0x92,
+ 0xc8,0x3e,0x52,0x00,0x4d,0x01,0x43,0x6f,
+ 0x69,0xcf,0xe2,0x60,0x9c,0x23,0xb3,0xa5,
+ 0x5f,0x51,0x47,0x8c,0x07,0xde,0x60,0xc6,
+ 0x04,0xbf,0x32,0xd6,0xdc,0xb7,0x31,0x01,
+ 0x29,0x51,0x51,0xb3,0x19,0x6e,0xe4,0xf8 };
+
+ rand_run_kat(tc,apr_random_insecure_bytes,r,expected);
+ }
+
+static void rand_kat4(CuTest *tc)
+ {
+ unsigned char expected[128]=
+ { 0x7d,0x0e,0xc4,0x4e,0x3e,0xac,0x86,0x50,
+ 0x37,0x95,0x7a,0x98,0x23,0x26,0xa7,0xbf,
+ 0x60,0xfb,0xa3,0x70,0x90,0xc3,0x58,0xc6,
+ 0xbd,0xd9,0x5e,0xa6,0x77,0x62,0x7a,0x5c,
+ 0x96,0x83,0x7f,0x80,0x3d,0xf4,0x9c,0xcc,
+ 0x9b,0x0c,0x8c,0xe1,0x72,0xa8,0xfb,0xc9,
+ 0xc5,0x43,0x91,0xdc,0x9d,0x92,0xc2,0xce,
+ 0x1c,0x5e,0x36,0xc7,0x87,0xb1,0xb4,0xa3,
+ 0xc8,0x69,0x76,0xfc,0x35,0x75,0xcb,0x08,
+ 0x2f,0xe3,0x98,0x76,0x37,0x80,0x04,0x5c,
+ 0xb8,0xb0,0x7f,0xb2,0xda,0xe3,0xa3,0xba,
+ 0xed,0xff,0xf5,0x9d,0x3b,0x7b,0xf3,0x32,
+ 0x6c,0x50,0xa5,0x3e,0xcc,0xe1,0x84,0x9c,
+ 0x17,0x9e,0x80,0x64,0x09,0xbb,0x62,0xf1,
+ 0x95,0xf5,0x2c,0xc6,0x9f,0x6a,0xee,0x6d,
+ 0x17,0x35,0x5f,0x35,0x8d,0x55,0x0c,0x07 };
+
+ rand_add_zeroes(r);
+ rand_run_kat(tc,apr_random_secure_bytes,r,expected);
+ }
+
+CuSuite *testrand2(void)
+ {
+ CuSuite *suite = CuSuiteNew("Random2");
+
+ SUITE_ADD_TEST(suite, rand_seed_short);
+ SUITE_ADD_TEST(suite, rand_kat);
+ SUITE_ADD_TEST(suite, rand_seed_short2);
+ SUITE_ADD_TEST(suite, rand_kat2);
+ SUITE_ADD_TEST(suite, rand_barrier);
+ SUITE_ADD_TEST(suite, rand_kat3);
+ SUITE_ADD_TEST(suite, rand_kat4);
+
+ return suite;
+ }
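The known-answer tests above deliberately seed with zeroes so the output is
reproducible. For completeness, a sketch of how a normal (non-test) caller
might use the same API: seed with real entropy and retry until the generator
reports it is sufficiently seeded. /dev/urandom and the helper name are
assumptions for illustration only, not part of the patch:

#include <stdio.h>
#include "apr_pools.h"
#include "apr_random.h"
#include "apr_errno.h"

static apr_status_t get_secure_key(apr_pool_t *pool,
                                   unsigned char *key, apr_size_t len)
{
    apr_random_t *rnd = apr_random_standard_new(pool);
    unsigned char seed[256];
    FILE *f = fopen("/dev/urandom", "rb");
    apr_status_t rv;

    if (!f)
        return APR_EGENERAL;

    /* keep feeding entropy until secure output becomes available */
    do {
        if (fread(seed, 1, sizeof seed, f) != sizeof seed) {
            fclose(f);
            return APR_EGENERAL;
        }
        apr_random_add_entropy(rnd, seed, sizeof seed);
        rv = apr_random_secure_bytes(rnd, key, len);
    } while (rv == APR_ENOTENOUGHENTROPY);

    fclose(f);
    return rv;
}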