Hi, I think I have managed to fix the alignment errors. I had to change various things, as the code was entirely unaware of alignment issues, and I really doubt that it ever worked on architectures where correct alignment is mandatory.
Please take a look at it, as it changes the SHA1 code (and degrades performance, I know — I tried to keep the change as simple as possible). There were three problems with respect to alignment: the buffer passed by the user is a char array that is later interpreted as ints; the context buffer and the returned hash suffer from the same problem. -- Martín Ferrari
--- src/hackerlab/hash/sha1.c-orig	2006-08-21 19:28:34.585850524 -0300
+++ src/hackerlab/hash/sha1.c	2006-08-21 19:31:42.667183132 -0300
@@ -39,11 +39,11 @@
   t_uint32 total[2];
   t_uint32 buflen;
-  t_uchar buffer[128];
+  t_uchar buffer[128] __attribute__((aligned(4)));
 };
 
 static void
-sha1_process_blocks (const void *buffer, size_t len, sha1_context_t ctx);
+sha1_process_blocks (const t_uchar *buffer, size_t len, sha1_context_t ctx);
 
 #if MACHINE_IS_BIGENDIAN
 # define NOTSWAP(n) (n)
@@ -124,7 +124,7 @@
 {
   /* When we already have some bits in our internal buffer
      concatenate both inputs first.  */
-  if (ctx->buflen != 0)
+  while (len > 0)
     {
       size_t left_over = ctx->buflen;
       size_t add = 128 - left_over > len ? len : 128 - left_over;
@@ -147,14 +147,17 @@
     }
 
   /* Process available complete blocks.  */
+  /*
   if (len >= 64)
     {
       sha1_process_blocks (buffer, len & ~63, ctx);
       buffer = (const t_uchar *) buffer + (len & ~63);
       len &= 63;
     }
+  */
 
   /* Move remaining bytes in internal buffer.  */
+  /*
   if (len > 0)
     {
       size_t left_over = ctx->buflen;
@@ -169,6 +172,7 @@
         }
       ctx->buflen = left_over;
     }
+  */
 }
@@ -191,6 +195,8 @@
   /* Take yet unprocessed bytes into account.  */
   t_uint32 bytes = ctx->buflen;
   size_t pad;
+  /* Temporary array for solving alignment issues */
+  t_uint32 tmp[5];
 
   /* Now count remaining bytes.  */
   ctx->total[0] += bytes;
@@ -208,11 +214,12 @@
   /* Process last bytes.
      */
   sha1_process_blocks (ctx->buffer, bytes + pad + 8, ctx);
 
-  ((t_uint32 *) result)[0] = NOTSWAP (ctx->current_sha1.A);
-  ((t_uint32 *) result)[1] = NOTSWAP (ctx->current_sha1.B);
-  ((t_uint32 *) result)[2] = NOTSWAP (ctx->current_sha1.C);
-  ((t_uint32 *) result)[3] = NOTSWAP (ctx->current_sha1.D);
-  ((t_uint32 *) result)[4] = NOTSWAP (ctx->current_sha1.E);
+  tmp[0] = NOTSWAP (ctx->current_sha1.A);
+  tmp[1] = NOTSWAP (ctx->current_sha1.B);
+  tmp[2] = NOTSWAP (ctx->current_sha1.C);
+  tmp[3] = NOTSWAP (ctx->current_sha1.D);
+  tmp[4] = NOTSWAP (ctx->current_sha1.E);
+  mem_cpy (result, tmp, 20);
 
   sha1_context_reset (ctx);
 }
@@ -329,7 +336,7 @@
    It is assumed that LEN % 64 == 0.
    Most of this code comes from GnuPG's cipher/sha1.c.  */
 static void
-sha1_process_blocks (const void *buffer, size_t len, sha1_context_t ctx)
+sha1_process_blocks (const t_uchar *buffer, size_t len, sha1_context_t ctx)
 {
   const t_uint32 *words = buffer;
   size_t nwords = len / sizeof (t_uint32);