RE: [PATCH V2 2/2] crypto : async implementation for sha1-mb

2016-06-07 Thread Dey, Megha


-----Original Message-----
From: Herbert Xu [mailto:herb...@gondor.apana.org.au] 
Sent: Tuesday, June 7, 2016 3:35 AM
To: Dey, Megha <megha@intel.com>
Cc: tim.c.c...@linux.intel.com; da...@davemloft.net; 
linux-crypto@vger.kernel.org; linux-ker...@vger.kernel.org; Yu, Fenghua 
<fenghua...@intel.com>; Megha Dey <megha@linux.intel.com>
Subject: Re: [PATCH V2 2/2] crypto : async implementation for sha1-mb

On Thu, Jun 02, 2016 at 07:53:50PM -0700, Megha Dey wrote:
>
> + struct ahash_alg *shash = crypto_ahash_alg(tfm);
>  
>   /* alignment is to be done by multi-buffer crypto algorithm if 
> needed */
>  
> - return shash->finup(desc, NULL, 0, req->result);
> + return shash->finup(desc);

You're still poking in the guts of the API.  Now that it's a real ahash you 
don't need to do that.

Just do crypto_ahash_finup.  That way you don't need to export
crypto_ahash_alg either.
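
For illustration, a minimal sketch of that suggestion; the wrapper name
below is invented for the example, only crypto_ahash_finup() itself is the
real API:

	#include <crypto/hash.h>

	/*
	 * Sketch: route the inner finup through the public ahash entry point
	 * instead of dereferencing the algorithm's ops, so crypto_ahash_alg()
	 * does not need to be exported.
	 */
	static int sketch_inner_finup(struct ahash_request *areq)
	{
		/* alignment is to be done by the multi-buffer algorithm if needed */
		return crypto_ahash_finup(areq);
	}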

> I have made these changes and re-sent the patch.

Thanks,
--
Email: Herbert Xu <herb...@gondor.apana.org.au> Home Page: 
http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


[PATCH V2 2/2] crypto : async implementation for sha1-mb

2016-06-02 Thread Megha Dey
From: Megha Dey 

Herbert wants the sha1-mb algorithm to have an async implementation:
https://lkml.org/lkml/2016/4/5/286.
Currently, sha1-mb uses an async interface for the outer algorithm
and a sync interface for the inner algorithm. This patch introduces
an async interface for the inner algorithm as well.
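
Concretely, the inner ops move from the synchronous shash prototypes to the
asynchronous ahash ones (both declared in <crypto/hash.h>); a rough
before/after for update, with the full conversion in the diff below:

	/* before: sync inner interface, data passed explicitly */
	static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
				  unsigned int len);

	/* after: async inner interface, data and result are carried in the
	 * ahash_request itself
	 */
	static int sha1_mb_update(struct ahash_request *areq);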

Signed-off-by: Megha Dey 
Signed-off-by: Tim Chen 
---
 arch/x86/crypto/sha-mb/sha1_mb.c | 190 ++-
 crypto/ahash.c   |   6 --
 crypto/mcryptd.c | 131 ++-
 include/crypto/hash.h|   6 ++
 include/crypto/internal/hash.h   |  16 ++--
 include/crypto/mcryptd.h |   8 +-
 6 files changed, 193 insertions(+), 164 deletions(-)

diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 0a46491..efc19e3 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -68,6 +68,7 @@
 #include 
 #include 
 #include "sha_mb_ctx.h"
+#include 
 
 #define FLUSH_INTERVAL 1000 /* in usec */
 
@@ -80,10 +81,10 @@ struct sha1_mb_ctx {
 static inline struct mcryptd_hash_request_ctx
*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
 {
-   struct shash_desc *desc;
+   struct ahash_request *areq;
 
-   desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
-   return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+   areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+   return container_of(areq, struct mcryptd_hash_request_ctx, areq);
 }
 
 static inline struct ahash_request
@@ -93,7 +94,7 @@ static inline struct ahash_request
 }
 
 static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
-   struct shash_desc *desc)
+   struct ahash_request *areq)
 {
rctx->flag = HASH_UPDATE;
 }
@@ -375,9 +376,9 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
}
 }
 
-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
 {
-   struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+   struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
hash_ctx_init(sctx);
sctx->job.result_digest[0] = SHA1_H0;
@@ -395,7 +396,7 @@ static int sha1_mb_init(struct shash_desc *desc)
 static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
 {
int i;
-   struct  sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
+   struct  sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
__be32  *dst = (__be32 *) rctx->out;
 
for (i = 0; i < 5; ++i)
@@ -427,7 +428,7 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
 
}
sha_ctx = (struct sha1_hash_ctx *)
-   shash_desc_ctx(&rctx->desc);
+   ahash_request_ctx(&rctx->areq);
kernel_fpu_begin();
sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
rctx->walk.data, nbytes, flag);
@@ -519,11 +520,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
mcryptd_arm_flusher(cstate, delay);
 }
 
-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
 {
struct mcryptd_hash_request_ctx *rctx =
-   container_of(desc, struct mcryptd_hash_request_ctx, desc);
+   container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -539,7 +539,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
}
 
/* need to init context */
-   req_ctx_init(rctx, desc);
+   req_ctx_init(rctx, areq);
 
	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
 
@@ -552,7 +552,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
rctx->flag |= HASH_DONE;
 
/* submit */
-   sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+   sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
@@ -579,11 +579,10 @@ done:
return ret;
 }
 
-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
-unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
 {
struct mcryptd_hash_request_ctx *rctx =
-   container_of(desc, struct mcryptd_hash_request_ctx, desc);
+   container_of(areq, struct