RE: [PATCH 2/2] crypto : async implementation for sha1-mb

2016-06-02 Thread Dey, Megha


-Original Message-
From: Herbert Xu [mailto:herb...@gondor.apana.org.au] 
Sent: Thursday, June 2, 2016 5:33 PM
To: Dey, Megha 
Cc: tim.c.c...@linux.intel.com; da...@davemloft.net; 
linux-crypto@vger.kernel.org; linux-ker...@vger.kernel.org; Yu, Fenghua 

Subject: Re: [PATCH 2/2] crypto : async implementation for sha1-mb

On Thu, Jun 02, 2016 at 10:20:20AM -0700, Megha Dey wrote:
>
> > > @@ -439,17 +444,18 @@ static int mcryptd_hash_finup_enqueue(struct 
> > > ahash_request *req)  static void mcryptd_hash_digest(struct 
> > > crypto_async_request *req_async, int err)  {
> > >   struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
> > > - struct crypto_shash *child = ctx->child;
> > > + struct crypto_ahash *child = ctx->child;
> > >   struct ahash_request *req = ahash_request_cast(req_async);
> > >   struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
> > > - struct shash_desc *desc = >desc;
> > > + struct ahash_request *desc = >areq;
> > > + struct crypto_async_request *base = >base;
> > >  
> > >   if (unlikely(err == -EINPROGRESS))
> > >   goto out;
> > > + base->tfm = >base;
> > > + base->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */
> > 
> > You should not be touching crypto_async_request directly.  Use the 
> > proper ahash interface to set the child request.
> > 
> Herbert, Could you please clarify?
> In the earlier code we had a async_request which is now replaced by 
> crypto_async_request. Do you want a new async_request to be used?
> Do you think we shouldn't be setting the members of the 
> crypto_ahash_request directly, but use some other interface to do the 
> same for us?

You already have an ahash_request here.  So you should be doing

ahash_request_set_tfm(...)
ahash_request_set_callback(...)

>ok,done!
Thanks,
--
Email: Herbert Xu  Home Page: 
http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH V2 1/2] crypto: sha1-mb - stylistic cleanup

2016-06-02 Thread Megha Dey
From: Megha Dey 

Currently there are several checkpatch warnings in the sha1_mb.c file:
'WARNING: line over 80 characters' in the sha1_mb.c file. Also, the
syntax of some multi-line comments is not correct. This patch fixes
these issues.

Signed-off-by: Megha Dey 
Signed-off-by: Herbert Xu 
---
 arch/x86/crypto/sha-mb/sha1_mb.c | 110 ++-
 1 file changed, 74 insertions(+), 36 deletions(-)

diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 9c5af33..0a46491 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -77,7 +77,8 @@ struct sha1_mb_ctx {
struct mcryptd_ahash *mcryptd_tfm;
 };
 
-static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct 
sha1_hash_ctx *hash_ctx)
+static inline struct mcryptd_hash_request_ctx
+   *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
 {
struct shash_desc *desc;
 
@@ -85,7 +86,8 @@ static inline struct mcryptd_hash_request_ctx 
*cast_hash_to_mcryptd_ctx(struct s
return container_of(desc, struct mcryptd_hash_request_ctx, desc);
 }
 
-static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct 
mcryptd_hash_request_ctx *ctx)
+static inline struct ahash_request
+   *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
 {
return container_of((void *) ctx, struct ahash_request, __ctx);
 }
@@ -97,10 +99,12 @@ static void req_ctx_init(struct mcryptd_hash_request_ctx 
*rctx,
 }
 
 static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr 
*state,
- struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr 
*state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct 
sha1_mb_mgr *state);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
+   (struct sha1_mb_mgr *state, struct job_sha1 *job);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
+   (struct sha1_mb_mgr *state);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
+   (struct sha1_mb_mgr *state);
 
 static inline void sha1_init_digest(uint32_t *digest)
 {
@@ -131,7 +135,8 @@ static inline uint32_t sha1_pad(uint8_t 
padblock[SHA1_BLOCK_SIZE * 2],
return i >> SHA1_LOG2_BLOCK_SIZE;
 }
 
-static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, 
struct sha1_hash_ctx *ctx)
+static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
+   struct sha1_hash_ctx *ctx)
 {
while (ctx) {
if (ctx->status & HASH_CTX_STS_COMPLETE) {
@@ -177,8 +182,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct 
sha1_ctx_mgr *mgr, str
 
ctx->job.buffer = (uint8_t *) buffer;
ctx->job.len = len;
-   ctx = (struct sha1_hash_ctx *) 
sha1_job_mgr_submit(>mgr,
-   
  >job);
+   ctx = (struct sha1_hash_ctx 
*)sha1_job_mgr_submit(>mgr,
+   
>job);
continue;
}
}
@@ -191,13 +196,15 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct 
sha1_ctx_mgr *mgr, str
if (ctx->status & HASH_CTX_STS_LAST) {
 
uint8_t *buf = ctx->partial_block_buffer;
-   uint32_t n_extra_blocks = sha1_pad(buf, 
ctx->total_length);
+   uint32_t n_extra_blocks =
+   sha1_pad(buf, ctx->total_length);
 
ctx->status = (HASH_CTX_STS_PROCESSING |
   HASH_CTX_STS_COMPLETE);
ctx->job.buffer = buf;
ctx->job.len = (uint32_t) n_extra_blocks;
-   ctx = (struct sha1_hash_ctx *) 
sha1_job_mgr_submit(>mgr, >job);
+   ctx = (struct sha1_hash_ctx *)
+   sha1_job_mgr_submit(>mgr, >job);
continue;
}
 
@@ -208,14 +215,17 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct 
sha1_ctx_mgr *mgr, str
return NULL;
 }
 
-static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr 
*mgr)
+static struct sha1_hash_ctx
+   *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
 {
/*
 * If get_comp_job returns NULL, there are no jobs 

[PATCH V2 2/2] crypto : async implementation for sha1-mb

2016-06-02 Thread Megha Dey
From: Megha Dey 

Herbert wants the sha1-mb algorithm to have an async implementation:
https://lkml.org/lkml/2016/4/5/286.
Currently, sha1-mb uses an async interface for the outer algorithm
and a sync interface for the inner algorithm. This patch introduces
an async interface for even the inner algorithm.

Signed-off-by: Megha Dey 
Signed-off-by: Tim Chen 
---
 arch/x86/crypto/sha-mb/sha1_mb.c | 190 ++-
 crypto/ahash.c   |   6 --
 crypto/mcryptd.c | 131 ++-
 include/crypto/hash.h|   6 ++
 include/crypto/internal/hash.h   |  16 ++--
 include/crypto/mcryptd.h |   8 +-
 6 files changed, 193 insertions(+), 164 deletions(-)

diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 0a46491..efc19e3 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -68,6 +68,7 @@
 #include 
 #include 
 #include "sha_mb_ctx.h"
+#include 
 
 #define FLUSH_INTERVAL 1000 /* in usec */
 
@@ -80,10 +81,10 @@ struct sha1_mb_ctx {
 static inline struct mcryptd_hash_request_ctx
*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
 {
-   struct shash_desc *desc;
+   struct ahash_request *areq;
 
-   desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
-   return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+   areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+   return container_of(areq, struct mcryptd_hash_request_ctx, areq);
 }
 
 static inline struct ahash_request
@@ -93,7 +94,7 @@ static inline struct ahash_request
 }
 
 static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
-   struct shash_desc *desc)
+   struct ahash_request *areq)
 {
rctx->flag = HASH_UPDATE;
 }
@@ -375,9 +376,9 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct 
sha1_ctx_mgr *mgr)
}
 }
 
-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
 {
-   struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+   struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
hash_ctx_init(sctx);
sctx->job.result_digest[0] = SHA1_H0;
@@ -395,7 +396,7 @@ static int sha1_mb_init(struct shash_desc *desc)
 static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
 {
int i;
-   struct  sha1_hash_ctx *sctx = shash_desc_ctx(>desc);
+   struct  sha1_hash_ctx *sctx = ahash_request_ctx(>areq);
__be32  *dst = (__be32 *) rctx->out;
 
for (i = 0; i < 5; ++i)
@@ -427,7 +428,7 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx 
**ret_rctx,
 
}
sha_ctx = (struct sha1_hash_ctx *)
-   shash_desc_ctx(>desc);
+   ahash_request_ctx(>areq);
kernel_fpu_begin();
sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
rctx->walk.data, nbytes, flag);
@@ -519,11 +520,10 @@ static void sha1_mb_add_list(struct 
mcryptd_hash_request_ctx *rctx,
mcryptd_arm_flusher(cstate, delay);
 }
 
-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
 {
struct mcryptd_hash_request_ctx *rctx =
-   container_of(desc, struct mcryptd_hash_request_ctx, desc);
+   container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -539,7 +539,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 
*data,
}
 
/* need to init context */
-   req_ctx_init(rctx, desc);
+   req_ctx_init(rctx, areq);
 
nbytes = crypto_ahash_walk_first(req, >walk);
 
@@ -552,7 +552,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 
*data,
rctx->flag |= HASH_DONE;
 
/* submit */
-   sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+   sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
@@ -579,11 +579,10 @@ done:
return ret;
 }
 
-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
-unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
 {
struct mcryptd_hash_request_ctx *rctx =
-   container_of(desc, struct mcryptd_hash_request_ctx, desc);
+   container_of(areq, struct 

[PATCH V2 0/2] Implementation of an async interface for sha1-mb

2016-06-02 Thread Megha Dey
From: Megha Dey 

Currently, sha1-mb uses an async interface for the outer algorithm
and a sync interface for the inner algorithm.
Herbert wants the sha1-mb algorithm to have an async implementation:
https://lkml.org/lkml/2016/4/5/286.
This patch introduces an async interface for even the inner algorithm.
Additionally, there are several checkpatch warnings in the sha1_mb.c file:
'WARNING: line over 80 characters' in the sha1_mb.c file. Also, the
syntax of some multi-line comments is not correct. This patchset fixes
these issues.

changes V1->V2
rename shash_ahash_mcryptd* functions to ahash_mcryptd_*
removed the export_symbols for the shash_ahash_mcryptd_* functions
use ahash_interfaces instead of touching the crypto_ahash_request directly

Megha Dey (2):
  crypto: sha1-mb - stylistic cleanup
  crypto : async implementation for sha1-mb

 arch/x86/crypto/sha-mb/sha1_mb.c | 292 ---
 crypto/ahash.c   |   6 -
 crypto/mcryptd.c | 131 +-
 include/crypto/hash.h|   6 +
 include/crypto/internal/hash.h   |  16 +--
 include/crypto/mcryptd.h |   8 +-
 6 files changed, 263 insertions(+), 196 deletions(-)

-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 2/2] crypto : async implementation for sha1-mb

2016-06-02 Thread Herbert Xu
On Thu, Jun 02, 2016 at 10:20:20AM -0700, Megha Dey wrote:
>
> > > @@ -439,17 +444,18 @@ static int mcryptd_hash_finup_enqueue(struct 
> > > ahash_request *req)
> > >  static void mcryptd_hash_digest(struct crypto_async_request *req_async, 
> > > int err)
> > >  {
> > >   struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
> > > - struct crypto_shash *child = ctx->child;
> > > + struct crypto_ahash *child = ctx->child;
> > >   struct ahash_request *req = ahash_request_cast(req_async);
> > >   struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
> > > - struct shash_desc *desc = >desc;
> > > + struct ahash_request *desc = >areq;
> > > + struct crypto_async_request *base = >base;
> > >  
> > >   if (unlikely(err == -EINPROGRESS))
> > >   goto out;
> > > + base->tfm = >base;
> > > + base->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */
> > 
> > You should not be touching crypto_async_request directly.  Use
> > the proper ahash interface to set the child request.
> > 
> Herbert, Could you please clarify?
> In the earlier code we had a async_request which is now replaced by
> crypto_async_request. Do you want a new async_request to be used?
> Do you think we shouldn't be setting the members of the
> crypto_ahash_request directly, but use some other interface to do the
> same for us?

You already have an ahash_request here.  So you should be doing

ahash_request_set_tfm(...)
ahash_request_set_callback(...)

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 2/2] crypto : async implementation for sha1-mb

2016-06-02 Thread Megha Dey
On Thu, 2016-06-02 at 18:33 +0800, Herbert Xu wrote:
> On Tue, May 31, 2016 at 02:42:21PM -0700, Megha Dey wrote:
> >
> > @@ -416,8 +421,8 @@ static void mcryptd_hash_finup(struct 
> > crypto_async_request *req_async, int err)
> >  
> > if (unlikely(err == -EINPROGRESS))
> > goto out;
> > -
> > -   err = shash_ahash_mcryptd_finup(req, >desc);
> > +   rctx->out = req->result;
> > +   err = shash_ahash_mcryptd_finup(req, >areq);
> 
> These shash_ahash functions should be renamed.
> 
> Also why are they exported?

taken care of this.
> 
> > @@ -439,17 +444,18 @@ static int mcryptd_hash_finup_enqueue(struct 
> > ahash_request *req)
> >  static void mcryptd_hash_digest(struct crypto_async_request *req_async, 
> > int err)
> >  {
> > struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
> > -   struct crypto_shash *child = ctx->child;
> > +   struct crypto_ahash *child = ctx->child;
> > struct ahash_request *req = ahash_request_cast(req_async);
> > struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
> > -   struct shash_desc *desc = >desc;
> > +   struct ahash_request *desc = >areq;
> > +   struct crypto_async_request *base = >base;
> >  
> > if (unlikely(err == -EINPROGRESS))
> > goto out;
> > +   base->tfm = >base;
> > +   base->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */
> 
> You should not be touching crypto_async_request directly.  Use
> the proper ahash interface to set the child request.
> 
Herbert, Could you please clarify?
In the earlier code we had a async_request which is now replaced by
crypto_async_request. Do you want a new async_request to be used?
Do you think we shouldn't be setting the members of the
crypto_ahash_request directly, but use some other interface to do the
same for us?
> Thanks,


--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


authenc methods vs FIPS in light of unencrypted associated data

2016-06-02 Thread Marcus Meissner
Hi,

In February I already tagged some authenc ciphers for FIPS compatibility.

I am currently revisiting this to get testmgr running all the tests in strict FIPS
mode.

The authenc() class is troublesome.

There is a HASH + ENC part of this method, but you can also add associated data,
which is not encrypted. (using the ctx->null cipher in crypto/authenc.c)

But in FIPS mode the crypto_authenc_init_tfm does:

null = crypto_get_default_null_skcipher();

which results in error, as the crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0);
results in failure due to "ecb(cipher_null)" not FIPS compliant.

How to handle this?

I think GCM also does not encrypt the associated data, just hashes it; it just
copies the content itself and does not use a virtual cipher.

Ciao, Marcus
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 3/4] crypto: kdf - SP800-108 Key Derivation Function

2016-06-02 Thread Stephan Mueller
Am Donnerstag, 2. Juni 2016, 16:55:26 schrieb Herbert Xu:

Hi Herbert,

> 
> Why don't you put the result in a temporary buffer and then copy
> it? These things are tiny, right?

The KDFs are usually used for output sizes between one and 4 keys. So, 
commonly it is expected that not more than 200 or 300 bytes are generated by 
one call. But you cannot be sure how much data a user wants. The spec allows 
that the user generates up to 2^50 or so bytes. The implementation I offer is 
limited to unsigned int bytes.

Note, if one would implement a key ladder, it can be expected that many keys 
are generated from one KDF seed.

I tried to avoid memcpy for speed purposes. And all the user needs to do is to 
not invoke an in-place crypto operation.

Maybe I should copy the input data into a private memory location so that the 
KDF can be used like any other cipher: the caller uses a reference to the 
instance to generate data where the caller does not need to ensure that some 
initial data must be left at some specific place.

Ciao
Stephan
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 1/1] crypto: engine: permit to enqueue ahash_request

2016-06-02 Thread LABBE Corentin
The current crypto engine allows only ablkcipher_request to be enqueued,
thus denying any use of it for hardware that also handles hash algorithms.

This patch convert all ablkcipher_request references to the
more general crypto_async_request.

Since the crypto engine is now generic, this patch renames
crypt_one_request to do_one_request.

Since omap-aes/omap-des are the only users, this patch also converts them to
the new crypto engine API.

Signed-off-by: LABBE Corentin 
---
 crypto/crypto_engine.c| 19 ---
 drivers/crypto/omap-aes.c | 18 +-
 drivers/crypto/omap-des.c | 18 +-
 include/crypto/algapi.h   | 18 +-
 4 files changed, 43 insertions(+), 30 deletions(-)

diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index a55c82d..e9b31f4 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -19,7 +19,7 @@
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
 void crypto_finalize_request(struct crypto_engine *engine,
-struct ablkcipher_request *req, int err);
+struct crypto_async_request *req, int err);
 
 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
@@ -34,7 +34,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 bool in_kthread)
 {
struct crypto_async_request *async_req, *backlog;
-   struct ablkcipher_request *req;
unsigned long flags;
bool was_busy = false;
int ret;
@@ -82,9 +81,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!async_req)
goto out;
 
-   req = ablkcipher_request_cast(async_req);
-
-   engine->cur_req = req;
+   engine->cur_req = async_req;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
 
@@ -113,7 +110,7 @@ static void crypto_pump_requests(struct crypto_engine 
*engine,
engine->cur_req_prepared = true;
}
 
-   ret = engine->crypt_one_request(engine, engine->cur_req);
+   ret = engine->do_one_request(engine, engine->cur_req);
if (ret) {
pr_err("failed to crypt one request from queue\n");
goto req_err;
@@ -142,7 +139,7 @@ static void crypto_pump_work(struct kthread_work *work)
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_request(struct crypto_engine *engine,
-   struct ablkcipher_request *req, bool need_pump)
+   struct crypto_async_request *req, bool need_pump)
 {
unsigned long flags;
int ret;
@@ -154,7 +151,7 @@ int crypto_transfer_request(struct crypto_engine *engine,
return -ESHUTDOWN;
}
 
-   ret = ablkcipher_enqueue_request(>queue, req);
+   ret = crypto_enqueue_request(>queue, req);
 
if (!engine->busy && need_pump)
queue_kthread_work(>kworker, >pump_requests);
@@ -171,7 +168,7 @@ EXPORT_SYMBOL_GPL(crypto_transfer_request);
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ struct crypto_async_request *req)
 {
return crypto_transfer_request(engine, req, true);
 }
@@ -184,7 +181,7 @@ EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
  * @err: error number
  */
 void crypto_finalize_request(struct crypto_engine *engine,
-struct ablkcipher_request *req, int err)
+struct crypto_async_request *req, int err)
 {
unsigned long flags;
bool finalize_cur_req = false;
@@ -208,7 +205,7 @@ void crypto_finalize_request(struct crypto_engine *engine,
spin_unlock_irqrestore(>queue_lock, flags);
}
 
-   req->base.complete(>base, err);
+   req->complete(req, err);
 
queue_kthread_work(>kworker, >pump_requests);
 }
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce174d3..a8234fc 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -519,7 +519,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, 
int err)
 
pr_debug("err: %d\n", err);
 
-   crypto_finalize_request(dd->engine, req, err);
+   crypto_finalize_request(dd->engine, >base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -592,14 +592,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 struct ablkcipher_request *req)
 {
if (req)
-   return crypto_transfer_request_to_engine(dd->engine, req);
+   return crypto_transfer_request_to_engine(dd->engine, 
>base);
 
return 0;
 }
 
 static int omap_aes_prepare_req(struct crypto_engine *engine,
-   

[PATCH v3 0/1] crypto: engine: permit to enqueue ahash_request

2016-06-02 Thread LABBE Corentin
Hello

I wanted to use the crypto engine for my Allwinner crypto driver but something
prevented me to use it: it cannot enqueue hash requests.
This patch converts the crypto engine to permit enqueuing of ahash_requests.
It also converts the only driver using the crypto engine.

The modifications against omap were only compile-tested, but the crypto engine
with hash support was tested on two different off-tree drivers
(sun4i-ss and sun8i-ce).

Regards

Changes since v1:
- rebased on cryptodev for handling omap-des

Changes since v2:
- Fusionned both patch
- Renamed crypt_one_request to do_one_request
- Test the type of request before processing it

LABBE Corentin (1):
  crypto: engine: permit to enqueue ahash_request

 crypto/crypto_engine.c| 19 ---
 drivers/crypto/omap-aes.c | 18 +-
 drivers/crypto/omap-des.c | 18 +-
 include/crypto/algapi.h   | 18 +-
 4 files changed, 43 insertions(+), 30 deletions(-)

-- 
2.7.3

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 0/9] crypto: asynchronous compression api

2016-06-02 Thread Giovanni Cabiddu
The following patch set introduces acomp, a generic asynchronous
(de)compression api with support for SG lists.
We propose a new crypto type called crypto_acomp_type, a new struct acomp_alg
and struct crypto_acomp, together with number of helper functions to register
acomp type algorithms and allocate tfm instances.
This interface will allow the following operations:

int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);

Together with acomp we propose a new driver-side interface, scomp, which
handles compression implementations which use linear buffers. We converted all
compression algorithms available in LKCF to use this interface so that those
algorithms will be accessible through the acomp api.

Changes in v5:
- removed qdecompress api, no longer needed
- removed produced and consumed counters in acomp_req
- added crypto_has_acomp function 

Changes in v4:
- added qdecompress api, a front-end for decompression algorithms which
  do not need additional vmalloc work space

Changes in v3:
- added driver-side scomp interface
- provided support for lzo, lz4, lz4hc, 842, deflate compression algorithms
  via the acomp api (through scomp)
- extended testmgr to support acomp
- removed extended acomp api for supporting deflate algorithm parameters
  (will be enhanced and re-proposed in future)
Note that (2) to (7) are a rework of Joonsoo Kim's scomp patches.

Changes in v2:
- added compression and decompression request sizes in acomp_alg
  in order to enable noctx support
- extended api with helpers to allocate compression and
  decompression requests

Changes from initial submit:
- added consumed and produced fields to acomp_req
- extended api to support configuration of deflate compressors

Giovanni Cabiddu (9):
  crypto: shrink hash down to two types
  crypto: add asynchronous compression api
  crypto: add driver-side scomp interface
  crypto: acomp - add support for lzo via scomp
  crypto: acomp - add support for lz4 via scomp
  crypto: acomp - add support for lz4hc via scomp
  crypto: acomp - add support for 842 via scomp
  crypto: acomp - add support for deflate via scomp
  crypto: acomp - update testmgr with support for acomp

 crypto/842.c|   82 +++-
 crypto/Kconfig  |   15 ++
 crypto/Makefile |3 +
 crypto/acompress.c  |  163 ++
 crypto/crypto_user.c|   21 +++
 crypto/deflate.c|  111 ++--
 crypto/lz4.c|   91 +++--
 crypto/lz4hc.c  |   92 +++--
 crypto/lzo.c|   97 +++--
 crypto/scompress.c  |  252 ++
 crypto/testmgr.c|  158 --
 include/crypto/acompress.h  |  254 +++
 include/crypto/internal/acompress.h |   82 +++
 include/crypto/internal/scompress.h |  134 ++
 include/linux/crypto.h  |   13 +-
 15 files changed, 1502 insertions(+), 66 deletions(-)
 create mode 100644 crypto/acompress.c
 create mode 100644 crypto/scompress.c
 create mode 100644 include/crypto/acompress.h
 create mode 100644 include/crypto/internal/acompress.h
 create mode 100644 include/crypto/internal/scompress.h

-- 
1.7.4.1

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 8/9] crypto: acomp - add support for deflate via scomp

2016-06-02 Thread Giovanni Cabiddu
This patch implements an scomp backend for the deflate compression
algorithm. This way, deflate is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig   |1 +
 crypto/deflate.c |  111 +-
 2 files changed, 102 insertions(+), 10 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 09c88ba..b617c5d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1502,6 +1502,7 @@ comment "Compression"
 config CRYPTO_DEFLATE
tristate "Deflate compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select ZLIB_INFLATE
select ZLIB_DEFLATE
help
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 95d8d37..f942cb3 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define DEFLATE_DEF_LEVEL  Z_DEFAULT_COMPRESSION
 #define DEFLATE_DEF_WINBITS11
@@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
vfree(ctx->decomp_stream.workspace);
 }
 
-static int deflate_init(struct crypto_tfm *tfm)
+static int __deflate_init(void *ctx)
 {
-   struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
 
ret = deflate_comp_init(ctx);
@@ -116,19 +116,55 @@ out:
return ret;
 }
 
-static void deflate_exit(struct crypto_tfm *tfm)
+static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
+{
+   struct deflate_ctx *ctx;
+   int ret;
+
+   ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   ret = __deflate_init(ctx);
+   if (ret) {
+   kfree(ctx);
+   return ERR_PTR(ret);
+   }
+
+   return ctx;
+}
+
+static int deflate_init(struct crypto_tfm *tfm)
 {
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
 
+   return __deflate_init(ctx);
+}
+
+static void __deflate_exit(void *ctx)
+{
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
 }
 
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
-   unsigned int slen, u8 *dst, unsigned int *dlen)
+static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   __deflate_exit(ctx);
+   kzfree(ctx);
+}
+
+static void deflate_exit(struct crypto_tfm *tfm)
+{
+   struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   __deflate_exit(ctx);
+}
+
+static int __deflate_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
 {
int ret = 0;
-   struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+   struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = >comp_stream;
 
ret = zlib_deflateReset(stream);
@@ -153,12 +189,27 @@ out:
return ret;
 }
 
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
+   unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+   struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+   return __deflate_compress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
+unsigned int slen, u8 *dst, unsigned int *dlen,
+void *ctx)
+{
+   return __deflate_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __deflate_decompress(const u8 *src, unsigned int slen,
+   u8 *dst, unsigned int *dlen, void *ctx)
 {
 
int ret = 0;
-   struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+   struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = >decomp_stream;
 
ret = zlib_inflateReset(stream);
@@ -194,6 +245,21 @@ out:
return ret;
 }
 
+static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+   struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+   return __deflate_decompress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen,
+  void *ctx)
+{
+   return __deflate_decompress(src, slen, dst, dlen, ctx);
+}
+
 static struct crypto_alg alg = {
.cra_name   = "deflate",
.cra_flags  = CRYPTO_ALG_TYPE_COMPRESS,
@@ -206,14 +272,39 @@ static struct crypto_alg alg = {
.coa_decompress = deflate_decompress } }
 };
 
+static struct scomp_alg scomp = {
+   .alloc_ctx  = deflate_alloc_ctx,
+   .free_ctx   = deflate_free_ctx,
+   .compress   = deflate_scompress,
+   .decompress  

[PATCH v5 2/9] crypto: add asynchronous compression api

2016-06-02 Thread Giovanni Cabiddu
This patch introduces acomp, an asynchronous compression api that uses
scatterlist buffers.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig  |   10 ++
 crypto/Makefile |2 +
 crypto/acompress.c  |  118 
 crypto/crypto_user.c|   21 +++
 include/crypto/acompress.h  |  263 +++
 include/crypto/internal/acompress.h |   66 +
 include/linux/crypto.h  |1 +
 7 files changed, 481 insertions(+), 0 deletions(-)
 create mode 100644 crypto/acompress.c
 create mode 100644 include/crypto/acompress.h
 create mode 100644 include/crypto/internal/acompress.h

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1d33beb..24fef55 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER
select CRYPTO_AKCIPHER2
select CRYPTO_ALGAPI
 
+config CRYPTO_ACOMP
+   tristate
+   select CRYPTO_ACOMP2
+   select CRYPTO_ALGAPI
+
+config CRYPTO_ACOMP2
+   tristate
+   select CRYPTO_ALGAPI2
+
 config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@@ -115,6 +124,7 @@ config CRYPTO_MANAGER2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
+   select CRYPTO_ACOMP2
 
 config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
diff --git a/crypto/Makefile b/crypto/Makefile
index 4f4ef7e..e817b38 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
+obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
+
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
 clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 000..f24fef3
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,118 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li 
+ *  Giovanni Cabiddu 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "internal.h"
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+   struct crypto_report_comp racomp;
+
+   strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+   if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+   sizeof(struct crypto_report_comp), ))
+   goto nla_put_failure;
+   return 0;
+
+nla_put_failure:
+   return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+   return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+   __attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+   seq_puts(m, "type : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+   struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+   struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+   alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+   struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+   struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+   if (alg->exit)
+   acomp->base.exit = crypto_acomp_exit_tfm;
+
+   if (alg->init)
+   return alg->init(acomp);
+
+   return 0;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+   .extsize = crypto_alg_extsize,
+   .init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+   .show = crypto_acomp_show,
+#endif
+   .report = crypto_acomp_report,
+   .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+   .maskset = CRYPTO_ALG_TYPE_MASK,
+   .type = CRYPTO_ALG_TYPE_ACOMPRESS,
+   .tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+   u32 mask)
+{
+   return crypto_alloc_tfm(alg_name, _acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+   struct crypto_alg *base = >base;
+
+   base->cra_type = _acomp_type;
+   base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+   base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+   return 

[PATCH v5 3/9] crypto: add driver-side scomp interface

2016-06-02 Thread Giovanni Cabiddu
Add a synchronous back-end (scomp) to acomp. This allows to easily expose
the already present compression algorithms in LKCF via acomp

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Makefile |1 +
 crypto/acompress.c  |   49 +++-
 crypto/scompress.c  |  252 +++
 include/crypto/acompress.h  |   33 ++---
 include/crypto/internal/acompress.h |   16 +++
 include/crypto/internal/scompress.h |  134 +++
 include/linux/crypto.h  |2 +
 7 files changed, 464 insertions(+), 23 deletions(-)
 create mode 100644 crypto/scompress.c
 create mode 100644 include/crypto/internal/scompress.h

diff --git a/crypto/Makefile b/crypto/Makefile
index e817b38..fc8fcfe 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
 obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
diff --git a/crypto/acompress.c b/crypto/acompress.c
index f24fef3..885d15d 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -22,8 +22,11 @@
 #include 
 #include 
 #include 
+#include 
 #include "internal.h"
 
+static const struct crypto_type crypto_acomp_type;
+
 #ifdef CONFIG_NET
 static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
@@ -67,6 +70,13 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
 
+   if (tfm->__crt_alg->cra_type != _acomp_type)
+   return crypto_init_scomp_ops_async(tfm);
+
+   acomp->compress = alg->compress;
+   acomp->decompress = alg->decompress;
+   acomp->reqsize = alg->reqsize;
+
if (alg->exit)
acomp->base.exit = crypto_acomp_exit_tfm;
 
@@ -76,15 +86,25 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
return 0;
 }
 
+unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+   int extsize = crypto_alg_extsize(alg);
+
+   if (alg->cra_type != _acomp_type)
+   extsize += sizeof(struct crypto_scomp *);
+
+   return extsize;
+}
+
 static const struct crypto_type crypto_acomp_type = {
-   .extsize = crypto_alg_extsize,
+   .extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
 #ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
 #endif
.report = crypto_acomp_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
-   .maskset = CRYPTO_ALG_TYPE_MASK,
+   .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
 };
@@ -96,6 +116,31 @@ struct crypto_acomp *crypto_alloc_acomp(const char 
*alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
 
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp, gfp_t gfp)
+{
+   struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+   struct acomp_req *req;
+
+   req = __acomp_request_alloc(acomp, gfp);
+   if (req && (tfm->__crt_alg->cra_type != _acomp_type))
+   return crypto_acomp_scomp_alloc_ctx(req);
+
+   return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+   struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+   struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+   if (tfm->__crt_alg->cra_type != _acomp_type)
+   crypto_acomp_scomp_free_ctx(req);
+
+   __acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
 int crypto_register_acomp(struct acomp_alg *alg)
 {
struct crypto_alg *base = >base;
diff --git a/crypto/scompress.c b/crypto/scompress.c
new file mode 100644
index 000..850b427
--- /dev/null
+++ b/crypto/scompress.c
@@ -0,0 +1,252 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "internal.h"
+
+static const struct crypto_type crypto_scomp_type;
+
+#ifdef CONFIG_NET
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+   struct crypto_report_comp rscomp;
+
+   strncpy(rscomp.type, "scomp", 

[PATCH v5 1/9] crypto: shrink hash down to two types

2016-06-02 Thread Giovanni Cabiddu
Move hash to 0xe to free up the space for acomp/scomp

Signed-off-by: Giovanni Cabiddu 
---
 include/linux/crypto.h |   10 +-
 1 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6e28c89..d844cbc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -48,15 +48,15 @@
 #define CRYPTO_ALG_TYPE_BLKCIPHER  0x0004
 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x0005
 #define CRYPTO_ALG_TYPE_GIVCIPHER  0x0006
-#define CRYPTO_ALG_TYPE_DIGEST 0x0008
-#define CRYPTO_ALG_TYPE_HASH   0x0008
-#define CRYPTO_ALG_TYPE_SHASH  0x0009
-#define CRYPTO_ALG_TYPE_AHASH  0x000a
 #define CRYPTO_ALG_TYPE_RNG0x000c
 #define CRYPTO_ALG_TYPE_AKCIPHER   0x000d
+#define CRYPTO_ALG_TYPE_DIGEST 0x000e
+#define CRYPTO_ALG_TYPE_HASH   0x000e
+#define CRYPTO_ALG_TYPE_SHASH  0x000e
+#define CRYPTO_ALG_TYPE_AHASH  0x000f
 
 #define CRYPTO_ALG_TYPE_HASH_MASK  0x000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x000c
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x000e
 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x000c
 
 #define CRYPTO_ALG_LARVAL  0x0010
-- 
1.7.4.1

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 7/9] crypto: acomp - add support for 842 via scomp

2016-06-02 Thread Giovanni Cabiddu
This patch implements an scomp backend for the 842 compression algorithm.
This way, 842 is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/842.c   |   82 +--
 crypto/Kconfig |1 +
 2 files changed, 80 insertions(+), 3 deletions(-)

diff --git a/crypto/842.c b/crypto/842.c
index 98e387e..a954ed3 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -31,11 +31,46 @@
 #include 
 #include 
 #include 
+#include 
 
 struct crypto842_ctx {
-   char wmem[SW842_MEM_COMPRESS];  /* working memory for compress */
+   void *wmem; /* working memory for compress */
 };
 
+static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+{
+   void *ctx;
+
+   ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   return ctx;
+}
+
+static int crypto842_init(struct crypto_tfm *tfm)
+{
+   struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   ctx->wmem = crypto842_alloc_ctx(NULL);
+   if (IS_ERR(ctx->wmem))
+   return -ENOMEM;
+
+   return 0;
+}
+
+static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   kfree(ctx);
+}
+
+static void crypto842_exit(struct crypto_tfm *tfm)
+{
+   struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   crypto842_free_ctx(NULL, ctx->wmem);
+}
+
 static int crypto842_compress(struct crypto_tfm *tfm,
  const u8 *src, unsigned int slen,
  u8 *dst, unsigned int *dlen)
@@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm,
return sw842_compress(src, slen, dst, dlen, ctx->wmem);
 }
 
+static int crypto842_scompress(struct crypto_scomp *tfm,
+  const u8 *src, unsigned int slen,
+  u8 *dst, unsigned int *dlen, void *ctx)
+{
+   return sw842_compress(src, slen, dst, dlen, ctx);
+}
+
 static int crypto842_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
@@ -52,27 +94,61 @@ static int crypto842_decompress(struct crypto_tfm *tfm,
return sw842_decompress(src, slen, dst, dlen);
 }
 
+static int crypto842_sdecompress(struct crypto_scomp *tfm,
+const u8 *src, unsigned int slen,
+u8 *dst, unsigned int *dlen, void *ctx)
+{
+   return sw842_decompress(src, slen, dst, dlen);
+}
+
 static struct crypto_alg alg = {
.cra_name   = "842",
.cra_driver_name= "842-generic",
.cra_priority   = 100,
.cra_flags  = CRYPTO_ALG_TYPE_COMPRESS,
-   .cra_ctxsize= sizeof(struct crypto842_ctx),
.cra_module = THIS_MODULE,
+   .cra_init   = crypto842_init,
+   .cra_exit   = crypto842_exit,
.cra_u  = { .compress = {
.coa_compress   = crypto842_compress,
.coa_decompress = crypto842_decompress } }
 };
 
+static struct scomp_alg scomp = {
+   .alloc_ctx  = crypto842_alloc_ctx,
+   .free_ctx   = crypto842_free_ctx,
+   .compress   = crypto842_scompress,
+   .decompress = crypto842_sdecompress,
+   .base   = {
+   .cra_name   = "842",
+   .cra_driver_name = "842-scomp",
+   .cra_priority= 100,
+   .cra_module  = THIS_MODULE,
+   }
+};
+
 static int __init crypto842_mod_init(void)
 {
-   return crypto_register_alg();
+   int ret;
+
+   ret = crypto_register_alg();
+   if (ret)
+   return ret;
+
+   ret = crypto_register_scomp();
+   if (ret) {
+   crypto_unregister_alg();
+   return ret;
+   }
+
+   return ret;
 }
 module_init(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
crypto_unregister_alg();
+   crypto_unregister_scomp();
 }
 module_exit(crypto842_mod_exit);
 
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 59570da..09c88ba 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1522,6 +1522,7 @@ config CRYPTO_LZO
 config CRYPTO_842
tristate "842 compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select 842_COMPRESS
select 842_DECOMPRESS
help
-- 
1.7.4.1

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 5/9] crypto: acomp - add support for lz4 via scomp

2016-06-02 Thread Giovanni Cabiddu
This patch implements an scomp backend for the lz4 compression algorithm.
This way, lz4 is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig |1 +
 crypto/lz4.c   |   91 +--
 2 files changed, 82 insertions(+), 10 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 08075c1..114d43b 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1530,6 +1530,7 @@ config CRYPTO_842
 config CRYPTO_LZ4
tristate "LZ4 compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select LZ4_COMPRESS
select LZ4_DECOMPRESS
help
diff --git a/crypto/lz4.c b/crypto/lz4.c
index aefbcea..99c1b2c 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -23,36 +23,53 @@
 #include 
 #include 
 #include 
+#include 
 
 struct lz4_ctx {
void *lz4_comp_mem;
 };
 
+static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+{
+   void *ctx;
+
+   ctx = vmalloc(LZ4_MEM_COMPRESS);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   return ctx;
+}
+
 static int lz4_init(struct crypto_tfm *tfm)
 {
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-   ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS);
-   if (!ctx->lz4_comp_mem)
+   ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
+   if (IS_ERR(ctx->lz4_comp_mem))
return -ENOMEM;
 
return 0;
 }
 
+static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   vfree(ctx);
+}
+
 static void lz4_exit(struct crypto_tfm *tfm)
 {
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-   vfree(ctx->lz4_comp_mem);
+
+   lz4_free_ctx(NULL, ctx->lz4_comp_mem);
 }
 
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
-   unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
+u8 *dst, unsigned int *dlen, void *ctx)
 {
-   struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
 
-   err = lz4_compress(src, slen, dst, _len, ctx->lz4_comp_mem);
+   err = lz4_compress(src, slen, dst, _len, ctx);
 
if (err < 0)
return -EINVAL;
@@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const 
u8 *src,
return 0;
 }
 
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
+unsigned int slen, u8 *dst, unsigned int *dlen,
+void *ctx)
+{
+   return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+   struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
+}
+
+static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
+  u8 *dst, unsigned int *dlen, void *ctx)
 {
int err;
size_t tmp_len = *dlen;
@@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, 
const u8 *src,
return err;
 }
 
+static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen,
+  void *ctx)
+{
+   return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+unsigned int slen, u8 *dst,
+unsigned int *dlen)
+{
+   return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
 static struct crypto_alg alg_lz4 = {
.cra_name   = "lz4",
.cra_flags  = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +135,39 @@ static struct crypto_alg alg_lz4 = {
.coa_decompress = lz4_decompress_crypto } }
 };
 
+static struct scomp_alg scomp = {
+   .alloc_ctx  = lz4_alloc_ctx,
+   .free_ctx   = lz4_free_ctx,
+   .compress   = lz4_scompress,
+   .decompress = lz4_sdecompress,
+   .base   = {
+   .cra_name   = "lz4",
+   .cra_driver_name = "lz4-scomp",
+   .cra_module  = THIS_MODULE,
+   }
+};
+
 static int __init lz4_mod_init(void)
 {
-   return crypto_register_alg(_lz4);
+   int ret;
+
+   ret = crypto_register_alg(_lz4);
+   if (ret)
+   return ret;
+
+   ret = crypto_register_scomp();
+   if (ret) {
+   crypto_unregister_alg(_lz4);
+   return ret;
+   }
+
+   return ret;
 }
 
 

[PATCH v5 9/9] crypto: acomp - update testmgr with support for acomp

2016-06-02 Thread Giovanni Cabiddu
This patch adds tests to the test manager for algorithms exposed through
the acomp api

Signed-off-by: Giovanni Cabiddu 
---
 crypto/testmgr.c |  158 +-
 1 files changed, 145 insertions(+), 13 deletions(-)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c727fb0..3dcac20 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "internal.h"
 
@@ -1423,6 +1424,121 @@ out:
return ret;
 }
 
+static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
+ struct comp_testvec *dtemplate, int ctcount, int dtcount)
+{
+   const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+   unsigned int i;
+   char output[COMP_BUF_SIZE];
+   int ret;
+   struct scatterlist src, dst;
+   struct acomp_req *req;
+   struct tcrypt_result result;
+
+   for (i = 0; i < ctcount; i++) {
+   unsigned int dlen = COMP_BUF_SIZE;
+   int ilen = ctemplate[i].inlen;
+
+   memset(output, 0, sizeof(output));
+   init_completion();
+   sg_init_one(, ctemplate[i].input, ilen);
+   sg_init_one(, output, dlen);
+
+   req = acomp_request_alloc(tfm, GFP_KERNEL);
+   if (!req) {
+   pr_err("alg: acomp: request alloc failed for %s\n",
+  algo);
+   ret = -ENOMEM;
+   goto out;
+   }
+
+   acomp_request_set_params(req, , , ilen, dlen);
+   acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+  tcrypt_complete, );
+
+   ret = wait_async_op(, crypto_acomp_compress(req));
+   if (ret) {
+   pr_err("alg: acomp: compression failed on test %d for 
%s: ret=%d\n",
+  i + 1, algo, -ret);
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (req->dlen != ctemplate[i].outlen) {
+   pr_err("alg: acomp: Compression test %d failed for %s: 
output len = %d\n",
+  i + 1, algo, req->dlen);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (memcmp(output, ctemplate[i].output, req->dlen)) {
+   pr_err("alg: acomp: Compression test %d failed for 
%s\n",
+  i + 1, algo);
+   hexdump(output, req->dlen);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   acomp_request_free(req);
+   }
+
+   for (i = 0; i < dtcount; i++) {
+   unsigned int dlen = COMP_BUF_SIZE;
+   int ilen = dtemplate[i].inlen;
+
+   memset(output, 0, sizeof(output));
+   init_completion();
+   sg_init_one(, dtemplate[i].input, ilen);
+   sg_init_one(, output, dlen);
+
+   req = acomp_request_alloc(tfm, GFP_KERNEL);
+   if (!req) {
+   pr_err("alg: acomp: request alloc failed for %s\n",
+  algo);
+   ret = -ENOMEM;
+   goto out;
+   }
+
+   acomp_request_set_params(req, , , ilen, dlen);
+   acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+  tcrypt_complete, );
+
+   ret = wait_async_op(, crypto_acomp_decompress(req));
+   if (ret) {
+   pr_err("alg: acomp: decompression failed on test %d for 
%s: ret=%d\n",
+  i + 1, algo, -ret);
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (req->dlen != dtemplate[i].outlen) {
+   pr_err("alg: acomp: Decompression test %d failed for 
%s: output len = %d\n",
+  i + 1, algo, req->dlen);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (memcmp(output, dtemplate[i].output, req->dlen)) {
+   pr_err("alg: acomp: Decompression test %d failed for 
%s\n",
+  i + 1, algo);
+   hexdump(output, req->dlen);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   acomp_request_free(req);
+   }
+
+   ret = 0;
+
+out:
+   return ret;
+}
+
 static int 

Re: [RFC] DRBG: which shall be default?

2016-06-02 Thread Stephan Mueller
Am Donnerstag, 2. Juni 2016, 17:42:11 schrieb Herbert Xu:

Hi Herbert,

> On Thu, Jun 02, 2016 at 11:31:22AM +0200, Stephan Mueller wrote:
> > The skcipher API, however, wants to encrypt an entire input data stream.
> > That means the skcipher API requires the length of the input data stream
> > to generate an equally sized output data stream. But that is not what we
> > have here -- there is no input data. I.e. the skcipher API invokes the
> > CTR mode for the stream cipher and performs the final XOR of the CTR
> > stream with the input data.
> 
> Just use an input stream of zeros.

I am working on it. During the analysis, I saw, however, that the DRBG 
increments the counter before the encryption whereas the CTR mode 
increments it after the encryption.

I could of course adjust the handling in the code, but this would be a real 
hack IMHO.

Ciao
Stephan
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


RE: [PATCH v5 1/3] crypto: Key-agreement Protocol Primitives API (KPP)

2016-06-02 Thread Benedetto, Salvatore
Hi Herbert,

apologies, I missed this email.

> -Original Message-
> From: linux-crypto-ow...@vger.kernel.org [mailto:linux-crypto-
> ow...@vger.kernel.org] On Behalf Of Herbert Xu
> Sent: Tuesday, May 31, 2016 7:42 AM
> To: Benedetto, Salvatore 
> Cc: linux-crypto@vger.kernel.org
> Subject: Re: [PATCH v5 1/3] crypto: Key-agreement Protocol Primitives API
> (KPP)
> 
> On Mon, May 09, 2016 at 10:40:39PM +0100, Salvatore Benedetto wrote:
> > Add key-agreement protocol primitives (kpp) API which allows to
> > implement primitives required by protocols such as DH and ECDH.
> > The API is composed mainly by the following functions
> >  * set_params() - It allows the user to set the parameters known to
> >both parties involved in the key-agreement session
> >  * set_secret() - It allows the user to set his secret, also
> >referred to as his private key
> 
> Why can't we just have one function, set_secret or better yet setkey?
>

Off the top of my head, with ECDH when the user gets a EGAIN, he wants
to reset the secret key only, not the params.

> >  * generate_public_key() - It generates the public key to be sent to
> >the other counterpart involved in the key-agreement session. The
> >function has to be called after set_params() and set_secret()
> >  * generate_secret() - It generates the shared secret for the session
> 
> Ditto, we only need one operation and that is multiplication by the secret.

Sorry, but I don't understand your point.
We do always need one math operation with different params.

> I'm OK with you keeping them separate for kpp users so that they don't have
> to explicitly provide G but please ensure that drivers only have to implement
> one of them.

The implementation still has to know which params to use for the operation
and somehow we need to pass that information on.

Can you please help understand what your main concern is? :-)

Thanks,
Salvatore
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] hwrng: alea - Add support for Araneus Alea I USB RNG

2016-06-02 Thread Bob Ham
On Thu, 2016-06-02 at 12:43 +0200, Clemens Ladisch wrote:
> Bob Ham wrote:
> > Adds support for the Araneus Alea I USB hardware Random Number
> > Generator.  This RNG creates entropy at a high rate, about 100kb/s.
> >
> > Signed-off-by: Bob Ham 
> > ---
> >
> > +++ b/drivers/char/hw_random/alea.c
> 
> Why didn't you just add the device ID to chaoskey.c?
> (Because that one is hidden in drivers/usb/misc? ;-)

Argh!  Because that one is hidden in drivers/usb/misc!  Pfft :-)

Thanks.

-- 
Bob Ham 
Software Engineer
Collabora


Open First
Collabora is hiring!
Please check out our latest opportunities here:
http://bit.ly/Collabora-Careers



--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] hwrng: alea - Add support for Araneus Alea I USB RNG

2016-06-02 Thread Clemens Ladisch
Bob Ham wrote:
> Adds support for the Araneus Alea I USB hardware Random Number
> Generator.  This RNG creates entropy at a high rate, about 100kb/s.
>
> Signed-off-by: Bob Ham 
> ---
>
> +++ b/drivers/char/hw_random/alea.c

Why didn't you just add the device ID to chaoskey.c?
(Because that one is hidden in drivers/usb/misc? ;-)


Regards,
Clemens
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/2] crypto : stylistic cleanup in sha1-mb

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 02:42:20PM -0700, Megha Dey wrote:
> From: Megha Dey 
> 
> Currently there are several checkpatch warnings in the sha1_mb.c file:
> 'WARNING: line over 80 characters' in the sha1_mb.c file. Also, the
> syntax of some multi-line comments are not correct. This patch fixes
> these issues.
> 
> Signed-off-by: Megha Dey 

Patch applied.  Thanks!
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] crypto: DRBG - reduce number of setkey calls

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 01:11:57PM +0200, Stephan Mueller wrote:
> The CTR DRBG code always set the key for each sym cipher invocation even
> though the key has not been changed.
> 
> The patch ensures that the setkey is only invoked when a new key is
> generated by the DRBG.
> 
> With this patch, the CTR DRBG performance increases by more than 150%.
> 
> Signed-off-by: Stephan Mueller 

Patch applied.  Thanks!
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 2/2] crypto : async implementation for sha1-mb

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 02:42:21PM -0700, Megha Dey wrote:
>
> @@ -416,8 +421,8 @@ static void mcryptd_hash_finup(struct 
> crypto_async_request *req_async, int err)
>  
>   if (unlikely(err == -EINPROGRESS))
>   goto out;
> -
> - err = shash_ahash_mcryptd_finup(req, >desc);
> + rctx->out = req->result;
> + err = shash_ahash_mcryptd_finup(req, >areq);

These shash_ahash functions should be renamed.

Also why are they exported?

> @@ -439,17 +444,18 @@ static int mcryptd_hash_finup_enqueue(struct 
> ahash_request *req)
>  static void mcryptd_hash_digest(struct crypto_async_request *req_async, int 
> err)
>  {
>   struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
> - struct crypto_shash *child = ctx->child;
> + struct crypto_ahash *child = ctx->child;
>   struct ahash_request *req = ahash_request_cast(req_async);
>   struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
> - struct shash_desc *desc = >desc;
> + struct ahash_request *desc = >areq;
> + struct crypto_async_request *base = >base;
>  
>   if (unlikely(err == -EINPROGRESS))
>   goto out;
> + base->tfm = >base;
> + base->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */

You should not be touching crypto_async_request directly.  Use
the proper ahash interface to set the child request.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v7 1/3] crypto: Key-agreement Protocol Primitives API (KPP)

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 04:36:21PM +0100, Salvatore Benedetto wrote:
> Add key-agreement protocol primitives (kpp) API which allows to
> implement primitives required by protocols such as DH and ECDH.
> The API is composed mainly by the following functions
>  * set_params() - It allows the user to set the parameters known to
>both parties involved in the key-agreement session
>  * set_secret() - It allows the user to set his secret, also
>referred to as his private key
>  * generate_public_key() - It generates the public key to be sent to
>the other counterpart involved in the key-agreement session. The
>function has to be called after set_params() and set_secret()
>  * generate_secret() - It generates the shared secret for the session
> 
> Other functions such as init() and exit() are provided for allowing
cryptographic hardware to be initialized properly before use
> 
> Signed-off-by: Salvatore Benedetto 

You totally ignored my comments about this patch in the round v5.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 1/2] crypto: engine: permit to enqueue ashash_request

2016-06-02 Thread Herbert Xu
On Thu, Jun 02, 2016 at 11:38:35AM +0200, LABBE Corentin wrote:
>
> Since my patch is small and easy (and only one client is modified), do you 
> mind if I choose the first one ?

Sure.

> I will add this type checking on my patch against omap-aes/des.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [RFC] DRBG: which shall be default?

2016-06-02 Thread Herbert Xu
On Thu, Jun 02, 2016 at 11:31:22AM +0200, Stephan Mueller wrote:
>
> The skcipher API, however, wants to encrypt an entire input data stream. That 
> means the skcipher API requires the length of the input data stream to 
> generate an equally sized output data stream. But that is not what we have 
> here -- there is no input data. I.e. the skcipher API invokes the CTR mode 
> for 
> the stream cipher and performs the final XOR of the CTR stream with the input 
> data.

Just use an input stream of zeros.

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 1/2] crypto: engine: permit to enqueue ashash_request

2016-06-02 Thread LABBE Corentin
On Thu, Jun 02, 2016 at 05:19:40PM +0800, Herbert Xu wrote:
> On Thu, Jun 02, 2016 at 11:12:13AM +0200, LABBE Corentin wrote:
> > On Thu, Jun 02, 2016 at 04:32:59PM +0800, Herbert Xu wrote:
> > > On Mon, May 30, 2016 at 03:32:01PM +0200, LABBE Corentin wrote:
> > > > The current crypto engine allow only ablkcipher_request to be enqueued.
> > > > Thus denying any use of it for hardware that also handle hash algo.
> > > > 
> > > > This patch convert all ablkcipher_request references to the
> > > > more general crypto_async_request.
> > > > 
> > > > Signed-off-by: LABBE Corentin 
> > > 
> > > First of all your patches break bisection which is unacceptable.
> > > 
> > 
> > How do I break bisection ?
> 
> Because the kernel won't compile after your first patch.
> 
> Either do it as one single patch or use the more elaborate "new
> interface" + "switchover" + "delete old interface" ritual.
> 

Since my patch is small and easy (and only one client is modified), do you mind 
if I choose the first one ?

> > So, if my hwcrypto can handle hash and ciphers, I need to have two engine 
> > and each crypt_one_request()/hash_one_request()
> > need to lock the engine.
> > Having only one engine that handle all types permit to avoid this locking.
> 
> OK then we should add some type-checking as you suggested.  What
> I don't want is just blind casting by the user of crypto_engine.

I will add this type checking on my patch against omap-aes/des.

Regards

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [RFC] DRBG: which shall be default?

2016-06-02 Thread Stephan Mueller
Am Donnerstag, 2. Juni 2016, 16:40:12 schrieb Herbert Xu:

Hi Herbert,

> On Tue, May 31, 2016 at 01:23:21PM +0200, Stephan Mueller wrote:
> > Hence my question: shall we leave the HMAC DRBG as default or shall we use
> > the CTR DRBG as default?
> 
> I don't really care one way or another.
> 
> BTW why did you use the crypto_cipher aes interface instead of
> the crypto_skcipher ctr(aes) interface which would likely make
> your code run many orders-of-magnitude faster, especially with
> aesni-intel?

I considered such approach, but the crux is the following: for the CTR DRBG 
generate function, our state is 16 bytes (i.e. the AES block length). Out of 
those 16 bytes, we generate the random number by encrypting that block, 
incrementing the block by one and encrypt it again. In essence what we do here 
is the stream cipher part of the CTR mode which generates the data stream we 
use to XOR the input data with.

The skcipher API, however, wants to encrypt an entire input data stream. That 
means the skcipher API requires the length of the input data stream to 
generate an equally sized output data stream. But that is not what we have 
here -- there is no input data. I.e. the skcipher API invokes the CTR mode for 
the stream cipher and performs the final XOR of the CTR stream with the input 
data.

Currently I would not see a way how to access the CTR mode stream cipher part 
only via the skcipher API.

Ciao
Stephan
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 1/2] crypto: engine: permit to enqueue ashash_request

2016-06-02 Thread Herbert Xu
On Thu, Jun 02, 2016 at 11:12:13AM +0200, LABBE Corentin wrote:
> On Thu, Jun 02, 2016 at 04:32:59PM +0800, Herbert Xu wrote:
> > On Mon, May 30, 2016 at 03:32:01PM +0200, LABBE Corentin wrote:
> > > The current crypto engine allow only ablkcipher_request to be enqueued.
> > > Thus denying any use of it for hardware that also handle hash algo.
> > > 
> > > This patch convert all ablkcipher_request references to the
> > > more general crypto_async_request.
> > > 
> > > Signed-off-by: LABBE Corentin 
> > 
> > First of all your patches break bisection which is unacceptable.
> > 
> 
> How do I break bisection ?

Because the kernel won't compile after your first patch.

Either do it as one single patch or use the more elaborate "new
interface" + "switchover" + "delete old interface" ritual.

> So, if my hwcrypto can handle hash and ciphers, I need to have two engine and 
> each crypt_one_request()/hash_one_request()
> need to lock the engine.
> Having only one engine that handle all types permit to avoid this locking.

OK then we should add some type-checking as you suggested.  What
I don't want is just blind casting by the user of crypto_engine.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4 04/10] crypto: add quick decompression api

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 02:55:30PM +0100, Giovanni Cabiddu wrote:
> This patch introduces qdecomp, an asynchronous decompression api.
> qdecomp is a front-end for acomp and scomp algorithms which do not
> need additional vmalloc work space for decompression.
> 
> Signed-off-by: Giovanni Cabiddu 

I'm sorry to have to make you go through this again but we no
longer have a user for this (I believe they have switched over to
using a per-cpu buffer) so you can now get rid of any code that's
associated with qdecomp.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4 02/10] crypto: add asynchronous compression api

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 02:55:28PM +0100, Giovanni Cabiddu wrote:
>
> +/**
> + * struct acomp_req - asynchronous (de)compression request
> + *
> + * @base: Common attributes for asynchronous crypto requests
> + * @src:  Source Data
> + * @dst:  Destination data
> + * @slen: Size of the input buffer
> + * @dlen: Size of the output buffer
> + * @consumed: Number of bytes consumed by the (de)compressor
> + * @produced: Number of bytes produced by the (de)compressor

Why do we need these two? For a moment I thought you were going
to add pcomp again :)

Since we only support compression in one go, the number of
bytes consumed must be equal to slen unless there was an error.

For the number of bytes produced I'd prefer to just use dlen for it.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 1/2] crypto: engine: permit to enqueue ashash_request

2016-06-02 Thread LABBE Corentin
On Thu, Jun 02, 2016 at 04:32:59PM +0800, Herbert Xu wrote:
> On Mon, May 30, 2016 at 03:32:01PM +0200, LABBE Corentin wrote:
> > The current crypto engine allow only ablkcipher_request to be enqueued.
> > Thus denying any use of it for hardware that also handle hash algo.
> > 
> > This patch convert all ablkcipher_request references to the
> > more general crypto_async_request.
> > 
> > Signed-off-by: LABBE Corentin 
> 
> First of all your patches break bisection which is unacceptable.
> 

How do I break bisection ?

> Secondly you should not be casting generic requests to a specific type.
> 
I didn't add any request type check since omap uses the engine only for ciphers.
My view of the usage of crypt_one_request(), if both hash and ciphers could be 
used, is to test
crypto_tfm_alg_type(areq->tfm) to check which alg is used 
(CRYPTO_ALG_TYPE_AHASH vs CRYPTO_ALG_TYPE_ABLKCIPHER)

For example, this is the crypt_one_request function I have set:
int handle_request(struct crypto_engine *engine, struct crypto_async_request 
*areq)
{
int rtype;
struct ahash_request *hreq;
struct ablkcipher_request *breq;
int err = -EINVAL;
rtype = crypto_tfm_alg_type(areq->tfm);
switch (rtype) {
case CRYPTO_ALG_TYPE_AHASH:
hreq = ahash_request_cast(areq);
err = sun4i_ss_hash(hreq);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
breq = ablkcipher_request_cast(areq);
err = sun4i_ss_cipher(breq);
}
crypto_finalize_request(engine, areq, err);
return 0;
}


> Assuming a single engine only has to deal with one type of requests,
> what you could do is to create a separate engine type for each
> crypto type that you want to support.
> 

So, if my hwcrypto can handle hash and ciphers, I need to have two engines and 
each crypt_one_request()/hash_one_request()
need to lock the engine.
Having only one engine that handles all types permits avoiding this locking.

Regards

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 0/4] crypto: Key Derivation Function (SP800-108)

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 01:50:57PM +0200, Stephan Mueller wrote:
> Hi,
> 
> this patch set implements all three key derivation functions defined in
> SP800-108.
> 
> The implementation is provided as a template for random number generators,
> since a KDF can be considered a form of deterministic RNG where the key
> material is used as a seed.
> 
> With the KDF implemented as a template, all types of keyed hashes can be
> utilized, including HMAC and CMAC. The testmgr tests are derived from
> publicly available test vectors from NIST.
> 
> The KDF are all tested with a complete round of CAVS testing on 32 and 64 bit.
> 
> The patch set introduces an extension to the kernel crypto API in the first
> patch by adding a template handling for random number generators based on the
> same logic as for keyed hashes.

When you resubmit these patches please actually add a user.  I'm not
going to add new algorithms with no users.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 3/4] crypto: kdf - SP800-108 Key Derivation Function

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 01:52:32PM +0200, Stephan Mueller wrote:
>
> + * NOTE: Technically you can use one buffer for holding the label_context and
> + *the outbuf in the example above. However, multiple rounds of the
> + *KDF are to be expected while the input must always be the same.
> + *The first round would replace the input in case of one buffer, and the
> + *KDF would calculate a cryptographically strong result which, however,
> + *is not portable to other KDF implementations! Thus, always use
> + *different buffers for the label_context and the outbuf. A safe
> + *in-place operation can only be done when only one round of the KDF
> + *is executed (i.e. the size of the requested buffer is equal to the
> + *digestsize of the used MAC).

Why don't you put the result in a temporary buffer and then copy
it? These things are tiny, right?

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [RFC] DRBG: which shall be default?

2016-06-02 Thread Herbert Xu
On Tue, May 31, 2016 at 01:23:21PM +0200, Stephan Mueller wrote:
>
> Hence my question: shall we leave the HMAC DRBG as default or shall we use 
> the 
> CTR DRBG as default?

I don't really care one way or another.

BTW why did you use the crypto_cipher aes interface instead of
the crypto_skcipher ctr(aes) interface which would likely make
your code run many orders-of-magnitude faster, especially with
aesni-intel?

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2 1/2] crypto: engine: permit to enqueue ashash_request

2016-06-02 Thread Herbert Xu
On Mon, May 30, 2016 at 03:32:01PM +0200, LABBE Corentin wrote:
> The current crypto engine allow only ablkcipher_request to be enqueued.
> Thus denying any use of it for hardware that also handle hash algo.
> 
> This patch convert all ablkcipher_request references to the
> more general crypto_async_request.
> 
> Signed-off-by: LABBE Corentin 

First of all your patches break bisection which is unacceptable.

Secondly you should not be casting generic requests to a specific type.

Assuming a single engine only has to deal with one type of requests,
what you could do is to create a separate engine type for each
crypto type that you want to support.

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [RFC v2 2/3] crypto: Introduce CRYPTO_ALG_BULK flag

2016-06-02 Thread Herbert Xu
On Fri, May 27, 2016 at 07:11:23PM +0800, Baolin Wang wrote:
> Now some cipher hardware engines prefer to handle bulk block rather than one
> sector (512 bytes) created by dm-crypt, cause these cipher engines can handle
> the intermediate values (IV) by themselves in one bulk block. This means we
> can increase the size of the request by merging request rather than always 512
> bytes and thus increase the hardware engine processing speed.
> 
> So introduce 'CRYPTO_ALG_BULK' flag to indicate this cipher can support bulk
> mode.
> 
> Signed-off-by: Baolin Wang 

I think a better aproach would be to explicitly move the IV generation
into the crypto API, similar to how we handle IPsec.  Once you do
that then every algorithm can be handled through the bulk interface.

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] hwrng: alea - Add support for Araneus Alea I USB RNG

2016-06-02 Thread Bob Ham
Adds support for the Araneus Alea I USB hardware Random Number
Generator.  This RNG creates entropy at a high rate, about 100kb/s.

Signed-off-by: Bob Ham 
---

Just a note about the name of the module, I haven't appended "-rng" to
the name, like every other module in hw_random, because those modules
contain drivers for the RNG part of some more complex device. By
contrast, the Alea is solely an RNG so adding "-rng" to the name seems
redundant.


 drivers/char/hw_random/Kconfig  |  13 ++
 drivers/char/hw_random/Makefile |   1 +
 drivers/char/hw_random/alea.c   | 370 
 3 files changed, 384 insertions(+)
 create mode 100644 drivers/char/hw_random/alea.c

diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index ac51149..b3f5a89 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -396,6 +396,19 @@ config HW_RANDOM_PIC32
 
  If unsure, say Y.
 
+config HW_RANDOM_ALEA
+   tristate "Araneus Alea I USB Random Number Generator support"
+   depends on HW_RANDOM && USB
+   default n
+   ---help---
+ This driver provides kernel-side support for the Araneus
+ Alea I USB hardware Random Number Generator.
+
+ To compile this driver as a module, choose M here. The
+ module will be called alea.
+
+ If unsure, say N.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 63022b4..3709906 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
 obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
+obj-$(CONFIG_HW_RANDOM_ALEA) += alea.o
diff --git a/drivers/char/hw_random/alea.c b/drivers/char/hw_random/alea.c
new file mode 100644
index 000..50909da
--- /dev/null
+++ b/drivers/char/hw_random/alea.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2016 Collabora Ltd
+ * Written by Bob Ham 
+ *
+ * An HWRNG driver to pull data from an Araneus Alea I
+ *
+ * derived from:
+ *
+ * USB Skeleton driver - 2.2
+ *
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (g...@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
+ * but has been rewritten to be easier to read and use.
+ *
+ */
+
+/*
+ * The Alea I is a really simple device.  There is one bulk read
+ * endpoint.  It spits out data in 64-byte chunks.  Each chunk
+ * contains entropy.  Simple.
+ *
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+#define MODULE_NAME "alea"
+
+#define ARANEUS_VENDOR_ID  0x12d8
+#define ARANEUS_ALEA_I_PRODUCT_ID  0x0001
+
+/* table of devices that work with this driver */
+static const struct usb_device_id alea_table[] = {
+   { USB_DEVICE(ARANEUS_VENDOR_ID, ARANEUS_ALEA_I_PRODUCT_ID) },
+   { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE (usb, alea_table);
+
+
+/* Structure to hold all of our device specific stuff */
+struct alea {
+   struct usb_device   *udev;  /* the usb device for 
this device */
+   struct usb_interface*interface; /* the interface for 
this device */
+   struct urb  *bulk_in_urb;   /* the urb to read data 
with */
+   unsigned char   *bulk_in_buffer;/* the buffer to 
receive data */
+   size_t  bulk_in_size;   /* the size of the 
receive buffer */
+   size_t  bulk_in_filled; /* number of bytes in 
the buffer */
+   __u8bulk_in_endpointAddr;   /* the address of the 
bulk in endpoint */
+   int errors; /* the last request 
tanked */
+   boolongoing_read;   /* a read is going on */
+   spinlock_t  err_lock;   /* lock for errors */
+   struct kref kref;
+   struct mutexio_mutex;   /* synchronize I/O with 
disconnect */
+   wait_queue_head_t   bulk_in_wait;   /* to wait for an 
ongoing read */
+   char*rng_name;  /* name for the hwrng 
subsystem */
+   struct hwrngrng;/* the hwrng info */
+};
+#define kref_to_alea(d) container_of(d, struct alea, kref)
+#define rng_to_alea(d) container_of(d, struct alea, rng)
+
+static struct usb_driver alea_driver;
+
+static void alea_delete(struct kref *kref)
+{
+   struct alea *dev =