

Re: Duplicated module names

2016-01-29 Thread Lucas De Marchi
Hi!

On Fri, Jan 29, 2016 at 3:54 AM, Rusty Russell  wrote:
> Lucas De Marchi  writes:
>> Hi!
>>
>> CC'ing Rusty and mailing lists
>
> Thanks.
>
>> Rusty and others: it looks like both CONFIG_CRC32 and
>> CONFIG_CRYPTO_CRC32 can be compiled as modules, and they generate
>> modules with the same name, crc32.  Could that be fixed?
>
> Gah.  Looks like it's been that way since at least 2014, too.
>
> I think we could rename it to crypto_crc32, but I don't think it's the
> only one.  Marco, I think depmod should probably FAIL if two modules
> have the same name, which would at least find such problems.

Yes, I'll do that in kmod, but we need to pay attention to 2 things:

  - It's possible to have 2 modules with the same name if they come from
different locations (with different priorities)
  - It doesn't fix the case in which one is a module and the other is
builtin. We could still have a problem in such a scenario, like was
pointed out in this bug report to kmod (a standalone sketch of such a
duplicate-name check follows below).
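
A minimal, illustrative sketch of such a duplicate-name check (plain
userspace C, not kmod code; find_duplicate_names() and the sample paths
are hypothetical, and a real depmod check would also have to honour
search-path priorities and built-in modules, as noted above):

#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return -1 if two of the given module paths share the same file name. */
static int find_duplicate_names(const char **paths, size_t n)
{
	size_t i, j;
	int ret = 0;

	for (i = 0; i < n; i++) {
		char *copy_i = strdup(paths[i]);	/* basename() may modify its argument */
		char *name_i = strdup(basename(copy_i));

		for (j = i + 1; j < n; j++) {
			char *copy_j = strdup(paths[j]);

			if (!strcmp(name_i, basename(copy_j))) {
				fprintf(stderr, "duplicate module name %s: %s vs %s\n",
					name_i, paths[i], paths[j]);
				ret = -1;
			}
			free(copy_j);
		}
		free(name_i);
		free(copy_i);
	}
	return ret;
}

int main(void)
{
	const char *paths[] = {
		"kernel/lib/crc32.ko",		/* CONFIG_CRC32=m */
		"kernel/crypto/crc32.ko",	/* CONFIG_CRYPTO_CRC32=m */
	};

	return find_duplicate_names(paths, 2) ? EXIT_FAILURE : EXIT_SUCCESS;
}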


Lucas De Marchi


[PATCH v1] crypto: ccp - Limit the amount of information exported

2016-01-29 Thread Tom Lendacky
Since the exported information can be exposed to user-space, export only
the minimum information needed instead of the entire request context.

Cc:  # 3.14.x-
Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   16 +++-
 drivers/crypto/ccp/ccp-crypto-sha.c  |   20 +++-
 drivers/crypto/ccp/ccp-crypto.h  |   22 ++
 3 files changed, 48 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 00207cf..6a2d836 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -223,9 +223,12 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
 static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 {
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-   struct ccp_aes_cmac_req_ctx *state = out;
+   struct ccp_aes_cmac_exp_ctx *state = out;
 
-   *state = *rctx;
+   state->null_msg = rctx->null_msg;
+   memcpy(state->iv, rctx->iv, sizeof(state->iv));
+   state->buf_count = rctx->buf_count;
+   memcpy(state->buf, rctx->buf, sizeof(state->buf));
 
return 0;
 }
@@ -233,9 +236,12 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
 {
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-   const struct ccp_aes_cmac_req_ctx *state = in;
+   const struct ccp_aes_cmac_exp_ctx *state = in;
 
-   *rctx = *state;
+   rctx->null_msg = state->null_msg;
+   memcpy(rctx->iv, state->iv, sizeof(rctx->iv));
+   rctx->buf_count = state->buf_count;
+   memcpy(rctx->buf, state->buf, sizeof(rctx->buf));
 
return 0;
 }
@@ -378,7 +384,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
 
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
-   halg->statesize = sizeof(struct ccp_aes_cmac_req_ctx);
+   halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
 
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 3aae58d..a67128a 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -210,9 +210,14 @@ static int ccp_sha_digest(struct ahash_request *req)
 static int ccp_sha_export(struct ahash_request *req, void *out)
 {
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   struct ccp_sha_req_ctx *state = out;
+   struct ccp_sha_exp_ctx *state = out;
 
-   *state = *rctx;
+   state->type = rctx->type;
+   state->msg_bits = rctx->msg_bits;
+   state->first = rctx->first;
+   memcpy(state->ctx, rctx->ctx, sizeof(state->ctx));
+   state->buf_count = rctx->buf_count;
+   memcpy(state->buf, rctx->buf, sizeof(state->buf));
 
return 0;
 }
@@ -220,9 +225,14 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
 static int ccp_sha_import(struct ahash_request *req, const void *in)
 {
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   const struct ccp_sha_req_ctx *state = in;
+   const struct ccp_sha_exp_ctx *state = in;
 
-   *rctx = *state;
+   rctx->type = state->type;
+   rctx->msg_bits = state->msg_bits;
+   rctx->first = state->first;
+   memcpy(rctx->ctx, state->ctx, sizeof(rctx->ctx));
+   rctx->buf_count = state->buf_count;
+   memcpy(rctx->buf, state->buf, sizeof(rctx->buf));
 
return 0;
 }
@@ -428,7 +438,7 @@ static int ccp_register_sha_alg(struct list_head *head,
 
halg = &alg->halg;
halg->digestsize = def->digest_size;
-   halg->statesize = sizeof(struct ccp_sha_req_ctx);
+   halg->statesize = sizeof(struct ccp_sha_exp_ctx);
 
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0..a326ec2 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
 };
 
+struct ccp_aes_cmac_exp_ctx {
+   unsigned int null_msg;
+
+   u8 iv[AES_BLOCK_SIZE];
+
+   unsigned int buf_count;
+   u8 buf[AES_BLOCK_SIZE];
+};
+
 /* SHA related defines */
 #define MAX_SHA_CONTEXT_SIZE   SHA256_DIGEST_SIZE
 #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
 };
 
+struct ccp_sha_exp_ctx {
+   enum ccp_sha_type type;
+
+   u64 msg_bits;
+
+   unsigned int first;
+
+   u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+   unsigned int buf_count;
+   u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
 /* Common Context Structure */
 struct ccp_ctx {

Re: [v2 PATCH 9/26] eCryptfs: Use skcipher and shash

2016-01-29 Thread Tyler Hicks
On 2016-01-25 10:29:33, Herbert Xu wrote:
> On Sun, Jan 24, 2016 at 07:10:50PM +0100, Julia Lawall wrote:
> > Maybe the goto on line 1726 needs a preceding mutex_unlock?
> 
> Good catch! Thanks.
> 
> ---8<---
> This patch replaces uses of ablkcipher and blkcipher with skcipher,
> and the long obsolete hash interface with shash.
> 
> Signed-off-by: Herbert Xu 

Acked-by: Tyler Hicks 

I have no problem with you taking this through the cryptodev tree.
Thanks!

Tyler

> 
> diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
> index 80d6901..11255cb 100644
> --- a/fs/ecryptfs/crypto.c
> +++ b/fs/ecryptfs/crypto.c
> @@ -23,6 +23,8 @@
>   * 02111-1307, USA.
>   */
>  
> +#include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -30,7 +32,6 @@
>  #include 
>  #include 
>  #include 
> -#include 
>  #include 
>  #include 
>  #include 
> @@ -74,6 +75,19 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
>   }
>  }
>  
> +static int ecryptfs_hash_digest(struct crypto_shash *tfm,
> + char *src, int len, char *dst)
> +{
> + SHASH_DESC_ON_STACK(desc, tfm);
> + int err;
> +
> + desc->tfm = tfm;
> + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
> + err = crypto_shash_digest(desc, src, len, dst);
> + shash_desc_zero(desc);
> + return err;
> +}
> +
>  /**
>   * ecryptfs_calculate_md5 - calculates the md5 of @src
>   * @dst: Pointer to 16 bytes of allocated memory
> @@ -88,45 +102,26 @@ static int ecryptfs_calculate_md5(char *dst,
> struct ecryptfs_crypt_stat *crypt_stat,
> char *src, int len)
>  {
> - struct scatterlist sg;
> - struct hash_desc desc = {
> - .tfm = crypt_stat->hash_tfm,
> - .flags = CRYPTO_TFM_REQ_MAY_SLEEP
> - };
> + struct crypto_shash *tfm;
>   int rc = 0;
>  
>   mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
> - sg_init_one(&sg, (u8 *)src, len);
> - if (!desc.tfm) {
> - desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0,
> -  CRYPTO_ALG_ASYNC);
> - if (IS_ERR(desc.tfm)) {
> - rc = PTR_ERR(desc.tfm);
> + tfm = crypt_stat->hash_tfm;
> + if (!tfm) {
> + tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
> + if (IS_ERR(tfm)) {
> + rc = PTR_ERR(tfm);
>   ecryptfs_printk(KERN_ERR, "Error attempting to "
>   "allocate crypto context; rc = [%d]\n",
>   rc);
>   goto out;
>   }
> - crypt_stat->hash_tfm = desc.tfm;
> - }
> - rc = crypto_hash_init(&desc);
> - if (rc) {
> - printk(KERN_ERR
> -"%s: Error initializing crypto hash; rc = [%d]\n",
> -__func__, rc);
> - goto out;
> + crypt_stat->hash_tfm = tfm;
>   }
> - rc = crypto_hash_update(&desc, &sg, len);
> + rc = ecryptfs_hash_digest(tfm, src, len, dst);
>   if (rc) {
>   printk(KERN_ERR
> -"%s: Error updating crypto hash; rc = [%d]\n",
> -__func__, rc);
> - goto out;
> - }
> - rc = crypto_hash_final(&desc, dst);
> - if (rc) {
> - printk(KERN_ERR
> -"%s: Error finalizing crypto hash; rc = [%d]\n",
> +"%s: Error computing crypto hash; rc = [%d]\n",
>  __func__, rc);
>   goto out;
>   }
> @@ -234,10 +229,8 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
>  {
>   struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
>  
> - if (crypt_stat->tfm)
> - crypto_free_ablkcipher(crypt_stat->tfm);
> - if (crypt_stat->hash_tfm)
> - crypto_free_hash(crypt_stat->hash_tfm);
> + crypto_free_skcipher(crypt_stat->tfm);
> + crypto_free_shash(crypt_stat->hash_tfm);
>   list_for_each_entry_safe(key_sig, key_sig_tmp,
>&crypt_stat->keysig_list, crypt_stat_list) {
>   list_del(&key_sig->crypt_stat_list);
> @@ -342,7 +335,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
>struct scatterlist *src_sg, int size,
>unsigned char *iv, int op)
>  {
> - struct ablkcipher_request *req = NULL;
> + struct skcipher_request *req = NULL;
>   struct extent_crypt_result ecr;
>   int rc = 0;
>  
> @@ -358,20 +351,20 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
>   init_completion(&ecr.completion);
>  
>   mutex_lock(&crypt_stat->cs_tfm_mutex);
> - req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
> + req = skcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
>   if (!req) {
>   mutex_unlock(&crypt

[PATCH] crypto: atmel-aes - remove calls of clk_prepare() from atomic contexts

2016-01-29 Thread Cyrille Pitchen
clk_prepare()/clk_unprepare() must not be called within atomic context.

This patch calls clk_prepare() once, from atmel_aes_probe(), and
clk_unprepare() from atmel_aes_remove().

Calls to clk_prepare_enable()/clk_disable_unprepare() are then replaced
with calls to clk_enable()/clk_disable().

Signed-off-by: Cyrille Pitchen 
Reported-by: Matthias Mayr 
---
 drivers/crypto/atmel-aes.c | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6dd3317ca365..3eb3f1279fb7 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -400,7 +400,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 {
int err;
 
-   err = clk_prepare_enable(dd->iclk);
+   err = clk_enable(dd->iclk);
if (err)
return err;
 
@@ -430,7 +430,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 
dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 
-   clk_disable_unprepare(dd->iclk);
+   clk_disable(dd->iclk);
return 0;
 }
 
@@ -448,7 +448,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 
 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 {
-   clk_disable_unprepare(dd->iclk);
+   clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
 
if (dd->is_async)
@@ -2091,10 +2091,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto res_err;
}
 
-   err = atmel_aes_hw_version_init(aes_dd);
+   err = clk_prepare(aes_dd->iclk);
if (err)
goto res_err;
 
+   err = atmel_aes_hw_version_init(aes_dd);
+   if (err)
+   goto iclk_unprepare;
+
atmel_aes_get_cap(aes_dd);
 
err = atmel_aes_buff_init(aes_dd);
@@ -2127,6 +2131,8 @@ err_algs:
 err_aes_dma:
atmel_aes_buff_cleanup(aes_dd);
 err_aes_buff:
+iclk_unprepare:
+   clk_unprepare(aes_dd->iclk);
 res_err:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
@@ -2155,6 +2161,8 @@ static int atmel_aes_remove(struct platform_device *pdev)
atmel_aes_dma_cleanup(aes_dd);
atmel_aes_buff_cleanup(aes_dd);
 
+   clk_unprepare(aes_dd->iclk);
+
return 0;
 }
 
-- 
1.8.2.2



[PATCH v2] crypto: atmel-sha: fix algorithm registration

2016-01-29 Thread Cyrille Pitchen
This patch implements the missing mandatory .import() and .export()
hooks for asynchronous hash algorithms. It also sets a relevant,
non-zero value for the .statesize field when declaring the supported SHA
algorithms. Indeed, a zero value of .statesize prevents the algorithm
from being registered.

Signed-off-by: Cyrille Pitchen 
---
ChangeLog

v2: deal with unaligned in/out pointers in atmel_sha_import/atmel_sha_export

 drivers/crypto/atmel-sha.c | 61 +-
 1 file changed, 60 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index da4c3055784f..63b09e01075c 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -66,7 +66,7 @@
 #define SHA_OP_UPDATE  1
 #define SHA_OP_FINAL   2
 
-#define SHA_BUFFER_LEN PAGE_SIZE
+#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
 
 #define ATMEL_SHA_DMA_THRESHOLD56
 
@@ -80,6 +80,17 @@ struct atmel_sha_caps {
 
 struct atmel_sha_dev;
 
+/*
+ * .statesize = sizeof(struct atmel_sha_state) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct atmel_sha_state {
+   u8  digest[SHA512_DIGEST_SIZE];
+   u8  buffer[SHA_BUFFER_LEN];
+   u64 digcnt[2];
+   size_t  bufcnt;
+};
+
 struct atmel_sha_reqctx {
struct atmel_sha_dev*dd;
unsigned long   flags;
@@ -1033,6 +1044,39 @@ static int atmel_sha_digest(struct ahash_request *req)
return atmel_sha_init(req) ?: atmel_sha_finup(req);
 }
 
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+   const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+   struct atmel_sha_state state;
+
+   memcpy(state.digest, ctx->digest, SHA512_DIGEST_SIZE);
+   memcpy(state.buffer, ctx->buffer, ctx->bufcnt);
+   state.bufcnt = ctx->bufcnt;
+   state.digcnt[0] = ctx->digcnt[0];
+   state.digcnt[1] = ctx->digcnt[1];
+
+   /* out might be unaligned. */
+   memcpy(out, &state, sizeof(state));
+   return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+   struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+   struct atmel_sha_state state;
+
+   /* in might be unaligned. */
+   memcpy(&state, in, sizeof(state));
+
+   memcpy(ctx->digest, state.digest, SHA512_DIGEST_SIZE);
+   memcpy(ctx->buffer, state.buffer, state.bufcnt);
+   ctx->bufcnt = state.bufcnt;
+   ctx->digcnt[0] = state.digcnt[0];
+   ctx->digcnt[1] = state.digcnt[1];
+   return 0;
+}
+
 static int atmel_sha_cra_init(struct crypto_tfm *tfm)
 {
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1049,8 +1093,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final  = atmel_sha_final,
.finup  = atmel_sha_finup,
.digest = atmel_sha_digest,
+   .export = atmel_sha_export,
+   .import = atmel_sha_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+   .statesize  = sizeof(struct atmel_sha_state),
.base   = {
.cra_name   = "sha1",
.cra_driver_name= "atmel-sha1",
@@ -1070,8 +1117,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final  = atmel_sha_final,
.finup  = atmel_sha_finup,
.digest = atmel_sha_digest,
+   .export = atmel_sha_export,
+   .import = atmel_sha_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+   .statesize  = sizeof(struct atmel_sha_state),
.base   = {
.cra_name   = "sha256",
.cra_driver_name= "atmel-sha256",
@@ -1093,8 +1143,11 @@ static struct ahash_alg sha_224_alg = {
.final  = atmel_sha_final,
.finup  = atmel_sha_finup,
.digest = atmel_sha_digest,
+   .export = atmel_sha_export,
+   .import = atmel_sha_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
+   .statesize  = sizeof(struct atmel_sha_state),
.base   = {
.cra_name   = "sha224",
.cra_driver_name= "atmel-sha224",
@@ -1116,8 +1169,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final  = atmel_sha_final,
.finup  = atmel_sha_finup,
.digest = atmel_sha_digest,
+   .export = atmel_sha_export,
+   .import = atmel_sha_import,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
+   .statesize  = sizeof(struct atmel_sha_state),
.base   = {
.cra_name   = "sha384",
.cra_driver_n

Re: Duplicated module names

2016-01-29 Thread Sudeep Holla
On Fri, Jan 29, 2016 at 5:54 AM, Rusty Russell  wrote:
> Lucas De Marchi  writes:
>> Hi!
>>
>> CC'ing Rusty and mailing lists
>
> Thanks.
>
>> Rusty and others: it looks like both CONFIG_CRC32 and
>> CONFIG_CRYPTO_CRC32 can be compiled as modules, and they generate
>> modules with the same name, crc32.  Could that be fixed?
>
> Gah.  Looks like it's been that way since at least 2014, too.
>
> I think we could rename it to crypto_crc32, but I don't think it's the
> only one.  Marco, I think depmod should probably FAIL if two modules
> have the same name, which would at least find such problems.
>
> (BTW is there a nice way to figure out if a config var is a tristate?  These
> are only problematic if both CONFIG_ are tristate.)
>
> Here's a hacky attempt to look for problems:
>
> rusty@rusty-Lemur:~/devel/kernel/linux (master)$ KCONFIGS=`find * -name 'Kconfig*'`; for m in `find [b-z]* -name 'Makefile*'`; do sed -n 's,obj-\$(CONFIG.*+= \([a-z0-9_-]\+\.o\)$,'$m' \1,p' <$m | sort -u; done | sort -k 2 | uniq -D -f 1 | while read m obj; do fgrep -w $obj $m /dev/null; done | while read LINE; do conf=`echo $LINE | sed 's/.*\$(CONFIG_\([A-Z0-9_]*\).*/\1/'`; if grep -C2 "^config $conf\$" $KCONFIGS | fgrep -q tristate; then echo $LINE; fi; done
>
> Here are the results (mildly filtered by me):
>
[...]

>
> drivers/hwmon/Makefile:obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
> drivers/regulator/Makefile:obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
>

Indeed, this causes issues if both are built as modules; only one
succeeds. I will post these 2 patches separately soon.

-->8

From 1eb0d653d90e3f5fe05c6f63a17976226309e1ec Mon Sep 17 00:00:00 2001
From: Sudeep Holla 
Date: Fri, 29 Jan 2016 15:40:24 +
Subject: [PATCH 1/2] hwmon: (vexpress) rename vexpress hwmon implementation

The vexpress hwmon implementation is currently just called vexpress.
This is a problem because it clashes with another module with the same
name in regulators.

This patch renames the vexpress hwmon implementation to vexpress-hwmon
so that there will be no clash in the module namespace.

Reported-by: Rusty Russell 
Signed-off-by: Sudeep Holla 
---
 drivers/hwmon/Makefile | 2 +-
 drivers/hwmon/{vexpress.c => vexpress-hwmon.c} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename drivers/hwmon/{vexpress.c => vexpress-hwmon.c} (100%)

diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 30c94df31465..cfc09711810c 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -149,7 +149,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
 obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
 obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
 obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
-obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
 obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
 obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
 obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress-hwmon.c
similarity index 100%
rename from drivers/hwmon/vexpress.c
rename to drivers/hwmon/vexpress-hwmon.c
-- 
1.9.1

From 1520bd20d3863df87ca5d9461bf5a20d3f58e385 Mon Sep 17 00:00:00 2001
From: Sudeep Holla 
Date: Fri, 29 Jan 2016 15:44:50 +
Subject: [PATCH 2/2] regulator: vexpress: rename vexpress regulator
 implementation

The vexpress regulator implementation is currently just called vexpress.
This is a problem because it clashes with another module with the same
name in hardware monitors.

This patch renames the vexpress regulator implementation to
vexpress-regulator so that there will be no clash in the module namespace.

Reported-by: Rusty Russell 
Signed-off-by: Sudeep Holla 
---
 drivers/regulator/Makefile | 2 +-
 drivers/regulator/{vexpress.c => vexpress-regulator.c} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename drivers/regulator/{vexpress.c => vexpress-regulator.c} (100%)

diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 980b1943fa81..755077a89a25 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -98,7 +98,7 @@ obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
 obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
 obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
-obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
+obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/vexpress.c
b/drivers/regulator/vexpress-regulator.c
similarity index 100%
rename from drivers/regulator/vexpress.c
rename to drivers/regulator/vexpress-regulator.c
-- 
1.9.1

[RFC 2/2] crypto: tcrypt - Add ahash digest bandwidth tests support

2016-01-29 Thread Catalin Vasile
Signed-off-by: Catalin Vasile 
---
 crypto/tcrypt.c | 190 
 1 file changed, 190 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index f56419d..02a3856 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1316,6 +1316,176 @@ static void band_acipher(const char *algo, int enc, unsigned int secs,
band_acipher_destroy_session_ctx(&ses_ctx);
 }
 
+static inline struct ahash_request
+*band_ahash_alloc_req_digest(struct band_session_ctx *ses_ctx)
+{
+   struct ahash_request *req;
+   struct band_req_ctx *br;
+   void *buf_sg;
+   u8 *icv;
+   size_t req_size = sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ses_ctx->tfm);
+   size_t src_size = ses_ctx->job_size -
+ crypto_ahash_digestsize(ses_ctx->tfm);
+   size_t no_sg_entries = DIV_ROUND_UP(src_size, PAGE_SIZE);
+   int rv;
+   int i;
+
+   rv = band_alloc_req(ses_ctx, (void **)&req, (void **)&buf_sg);
+   if (unlikely(rv))
+   return NULL;
+
+   br = ((void *)req) + req_size;
+   icv = buf_sg + src_size;
+   sg_init_table(br->sg, no_sg_entries);
+   if (src_size < PAGE_SIZE)
+   sg_set_buf(br->sg, buf_sg, src_size);
+   else
+   for (i = 0; i < no_sg_entries; i++) {
+   sg_set_buf(br->sg + i, buf_sg, PAGE_SIZE);
+   buf_sg += PAGE_SIZE;
+   }
+   ahash_request_set_tfm(req, ses_ctx->tfm);
+   ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+   band_complete, br);
+   ahash_request_set_crypt(req, br->sg, icv, src_size);
+
+   init_completion(&br->completion);
+   br->session = ses_ctx;
+
+   return req;
+}
+
+static int band_ahash_jiffies_digest(struct band_session_ctx *ses_ctx, int secs)
+{
+   struct ahash_request *req;
+   unsigned long start, end;
+   unsigned long stop;
+   int elapsed_secs;
+   int bcount;
+   int no_jobs_done;
+   int ret;
+   unsigned long long band;
+
+   for (start = jiffies, end = start + secs * HZ, bcount = 0;
+time_before(jiffies, end); bcount++) {
+   req = band_ahash_alloc_req_digest(ses_ctx);
+   if (unlikely(req == NULL))
+   break;
+   ret = do_one_band_op(&req->base, crypto_ahash_digest(req));
+   if (unlikely(ret != -EINPROGRESS && ret))
+   break;
+   }
+
+   stop = jiffies;
+   no_jobs_done = atomic_read(&band_async_done);
+   elapsed_secs = (stop - start) / HZ;
+
+   while (atomic_read(&band_async_done) < bcount)
+   cpu_relax();
+
+   band = ses_ctx->job_size - crypto_ahash_digestsize(ses_ctx->tfm);
+   band = no_jobs_done * band;
+   pr_cont("%d operations in %d seconds (%llu bytes processed)\n",
+   bcount, secs, band);
+
+   return 0;
+}
+
+static inline int
+band_ahash_init_session_ctx(const char *algo,
+   struct band_session_ctx *ses_ctx)
+{
+   struct crypto_ahash *tfm;
+   struct kmem_cache *req_cache;
+   mempool_t *req_mpool;
+   size_t req_size;
+   int err;
+
+   tfm = crypto_alloc_ahash(algo, 0, 0);
+   if (IS_ERR(tfm)) {
+   pr_err("failed to load transform for %s: %ld\n",
+  algo, PTR_ERR(tfm));
+   return PTR_ERR(tfm);
+   }
+
+   req_size = sizeof(struct ahash_request) +
+  crypto_ahash_reqsize(tfm) +
+  sizeof(struct band_req_ctx);
+   req_cache = kmem_cache_create("tcrypt-band-ahash-req",
+ req_size, 0, 0, NULL);
+   if (unlikely(!req_cache)) {
+   pr_err("failed to allocate request cache memory\n");
+   err = -ENOMEM;
+   goto out;
+   }
+
+   req_mpool = mempool_create(1024, mempool_alloc_slab, mempool_free_slab,
+  req_cache);
+   if (unlikely(!req_mpool)) {
+   pr_err("failed to allocate request memory pool\n");
+   err = -ENOMEM;
+   goto out_free_cache;
+   }
+
+   ses_ctx->req_mpool = req_mpool;
+   ses_ctx->tfm = tfm;
+
+   return 0;
+
+out_free_cache:
+   kmem_cache_destroy(req_cache);
+out:
+   crypto_free_ahash(tfm);
+   return err;
+}
+
+static inline void
+band_ahash_destroy_session_ctx(struct band_session_ctx *ses_ctx)
+{
+   struct kmem_cache *req_cache = ses_ctx->req_mpool->pool_data;
+
+   mempool_destroy(ses_ctx->req_mpool);
+   kmem_cache_destroy(req_cache);
+   crypto_free_ahash(ses_ctx->tfm);
+}
+
+static void band_ahash_digest(const char *algo, unsigned int secs)
+{
+   struct band_session_ctx ses_ctx;
+   u32 *b_size;
+   int i, ret;
+
+   if (!secs)
+   return;
+
+   re

[RFC 1/2] crypto: tcrypt - Add acipher bandwidth tests support

2016-01-29 Thread Catalin Vasile
Signed-off-by: Catalin Vasile 
---
 crypto/tcrypt.c | 501 
 1 file changed, 501 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 270bc4b..f56419d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -35,6 +35,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "tcrypt.h"
 
 /*
@@ -1021,6 +1022,300 @@ out:
crypto_free_ahash(tfm);
 }
 
+
+struct band_session_ctx {
+   mempool_t *req_mpool;
+   int job_size;
+   void *tfm;
+};
+
+struct band_req_ctx {
+   struct completion completion;
+   int err;
+   struct scatterlist sg[TVMEMSIZE];
+   struct band_session_ctx *session;
+   char iv[128];
+};
+atomic_t band_async_done;
+
+static inline void band_free_request(struct crypto_async_request *req)
+{
+   struct band_req_ctx *res = req->data;
+   struct band_session_ctx *ses_ctx = res->session;
+
+   kfree(sg_virt(res->sg));
+   mempool_free(req, ses_ctx->req_mpool);
+}
+
+static void band_complete(struct crypto_async_request *req, int err)
+{
+   struct band_req_ctx *res = req->data;
+
+   res->err = err;
+
+   /* if it previously was -EBUSY */
+   if (err == -EINPROGRESS) {
+   complete(&res->completion);
+   } else {
+   if (unlikely(err < 0))
+   pr_err("failed bandwidth acipher job with error: %d\n",
+  err);
+   band_free_request(req);
+   atomic_inc(&band_async_done);
+   }
+}
+
+static inline int band_alloc_req(struct band_session_ctx *ses_ctx,
+void **req,
+void **buf_sg)
+{
+   *req = mempool_alloc(ses_ctx->req_mpool, GFP_KERNEL);
+   if (unlikely(!*req))
+   goto out;
+
+   *buf_sg = kmalloc(ses_ctx->job_size, GFP_KERNEL);
+   if (unlikely(!*buf_sg))
+   goto out_free_req;
+
+   return 0;
+
+out_free_req:
+   mempool_free(req, ses_ctx->req_mpool);
+out:
+   pr_err("out of memory for bandwidth benchmark request\n");
+   return -ENOMEM;
+}
+
+static inline struct ablkcipher_request
+*band_acipher_alloc_req(struct band_session_ctx *ses_ctx)
+{
+   struct ablkcipher_request *req;
+   struct band_req_ctx *br;
+   void *buf_sg;
+   size_t req_size = sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(ses_ctx->tfm);
+   size_t no_sg_entries = DIV_ROUND_UP(ses_ctx->job_size, PAGE_SIZE);
+   int rv;
+   int i;
+
+   rv = band_alloc_req(ses_ctx, (void **)&req, (void **)&buf_sg);
+   if (unlikely(rv))
+   return NULL;
+
+   br = ((void *)req) + req_size;
+   sg_init_table(br->sg, no_sg_entries);
+   if (ses_ctx->job_size < PAGE_SIZE)
+   sg_set_buf(br->sg, buf_sg, ses_ctx->job_size);
+   else
+   for (i = 0; i < no_sg_entries; i++) {
+   sg_set_buf(br->sg + i, buf_sg, PAGE_SIZE);
+   buf_sg += PAGE_SIZE;
+   }
+   ablkcipher_request_set_tfm(req, ses_ctx->tfm);
+   ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+   band_complete, br);
+   ablkcipher_request_set_crypt(req, br->sg, br->sg,
+ses_ctx->job_size, br->iv);
+
+   init_completion(&br->completion);
+   br->session = ses_ctx;
+
+   return req;
+}
+
+static inline int do_one_band_op(struct crypto_async_request *req, int ret)
+{
+   struct band_req_ctx *br = req->data;
+
+   if (ret == -EBUSY) {
+   wait_for_completion(&br->completion);
+   reinit_completion(&br->completion);
+   ret = br->err;
+   } else {
+   if (ret != -EINPROGRESS) {
+   band_free_request(req);
+   atomic_inc(&band_async_done);
+   }
+   }
+
+   return ret;
+}
+
+static int band_acipher_jiffies(struct band_session_ctx *ses_ctx,
+   int enc, int secs)
+{
+   struct ablkcipher_request *req;
+   unsigned long start, end;
+   unsigned long stop;
+   int elapsed_secs;
+   int bcount;
+   int no_jobs_done;
+   int ret = 0;
+   unsigned long long band;
+
+   for (start = jiffies, end = start + secs * HZ, bcount = 0;
+time_before(jiffies, end); bcount++) {
+   req = band_acipher_alloc_req(ses_ctx);
+   if (unlikely(req == NULL))
+   break;
+   if (enc)
+   ret = do_one_band_op(&req->base,
+   crypto_ablkcipher_encrypt(req));
+   else
+   ret = do_one_band_op(&req->base,
+   crypto_ablkcipher_decrypt(req));
+
+   if (unlikely(ret != -EINPROGRESS &&

tcrypt bandwidth tests

2016-01-29 Thread Catalin Vasile
I have attached to this e-mail a set of patches that introduce bandwidth
tests for tcrypt.
The mindset of these patches is to submit as many asynchronous jobs as
possible and, after a set period of time, to count how many of them have
actually been completed.
These patches are at the proof-of-concept (POC) stage, as a real flood
of asynchronous jobs should also be able to use multiple threads to
issue requests.
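
A minimal sketch of the flood-and-count pattern described above (not the
RFC code itself): alloc_one_req() is a hypothetical helper standing in
for the per-request setup (buffers, scatterlist, result pointer) that the
real patches do via a mempool; assumes <crypto/hash.h>, <linux/atomic.h>
and <linux/jiffies.h>.

static atomic_t done_count;

static void flood_done(struct crypto_async_request *base, int err)
{
	if (err == -EINPROGRESS)
		return;			/* backlogged request is now running */
	ahash_request_free(base->data);	/* the request was passed as callback data */
	atomic_inc(&done_count);
}

static void flood_ahash_digest(struct crypto_ahash *tfm, unsigned int secs)
{
	unsigned long end = jiffies + secs * HZ;
	unsigned int submitted = 0;
	int ret;

	atomic_set(&done_count, 0);

	while (time_before(jiffies, end)) {
		struct ahash_request *req = alloc_one_req(tfm);	/* hypothetical */

		if (!req)
			break;
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   flood_done, req);
		ret = crypto_ahash_digest(req);
		if (ret != -EINPROGRESS && ret != -EBUSY) {
			/* completed (or failed) synchronously */
			ahash_request_free(req);
			atomic_inc(&done_count);
		}
		submitted++;
	}

	/* let the in-flight requests drain before reporting */
	while (atomic_read(&done_count) < submitted)
		cpu_relax();

	pr_info("%d async digest jobs completed in %u seconds\n",
		atomic_read(&done_count), secs);
}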



[PATCH] fix out-of-bounds read in __test_aead()

2016-01-29 Thread Jerome Marchand
__test_aead() reads MAX_IVLEN bytes from template[i].iv, but the
actual length of the initialisation vector can be shorter.
The length of the IV is already calculated earlier in the
function. Let's just reuse that.
This fixes an out-of-bounds read detected by KASan.

Signed-off-by: Jerome Marchand 
---
 crypto/testmgr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ae8c57fd..d3587d5 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -617,7 +617,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
j++;
 
if (template[i].iv)
-   memcpy(iv, template[i].iv, MAX_IVLEN);
+   memcpy(iv, template[i].iv, iv_len);
else
memset(iv, 0, MAX_IVLEN);
 
-- 
2.5.0



Re: Duplicated module names

2016-01-29 Thread Herbert Xu
Rusty Russell  wrote:
> 
> I think we could rename it to crypto_crc32, but I don't think it's the
> only one.  Marco, I think depmod should probably FAIL if two modules
> have the same name, which would at least find such problems.

This patch renames it to crc32_generic.

---8<---
Subject: crypto: crc32 - Rename generic implementation

The generic crc32 implementation is currently called crc32.  This
is a problem because it clashes with the lib implementation of crc32.

This patch renames the crypto crc32 to crc32_generic so that it is
consistent with crc32c.  An alias for the driver is also added.

Signed-off-by: Herbert Xu 

diff --git a/crypto/Makefile b/crypto/Makefile
index ffe18c9..059de1b 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
diff --git a/crypto/crc32.c b/crypto/crc32_generic.c
similarity index 98%
rename from crypto/crc32.c
rename to crypto/crc32_generic.c
index 187ded2..aa2a25f 100644
--- a/crypto/crc32.c
+++ b/crypto/crc32_generic.c
@@ -131,7 +131,7 @@ static struct shash_alg alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.base   = {
.cra_name   = "crc32",
-   .cra_driver_name= "crc32-table",
+   .cra_driver_name= "crc32-generic",
.cra_priority   = 100,
.cra_blocksize  = CHKSUM_BLOCK_SIZE,
.cra_ctxsize= sizeof(u32),
@@ -157,3 +157,4 @@ MODULE_AUTHOR("Alexander Boyko 
");
 MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-generic");
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH v2 04/10] crypto/compress: add asynchronous compression support

2016-01-29 Thread Herbert Xu
On Thu, Jan 28, 2016 at 12:19:42PM +0900, Joonsoo Kim wrote:
>
> I have tested asynchronous compression APIs in zram and I saw a
> regression. Atomic allocation and setting up SG lists are the culprits
> for this regression. Moreover, zram optimizes linearisation

So which is it, atomic allocations or setting up SG lists? There
is nothing in acomp that requires you to do an atomic allocation.
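
For illustration, a minimal sketch of a one-shot compression call that
keeps every allocation in process context with GFP_KERNEL; it assumes an
acomp interface along the lines of crypto_alloc_acomp() /
acomp_request_alloc() / acomp_request_set_params() /
crypto_acomp_compress() (the exact names in the patch set under review
may differ), "lzo" is only an example algorithm, and src/dst must be
kmalloc'd buffers:

#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct one_shot {
	struct completion done;
	int err;
};

static void one_shot_done(struct crypto_async_request *base, int err)
{
	struct one_shot *os = base->data;

	if (err == -EINPROGRESS)
		return;			/* backlogged request is now running */
	os->err = err;
	complete(&os->done);
}

static int compress_one(const void *src, unsigned int slen,
			void *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist sg_src, sg_dst;
	struct one_shot os;
	int ret;

	/* All allocations happen here, in process context, with GFP_KERNEL. */
	tfm = crypto_alloc_acomp("lzo", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	init_completion(&os.done);

	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   one_shot_done, &os);

	ret = crypto_acomp_compress(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&os.done);
		ret = os.err;
	}

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}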

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt