[PATCH 5/7] crypto: marvell: Adding a complete operation for async requests

2016-06-15 Thread Romain Perier
So far, the 'process' operation was used to check if the current request
was correctly handled by the engine; if it was, it copied
information from the SRAM to the main memory. Now, we split this
operation. We keep the 'process' operation, which still checks if the
request was correctly handled by the engine or not, then we add a new
operation for completion. The 'complete' method copies the content of
the SRAM to memory. This will soon become useful if we want to call
the process and the complete operations from different locations
depending on the type of the request (different cleanup logic).

Signed-off-by: Romain Perier 
---
 drivers/crypto/marvell/cesa.c   |  1 +
 drivers/crypto/marvell/cesa.h   |  3 +++
 drivers/crypto/marvell/cipher.c | 47 -
 drivers/crypto/marvell/hash.c   | 22 ++-
 4 files changed, 44 insertions(+), 29 deletions(-)

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index fe04d1b..af96426 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -98,6 +98,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
engine->req = NULL;
mv_cesa_dequeue_req_unlocked(engine);
spin_unlock_bh(&engine->lock);
+   ctx->ops->complete(req);
ctx->ops->cleanup(req);
local_bh_disable();
req->complete(req, res);
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 158ff82..32de08b 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -456,6 +456,8 @@ struct mv_cesa_engine {
  * code)
  * @step:  launch the crypto operation on the next chunk
  * @cleanup:   cleanup the crypto request (release associated data)
+ * @complete:  complete the request, i.e copy result from sram or contexts
+ * when it is needed.
  */
 struct mv_cesa_req_ops {
void (*prepare)(struct crypto_async_request *req,
@@ -463,6 +465,7 @@ struct mv_cesa_req_ops {
int (*process)(struct crypto_async_request *req, u32 status);
void (*step)(struct crypto_async_request *req);
void (*cleanup)(struct crypto_async_request *req);
+   void (*complete)(struct crypto_async_request *req);
 };
 
 /**
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 15d2c5a..fbaae2f 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -118,7 +118,6 @@ static int mv_cesa_ablkcipher_std_process(struct 
ablkcipher_request *req,
struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
struct mv_cesa_engine *engine = sreq->base.engine;
size_t len;
-   unsigned int ivsize;
 
len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@ -128,10 +127,6 @@ static int mv_cesa_ablkcipher_std_process(struct 
ablkcipher_request *req,
if (sreq->offset < req->nbytes)
return -EINPROGRESS;
 
-   ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
-   memcpy_fromio(req->info,
- engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, ivsize);
-
return 0;
 }
 
@@ -141,21 +136,9 @@ static int mv_cesa_ablkcipher_process(struct 
crypto_async_request *req,
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
 
-   if (mv_cesa_req_get_type(&creq->req.base) == CESA_DMA_REQ) {
-   int ret;
-   struct mv_cesa_req *basereq;
-   unsigned int ivsize;
-
-   ret = mv_cesa_dma_process(&creq->req.base, status);
-   if (ret)
-   return ret;
+   if (mv_cesa_req_get_type(&creq->req.base) == CESA_DMA_REQ)
+   return mv_cesa_dma_process(&creq->req.base, status);
 
-   basereq = &creq->req.base;
-   ivsize = crypto_ablkcipher_ivsize(
-crypto_ablkcipher_reqtfm(ablkreq));
-   memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
-   return ret;
-   }
return mv_cesa_ablkcipher_std_process(ablkreq, status);
 }
 
@@ -197,6 +180,7 @@ static inline void mv_cesa_ablkcipher_prepare(struct 
crypto_async_request *req,
 {
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+
creq->req.base.engine = engine;
 
if (mv_cesa_req_get_type(&creq->req.base) == CESA_DMA_REQ)
@@ -213,11 +197,36 @@ mv_cesa_ablkcipher_req_cleanup(struct 
crypto_async_request *req)
mv_cesa_ablkcipher_cleanup(ablkreq);
 }
 
+static v

Re: [PATCH 5/7] crypto: marvell: Adding a complete operation for async requests

2016-06-15 Thread Boris Brezillon
On Wed, 15 Jun 2016 21:15:32 +0200
Romain Perier  wrote:

> So far, the 'process' operation was used to check if the current request
> was correctly handled by the engine, if it was the case it copied
> information from the SRAM to the main memory. Now, we split this
> operation. We keep the 'process' operation, which still checks if the
> request was correctly handled by the engine or not, then we add a new
> operation for completion. The 'complete' method copies the content of
> the SRAM to memory. This will soon become useful if we want to call
> the process and the complete operations from different locations
> depending on the type of the request (different cleanup logic).
> 
> Signed-off-by: Romain Perier 
> ---
>  drivers/crypto/marvell/cesa.c   |  1 +
>  drivers/crypto/marvell/cesa.h   |  3 +++
>  drivers/crypto/marvell/cipher.c | 47 
> -
>  drivers/crypto/marvell/hash.c   | 22 ++-
>  4 files changed, 44 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
> index fe04d1b..af96426 100644
> --- a/drivers/crypto/marvell/cesa.c
> +++ b/drivers/crypto/marvell/cesa.c
> @@ -98,6 +98,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
>   engine->req = NULL;
>   mv_cesa_dequeue_req_unlocked(engine);
>   spin_unlock_bh(&engine->lock);
> + ctx->ops->complete(req);
>   ctx->ops->cleanup(req);
>   local_bh_disable();
>   req->complete(req, res);
> diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
> index 158ff82..32de08b 100644
> --- a/drivers/crypto/marvell/cesa.h
> +++ b/drivers/crypto/marvell/cesa.h
> @@ -456,6 +456,8 @@ struct mv_cesa_engine {
>   *   code)
>   * @step:launch the crypto operation on the next chunk
>   * @cleanup: cleanup the crypto request (release associated data)
> + * @complete:complete the request, i.e copy result from sram or 
> contexts
> + *   when it is needed.
>   */
>  struct mv_cesa_req_ops {
>   void (*prepare)(struct crypto_async_request *req,
> @@ -463,6 +465,7 @@ struct mv_cesa_req_ops {
>   int (*process)(struct crypto_async_request *req, u32 status);
>   void (*step)(struct crypto_async_request *req);
>   void (*cleanup)(struct crypto_async_request *req);
> + void (*complete)(struct crypto_async_request *req);
>  };
>  
>  /**
> diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
> index 15d2c5a..fbaae2f 100644
> --- a/drivers/crypto/marvell/cipher.c
> +++ b/drivers/crypto/marvell/cipher.c
> @@ -118,7 +118,6 @@ static int mv_cesa_ablkcipher_std_process(struct 
> ablkcipher_request *req,
>   struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
>   struct mv_cesa_engine *engine = sreq->base.engine;
>   size_t len;
> - unsigned int ivsize;
>  
>   len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
>  engine->sram + CESA_SA_DATA_SRAM_OFFSET,
> @@ -128,10 +127,6 @@ static int mv_cesa_ablkcipher_std_process(struct 
> ablkcipher_request *req,
>   if (sreq->offset < req->nbytes)
>   return -EINPROGRESS;
>  
> - ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
> - memcpy_fromio(req->info,
> -   engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, ivsize);
> -
>   return 0;
>  }
>  
> @@ -141,21 +136,9 @@ static int mv_cesa_ablkcipher_process(struct 
> crypto_async_request *req,
>   struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
>   struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
>  
> - if (mv_cesa_req_get_type(&creq->req.base) == CESA_DMA_REQ) {
> - int ret;
> - struct mv_cesa_req *basereq;
> - unsigned int ivsize;
> -
> - ret = mv_cesa_dma_process(&creq->req.base, status);
> - if (ret)
> - return ret;
> + if (mv_cesa_req_get_type(&creq->req.base) == CESA_DMA_REQ)
> + return mv_cesa_dma_process(&creq->req.base, status);
>  
> - basereq = &creq->req.base;
> - ivsize = crypto_ablkcipher_ivsize(
> -  crypto_ablkcipher_reqtfm(ablkreq));
> - memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
> - return ret;
> - }
>   return mv_cesa_ablkcipher_std_process(ablkreq, status);
>  }
>  
> @@ -197,6 +180,7 @@ static inline void mv_cesa_ablkcipher_prepare(struct 
> crypto_async_request *req,
>  {
>   struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
>   struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
> +

Nit: not sure you should mix this cosmetic change with the other
changes.

Re: [PATCH 5/7] crypto: marvell: Adding a complete operation for async requests

2016-06-16 Thread Romain Perier

Hello,

Le 15/06/2016 22:55, Boris Brezillon a écrit :

+


Nit: not sure you should mix this cosmetic change with the other
changes.


Ok



You already have ivsize initialized.


+   memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);


Use memcpy() here.


good catch, for both.

Thanks,
Romain
--
Romain Perier, Free Electrons
Embedded Linux, Kernel and Android engineering
http://free-electrons.com
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html