From: Iuliana Prodan <iuliana.pro...@nxp.com>

Remove the CRYPTO_ALG_ALLOCATES_MEMORY flag and allocate the memory
needed by the driver to fulfil a request within the crypto
request object.
The extra size needed for the base extended descriptor, hw
descriptor commands and link tables is computed at frontend
driver (caamalg_qi) initialization and saved in the reqsize field,
which indicates how much memory may be needed per request.
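
As a rough sketch (not the exact driver code), the init-time computation
behind that reqsize value looks as follows for the skcipher case; the
aead case is analogous:

    /* extra per-request space: base edesc + two aligned 8-entry
     * link tables (one for src, one for dst) + largest IV
     */
    extra_reqsize = sizeof(struct skcipher_edesc) +
                    16 * sizeof(struct qm_sg_entry) + /* (4 + 1, aligned to 8) * 2 */
                    AES_BLOCK_SIZE;                   /* largest ivsize */
    crypto_skcipher_set_reqsize(tfm, extra_reqsize);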

The CRYPTO_ALG_ALLOCATES_MEMORY flag matters only for dm-crypt
use-cases, which appear to use at most 4 scatterlist entries.
Therefore, reqsize reserves memory for a maximum of 4 entries
for src and 4 for dst, aligned.
If a request needs more than these 4 entries, the memory is
allocated dynamically, at runtime.
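
In per-request terms (again a sketch, mirroring the skcipher path in
the diff below; the aead path behaves the same), the allocation
decision becomes:

    edesc_size = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + ivsize;
    if (edesc_size > CAAM_QI_MEMCACHE_SIZE) {
            /* too large to be handled at all */
            return ERR_PTR(-ENOMEM);
    } else if (edesc_size > crypto_skcipher_reqsize(skcipher)) {
            /* does not fit in the preallocated area: allocate dynamically */
            edesc = qi_cache_alloc(GFP_DMA | flags);
            edesc->free = true;   /* completion path must free it */
    } else {
            /* fits in the memory preallocated within the request object */
            edesc = skcipher_request_ctx(req);
            memset(edesc, 0, sizeof(*edesc));   /* leaves edesc->free == false */
    }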

Signed-off-by: Iuliana Prodan <iuliana.pro...@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 134 +++++++++++++++++++++----------
 1 file changed, 90 insertions(+), 44 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index a24ae966df4a..ea49697e2579 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -788,6 +788,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
  * @qm_sg_bytes: length of dma mapped h/w link table
+ * @free: stored to determine if aead_edesc needs to be freed
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @assoclen: associated data length, in CAAM endianness
  * @assoclen_dma: bus physical mapped address of req->assoclen
@@ -799,6 +800,7 @@ struct aead_edesc {
        int dst_nents;
        dma_addr_t iv_dma;
        int qm_sg_bytes;
+       bool free;
        dma_addr_t qm_sg_dma;
        unsigned int assoclen;
        dma_addr_t assoclen_dma;
@@ -812,6 +814,7 @@ struct aead_edesc {
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
  * @qm_sg_bytes: length of dma mapped h/w link table
+ * @free: stored to determine if skcipher_edesc needs to be freed
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @drv_req: driver-specific request structure
  * @sgt: the h/w link table, followed by IV
@@ -821,6 +824,7 @@ struct skcipher_edesc {
        int dst_nents;
        dma_addr_t iv_dma;
        int qm_sg_bytes;
+       bool free;
        dma_addr_t qm_sg_dma;
        struct caam_drv_req drv_req;
        struct qm_sg_entry sgt[];
@@ -927,7 +931,8 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
        aead_unmap(qidev, edesc, aead_req);
 
        aead_request_complete(aead_req, ecode);
-       qi_cache_free(edesc);
+       if (edesc->free)
+               qi_cache_free(edesc);
 }
 
 /*
@@ -949,7 +954,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        dma_addr_t qm_sg_dma, iv_dma = 0;
        int ivsize = 0;
        unsigned int authsize = ctx->authsize;
-       int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+       int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes, edesc_size = 0;
        int in_len, out_len;
        struct qm_sg_entry *sg_table, *fd_sgt;
        struct caam_drv_ctx *drv_ctx;
@@ -958,13 +963,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        if (IS_ERR_OR_NULL(drv_ctx))
                return (struct aead_edesc *)drv_ctx;
 
-       /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = qi_cache_alloc(GFP_DMA | flags);
-       if (unlikely(!edesc)) {
-               dev_err(qidev, "could not allocate extended descriptor\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
        if (likely(req->src == req->dst)) {
                src_len = req->assoclen + req->cryptlen +
                          (encrypt ? authsize : 0);
@@ -973,7 +971,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                if (unlikely(src_nents < 0)) {
                        dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
                                src_len);
-                       qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }
 
@@ -981,7 +978,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                              DMA_BIDIRECTIONAL);
                if (unlikely(!mapped_src_nents)) {
                        dev_err(qidev, "unable to map source\n");
-                       qi_cache_free(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
@@ -992,7 +988,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                if (unlikely(src_nents < 0)) {
                        dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
                                src_len);
-                       qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }
 
@@ -1000,7 +995,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                if (unlikely(dst_nents < 0)) {
                        dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
                                dst_len);
-                       qi_cache_free(edesc);
                        return ERR_PTR(dst_nents);
                }
 
@@ -1009,7 +1003,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                                      src_nents, DMA_TO_DEVICE);
                        if (unlikely(!mapped_src_nents)) {
                                dev_err(qidev, "unable to map source\n");
-                               qi_cache_free(edesc);
                                return ERR_PTR(-ENOMEM);
                        }
                } else {
@@ -1024,7 +1017,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                dev_err(qidev, "unable to map destination\n");
                                dma_unmap_sg(qidev, req->src, src_nents,
                                             DMA_TO_DEVICE);
-                               qi_cache_free(edesc);
                                return ERR_PTR(-ENOMEM);
                        }
                } else {
@@ -1058,14 +1050,30 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
        sg_table = &edesc->sgt[0];
        qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-       if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-                    CAAM_QI_MEMCACHE_SIZE)) {
+
+       /* Check if there's enough space for edesc saved in req */
+       edesc_size = offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize;
+       if (unlikely(edesc_size > CAAM_QI_MEMCACHE_SIZE)) {
                dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
                        qm_sg_ents, ivsize);
                caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, DMA_NONE, 0, 0);
-               qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
+       } else if (edesc_size > crypto_aead_reqsize(aead)) {
+               /* allocate space for base edesc, link tables and IV */
+               edesc = qi_cache_alloc(GFP_DMA | flags);
+               if (unlikely(!edesc)) {
+                       dev_err(qidev, "could not allocate extended descriptor\n");
+                       caam_unmap(qidev, req->src, req->dst, src_nents,
+                                  dst_nents, 0, 0, DMA_NONE, 0, 0);
+                       return ERR_PTR(-ENOMEM);
+               }
+               edesc->free = true;
+       } else {
+               /* get address for base edesc, link tables and IV */
+               edesc = (struct aead_edesc *)((u8 *)aead_request_ctx(req));
+               /* clear memory */
+               memset(edesc, 0, sizeof(*edesc));
        }
 
        if (ivsize) {
@@ -1079,7 +1087,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                        dev_err(qidev, "unable to map IV\n");
                        caam_unmap(qidev, req->src, req->dst, src_nents,
                                   dst_nents, 0, 0, DMA_NONE, 0, 0);
-                       qi_cache_free(edesc);
+                       if (edesc->free)
+                               qi_cache_free(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        }
@@ -1098,7 +1107,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                dev_err(qidev, "unable to map assoclen\n");
                caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
                           iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
-               qi_cache_free(edesc);
+               if (edesc->free)
+                       qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1120,7 +1130,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
                caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
                           iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
-               qi_cache_free(edesc);
+               if (edesc->free)
+                       qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1174,7 +1185,8 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
                ret = -EINPROGRESS;
        } else {
                aead_unmap(ctx->qidev, edesc, req);
-               qi_cache_free(edesc);
+               if (edesc->free)
+                       qi_cache_free(edesc);
        }
 
        return ret;
@@ -1237,7 +1249,8 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
                memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
                       ivsize);
 
-       qi_cache_free(edesc);
+       if (edesc->free)
+               qi_cache_free(edesc);
        skcipher_request_complete(req, ecode);
 }
 
@@ -1254,7 +1267,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
        dma_addr_t iv_dma;
        u8 *iv;
        int ivsize = crypto_skcipher_ivsize(skcipher);
-       int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+       int dst_sg_idx, qm_sg_ents, qm_sg_bytes, edesc_size = 0;
        struct qm_sg_entry *sg_table, *fd_sgt;
        struct caam_drv_ctx *drv_ctx;
 
@@ -1317,22 +1330,30 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
                qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
 
        qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
-       if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
-                    ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+
+       /* Check if there's enough space for edesc saved in req */
+       edesc_size = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + ivsize;
+       if (unlikely(edesc_size > CAAM_QI_MEMCACHE_SIZE)) {
                dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
                        qm_sg_ents, ivsize);
                caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, DMA_NONE, 0, 0);
                return ERR_PTR(-ENOMEM);
-       }
-
-       /* allocate space for base edesc, link tables and IV */
-       edesc = qi_cache_alloc(GFP_DMA | flags);
-       if (unlikely(!edesc)) {
-               dev_err(qidev, "could not allocate extended descriptor\n");
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-                          0, DMA_NONE, 0, 0);
-               return ERR_PTR(-ENOMEM);
+       } else if (edesc_size > crypto_skcipher_reqsize(skcipher)) {
+               /* allocate space for base edesc, link tables and IV */
+               edesc = qi_cache_alloc(GFP_DMA | flags);
+               if (unlikely(!edesc)) {
+                       dev_err(qidev, "could not allocate extended descriptor\n");
+                       caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                                  0, DMA_NONE, 0, 0);
+                       return ERR_PTR(-ENOMEM);
+               }
+               edesc->free = true;
+       } else {
+               /* get address for base edesc, link tables and IV */
+               edesc = (struct skcipher_edesc *)((u8 *)skcipher_request_ctx(req));
+               /* clear memory */
+               memset(edesc, 0, sizeof(*edesc));
        }
 
        /* Make sure IV is located in a DMAable area */
@@ -1345,7 +1366,8 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
                dev_err(qidev, "unable to map IV\n");
                caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, DMA_NONE, 0, 0);
-               qi_cache_free(edesc);
+               if (edesc->free)
+                       qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1372,7 +1394,8 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
                dev_err(qidev, "unable to map S/G table\n");
                caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
                           iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
-               qi_cache_free(edesc);
+               if (edesc->free)
+                       qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1446,7 +1469,8 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
                ret = -EINPROGRESS;
        } else {
                skcipher_unmap(ctx->qidev, edesc, req);
-               qi_cache_free(edesc);
+               if (edesc->free)
+                       qi_cache_free(edesc);
        }
 
        return ret;
@@ -2493,7 +2517,15 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
                container_of(alg, typeof(*caam_alg), skcipher);
        struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
        u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
-       int ret = 0;
+       int ret = 0, extra_reqsize = 0;
+
+       /* Compute extra space needed for base edesc, link tables and IV */
+       extra_reqsize = sizeof(struct skcipher_edesc) +
+                       /* link tables for src and dst:
+                        * 4 entries max + 1 for IV, aligned = 8
+                        */
+                       (16 * sizeof(struct qm_sg_entry)) +
+                       AES_BLOCK_SIZE; /* ivsize */
 
        if (alg_aai == OP_ALG_AAI_XTS) {
                const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
@@ -2509,7 +2541,10 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 
                ctx->fallback = fallback;
                crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
-                                           crypto_skcipher_reqsize(fallback));
+                                           crypto_skcipher_reqsize(fallback) +
+                                           extra_reqsize);
+       } else {
+               crypto_skcipher_set_reqsize(tfm, extra_reqsize);
        }
 
        ret = caam_init_common(ctx, &caam_alg->caam, false);
@@ -2525,6 +2560,19 @@ static int caam_aead_init(struct crypto_aead *tfm)
        struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
                                                      aead);
        struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+       int extra_reqsize = 0;
+
+       /* Compute extra space needed for base edesc, link tables and IV */
+       extra_reqsize = sizeof(struct aead_edesc) +
+                       /* link tables for src and dst:
+                        * 4 entries max + 1 for IV, aligned = 8
+                        */
+                       (16 * sizeof(struct qm_sg_entry)) +
+                       AES_BLOCK_SIZE; /* ivsize */
+       /*
+        * Set the size for the space needed for base edesc, link tables, IV
+        */
+       crypto_aead_set_reqsize(tfm, extra_reqsize);
 
        return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
 }
@@ -2580,8 +2628,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-       alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-                               CRYPTO_ALG_KERN_DRIVER_ONLY);
+       alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY);
 
        alg->init = caam_cra_init;
        alg->exit = caam_cra_exit;
@@ -2594,8 +2641,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-                             CRYPTO_ALG_KERN_DRIVER_ONLY;
+       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 
        alg->init = caam_aead_init;
        alg->exit = caam_aead_exit;
-- 
2.17.1
