From: Andrei Botila <andrei.bot...@nxp.com>

A hardware limitation exists for CAAM until Era 9 which restricts the
accelerator to IVs of only 8 bytes. When CAAM has a lower era a
fallback is necessary to process 16-byte IVs.
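The request is kept on the CAAM path while the IV still fits in 8 bytes
and is only handed to the fallback tfm when the upper half of the
16-byte XTS IV is non-zero. As an illustration only (plain userspace C,
not part of this patch, with a hypothetical needs_fallback() helper),
the decision amounts to:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * Mirrors the xts_skcipher_ivsize() test added below: true when
	 * the upper 8 bytes of the 16-byte XTS IV are non-zero, i.e. the
	 * IV no longer fits the 8 bytes pre-Era-9 CAAM can consume.
	 */
	static bool needs_fallback(const uint8_t iv[16])
	{
		uint64_t hi;

		/* unaligned-safe load of IV bytes 8..15, like get_unaligned() */
		memcpy(&hi, iv + 8, sizeof(hi));
		return hi != 0;
	}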
Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
Cc: <sta...@vger.kernel.org> # v4.12+
Signed-off-by: Andrei Botila <andrei.bot...@nxp.com>
---
 drivers/crypto/caam/Kconfig      |  1 +
 drivers/crypto/caam/caamalg_qi.c | 70 +++++++++++++++++++++++++++++---
 2 files changed, 66 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index dfeaad8dfe81..8169e6cd04e6 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -115,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
 	select CRYPTO_AUTHENC
 	select CRYPTO_SKCIPHER
 	select CRYPTO_DES
+	select CRYPTO_XTS
 	help
 	  Selecting this will use CAAM Queue Interface (QI) for sending
 	  & receiving crypto jobs to/from CAAM. This gives better performance
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index bb1c0106a95c..2b5e694fdaf6 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -18,6 +18,7 @@
 #include "qi.h"
 #include "jr.h"
 #include "caamalg_desc.h"
+#include <asm/unaligned.h>
 
 /*
  * crypto alg
@@ -67,6 +68,11 @@ struct caam_ctx {
 	struct device *qidev;
 	spinlock_t lock;	/* Protects multiple init of driver context */
 	struct caam_drv_ctx *drv_ctx[NUM_OP];
+	struct crypto_skcipher *fallback;
+};
+
+struct caam_skcipher_req_ctx {
+	struct skcipher_request fallback_req;
 };
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -726,12 +732,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
+	int err;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_dbg(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}
 
+	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (err)
+		return err;
+
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
@@ -1373,6 +1384,17 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	return edesc;
 }
 
+static bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+	u64 size = 0;
+
+	size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+
+	return !!size;
+}
+
 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct skcipher_edesc *edesc;
@@ -1383,6 +1405,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				 crypto_skcipher_decrypt(&rctx->fallback_req);
+	}
+
 	if (unlikely(caam_congested))
 		return -EAGAIN;
 
@@ -1507,6 +1544,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.base = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam-qi",
+				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = AES_BLOCK_SIZE,
 			},
 			.setkey = xts_skcipher_setkey,
@@ -2440,9 +2478,27 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+				tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
 
-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
-				false);
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+					    crypto_skcipher_reqsize(fallback));
+	}
+
+	return caam_init_common(ctx, &caam_alg->caam, false);
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
@@ -2468,7 +2524,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_skcipher_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2502,8 +2562,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+				CRYPTO_ALG_KERN_DRIVER_ONLY);
 
 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
--
2.17.1