Add support for the new bulk skcipher requests to cryptd, so that requests
queued to the cryptd workqueue are forwarded to the child transform via the
bulk API as well.

Signed-off-by: Ondrej Mosnacek <omosna...@gmail.com>
---
 crypto/cryptd.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0508c48..b7d6e13 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -555,6 +555,114 @@ static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
 }
 
+/*
+ * Invoke the caller's completion callback for a bulk request and, if this
+ * was the last outstanding request on a tfm whose refcount has dropped,
+ * free the tfm.  Bulk counterpart of the plain skcipher completion path.
+ */
+static void cryptd_skcipher_bulk_complete(struct skcipher_bulk_request *req,
+                                         int err)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_bulk_reqtfm(req);
+       struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct cryptd_skcipher_request_ctx *rctx =
+                       skcipher_bulk_request_ctx(req);
+       /*
+        * Read the refcount before calling complete(): the callback may free
+        * req, so ctx must not be dereferenced afterwards unless we still
+        * hold a reference (presumably mirrors the non-bulk path — confirm).
+        */
+       int refcnt = atomic_read(&ctx->refcnt);
+
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+
+       /* -EINPROGRESS means the request stays queued; keep the reference. */
+       if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_skcipher(tfm);
+}
+
+/*
+ * Worker-side handler: perform the actual bulk encryption synchronously on
+ * the child transform, then restore the caller's completion callback and
+ * complete the request.
+ */
+static void cryptd_skcipher_bulk_encrypt(struct crypto_async_request *base,
+                                        int err)
+{
+       struct skcipher_bulk_request *req = skcipher_bulk_request_cast(base);
+       struct cryptd_skcipher_request_ctx *rctx =
+                       skcipher_bulk_request_ctx(req);
+       struct crypto_skcipher *tfm = crypto_skcipher_bulk_reqtfm(req);
+       struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_skcipher *child = ctx->child;
+       /*
+        * NOTE(review): subreq is sized by the caller-supplied req->maxmsgs
+        * on the kernel stack — confirm maxmsgs is bounded small enough that
+        * this cannot overflow the stack.
+        */
+       SKCIPHER_BULK_REQUEST_ON_STACK(subreq, req->maxmsgs, child);
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       /* Synchronous sub-request: no callback needed, MAY_SLEEP is fine. */
+       skcipher_bulk_request_set_tfm(subreq, child);
+       skcipher_bulk_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                          NULL, NULL);
+       skcipher_bulk_request_set_crypt(subreq, req->src, req->dst, req->nmsgs,
+                                       req->msgsize, req->msgsizes, req->ivs);
+
+       err = crypto_skcipher_encrypt_bulk(subreq);
+       skcipher_bulk_request_zero(subreq);
+
+       /* Put back the original callback saved at enqueue time. */
+       req->base.complete = rctx->complete;
+
+out:
+       cryptd_skcipher_bulk_complete(req, err);
+}
+
+/*
+ * Worker-side handler: perform the actual bulk decryption synchronously on
+ * the child transform, then restore the caller's completion callback and
+ * complete the request.  Identical to the encrypt path except for the
+ * crypto_skcipher_decrypt_bulk() call.
+ */
+static void cryptd_skcipher_bulk_decrypt(struct crypto_async_request *base,
+                                        int err)
+{
+       struct skcipher_bulk_request *req = skcipher_bulk_request_cast(base);
+       struct cryptd_skcipher_request_ctx *rctx =
+                       skcipher_bulk_request_ctx(req);
+       struct crypto_skcipher *tfm = crypto_skcipher_bulk_reqtfm(req);
+       struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_skcipher *child = ctx->child;
+       /*
+        * NOTE(review): subreq is sized by the caller-supplied req->maxmsgs
+        * on the kernel stack — confirm maxmsgs is bounded small enough that
+        * this cannot overflow the stack.
+        */
+       SKCIPHER_BULK_REQUEST_ON_STACK(subreq, req->maxmsgs, child);
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       /* Synchronous sub-request: no callback needed, MAY_SLEEP is fine. */
+       skcipher_bulk_request_set_tfm(subreq, child);
+       skcipher_bulk_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                          NULL, NULL);
+       skcipher_bulk_request_set_crypt(subreq, req->src, req->dst, req->nmsgs,
+                                       req->msgsize, req->msgsizes, req->ivs);
+
+       err = crypto_skcipher_decrypt_bulk(subreq);
+       skcipher_bulk_request_zero(subreq);
+
+       /* Put back the original callback saved at enqueue time. */
+       req->base.complete = rctx->complete;
+
+out:
+       cryptd_skcipher_bulk_complete(req, err);
+}
+
+/*
+ * Defer a bulk request to the cryptd workqueue: stash the caller's
+ * completion callback in the request context, substitute the worker
+ * handler (@compl), and queue the request on this CPU's cryptd queue.
+ */
+static int cryptd_skcipher_bulk_enqueue(struct skcipher_bulk_request *req,
+                                       crypto_completion_t compl)
+{
+       struct cryptd_skcipher_request_ctx *rctx =
+                       skcipher_bulk_request_ctx(req);
+       struct crypto_skcipher *tfm = crypto_skcipher_bulk_reqtfm(req);
+       struct cryptd_queue *queue;
+
+       queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
+       rctx->complete = req->base.complete;
+       req->base.complete = compl;
+
+       return cryptd_enqueue_request(queue, &req->base);
+}
+
+/* .encrypt_bulk entry point: queue the request for worker-side encryption. */
+static int cryptd_skcipher_bulk_encrypt_enqueue(
+               struct skcipher_bulk_request *req)
+{
+       return cryptd_skcipher_bulk_enqueue(req, cryptd_skcipher_bulk_encrypt);
+}
+
+/* .decrypt_bulk entry point: queue the request for worker-side decryption. */
+static int cryptd_skcipher_bulk_decrypt_enqueue(
+               struct skcipher_bulk_request *req)
+{
+       return cryptd_skcipher_bulk_enqueue(req, cryptd_skcipher_bulk_decrypt);
+}
+
+/*
+ * Per-request context size for bulk requests.  Independent of @maxmsgs:
+ * only the saved completion callback is stored here; the child sub-request
+ * is allocated on the worker's stack instead (see the NOTE(review) about
+ * maxmsgs-sized stack usage in the encrypt/decrypt handlers).
+ */
+static unsigned int cryptd_skcipher_bulk_reqsize(struct crypto_skcipher *tfm,
+                                                unsigned int maxmsgs)
+{
+       return sizeof(struct cryptd_skcipher_request_ctx);
+}
+
 static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
 {
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
@@ -641,6 +749,9 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
+       inst->alg.encrypt_bulk = cryptd_skcipher_bulk_encrypt_enqueue;
+       inst->alg.decrypt_bulk = cryptd_skcipher_bulk_decrypt_enqueue;
+       inst->alg.reqsize_bulk = cryptd_skcipher_bulk_reqsize;
 
        inst->free = cryptd_skcipher_free;
 
-- 
2.9.3

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to