[PATCH v2 4/7] crypto: ccree: add ahash support

2018-01-22 Thread Gilad Ben-Yossef
Add CryptoCell async. hash and HMAC support.

Signed-off-by: Gilad Ben-Yossef 
---
 drivers/crypto/ccree/Makefile|2 +-
 drivers/crypto/ccree/cc_buffer_mgr.c |  261 +++-
 drivers/crypto/ccree/cc_driver.c |   13 +
 drivers/crypto/ccree/cc_driver.h |1 +
 drivers/crypto/ccree/cc_hash.c   | 2296 ++
 drivers/crypto/ccree/cc_hash.h   |  114 ++
 drivers/crypto/ccree/cc_pm.c |4 +
 7 files changed, 2688 insertions(+), 3 deletions(-)
 create mode 100644 drivers/crypto/ccree/cc_hash.c
 create mode 100644 drivers/crypto/ccree/cc_hash.h

diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index a7fecad..1109480 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o 
cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o 
cc_ivgen.o cc_sram_mgr.o
 ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
 ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c 
b/drivers/crypto/ccree/cc_buffer_mgr.c
index 46be101..bb306b4 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -9,6 +9,7 @@
 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
 #include "cc_cipher.h"
+#include "cc_hash.h"
 
 enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -348,9 +349,33 @@ static int cc_map_sg(struct device *dev, struct 
scatterlist *sg,
return 0;
 }
 
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+  u8 *curr_buff, u32 curr_buff_cnt,
+  struct buffer_array *sg_data)
+{
+   dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
+   /* create sg for the current buffer */
+   sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+   if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+   dev_err(dev, "dma_map_sg() src buffer failed\n");
+   return -ENOMEM;
+   }
+   dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+   &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+   sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+   areq_ctx->buff_sg->length);
+   areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+   areq_ctx->curr_sg = areq_ctx->buff_sg;
+   areq_ctx->in_nents = 0;
+   /* prepare for case of MLLI */
+   cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+   false, NULL);
+   return 0;
+}
+
 void cc_unmap_cipher_request(struct device *dev, void *ctx,
-unsigned int ivsize, struct scatterlist *src,
-struct scatterlist *dst)
+   unsigned int ivsize, struct scatterlist *src,
+   struct scatterlist *dst)
 {
struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 
@@ -472,6 +497,238 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, 
void *ctx,
return rc;
 }
 
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
+{
+   struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+   struct device *dev = drvdata_to_dev(drvdata);
+   u8 *curr_buff = cc_hash_buf(areq_ctx);
+   u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+   struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+   struct buffer_array sg_data;
+   struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+   u32 dummy = 0;
+   u32 mapped_nents = 0;
+
+   dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 
0x%X src=%pK curr_index=%u\n",
+   curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+   /* Init the type of the dma buffer */
+   areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+   mlli_params->curr_pool = NULL;
+   sg_data.num_of_buffers = 0;
+   areq_ctx->in_nents = 0;
+
+   if (nbytes == 0 && *curr_buff_cnt == 0) {
+   /* nothing to do */
+   return 0;
+   }
+
+   /*TODO: copy data in case that buffer is enough for operation */
+   /* map the previous buffer */
+   if (*curr_buff_cnt) {
+   if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+   &sg_data)) {
+   return -ENOMEM;
+   }
+   }
+
+   if (src && nbytes > 0 && do_update) {
+   if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents,

[PATCH v2 4/7] crypto: ccree: add ahash support

2018-01-22 Thread Gilad Ben-Yossef
Add CryptoCell async. hash and HMAC support.

Signed-off-by: Gilad Ben-Yossef 
---
 drivers/crypto/ccree/Makefile|2 +-
 drivers/crypto/ccree/cc_buffer_mgr.c |  261 +++-
 drivers/crypto/ccree/cc_driver.c |   13 +
 drivers/crypto/ccree/cc_driver.h |1 +
 drivers/crypto/ccree/cc_hash.c   | 2296 ++
 drivers/crypto/ccree/cc_hash.h   |  114 ++
 drivers/crypto/ccree/cc_pm.c |4 +
 7 files changed, 2688 insertions(+), 3 deletions(-)
 create mode 100644 drivers/crypto/ccree/cc_hash.c
 create mode 100644 drivers/crypto/ccree/cc_hash.h

diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index a7fecad..1109480 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o 
cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o 
cc_ivgen.o cc_sram_mgr.o
 ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
 ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c 
b/drivers/crypto/ccree/cc_buffer_mgr.c
index 46be101..bb306b4 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -9,6 +9,7 @@
 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
 #include "cc_cipher.h"
+#include "cc_hash.h"
 
 enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -348,9 +349,33 @@ static int cc_map_sg(struct device *dev, struct 
scatterlist *sg,
return 0;
 }
 
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+  u8 *curr_buff, u32 curr_buff_cnt,
+  struct buffer_array *sg_data)
+{
+   dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
+   /* create sg for the current buffer */
+   sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+   if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+   dev_err(dev, "dma_map_sg() src buffer failed\n");
+   return -ENOMEM;
+   }
+   dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+   &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+   sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+   areq_ctx->buff_sg->length);
+   areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+   areq_ctx->curr_sg = areq_ctx->buff_sg;
+   areq_ctx->in_nents = 0;
+   /* prepare for case of MLLI */
+   cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+   false, NULL);
+   return 0;
+}
+
 void cc_unmap_cipher_request(struct device *dev, void *ctx,
-unsigned int ivsize, struct scatterlist *src,
-struct scatterlist *dst)
+   unsigned int ivsize, struct scatterlist *src,
+   struct scatterlist *dst)
 {
struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 
@@ -472,6 +497,238 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, 
void *ctx,
return rc;
 }
 
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
+{
+   struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+   struct device *dev = drvdata_to_dev(drvdata);
+   u8 *curr_buff = cc_hash_buf(areq_ctx);
+   u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+   struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+   struct buffer_array sg_data;
+   struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+   u32 dummy = 0;
+   u32 mapped_nents = 0;
+
+   dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 
0x%X src=%pK curr_index=%u\n",
+   curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+   /* Init the type of the dma buffer */
+   areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+   mlli_params->curr_pool = NULL;
+   sg_data.num_of_buffers = 0;
+   areq_ctx->in_nents = 0;
+
+   if (nbytes == 0 && *curr_buff_cnt == 0) {
+   /* nothing to do */
+   return 0;
+   }
+
+   /*TODO: copy data in case that buffer is enough for operation */
+   /* map the previous buffer */
+   if (*curr_buff_cnt) {
+   if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+   &sg_data)) {
+   return -ENOMEM;
+   }
+   }
+
+   if (src && nbytes > 0 && do_update) {
+   if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents,