[PATCH v1 3/3] crypto: ccp - Protect against poorly marked end of sg list

2015-05-26 Thread Tom Lendacky
Scatter gather lists can be created with more available entries than are
actually used (e.g. using sg_init_table() to reserve a specific number
of sg entries, but then filling in fewer than that based on
the data length).  The caller sometimes fails to mark the last entry
with sg_mark_end().  In these cases, sg_nents() will return the original
size of the sg list as opposed to the actual number of sg entries that
contain valid data.
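
As a hedged illustration (hypothetical buffers, not code from the
patch), a caller that sizes an sg table for four entries but fills
only two, without marking the end:

    #include <linux/scatterlist.h>

    struct scatterlist sgl[4];

    sg_init_table(sgl, 4);            /* marks sgl[3] as the list end */
    sg_set_buf(&sgl[0], buf0, len0);  /* buf0/len0, buf1/len1: placeholders */
    sg_set_buf(&sgl[1], buf1, len1);
    /* No sg_mark_end(&sgl[1]) here, so sg_nents(sgl) still returns 4
     * even though only the first two entries contain valid data. */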

On arm64, if the sg_nents() value is used in a call to dma_map_sg() in
this situation, then it causes a BUG_ON in lib/swiotlb.c because an
"empty" sg list entry results in dma_capable() returning false and
swiotlb trying to create a bounce buffer of size 0. This occurred in
the userspace crypto interface before being fixed by commit

0f477b655a52 ("crypto: algif - Mark sgl end at the end of data")

Protect against this in the future by counting the number of sg entries
needed to meet the length requirement and supplying that value to
dma_map_sg().
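
As a worked example of the new helper (hypothetical lengths):

    /* sg points at three 4096-byte entries, but only 6000 bytes are used */
    nents = ccp_sg_nents(sg, 6000);  /* returns 2; a poorly marked table
                                      * would make sg_nents() return its
                                      * full size */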

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-ops.c |   18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 542453c..8377ed6 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -477,6 +477,22 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
 }
 
+static int ccp_sg_nents(struct scatterlist *sg, u64 len)
+{
+   int nents = 0;
+
+   while (sg && len) {
+   nents++;
+   if (sg->length > len)
+   break;
+
+   len -= sg->length;
+   sg = sg_next(sg);
+   }
+
+   return nents;
+}
+
 static void ccp_sg_free(struct ccp_sg_workarea *wa)
 {
if (wa->dma_count)
@@ -495,7 +511,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
if (!sg)
return 0;
 
-   wa->nents = sg_nents(sg);
+   wa->nents = ccp_sg_nents(sg, len);
wa->bytes_left = len;
wa->sg_used = 0;
 



[PATCH v1 2/3] crypto: ccp - Remove unused structure field

2015-05-26 Thread Tom Lendacky
Remove the length field from the ccp_sg_workarea since it is unused.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-ops.c |2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 71f2e3c..542453c 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -53,7 +53,6 @@ struct ccp_dm_workarea {
 struct ccp_sg_workarea {
struct scatterlist *sg;
unsigned int nents;
-   unsigned int length;
 
struct scatterlist *dma_sg;
struct device *dma_dev;
@@ -497,7 +496,6 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
return 0;
 
wa->nents = sg_nents(sg);
-   wa->length = sg->length;
wa->bytes_left = len;
wa->sg_used = 0;
 



[PATCH v1 1/3] crypto: ccp - Remove manual check and set of dma_mask pointer

2015-05-26 Thread Tom Lendacky
The underlying device support will set the device dma_mask pointer
if DMA is set up properly for the device.  Remove the check for and
assignment of dma_mask when it is NULL.  Instead, just error out if
dma_set_mask_and_coherent() fails because dma_mask is NULL.
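
For reference, the error path this now relies on comes from the core
DMA API; at the time, dma_set_mask() was roughly the following (a
sketch from memory, and arch implementations may differ):

    int dma_set_mask(struct device *dev, u64 mask)
    {
            if (!dev->dma_mask || !dma_supported(dev, mask))
                    return -EIO;    /* a NULL dma_mask fails here */

            *dev->dma_mask = mask;

            return 0;
    }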

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-platform.c |2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index b1c20b2..c0aa5c5 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -174,8 +174,6 @@ static int ccp_platform_probe(struct platform_device *pdev)
}
ccp->io_regs = ccp->io_map;
 
-   if (!dev->dma_mask)
-   dev->dma_mask = &dev->coherent_dma_mask;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);



[PATCH v1 0/3] crypto: ccp - CCP driver updates 2015-05-26

2015-05-26 Thread Tom Lendacky
The following patches are included in this driver update series:

- Remove the checking and setting of the device dma_mask field
- Remove an unused field from a structure to help avoid any confusion
- Protect against poorly marked end of scatter-gather list
 
This patch series is based on cryptodev-2.6.

---

Tom Lendacky (3):
  crypto: ccp - Remove manual check and set of dma_mask pointer
  crypto: ccp - Remove unused structure field
  crypto: ccp - Protect against poorly marked end of sg list


 drivers/crypto/ccp/ccp-ops.c  |   20 +---
 drivers/crypto/ccp/ccp-platform.c |2 --
 2 files changed, 17 insertions(+), 5 deletions(-)

-- 
Tom Lendacky


Re: [Linaro-acpi] [V2 PATCH 2/5] arm64 : Introduce support for ACPI _CCA object

2015-05-05 Thread Tom Lendacky

On 05/05/2015 11:13 AM, Suravee Suthikulanit wrote:

On 5/5/2015 11:12 AM, Arnd Bergmann wrote:

On Tuesday 05 May 2015 11:09:38 Suravee Suthikulanit wrote:


However, code in several places makes use of dma_map_ops without
checking if the ops are NULL (i.e.
include/asm-generic/dma-mapping-common.h and the arch-specific
implementations). If setting it to NULL is what we are planning to
support, we would need to scrub the current code to add NULL checks.
Also, would you consider whether that is safe to do going forward?




I mean the dma_mask pointer, not dma_map_ops.


Except a lot of drivers will actually set the dma_mask pointer during
probe (usually by setting dev->dma_mask = &dev->coherent_dma_mask or by
calling dma_coerce_mask_and_coherent).  So I think the dummy_dma_ops
might be the safest way to go.
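
For reference, dma_coerce_mask_and_coherent() is roughly the
following (a sketch from memory):

    static inline int dma_coerce_mask_and_coherent(struct device *dev,
                                                   u64 mask)
    {
            /* Force the dma_mask pointer to the coherent mask storage,
             * then set both masks. */
            dev->dma_mask = &dev->coherent_dma_mask;
            return dma_set_mask_and_coherent(dev, mask);
    }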

Thanks,
Tom



Arnd



Ah, got it. Sorry for the confusion.

Suravee



Re: [PATCH 4/4] crypto: talitos - add software backlog queue handling

2015-03-13 Thread Tom Lendacky

On 03/13/2015 12:16 PM, Horia Geanta wrote:

I was running into situations where the hardware FIFO was filling up, and
the code was returning EAGAIN to dm-crypt and just dropping the submitted
crypto request.

This adds support in talitos for a software backlog queue. When requests
can't be queued to the hardware immediately EBUSY is returned. The queued
requests are dispatched to the hardware in received order as hardware FIFO
slots become available.
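
A hedged caller-side sketch of these semantics (illustrative only;
"req" stands for any crypto request built on talitos):

    ret = talitos_submit(dev, ch, &req->base);
    switch (ret) {
    case -EINPROGRESS:
            /* queued directly to the hardware FIFO */
            break;
    case -EBUSY:
            /* FIFO full: backlogged if CRYPTO_TFM_REQ_MAY_BACKLOG was
             * set (the completion callback fires with -EINPROGRESS
             * when it reaches the hardware), otherwise rejected */
            break;
    default:
            /* immediate submission error */
            break;
    }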

Signed-off-by: Martin Hicks 
Signed-off-by: Horia Geanta 
---
  drivers/crypto/talitos.c | 107 +--
  drivers/crypto/talitos.h |   2 +
  2 files changed, 97 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c184987dfcc7..d4679030d23c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -197,23 +197,41 @@ static struct talitos_request *to_talitos_req(struct crypto_async_request *areq)
}
  }

-int talitos_submit(struct device *dev, int ch,
-  struct crypto_async_request *areq)
+/*
+ * Enqueue to HW queue a request, coming either from upper layer or taken from
+ * SW queue. When drawing from SW queue, check if there are backlogged requests
+ * and notify their producers.
+ */
+int __talitos_handle_queue(struct device *dev, int ch,
+  struct crypto_async_request *areq,
+  unsigned long *irq_flags)
  {
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
-   unsigned long flags;
int head;

-   spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
-
if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
/* h/w fifo is full */
-   spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
-   return -EAGAIN;
+   if (!areq)
+   return -EBUSY;
+
+   /* Try to backlog request (if allowed) */
+   return crypto_enqueue_request(&priv->chan[ch].queue, areq);


I'd remembered something about how hardware drivers should use their
own list element for queuing, so I searched back and found this:

http://marc.info/?l=linux-crypto-vger&m=137609769605139&w=2
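
That pattern looks roughly like this (hypothetical names; the queue
linkage is owned by the driver rather than borrowed from the caller's
request):

    struct my_hw_request {
            struct list_head entry;            /* driver-owned queue linkage */
            struct crypto_async_request *areq; /* the caller's request */
            dma_addr_t dma_desc;               /* descriptor given to the HW */
    };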

Thanks,
Tom


}

-   head = priv->chan[ch].head;
+   if (!areq) {
+   struct crypto_async_request *backlog =
+   crypto_get_backlog(&priv->chan[ch].queue);
+
+   /* Dequeue the oldest request */
+   areq = crypto_dequeue_request(&priv->chan[ch].queue);
+   if (!areq)
+   return 0;
+
+   /* Mark a backlogged request as in-progress */
+   if (backlog)
+   backlog->complete(backlog, -EINPROGRESS);
+   }

request = to_talitos_req(areq);
if (IS_ERR(request))
@@ -224,6 +242,7 @@ int talitos_submit(struct device *dev, int ch,
   DMA_BIDIRECTIONAL);

/* increment fifo head */
+   head = priv->chan[ch].head;
priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

smp_wmb();
@@ -236,14 +255,66 @@ int talitos_submit(struct device *dev, int ch,
out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 lower_32_bits(request->dma_desc));

+   return -EINPROGRESS;
+}
+
+int talitos_submit(struct device *dev, int ch,
+  struct crypto_async_request *areq)
+{
+   struct talitos_private *priv = dev_get_drvdata(dev);
+   unsigned long flags;
+   int ret;
+
+   spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
+
+   /*
+* Hidden assumption: we maintain submission order separately for
+* requests that may be backlogged and those that may not. For e.g. even
+* if SW queue has some requests, we won't drop an incoming request that
+* may not be backlogged, but enqueue it in the HW queue (in front of
+* pending ones).
+*/
+   if (areq->flags & CRYPTO_TFM_REQ_MAY_BACKLOG &&
+   priv->chan[ch].queue.qlen) {
+   /*
+* There are pending requests in the SW queue. Since we want to
+* maintain the order of requests, we cannot enqueue in the HW
+* queue. Thus put this new request in SW queue and dispatch
+* the oldest backlogged request to the hardware.
+*/
+   ret = crypto_enqueue_request(&priv->chan[ch].queue, areq);
+   __talitos_handle_queue(dev, ch, NULL, &flags);
+   } else {
+   ret = __talitos_handle_queue(dev, ch, areq, &flags);
+   }
+
spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

-   return -EINPROGRESS;
+   return ret;
  }
  EXPORT_SYMBOL(talitos_submit);

+static void talitos_handle_queue(struct device *dev, int ch)
+{
+   struct talitos_private *priv = dev_get_drvdata(d

Re: AF_ALG interface not marking the end of the scatter-gather list

2015-02-13 Thread Tom Lendacky

On 02/13/2015 05:43 AM, Stephan Mueller wrote:

Am Donnerstag, 12. Februar 2015, 17:41:59 schrieb Tom Lendacky:

Hi Tom,


I was doing some testing of the CCP driver using the AF_ALG interface
and encountered a BUG_ON statement during scatter-gather DMA mapping.

In algif_skcipher.c, before submitting a request to the Crypto API
the input sg list is not updated to mark the last valid sg entry of the
input data. So even if there is only a single valid sg entry, sg_nents
returns 127 (the initial value used when creating the sg table).

In the CCP driver, when making a call to dma_map_sg I supply the number
of entries as returned by sg_nents. During this call, the sg elements
that are not valid cause a BUG_ON statement to be hit.

I've worked around the issue in skcipher_recvmsg by marking the last
valid sg entry (sg_mark_end(sgl->sg + sgl->cur - 1)) just before the
call to ablkcipher_request_set_crypt and then unmarking the entry after
the return from af_alg_wait_for_completion (using sg_unmark_end).

Is this an appropriate/valid solution for this issue?  If so, I can
submit a patch with the fix in algif_skcipher and algif_hash.


There has been a patch around this issue -- see patch
0f477b655a524515ec9a263d70d51f460c05a161


Thanks for the pointer Stephan.  I had been working with the main
kernel tree where this patch hasn't been merged yet.

Thanks,
Tom



Thanks,
Tom






AF_ALG interface not marking the end of the scatter-gather list

2015-02-12 Thread Tom Lendacky

I was doing some testing of the CCP driver using the AF_ALG interface
and encountered a BUG_ON statement during scatter-gather DMA mapping.

In algif_skcipher.c, before submitting a request to the Crypto API
the input sg list is not updated to mark the last valid sg entry of the
input data. So even if there is only a single valid sg entry, sg_nents
returns 127 (the initial value used when creating the sg table).

In the CCP driver, when making a call to dma_map_sg I supply the number
of entries as returned by sg_nents. During this call, the sg elements
that are not valid cause a BUG_ON statement to be hit.

I've worked around the issue in skcipher_recvmsg by marking the last
valid sg entry (sg_mark_end(sgl->sg + sgl->cur - 1)) just before the
call to ablkcipher_request_set_crypt and then unmarking the entry after
the return from af_alg_wait_for_completion (using sg_unmark_end).
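
I.e. roughly the following (a sketch against the algif_skcipher code
of this era, so treat the details as approximate):

    sg_mark_end(sgl->sg + sgl->cur - 1);

    ablkcipher_request_set_crypt(&ctx->req, sgl->sg, ctx->rsgl.sg,
                                 used, ctx->iv);
    err = af_alg_wait_for_completion(
                    ctx->enc ? crypto_ablkcipher_encrypt(&ctx->req)
                             : crypto_ablkcipher_decrypt(&ctx->req),
                    &ctx->completion);

    sg_unmark_end(sgl->sg + sgl->cur - 1);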

Is this an appropriate/valid solution for this issue?  If so, I can
submit a patch with the fix in algif_skcipher and algif_hash.

Thanks,
Tom


[PATCH v1 0/5] crypto: ccp - CCP driver updates 2015-01-28

2015-02-03 Thread Tom Lendacky
For some reason this series never reached the mailing list... resending.

The following series of patches includes functional updates to the
driver as well as some trivial changes.

- Fix checks/warnings from checkpatch
- Update how the CCP is built (Kconfig and Makefile)
- Use dma_set_mask_and_coherent to set the DMA mask
- Use devm_ calls where appropriate
- Add ACPI support
  
This patch series is based on cryptodev-2.6.

---

Tom Lendacky (5):
  crypto: ccp - Updates for checkpatch warnings/errors
  crypto: ccp - Update CCP build support
  crypto: ccp - Use dma_set_mask_and_coherent to set DMA mask
  crypto: ccp - Convert calls to their devm_ counterparts
  crypto: ccp - Add ACPI support


 drivers/crypto/Kconfig   |2 -
 drivers/crypto/ccp/Makefile  |9 +-
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   12 ++-
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |4 -
 drivers/crypto/ccp/ccp-crypto-aes.c  |3 -
 drivers/crypto/ccp/ccp-crypto-main.c |5 +
 drivers/crypto/ccp/ccp-crypto-sha.c  |   12 ++-
 drivers/crypto/ccp/ccp-crypto.h  |3 -
 drivers/crypto/ccp/ccp-dev.c |7 +-
 drivers/crypto/ccp/ccp-dev.h |   12 +--
 drivers/crypto/ccp/ccp-ops.c |   24 +++---
 drivers/crypto/ccp/ccp-pci.c |   21 ++
 drivers/crypto/ccp/ccp-platform.c|  111 ++
 13 files changed, 143 insertions(+), 82 deletions(-)

-- 
Tom Lendacky


[PATCH v1 3/5] crypto: ccp - Use dma_set_mask_and_coherent to set DMA mask

2015-02-03 Thread Tom Lendacky
Replace the setting of the DMA masks with the dma_set_mask_and_coherent
function call.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-platform.c |7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 9e09c50..04265a3 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -109,8 +109,11 @@ static int ccp_platform_probe(struct platform_device *pdev)
 
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
-   *(dev->dma_mask) = DMA_BIT_MASK(48);
-   dev->coherent_dma_mask = DMA_BIT_MASK(48);
+   ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+   if (ret) {
+   dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+   goto e_free;
+   }
 
if (of_property_read_bool(dev->of_node, "dma-coherent"))
ccp->axcache = CACHE_WB_NO_ALLOC;



[PATCH v1 4/5] crypto: ccp - Convert calls to their devm_ counterparts

2015-02-03 Thread Tom Lendacky
Where applicable, convert calls to their devm_ counterparts, e.g. kzalloc
to devm_kzalloc.
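
The payoff is in the cleanup paths; a hedged before/after sketch:

    /* Before: the allocation must be unwound on every error path
     * and in the remove path. */
    ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
    if (!ccp)
            return NULL;
    /* ... kfree(ccp) in each error path and in remove ... */

    /* After: devres frees it automatically on probe failure or
     * device unbind, so those kfree() calls can be deleted. */
    ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
    if (!ccp)
            return NULL;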

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-dev.c  |2 +-
 drivers/crypto/ccp/ccp-pci.c  |   19 +--
 drivers/crypto/ccp/ccp-platform.c |   11 +++
 3 files changed, 9 insertions(+), 23 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 68c637a..861bacc 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -295,7 +295,7 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 {
struct ccp_device *ccp;
 
-   ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
+   ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
if (!ccp)
return NULL;
ccp->dev = dev;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 1980f77..af190d4 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -174,11 +174,10 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ccp)
goto e_err;
 
-   ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
-   if (!ccp_pci) {
-   ret = -ENOMEM;
-   goto e_free1;
-   }
+   ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
+   if (!ccp_pci)
+   goto e_err;
+
ccp->dev_specific = ccp_pci;
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -186,7 +185,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = pci_request_regions(pdev, "ccp");
if (ret) {
dev_err(dev, "pci_request_regions failed (%d)\n", ret);
-   goto e_free2;
+   goto e_err;
}
 
ret = pci_enable_device(pdev);
@@ -239,12 +238,6 @@ e_device:
 e_regions:
pci_release_regions(pdev);
 
-e_free2:
-   kfree(ccp_pci);
-
-e_free1:
-   kfree(ccp);
-
 e_err:
dev_notice(dev, "initialization failed\n");
return ret;
@@ -266,8 +259,6 @@ static void ccp_pci_remove(struct pci_dev *pdev)
 
pci_release_regions(pdev);
 
-   kfree(ccp);
-
dev_notice(dev, "disabled\n");
 }
 
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 04265a3..20661f0 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -103,7 +103,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
ccp->io_map = devm_ioremap_resource(dev, ior);
if (IS_ERR(ccp->io_map)) {
ret = PTR_ERR(ccp->io_map);
-   goto e_free;
+   goto e_err;
}
ccp->io_regs = ccp->io_map;
 
@@ -112,7 +112,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-   goto e_free;
+   goto e_err;
}
 
if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -124,15 +124,12 @@ static int ccp_platform_probe(struct platform_device *pdev)
 
ret = ccp_init(ccp);
if (ret)
-   goto e_free;
+   goto e_err;
 
dev_notice(dev, "enabled\n");
 
return 0;
 
-e_free:
-   kfree(ccp);
-
 e_err:
dev_notice(dev, "initialization failed\n");
return ret;
@@ -145,8 +142,6 @@ static int ccp_platform_remove(struct platform_device *pdev)
 
ccp_destroy(ccp);
 
-   kfree(ccp);
-
dev_notice(dev, "disabled\n");
 
return 0;



[PATCH v1 2/5] crypto: ccp - Update CCP build support

2015-02-03 Thread Tom Lendacky
Add HAS_IOMEM as a Kconfig dependency. Always include ccp-platform.c
in the CCP build and conditionally include ccp-pci.c.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/Kconfig  |2 +-
 drivers/crypto/ccp/Makefile |9 ++---
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 2fb0fdf..b840b79 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
-   depends on (X86 && PCI) || ARM64
+   depends on ((X86 && PCI) || ARM64) && HAS_IOMEM
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 7f592d8..55a1f39 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,11 +1,6 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o
-ifdef CONFIG_X86
-ccp-objs += ccp-pci.o
-endif
-ifdef CONFIG_ARM64
-ccp-objs += ccp-platform.o
-endif
+ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-$(CONFIG_PCI) += ccp-pci.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \



[PATCH v1 1/5] crypto: ccp - Updates for checkpatch warnings/errors

2015-02-03 Thread Tom Lendacky
Changes to address warnings and errors reported by the checkpatch
script.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   12 +++-
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |4 +---
 drivers/crypto/ccp/ccp-crypto-aes.c  |3 +--
 drivers/crypto/ccp/ccp-crypto-main.c |5 ++---
 drivers/crypto/ccp/ccp-crypto-sha.c  |   12 +++-
 drivers/crypto/ccp/ccp-crypto.h  |3 ---
 drivers/crypto/ccp/ccp-dev.c |5 +
 drivers/crypto/ccp/ccp-dev.h |   12 
 drivers/crypto/ccp/ccp-ops.c |   24 
 drivers/crypto/ccp/ccp-pci.c |2 +-
 drivers/crypto/ccp/ccp-platform.c|1 -
 11 files changed, 36 insertions(+), 47 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 8e162ad..ea7e844 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -23,7 +23,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 int ret)
 {
@@ -38,11 +37,13 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
if (rctx->hash_rem) {
/* Save remaining data to buffer */
unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
scatterwalk_map_and_copy(rctx->buf, rctx->src,
 offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
-   } else
+   } else {
rctx->buf_count = 0;
+   }
 
/* Update result area if supplied */
if (req->result)
@@ -202,7 +203,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
 }
 
 static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
-  unsigned int key_len)
+  unsigned int key_len)
 {
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct ccp_crypto_ahash_alg *alg =
@@ -292,7 +293,8 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
 
cipher_tfm = crypto_alloc_cipher("aes", 0,
-   CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+CRYPTO_ALG_ASYNC |
+CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(cipher_tfm)) {
pr_warn("could not load aes cipher driver\n");
return PTR_ERR(cipher_tfm);
@@ -354,7 +356,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
-   base->cra_name, ret);
+  base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0cc5594..52c7395 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -21,7 +21,6 @@
 
 #include "ccp-crypto.h"
 
-
 struct ccp_aes_xts_def {
const char *name;
const char *drv_name;
@@ -216,7 +215,6 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
ctx->u.aes.tfm_ablkcipher = NULL;
 }
 
-
 static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
 {
@@ -255,7 +253,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
-   alg->cra_name, ret);
+  alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index e46490d..7984f91 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -22,7 +22,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
 {
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
@@ -345,7 +344,7 @@ static int ccp_register_aes_alg(struct list_head *head,
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
-   alg->cra_name, ret);
+  alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-ma

[PATCH v1 5/5] crypto: ccp - Add ACPI support

2015-02-03 Thread Tom Lendacky
Add support for ACPI to the CCP platform driver.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/Kconfig|2 -
 drivers/crypto/ccp/ccp-platform.c |   96 +++--
 2 files changed, 93 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b840b79..7e94413 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
-   depends on ((X86 && PCI) || ARM64) && HAS_IOMEM
-   depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 20661f0..b1c20b2 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -23,9 +23,16 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include "ccp-dev.h"
 
+struct ccp_platform {
+   int use_acpi;
+   int coherent;
+};
+
 static int ccp_get_irq(struct ccp_device *ccp)
 {
struct device *dev = ccp->dev;
@@ -83,10 +90,64 @@ static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
return NULL;
 }
 
+#ifdef CONFIG_ACPI
+static int ccp_acpi_support(struct ccp_device *ccp)
+{
+   struct ccp_platform *ccp_platform = ccp->dev_specific;
+   struct acpi_device *adev = ACPI_COMPANION(ccp->dev);
+   acpi_handle handle;
+   acpi_status status;
+   unsigned long long data;
+   int cca;
+
+   /* Retrieve the device cache coherency value */
+   handle = adev->handle;
+   do {
+   status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+   if (!ACPI_FAILURE(status)) {
+   cca = data;
+   break;
+   }
+   } while (!ACPI_FAILURE(status));
+
+   if (ACPI_FAILURE(status)) {
+   dev_err(ccp->dev, "error obtaining acpi coherency value\n");
+   return -EINVAL;
+   }
+
+   ccp_platform->coherent = !!cca;
+
+   return 0;
+}
+#else  /* CONFIG_ACPI */
+static int ccp_acpi_support(struct ccp_device *ccp)
+{
+   return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_OF
+static int ccp_of_support(struct ccp_device *ccp)
+{
+   struct ccp_platform *ccp_platform = ccp->dev_specific;
+
+   ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node);
+
+   return 0;
+}
+#else
+static int ccp_of_support(struct ccp_device *ccp)
+{
+   return -EINVAL;
+}
+#endif
+
 static int ccp_platform_probe(struct platform_device *pdev)
 {
struct ccp_device *ccp;
+   struct ccp_platform *ccp_platform;
struct device *dev = &pdev->dev;
+   struct acpi_device *adev = ACPI_COMPANION(dev);
struct resource *ior;
int ret;
 
@@ -95,10 +156,16 @@ static int ccp_platform_probe(struct platform_device *pdev)
if (!ccp)
goto e_err;
 
-   ccp->dev_specific = NULL;
+   ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
+   if (!ccp_platform)
+   goto e_err;
+
+   ccp->dev_specific = ccp_platform;
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
 
+   ccp_platform->use_acpi = (!adev || acpi_disabled) ? 0 : 1;
+
ior = ccp_find_mmio_area(ccp);
ccp->io_map = devm_ioremap_resource(dev, ior);
if (IS_ERR(ccp->io_map)) {
@@ -115,7 +182,14 @@ static int ccp_platform_probe(struct platform_device *pdev)
goto e_err;
}
 
-   if (of_property_read_bool(dev->of_node, "dma-coherent"))
+   if (ccp_platform->use_acpi)
+   ret = ccp_acpi_support(ccp);
+   else
+   ret = ccp_of_support(ccp);
+   if (ret)
+   goto e_err;
+
+   if (ccp_platform->coherent)
ccp->axcache = CACHE_WB_NO_ALLOC;
else
ccp->axcache = CACHE_NONE;
@@ -197,15 +271,29 @@ static int ccp_platform_resume(struct platform_device *pdev)
 }
 #endif
 
-static const struct of_device_id ccp_platform_ids[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ccp_acpi_match[] = {
+   { "AMDI0C00", 0 },
+   { },
+};
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id ccp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a" },
{ },
 };
+#endif
 
 static struct platform_driver ccp_platform_driver = {
.driver = {
.name = "AMD Cryptographic Coprocessor",
-   .of_match_table = ccp_platform_ids,
+#ifdef CONFIG_ACPI
+   .acpi_match_table = ccp_acpi_match,
+#endif
+#ifdef CONF

Re: [PATCH] crypto: ccp: terminate ccp_support array with empty element

2015-01-23 Thread Tom Lendacky

On 01/21/2015 09:06 AM, Andrey Ryabinin wrote:

x86_match_cpu() expects an array of x86_cpu_ids terminated
with an empty element.

Signed-off-by: Andrey Ryabinin 


Acked-by: Tom Lendacky 


---
  drivers/crypto/ccp/ccp-dev.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c6e6171..ca29c12 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -583,6 +583,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
  #ifdef CONFIG_X86
  static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
+   { },
  };
  #endif





Asynchronous usage of PCOMPRESS

2014-11-10 Thread Tom Lendacky

Hi Herbert,

Is the PCOMPRESS algorithm supposed to support asynchronous
implementations? In other words, are callers expected to handle the
-EINPROGRESS or -EAGAIN return codes that can be returned by an
asynchronous implementation?

Or is it assumed that if the CRYPTO_ALG_ASYNC flag is not set then the
code path must be synchronous?  If that's the case, should the pcompress
support be updated to look for synchronous implementations if the
CRYPTO_ALG_ASYNC flag isn't set or should asynchronous implementations
provide a synchronous fallback?

Thanks,
Tom


Re: [PATCH v2 01/11] crypto: Documentation - crypto API high level spec

2014-11-03 Thread Tom Lendacky

On 11/03/2014 08:49 AM, Herbert Xu wrote:

On Mon, Nov 03, 2014 at 03:18:29PM +0100, Stephan Mueller wrote:



+ * CRYPTO_ALG_TYPE_DIGEST  Raw message digest
+ * CRYPTO_ALG_TYPE_HASH    Alias for CRYPTO_ALG_TYPE_DIGEST
+ * CRYPTO_ALG_TYPE_SHASH   Synchronous multi-block hash
+ * CRYPTO_ALG_TYPE_AHASH   Asynchronous multi-block hash
+ * CRYPTO_ALG_TYPE_RNG Random Number Generation
+ * CRYPTO_ALG_TYPE_PCOMPRESS


What's that last one?


Same here.


pcompress is an enhanced version of compress allowing for piece-meal
compression/decompression rather than having to shove everything in
all at once.

Eventually pcompress should replace the compress interface once
everything is converted across.


Herbert, I was looking at adding async support for ALG_TYPE_COMPRESS
since the CCP device will support compression/decompression but only
as an all-at-once invocation.  Given what you're saying about
pcompress replacing compress, would this be something you'd even
consider though?

Thanks,
Tom



Thanks,




Re: [PATCH] crypto: ccp - Check for CCP before registering crypto algs

2014-09-22 Thread Tom Lendacky

On 09/15/2014 06:47 AM, Herbert Xu wrote:

On Fri, Sep 05, 2014 at 11:49:38PM +, Scot Doyle wrote:


On Fri, 5 Sep 2014, Tom Lendacky wrote:


If the ccp driver is built into the kernel, then ccp-crypto (whether
built as a module or built in) will be able to load, and it will
register its crypto algorithms.  If the system does not have a CCP,
this will result in -ENODEV being returned whenever the registered
crypto algorithms attempt to queue a command.

Add an API, ccp_present(), that checks for the presence of a CCP
on the system.  The ccp-crypto module can use this to determine if it
should register its crypto algorithms.

Reported-by: Scot Doyle 
Signed-off-by: Tom Lendacky 


Tested-by: Scot Doyle 


Patch applied.  Thanks!



Hi Herbert,

Can you push this patch into the 3.17 release?

Also, it should probably go into the stable releases.  Is this
something that you request or should I take care of that?

Thanks,
Tom


[PATCH] crypto: ccp - Check for CCP before registering crypto algs

2014-09-05 Thread Tom Lendacky
If the ccp driver is built into the kernel, then ccp-crypto (whether
built as a module or built in) will be able to load, and it will
register its crypto algorithms.  If the system does not have a CCP,
this will result in -ENODEV being returned whenever the registered
crypto algorithms attempt to queue a command.

Add an API, ccp_present(), that checks for the presence of a CCP
on the system.  The ccp-crypto module can use this to determine if it
should register its crypto algorithms.

Reported-by: Scot Doyle 
Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |4 
 drivers/crypto/ccp/ccp-dev.c |   14 ++
 include/linux/ccp.h  |   12 
 3 files changed, 30 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 20dc848..4d4e016 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void)
 {
int ret;
 
+   ret = ccp_present();
+   if (ret)
+   return ret;
+
spin_lock_init(&req_queue_lock);
INIT_LIST_HEAD(&req_queue.cmds);
req_queue.backlog = &req_queue.cmds;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index a7d1106..c6e6171 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -55,6 +55,20 @@ static inline void ccp_del_device(struct ccp_device *ccp)
 }
 
 /**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+   if (ccp_get_device())
+   return 0;
+
+   return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
+/**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
  * @cmd: ccp_cmd struct to be processed
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index ebcc9d1..7f43703 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -27,6 +27,13 @@ struct ccp_cmd;
defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
 
 /**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
+/**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
  * @cmd: ccp_cmd struct to be processed
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
 #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
 
+static inline int ccp_present(void)
+{
+   return -ENODEV;
+}
+
 static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
return -ENODEV;



Re: AF_ALG inadvertently disabled

2014-09-05 Thread Tom Lendacky

On 09/04/2014 07:43 PM, Scot Doyle wrote:

On a laptop without AMD's CCP, compiling 3.17-rc3 with
   # CONFIG_MODULES is not set
   CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
   CONFIG_CRYPTO_DEV_CCP=y
   CONFIG_CRYPTO_DEV_CCP_DD=y
   # CONFIG_CRYPTO_DEV_CCP_CRYPTO is not set
the strace from a test program is
   socket(PF_ALG, SOCK_SEQPACKET, 0)   = 3
   bind(3, {sa_family=AF_ALG, sa_data="skcipher\0\0\0\0\0\0"}, 88) = 0
   setsockopt(3, 0x117 /* SOL_?? */, 1, "n) 
\21\220\25-\364\356\5\2019\336\366\20\273", 16) = 0
   accept(3, 0, NULL)  = 4
   sendmsg(4, {msg_name(0)=NULL, 
msg_iov(1)=[{"\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27"...,
 512}], msg_controllen=64, {cmsg_len=20, cmsg_level=0x117 /* SOL_??? */, cmsg_type=, 
...}, msg_flags=0}, 0) = 512
   read(4, 
"\322\322\22\25\3\3159\2052Q\356\256lA<\336\245\230a\36!\343\366\26=J\231\254\211x>G"...,
 512) = 512


However, when compiling with
   # CONFIG_MODULES is not set
   CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
   CONFIG_CRYPTO_DEV_CCP=y
   CONFIG_CRYPTO_DEV_CCP_DD=y
   CONFIG_CRYPTO_DEV_CCP_CRYPTO=y
the strace from the same test program is
   socket(PF_ALG, SOCK_SEQPACKET, 0)   = 3
   bind(3, {sa_family=AF_ALG, sa_data="skcipher\0\0\0\0\0\0"}, 88) = 0
   setsockopt(3, 0x117 /* SOL_?? */, 1, "n) 
\21\220\25-\364\356\5\2019\336\366\20\273", 16) = 0
   accept(3, 0, NULL)  = 4
   sendmsg(4, {msg_name(0)=NULL, 
msg_iov(1)=[{"\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27"...,
 512}], msg_controllen=64, {cmsg_len=20, cmsg_level=0x117 /* SOL_??? */, cmsg_type=, 
...}, msg_flags=0}, 0) = 512
   read(4, 0x1f48000, 512) = -1 ENODEV (No such device)



Because ccp-crypto isn't built as a module, it will register the
algorithms even if a CCP device isn't there. I'll work up a patch
that checks for the presence of the CCP and only register the
algorithms if a CCP is there.

Thanks,
Tom



cryptsetup exhibits the same behavior as the test program.




[PATCH] crypto: ccp - Do not sign extend input data to CCP

2014-07-30 Thread Tom Lendacky
The CCP hardware interprets all numbers as unsigned numbers, therefore
sign extending input data is not valid.  Modify the function calls
for RSA and ECC so that they do not perform sign extension.
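
A hedged illustration of the difference (hypothetical 2-byte operand
widened to 4 bytes):

    u8 op[2] = { 0x80, 0x01 }; /* big-endian 0x8001, top bit set */
    /* sign extended:  ff ff 80 01  (a different, wrong unsigned value) */
    /* zero extended:  00 00 80 01  (the value the CCP should see) */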

This patch is based on the cryptodev-2.6 kernel tree.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-ops.c |   26 +-
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 9ae006d..8729364 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_ksb;
 
ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
-   true);
+   false);
ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
  CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_exp;
 
ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
-   true);
+   false);
src.address += o_len;   /* Adjust the address for the copy operation */
ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
-   true);
+   false);
src.address -= o_len;   /* Reset the address to original value */
 
/* Prepare the output area for the operation */
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
/* Copy the ECC modulus */
ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Copy the first operand */
ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
ecc->u.mm.operand_1_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
/* Copy the second operand */
ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
ecc->u.mm.operand_2_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
}
 
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
/* Copy the ECC modulus */
ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Copy the first point X and Y coordinate */
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
ecc->u.pm.point_1.x_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
ecc->u.pm.point_1.y_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Set the first point Z coordianate to 1 */
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Copy the second point X and Y coordinate */
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
ecc->u.pm.point_2.x_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
ecc->u.pm.point_2.y_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Set the second point Z coordianate to 1 */
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Copy the Domain "a&q

[PATCH] crypto: ccp - Remove "select OF" from Kconfig

2014-07-23 Thread Tom Lendacky
The addition of the "select OF if ARM64" has led to a Kconfig
recursive dependency error when "make ARCH=sh rsk7269_defconfig"
was run.  Since OF is selected by ARM64 and the of_property_read_bool
is defined no matter what, delete the Kconfig line that selects OF.

Reported-by: kbuild test robot 
Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/Kconfig |1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 474382d..7639ffc 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,7 +3,6 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
-   select OF if ARM64
help
  Provides the interface to use the AMD Cryptographic Coprocessor
  which can be used to accelerate or offload encryption operations



[PATCH] crypto: ccp - Base AXI DMA cache settings on device tree

2014-07-10 Thread Tom Lendacky
The default cache operations for ARM64 were changed during 3.15.
To use coherent operations a "dma-coherent" device tree property
is required.  If that property is not present in the device tree
node, then the non-coherent operations are assigned to the device.

Add support to the ccp driver to assign the AXI DMA cache settings
based on whether the "dma-coherent" property is present in the device
node.  If present, use settings that work with the caches.  If not
present, use settings that do not look at the caches.

Signed-off-by: Tom Lendacky 
---
 .../devicetree/bindings/crypto/amd-ccp.txt |3 +++
 drivers/crypto/ccp/Kconfig |1 +
 drivers/crypto/ccp/ccp-dev.c   |2 +-
 drivers/crypto/ccp/ccp-dev.h   |4 
 drivers/crypto/ccp/ccp-platform.c  |6 ++
 5 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
index 6e0b11a..8c61183 100644
--- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -7,6 +7,9 @@ Required properties:
   that services interrupts for this device
 - interrupts: Should contain the CCP interrupt
 
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+
 Example:
ccp@e0100000 {
compatible = "amd,ccp-seattle-v1a";
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 7639ffc..474382d 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,6 +3,7 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
+   select OF if ARM64
help
  Provides the interface to use the AMD Cryptographic Coprocessor
  which can be used to accelerate or offload encryption operations
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index fa1ab10..a7d1106 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -364,7 +364,7 @@ int ccp_init(struct ccp_device *ccp)
 
 #ifdef CONFIG_ARM64
/* For arm64 set the recommended queue cache settings */
-   iowrite32(CACHE_WB_NO_ALLOC, ccp->io_regs + CMD_Q_CACHE_BASE +
+   iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
  (CMD_Q_CACHE_INC * i));
 #endif
 
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 1c5651b..62ff35a 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -30,6 +30,7 @@
 
 #define TRNG_RETRIES   10
 
+#define CACHE_NONE 0x00
 #define CACHE_WB_NO_ALLOC  0xb7
 
 
@@ -255,6 +256,9 @@ struct ccp_device {
/* Suspend support */
unsigned int suspending;
wait_queue_head_t suspend_queue;
+
+   /* DMA caching attribute support */
+   unsigned int axcache;
 };
 
 
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 65e5829..b0a2806 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "ccp-dev.h"
 
@@ -112,6 +113,11 @@ static int ccp_platform_probe(struct platform_device *pdev)
*(dev->dma_mask) = DMA_BIT_MASK(48);
dev->coherent_dma_mask = DMA_BIT_MASK(48);
 
+   if (of_property_read_bool(dev->of_node, "dma-coherent"))
+   ccp->axcache = CACHE_WB_NO_ALLOC;
+   else
+   ccp->axcache = CACHE_NONE;
+
dev_set_drvdata(dev, ccp);
 
ret = ccp_init(ccp);



[PATCH V1 1/3] crypto: ccp - Modify PCI support in prep for arm64 support

2014-06-05 Thread Tom Lendacky
Modify the PCI device support in prep for supporting the
CCP as a platform device for arm64.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-dev.h |3 +--
 drivers/crypto/ccp/ccp-pci.c |   39 ++-
 2 files changed, 15 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7ec536e..72bf153 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -23,8 +23,6 @@
 #include 
 
 
-#define IO_OFFSET  0x20000
-
 #define MAX_DMAPOOL_NAME_LEN   32
 
 #define MAX_HW_QUEUES  5
@@ -194,6 +192,7 @@ struct ccp_device {
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
+   unsigned int irq;
 
/*
 * I/O area used for device communication. The register mapping
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 0d74623..180cc87 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -12,8 +12,10 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -24,6 +26,8 @@
 #include "ccp-dev.h"
 
 #define IO_BAR 2
+#define IO_OFFSET  0x20000
+
 #define MSIX_VECTORS   2
 
 struct ccp_msix {
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
if (ret)
return ret;
 
-   ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+   ccp->irq = pdev->irq;
+   ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 dev);
pci_disable_msix(pdev);
} else {
-   free_irq(pdev->irq, dev);
+   free_irq(ccp->irq, dev);
pci_disable_msi(pdev);
}
 }
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
resource_size_t io_len;
unsigned long io_flags;
-   int bar;
 
io_flags = pci_resource_flags(pdev, IO_BAR);
io_len = pci_resource_len(pdev, IO_BAR);
if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
return IO_BAR;
 
-   for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
-   io_flags = pci_resource_flags(pdev, bar);
-   io_len = pci_resource_len(pdev, bar);
-   if ((io_flags & IORESOURCE_MEM) &&
-   (io_len >= (IO_OFFSET + 0x800)))
-   return bar;
-   }
-
return -EIO;
 }
 
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
ccp->io_regs = ccp->io_map + IO_OFFSET;
 
-   ret = dma_set_mask(dev, DMA_BIT_MASK(48));
-   if (ret == 0) {
-   ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+   ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+   if (ret) {
+   ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
-   dev_err(dev,
-   "pci_set_consistent_dma_mask failed (%d)\n",
+   dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
-   goto e_bar0;
-   }
-   } else {
-   ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-   if (ret) {
-   dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
-   goto e_bar0;
+   goto e_iomap;
}
}
 
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
ret = ccp_init(ccp);
if (ret)
-   goto e_bar0;
+   goto e_iomap;
 
dev_notice(dev, "enabled\n");
 
return 0;
 
-e_bar0:
+e_iomap:
pci_iounmap(pdev, ccp->io_map);
 
 e_device:



[PATCH V1 3/3] crypto: ccp - Add platform device support for arm64

2014-06-05 Thread Tom Lendacky
Add support for the CCP on arm64 as a platform device.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/Kconfig|2 
 drivers/crypto/ccp/Makefile   |5 +
 drivers/crypto/ccp/ccp-dev.c  |   34 ++
 drivers/crypto/ccp/ccp-dev.h  |7 +
 drivers/crypto/ccp/ccp-platform.c |  224 +
 5 files changed, 270 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp-platform.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f066fa2..09ae35c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
-   depends on X86 && PCI
+   depends on (X86 && PCI) || ARM64
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index d3505a0..7f592d8 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,6 +1,11 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o ccp-ops.o
+ifdef CONFIG_X86
 ccp-objs += ccp-pci.o
+endif
+ifdef CONFIG_ARM64
+ccp-objs += ccp-platform.o
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2c78161..fa1ab10 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -20,7 +20,9 @@
 #include 
 #include 
 #include 
+#ifdef CONFIG_X86
 #include 
+#endif
 #include 
 
 #include "ccp-dev.h"
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp)
/* Build queue interrupt mask (two interrupts per queue) */
qim |= cmd_q->int_ok | cmd_q->int_err;
 
+#ifdef CONFIG_ARM64
+   /* For arm64 set the recommended queue cache settings */
+   iowrite32(CACHE_WB_NO_ALLOC, ccp->io_regs + CMD_Q_CACHE_BASE +
+ (CMD_Q_CACHE_INC * i));
+#endif
+
dev_dbg(dev, "queue #%u available\n", i);
}
if (ccp->cmd_q_count == 0) {
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 }
 #endif
 
+#ifdef CONFIG_X86
 static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
 };
+#endif
 
 static int __init ccp_mod_init(void)
 {
+#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
 
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void)
 
break;
}
+#endif
+
+#ifdef CONFIG_ARM64
+   int ret;
+
+   ret = ccp_platform_init();
+   if (ret)
+   return ret;
+
+   /* Don't leave the driver loaded if init failed */
+   if (!ccp_get_device()) {
+   ccp_platform_exit();
+   return -ENODEV;
+   }
+
+   return 0;
+#endif
 
return -ENODEV;
 }
 
 static void __exit ccp_mod_exit(void)
 {
+#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
 
switch (cpuinfo->x86) {
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void)
ccp_pci_exit();
break;
}
+#endif
+
+#ifdef CONFIG_ARM64
+   ccp_platform_exit();
+#endif
 }
 
 module_init(ccp_mod_init);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 72bf153..1c5651b 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -30,6 +30,8 @@
 
 #define TRNG_RETRIES   10
 
+#define CACHE_WB_NO_ALLOC  0xb7
+
 
 /** Register Mappings **/
 #define Q_MASK_REG 0x000
@@ -48,7 +50,7 @@
 #define CMD_Q_INT_STATUS_BASE  0x214
 #define CMD_Q_STATUS_INCR  0x20
 
-#define CMD_Q_CACHE            0x228
+#define CMD_Q_CACHE_BASE       0x228
 #define CMD_Q_CACHE_INC        0x20

 #define CMD_Q_ERROR(__qs)      ((__qs) & 0x0000003f);
@@ -259,6 +261,9 @@ struct ccp_device {
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
+int ccp_platform_init(void);
+void ccp_platform_exit(void);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 int ccp_init(struct ccp_device *ccp);
 void ccp_destroy(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
new file mode 100644
index 000..65e5829
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -0,0 +1,224 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#includ

[PATCH V1 0/3] crypto: ccp - arm64 platform support

2014-06-05 Thread Tom Lendacky
The following series implements support for the CCP as a platform
driver on ARM64.

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (3):
  crypto: ccp - Modify PCI support in prep for arm64 support
  crypto: ccp - CCP device bindings documentation
  crypto: ccp - Add platform device support for arm64


 .../devicetree/bindings/crypto/amd-ccp.txt |   16 +
 drivers/crypto/Kconfig |2 
 drivers/crypto/ccp/Makefile|5 
 drivers/crypto/ccp/ccp-dev.c   |   34 +++
 drivers/crypto/ccp/ccp-dev.h   |   10 +
 drivers/crypto/ccp/ccp-pci.c   |   39 +--
 drivers/crypto/ccp/ccp-platform.c  |  224 
 7 files changed, 301 insertions(+), 29 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/crypto/amd-ccp.txt
 create mode 100644 drivers/crypto/ccp/ccp-platform.c

-- 
Tom Lendacky


[PATCH V1 2/3] crypto: ccp - CCP device bindings documentation

2014-06-05 Thread Tom Lendacky
This patch provides the documentation of the device bindings
for the AMD Cryptographic Coprocessor driver.

Signed-off-by: Tom Lendacky 
---
 .../devicetree/bindings/crypto/amd-ccp.txt |   16 
 1 file changed, 16 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/crypto/amd-ccp.txt

diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
new file mode 100644
index 000..6e0b11a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -0,0 +1,16 @@
+* AMD Cryptographic Coprocessor driver (ccp)
+
+Required properties:
+- compatible: Should be "amd,ccp-seattle-v1a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CCP interrupt
+
+Example:
+   ccp@e010 {
+   compatible = "amd,ccp-seattle-v1a";
+   reg = <0 0xe010 0 0x1>;
+   interrupt-parent = <&gic>;
+   interrupts = <0 3 4>;
+   };



[PATCH 2/3] crypto: ccp - Invoke context callback when there is a backlog error

2014-02-24 Thread Tom Lendacky
Invoke the callback routine associated with the crypto context
if an error is encountered sending the command to the CCP during
backlog processing.  This is needed to free any resources used
by the command.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c 
b/drivers/crypto/ccp/ccp-crypto-main.c
index 9d30d6f..7d98635 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -183,6 +183,9 @@ static void ccp_crypto_complete(void *data, int err)
break;
 
/* Error occurred, report it and get the next entry */
+   ctx = crypto_tfm_ctx(held->req->tfm);
+   if (ctx->complete)
+   ret = ctx->complete(held->req, ret);
held->req->complete(held->req, ret);
 
next = ccp_crypto_cmd_complete(held, &backlog);




[PATCH 3/3] crypto: ccp - Account for CCP backlog processing

2014-02-24 Thread Tom Lendacky
When the crypto layer is able to queue up a command for processing
by the CCP on the initial call to ccp_crypto_enqueue_request and
the CCP returns -EBUSY, then, if the backlog flag is not set, the
command must be freed and not added to the active command list.
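
To make the -EBUSY semantics concrete, here is a sketch of what a
caller of the CCP sees after this change; ccp_enqueue_cmd and
CCP_CMD_MAY_BACKLOG are the driver's own names, the surrounding flow
is illustrative only:

    ret = ccp_enqueue_cmd(cmd);
    switch (ret) {
    case -EINPROGRESS:      /* accepted; completion callback will run */
            break;
    case -EBUSY:
            if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                    break;  /* held on the backlog; callback will run */
            kfree(cmd);     /* queue full, no backlog: cmd was rejected */
            break;
    default:                /* immediate error; cmd was never queued */
            kfree(cmd);
    }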

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |   18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 7d98635..20dc848 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -205,6 +205,7 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 {
struct ccp_crypto_cmd *active = NULL, *tmp;
unsigned long flags;
+   bool free_cmd = true;
int ret;
 
spin_lock_irqsave(&req_queue_lock, flags);
@@ -231,7 +232,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
if (!active) {
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
-   goto e_lock;
+   goto e_lock;/* Error, don't queue it */
+   if ((ret == -EBUSY) &&
+   !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+   goto e_lock;/* Not backlogging, don't queue it */
}
 
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
@@ -244,9 +248,14 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
req_queue.cmd_count++;
list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
 
+   free_cmd = false;
+
 e_lock:
spin_unlock_irqrestore(&req_queue_lock, flags);
 
+   if (free_cmd)
+   kfree(crypto_cmd);
+
return ret;
 }
 
@@ -262,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 {
struct ccp_crypto_cmd *crypto_cmd;
gfp_t gfp;
-   int ret;
 
gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 
@@ -287,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
else
cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
 
-   ret = ccp_crypto_enqueue_cmd(crypto_cmd);
-   if (!ccp_crypto_success(ret))
-   kfree(crypto_cmd);
-
-   return ret;
+   return ccp_crypto_enqueue_cmd(crypto_cmd);
 }
 
 struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,




[PATCH 1/3] crypto: ccp - Prevent a possible lost CCP command request

2014-02-24 Thread Tom Lendacky
If a CCP command has been queued for processing at the
crypto layer then, when dequeueing it for processing, the
"can backlog" flag must be set so that the request isn't
lost if the CCP backlog queue limit is reached.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 010fded..9d30d6f 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -174,6 +174,10 @@ static void ccp_crypto_complete(void *data, int err)
 
/* Submit the next cmd */
while (held) {
+   /* Since we have already queued the cmd, we must indicate that
+* we can backlog so as not to "lose" this request.
+*/
+   held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
ret = ccp_enqueue_cmd(held->cmd);
if (ccp_crypto_success(ret))
break;




[PATCH 0/3] crypto: ccp - minor code fixes

2014-02-24 Thread Tom Lendacky
The following series implements some fixes to some code paths executed
during crypto API request processing.  These fixes address processing of
requests when the CCP driver returns -EBUSY and freeing memory in error
paths.

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (3):
  crypto: ccp - Prevent a possible lost CCP command request
  crypto: ccp - Invoke context callback when there is a backlog error
  crypto: ccp - Account for CCP backlog processing


 drivers/crypto/ccp/ccp-crypto-main.c |   25 ++---
 1 file changed, 18 insertions(+), 7 deletions(-)

-- 
Tom Lendacky



[PATCH 2/4] crypto: ccp - Move HMAC calculation down to ccp ops file

2014-01-24 Thread Tom Lendacky
Move the support to perform an HMAC calculation into
the CCP operations file.  This eliminates the need to
perform a synchronous SHA operation used to calculate
the HMAC.
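
For reference, the HMAC construction being moved is the standard
H((K ^ opad) || H((K ^ ipad) || m)) scheme; a minimal sketch, assuming
the key has already been zero-padded to block_size bytes:

    for (i = 0; i < block_size; i++) {
            ipad[i] = key[i] ^ 0x36;        /* inner pad */
            opad[i] = key[i] ^ 0x5c;        /* outer pad */
    }
    /* pass 1 hashes ipad || message across the update calls; the final
     * op hashes opad || inner-digest, which is what the new opad and
     * opad_len command fields below let the CCP ops layer do itself */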

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-sha.c |  130 +++
 drivers/crypto/ccp/ccp-crypto.h |8 +-
 drivers/crypto/ccp/ccp-ops.c|  104 
 include/linux/ccp.h |7 ++
 4 files changed, 139 insertions(+), 110 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 3867290..873f234 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -24,75 +24,10 @@
 #include "ccp-crypto.h"
 
 
-struct ccp_sha_result {
-   struct completion completion;
-   int err;
-};
-
-static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
-{
-   struct ccp_sha_result *result = req->data;
-
-   if (err == -EINPROGRESS)
-   return;
-
-   result->err = err;
-   complete(&result->completion);
-}
-
-static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
-struct scatterlist *sg, unsigned int len)
-{
-   struct ccp_sha_result result;
-   struct ahash_request *req;
-   int ret;
-
-   init_completion(&result.completion);
-
-   req = ahash_request_alloc(tfm, GFP_KERNEL);
-   if (!req)
-   return -ENOMEM;
-
-   ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-  ccp_sync_hash_complete, &result);
-   ahash_request_set_crypt(req, sg, buf, len);
-
-   ret = crypto_ahash_digest(req);
-   if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
-   ret = wait_for_completion_interruptible(&result.completion);
-   if (!ret)
-   ret = result.err;
-   }
-
-   ahash_request_free(req);
-
-   return ret;
-}
-
-static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
-{
-   struct ahash_request *req = ahash_request_cast(async_req);
-   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
-   struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   struct scatterlist sg[2];
-   unsigned int block_size =
-   crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-   unsigned int digest_size = crypto_ahash_digestsize(tfm);
-
-   sg_init_table(sg, ARRAY_SIZE(sg));
-   sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
-   sg_set_buf(&sg[1], rctx->ctx, digest_size);
-
-   return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
-block_size + digest_size);
-}
-
 static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 {
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
unsigned int digest_size = crypto_ahash_digestsize(tfm);
 
@@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
if (req->result)
memcpy(req->result, rctx->ctx, digest_size);
 
-   /* If we're doing an HMAC, we need to perform that on the final op */
-   if (rctx->final && ctx->u.sha.key_len)
-   ret = ccp_sha_finish_hmac(async_req);
-
 e_free:
sg_free_table(&rctx->data_sg);
 
@@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 unsigned int final)
 {
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist *sg;
unsigned int block_size =
@@ -196,6 +128,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
rctx->cmd.u.sha.src = sg;
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
+   rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
+   &ctx->u.sha.opad_sg : NULL;
+   rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
+   ctx->u.sha.opad_count : 0;
+   rctx->cmd.u.sha.first = rctx->first;
rctx->cmd.u.sha.final = rctx->final;
rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
 
@@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req)
 
memset(rctx, 0, sizeof(*rctx));
 
-   memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
rctx->type = alg->type;
rctx->first = 1;
 
@@ -261,10 +197,13 @@ static int ccp_sh

[PATCH 0/4] crypto: ccp - selective algorithm registration and ipsec-related fixes

2014-01-24 Thread Tom Lendacky
Patch 1: Allow for selectively disabling the registration of an algorithm
family (sha or aes algorithms) via module parameters.

Patch 2-4: Fix errors/issues that were found during IPSec testing. In
order to prevent deadlocks with the networking code, the crypto callback
was changed to run as a tasklet.  In order for the callback to be run as
a tasklet, the HMAC calculation needed to be moved out of the callback
path (since it sleeps) and into the CCP sha operation logic.  Additionally,
trying to allow concurrency within the tfm while maintaining per-CPU
serialization of the tfm was not working properly, so a single queue is
used now.

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (4):
  crypto: ccp - Allow for selective disablement of crypto API algorithms
  crypto: ccp - Move HMAC calculation down to ccp ops file
  crypto: ccp - Use a single queue for proper ordering of tfm requests
  crypto: ccp - Perform completion callbacks using a tasklet


 drivers/crypto/ccp/ccp-crypto-main.c |  201 --
 drivers/crypto/ccp/ccp-crypto-sha.c  |  130 --
 drivers/crypto/ccp/ccp-crypto.h  |8 +
 drivers/crypto/ccp/ccp-dev.c |   21 +++-
 drivers/crypto/ccp/ccp-ops.c |  104 +-
 include/linux/ccp.h  |7 +
 6 files changed, 229 insertions(+), 242 deletions(-)

-- 
Tom Lendacky



[PATCH 4/4] crypto: ccp - Perform completion callbacks using a tasklet

2014-01-24 Thread Tom Lendacky
Change from scheduling work to scheduling a tasklet to perform
the callback operations.
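
For context, the tasklet pattern adopted below, condensed (the
unsigned long argument is the tasklet calling convention of this era;
the callback runs in softirq context and therefore must not sleep):

    tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);
    ...
    tdata.cmd = cmd;
    init_completion(&tdata.completion);
    tasklet_schedule(&tasklet);              /* callback runs in softirq */
    wait_for_completion(&tdata.completion);  /* don't reuse tdata until
                                              * the tasklet has run */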

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-dev.c |   21 +
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c3bc212..2c78161 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -30,6 +30,11 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
 MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
 
+struct ccp_tasklet_data {
+   struct completion completion;
+   struct ccp_cmd *cmd;
+};
+
 
 static struct ccp_device *ccp_dev;
 static inline struct ccp_device *ccp_get_device(void)
@@ -192,17 +197,23 @@ static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
return cmd;
 }
 
-static void ccp_do_cmd_complete(struct work_struct *work)
+static void ccp_do_cmd_complete(unsigned long data)
 {
-   struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+   struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
+   struct ccp_cmd *cmd = tdata->cmd;
 
cmd->callback(cmd->data, cmd->ret);
+   complete(&tdata->completion);
 }
 
 static int ccp_cmd_queue_thread(void *data)
 {
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
+   struct ccp_tasklet_data tdata;
+   struct tasklet_struct tasklet;
+
+   tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);
 
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
@@ -220,8 +231,10 @@ static int ccp_cmd_queue_thread(void *data)
cmd->ret = ccp_run_cmd(cmd_q, cmd);
 
/* Schedule the completion callback */
-   INIT_WORK(&cmd->work, ccp_do_cmd_complete);
-   schedule_work(&cmd->work);
+   tdata.cmd = cmd;
+   init_completion(&tdata.completion);
+   tasklet_schedule(&tasklet);
+   wait_for_completion(&tdata.completion);
}
 
__set_current_state(TASK_RUNNING);




[PATCH 3/4] crypto: ccp - Use a single queue for proper ordering of tfm requests

2014-01-24 Thread Tom Lendacky
Move to a single queue to serialize requests within a tfm. When
testing using IPSec with a large number of network connections,
the per-cpu tfm queuing logic was not working properly.
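
A condensed sketch of the discipline this moves to: one global list
guarded by a single irq-safe spinlock (names as in the diff below):

    spin_lock_irqsave(&req_queue_lock, flags);
    list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
    req_queue.cmd_count++;
    spin_unlock_irqrestore(&req_queue_lock, flags);

Every producer and consumer takes the same lock, so requests for a tfm
complete in the order they were queued, regardless of the CPU they
arrived on.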

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |  164 ++
 1 file changed, 48 insertions(+), 116 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index b3f22b0..010fded 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -38,23 +38,20 @@ MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
 
-/* For any tfm, requests for that tfm on the same CPU must be returned
- * in the order received.  With multiple queues available, the CCP can
- * process more than one cmd at a time.  Therefore we must maintain
- * a cmd list to insure the proper ordering of requests on a given tfm/cpu
- * combination.
+/* For any tfm, requests for that tfm must be returned in the order
+ * received.  With multiple queues available, the CCP can process more
+ * than one cmd at a time.  Therefore we must maintain a cmd list to insure
+ * the proper ordering of requests on a given tfm.
  */
-struct ccp_crypto_cpu_queue {
+struct ccp_crypto_queue {
struct list_head cmds;
struct list_head *backlog;
unsigned int cmd_count;
 };
-#define CCP_CRYPTO_MAX_QLEN    50
+#define CCP_CRYPTO_MAX_QLEN    100
 
-struct ccp_crypto_percpu_queue {
-   struct ccp_crypto_cpu_queue __percpu *cpu_queue;
-};
-static struct ccp_crypto_percpu_queue req_queue;
+static struct ccp_crypto_queue req_queue;
+static spinlock_t req_queue_lock;
 
 struct ccp_crypto_cmd {
struct list_head entry;
@@ -71,8 +68,6 @@ struct ccp_crypto_cmd {
 
/* Used for held command processing to determine state */
int ret;
-
-   int cpu;
 };
 
 struct ccp_crypto_cpu {
@@ -91,25 +86,21 @@ static inline bool ccp_crypto_success(int err)
return true;
 }
 
-/*
- * ccp_crypto_cmd_complete must be called while running on the appropriate
- * cpu and the caller must have done a get_cpu to disable preemption
- */
 static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
 {
-   struct ccp_crypto_cpu_queue *cpu_queue;
struct ccp_crypto_cmd *held = NULL, *tmp;
+   unsigned long flags;
 
*backlog = NULL;
 
-   cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+   spin_lock_irqsave(&req_queue_lock, flags);
 
/* Held cmds will be after the current cmd in the queue so start
 * searching for a cmd with a matching tfm for submission.
 */
tmp = crypto_cmd;
-   list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
+   list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
if (crypto_cmd->tfm != tmp->tfm)
continue;
held = tmp;
@@ -120,47 +111,45 @@ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
 *   Because cmds can be executed from any point in the cmd list
 *   special precautions have to be taken when handling the backlog.
 */
-   if (cpu_queue->backlog != &cpu_queue->cmds) {
+   if (req_queue.backlog != &req_queue.cmds) {
/* Skip over this cmd if it is the next backlog cmd */
-   if (cpu_queue->backlog == &crypto_cmd->entry)
-   cpu_queue->backlog = crypto_cmd->entry.next;
+   if (req_queue.backlog == &crypto_cmd->entry)
+   req_queue.backlog = crypto_cmd->entry.next;
 
-   *backlog = container_of(cpu_queue->backlog,
+   *backlog = container_of(req_queue.backlog,
struct ccp_crypto_cmd, entry);
-   cpu_queue->backlog = cpu_queue->backlog->next;
+   req_queue.backlog = req_queue.backlog->next;
 
/* Skip over this cmd if it is now the next backlog cmd */
-   if (cpu_queue->backlog == &crypto_cmd->entry)
-   cpu_queue->backlog = crypto_cmd->entry.next;
+   if (req_queue.backlog == &crypto_cmd->entry)
+   req_queue.backlog = crypto_cmd->entry.next;
}
 
/* Remove the cmd entry from the list of cmds */
-   cpu_queue->cmd_count--;
+   req_queue.cmd_count--;
list_del(&crypto_cmd->entry);
 
+   spin_unlock_irqrestore(&req_queue_lock, flags);
+
return held;
 }
 
-static void ccp_crypto_complete_on_cpu(struct work_struct *work)
+static void ccp_crypto_complete(void *data, int err)
 {
-   struct ccp_crypto_cpu *cpu_work =
-   container_of(work, struct ccp_crypto_

[PATCH 1/4] crypto: ccp - Allow for selective disablement of crypto API algorithms

2014-01-24 Thread Tom Lendacky
Introduce module parameters that allow for disabling of a
crypto algorithm by not registering the algorithm with the
crypto API.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |   37 +++---
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 2636f04..b3f22b0 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -11,6 +11,7 @@
  */
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -24,6 +25,14 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
 MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
 
+static unsigned int aes_disable;
+module_param(aes_disable, uint, 0444);
+MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");
+
+static unsigned int sha_disable;
+module_param(sha_disable, uint, 0444);
+MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
+
 
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
@@ -337,21 +346,25 @@ static int ccp_register_algs(void)
 {
int ret;
 
-   ret = ccp_register_aes_algs(&cipher_algs);
-   if (ret)
-   return ret;
+   if (!aes_disable) {
+   ret = ccp_register_aes_algs(&cipher_algs);
+   if (ret)
+   return ret;
 
-   ret = ccp_register_aes_cmac_algs(&hash_algs);
-   if (ret)
-   return ret;
+   ret = ccp_register_aes_cmac_algs(&hash_algs);
+   if (ret)
+   return ret;
 
-   ret = ccp_register_aes_xts_algs(&cipher_algs);
-   if (ret)
-   return ret;
+   ret = ccp_register_aes_xts_algs(&cipher_algs);
+   if (ret)
+   return ret;
+   }
 
-   ret = ccp_register_sha_algs(&hash_algs);
-   if (ret)
-   return ret;
+   if (!sha_disable) {
+   ret = ccp_register_sha_algs(&hash_algs);
+   if (ret)
+   return ret;
+   }
 
return 0;
 }




Re: Fix ccp_run_passthru_cmd dma variable assignments

2014-01-24 Thread Tom Lendacky
On 01/24/2014 12:39 PM, Dave Jones wrote:
> There are some suspicious looking lines of code in the new ccp driver, 
> including
> one that assigns a variable to itself, and another that overwrites a previous 
> assignment.
> 
> This may have been a cut-and-paste error where 'src' was forgotten to be 
> changed to 'dst'.
> I have no hardware to test this, so this is untested.

Yes, this was a cut-and-paste error that was not discovered with my tests. I've
updated my testcases and tested/verified this fix.

Herbert, this should probably go through the cryptodev-2.6 tree, right?

Acked-by: Tom Lendacky 

Thanks,
Tom

> 
> Signed-off-by: Dave Jones 
> 
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 71ed3ade7e12..c266a7b154bb 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -1666,8 +1666,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
>   
>   op.dst.type = CCP_MEMTYPE_SYSTEM;
>   op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
> - op.src.u.dma.offset = dst.sg_wa.sg_used;
> - op.src.u.dma.length = op.src.u.dma.length;
> + op.dst.u.dma.offset = dst.sg_wa.sg_used;
> + op.dst.u.dma.length = op.src.u.dma.length;
>   
>   ret = ccp_perform_passthru(&op);
>   if (ret) {
> 



Re: [PATCH 1/3] crypto: Fix the pointer voodoo in unaligned ahash

2014-01-14 Thread Tom Lendacky

On Tuesday, January 14, 2014 06:33:47 PM Marek Vasut wrote:
> Add documentation for the pointer voodoo that is happening in crypto/ahash.c
> in ahash_op_unaligned(). This code is quite confusing, so add a beefy chunk
> of documentation.
> 
> Moreover, make sure the mangled request is completely restored after finishing
> this unaligned operation. This means restoring all of .result, .priv, 
> .base.data
> and .base.complete .
> 
> Also, remove the crypto_completion_t complete = ... line present in the
> ahash_op_unaligned_done() function. This type actually declares a function
> pointer, which is very confusing.
> 
> Finally, yet very important nonetheless, make sure the req->priv is free()'d
> only after the original request is restored in ahash_op_unaligned_done().
> The req->priv data must not be free()'d before that in 
> ahash_op_unaligned_finish(),
> since we would be accessing previously free()'d data in 
> ahash_op_unaligned_done()
> and cause corruption.
> 
> Signed-off-by: Marek Vasut 
> Cc: David S. Miller 
> Cc: Fabio Estevam 
> Cc: Herbert Xu 
> Cc: Shawn Guo 
> Cc: Tom Lendacky 
> ---
>  crypto/ahash.c | 65 
> --
>  1 file changed, 54 insertions(+), 11 deletions(-)
> 
> diff --git a/crypto/ahash.c b/crypto/ahash.c
> index a92dc38..5ca8ede 100644
> --- a/crypto/ahash.c
> +++ b/crypto/ahash.c
> @@ -29,6 +29,7 @@
>  struct ahash_request_priv {
>   crypto_completion_t complete;
>   void *data;
> + void *priv;
>   u8 *result;
>   void *ubuf[] CRYPTO_MINALIGN_ATTR;
>  };
> @@ -200,23 +201,38 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
>   if (!err)
>   memcpy(priv->result, req->result,
>  crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
> -
> - kzfree(priv);

You can't move/remove this kzfree since a synchronous operation will not take
the ahash_op_unaligned_done path.  A synchronous operation will never return
-EINPROGRESS and the effect will be to never free the priv structure.

>  }
>  
> -static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
> +static void ahash_op_unaligned_done(struct crypto_async_request *areq, int err)
>  {
> - struct ahash_request *areq = req->data;
> - struct ahash_request_priv *priv = areq->priv;
> - crypto_completion_t complete = priv->complete;
> - void *data = priv->data;
> + struct ahash_request *req = areq->data;
> + struct ahash_request_priv *priv = req->priv;
> + struct crypto_async_request *data;
> +
> + /*
> +  * Restore the original request, see ahash_op_unaligned() for what
> +  * goes where.
> +  *
> +  * The "struct ahash_request *req" here is in fact the "req.base"
> +  * from the ADJUSTED request from ahash_op_unaligned(), thus as it
> +  * is a pointer to self, it is also the ADJUSTED "req" .
> +  */
> +
> + /* First copy req->result into req->priv.result */
> + ahash_op_unaligned_finish(req, err);

Given the above comment on the kzfree, you'll need to save all the priv
values as was done previously.

Thanks,
Tom

>  
> - ahash_op_unaligned_finish(areq, err);
> + /* Restore the original crypto request. */
> + req->result = priv->result;
> + req->base.complete = priv->complete;
> + req->base.data = priv->data;
> + req->priv = priv->priv;
>  
> - areq->base.complete = complete;
> - areq->base.data = data;
> + /* Free the req->priv.priv from the ADJUSTED request. */
> + kzfree(priv);
>  
> - complete(&areq->base, err);
> + /* Complete the ORIGINAL request. */
> + data = req->base.data;
> + req->base.complete(data, err);
>  }
>  
>  static int ahash_op_unaligned(struct ahash_request *req,
> @@ -234,9 +250,36 @@ static int ahash_op_unaligned(struct ahash_request *req,
>   if (!priv)
>   return -ENOMEM;
>  
> + /*
> +  * WARNING: Voodoo programming below!
> +  *
> +  * The code below is obscure and hard to understand, thus explanation
> +  * is necessary. See include/crypto/hash.h and include/linux/crypto.h
> +  * to understand the layout of structures used here!
> +  *
> +  * The code here will replace portions of the ORIGINAL request with
> +  * pointers to new code and buffers so the hashing operation can store
> +  * the result in aligned buffer. We will call the modified request
> +  * an ADJUSTED request.
> +  *

[PATCH 6/6] crypto: ccp - CCP device enabled/disabled changes

2014-01-06 Thread Tom Lendacky
The CCP cannot be hot-plugged, so it will either be there
or it won't.  Do not allow the driver to stay loaded if the
CCP does not successfully initialize.

Provide stub routines in the ccp.h file that return -ENODEV
if the CCP has not been configured in the build.
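
With the stubs in place a caller compiles the same way whether or not
the driver is configured; a hedged sketch (the fallback itself is
hypothetical):

    ret = ccp_enqueue_cmd(cmd);
    if (ret == -ENODEV) {
            /* CCP not built in or not present:
             * fall back to a software implementation */
    }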

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-dev.c |   15 ++-
 drivers/crypto/ccp/ccp-pci.c |3 +++
 include/linux/ccp.h  |   12 
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index b2038a7..c3bc212 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -552,6 +552,7 @@ static const struct x86_cpu_id ccp_support[] = {
 static int __init ccp_mod_init(void)
 {
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
+   int ret;
 
if (!x86_match_cpu(ccp_support))
return -ENODEV;
@@ -560,7 +561,19 @@ static int __init ccp_mod_init(void)
case 22:
if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
return -ENODEV;
-   return ccp_pci_init();
+
+   ret = ccp_pci_init();
+   if (ret)
+   return ret;
+
+   /* Don't leave the driver loaded if init failed */
+   if (!ccp_get_device()) {
+   ccp_pci_exit();
+   return -ENODEV;
+   }
+
+   return 0;
+
break;
}
 
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 1fbeaf1..11836b7 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -268,6 +268,9 @@ static void ccp_pci_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
 
+   if (!ccp)
+   return;
+
ccp_destroy(ccp);
 
pci_iounmap(pdev, ccp->io_map);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 12f1cfd..b941ab9 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -23,6 +23,9 @@
 struct ccp_device;
 struct ccp_cmd;
 
+#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
+   defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
@@ -48,6 +51,15 @@ struct ccp_cmd;
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
+#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+
+static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+{
+   return -ENODEV;
+}
+
+#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+
 
 /* AES engine */
 /**




[PATCH 1/6] crypto: ccp - Apply appropriate gfp_t type to memory allocations

2014-01-06 Thread Tom Lendacky
Fix some memory allocations to use the appropriate gfp_t type based
on the CRYPTO_TFM_REQ_MAY_SLEEP flag.
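
The selection rule being applied, as a standalone sketch:

    /* GFP_KERNEL may block; GFP_ATOMIC must be used when the
     * request is not allowed to sleep */
    gfp_t gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                    GFP_KERNEL : GFP_ATOMIC;

    ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);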

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |5 -
 drivers/crypto/ccp/ccp-crypto-sha.c  |5 -
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 64dd35e..398832c 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -61,6 +61,7 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int len, need_pad, sg_count;
+   gfp_t gfp;
int ret;
 
if (!ctx->u.aes.key_len)
@@ -99,7 +100,9 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
 * possible data pieces (buffer, input data, padding)
 */
sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
-   ret = sg_alloc_table(&rctx->data_sg, sg_count, GFP_KERNEL);
+   gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+   GFP_KERNEL : GFP_ATOMIC;
+   ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
if (ret)
return ret;
 
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b0881df..0571940 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -128,6 +128,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int len, sg_count;
+   gfp_t gfp;
int ret;
 
if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
@@ -156,7 +157,9 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 * possible data pieces (hmac ipad, buffer, input data)
 */
sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
-   ret = sg_alloc_table(&rctx->data_sg, sg_count, GFP_KERNEL);
+   gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+   GFP_KERNEL : GFP_ATOMIC;
+   ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
if (ret)
return ret;
 




[PATCH 4/6] crypto: ccp - Change data length declarations to u64

2014-01-06 Thread Tom Lendacky
When performing a hash operation, if data is already buffered and a
request at or near the maximum data length is received, then the length
calculation could wrap, causing an error in executing the hash operation.
Fix this by using a u64 type for the input and output data lengths in
all CCP operations.
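
A small worked example of the wrap, with hypothetical values:

    u32 buf_count = 0xffffff00;             /* hypothetical */
    u32 nbytes    = 0x00000200;

    u32 bad  = buf_count + nbytes;          /* wraps to 0x00000100 */
    u64 good = (u64)buf_count + nbytes;     /* 0x100000100, as intended */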

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   21 +++
 drivers/crypto/ccp/ccp-crypto-sha.c  |   21 +++
 drivers/crypto/ccp/ccp-crypto.h  |   10 +++--
 drivers/crypto/ccp/ccp-ops.c |   34 +-
 include/linux/ccp.h  |8 ---
 5 files changed, 57 insertions(+), 37 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index c6b8f9e..a52b97a 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -37,8 +37,9 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 
if (rctx->hash_rem) {
/* Save remaining data to buffer */
-   scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.aes.src,
-rctx->hash_cnt, rctx->hash_rem, 0);
+   unsigned int offset = rctx->nbytes - rctx->hash_rem;
+   scatterwalk_map_and_copy(rctx->buf, rctx->src,
+offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
} else
rctx->buf_count = 0;
@@ -62,8 +63,9 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
struct scatterlist *sg, *cmac_key_sg = NULL;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-   unsigned int len, need_pad, sg_count;
+   unsigned int need_pad, sg_count;
gfp_t gfp;
+   u64 len;
int ret;
 
if (!ctx->u.aes.key_len)
@@ -72,7 +74,9 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
if (nbytes)
rctx->null_msg = 0;
 
-   if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
+   len = (u64)rctx->buf_count + (u64)nbytes;
+
+   if (!final && (len <= block_size)) {
scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
 0, nbytes, 0);
rctx->buf_count += nbytes;
@@ -80,12 +84,13 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
return 0;
}
 
-   len = rctx->buf_count + nbytes;
+   rctx->src = req->src;
+   rctx->nbytes = nbytes;
 
rctx->final = final;
-   rctx->hash_cnt = final ? len : len & ~(block_size - 1);
-   rctx->hash_rem = final ?   0 : len &  (block_size - 1);
-   if (!final && (rctx->hash_cnt == len)) {
+   rctx->hash_rem = final ? 0 : len & (block_size - 1);
+   rctx->hash_cnt = len - rctx->hash_rem;
+   if (!final && !rctx->hash_rem) {
/* CCP can't do zero length final, so keep some data around */
rctx->hash_cnt -= block_size;
rctx->hash_rem = block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 183d16e..d30f6c8 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -101,8 +101,9 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 
if (rctx->hash_rem) {
/* Save remaining data to buffer */
-   scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.sha.src,
-rctx->hash_cnt, rctx->hash_rem, 0);
+   unsigned int offset = rctx->nbytes - rctx->hash_rem;
+   scatterwalk_map_and_copy(rctx->buf, rctx->src,
+offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
} else
rctx->buf_count = 0;
@@ -129,11 +130,14 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
struct scatterlist *sg;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-   unsigned int len, sg_count;
+   unsigned int sg_count;
gfp_t gfp;
+   u64 len;
int ret;
 
-   if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
+   len = (u64)rctx->buf_count + (u64)nbytes;
+
+   if (!final && (len <= block_size)) {
scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
 0, nbytes, 0);

[PATCH 3/6] crypto: ccp - Check for caller result area before using it

2014-01-06 Thread Tom Lendacky
For a hash operation, the caller doesn't have to supply a result
area on every call, so don't use or update it if it hasn't
been supplied.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |4 +++-
 drivers/crypto/ccp/ccp-crypto-sha.c  |7 +--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 646c8d1..c6b8f9e 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -43,7 +43,9 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
} else
rctx->buf_count = 0;
 
-   memcpy(req->result, rctx->iv, digest_size);
+   /* Update result area if supplied */
+   if (req->result)
+   memcpy(req->result, rctx->iv, digest_size);
 
 e_free:
sg_free_table(&rctx->data_sg);
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index bf913cb..183d16e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -74,6 +74,7 @@ static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+   struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist sg[2];
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
@@ -81,7 +82,7 @@ static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
 
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
-   sg_set_buf(&sg[1], req->result, digest_size);
+   sg_set_buf(&sg[1], rctx->ctx, digest_size);
 
return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
 block_size + digest_size);
@@ -106,7 +107,9 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
} else
rctx->buf_count = 0;
 
-   memcpy(req->result, rctx->ctx, digest_size);
+   /* Update result area if supplied */
+   if (req->result)
+   memcpy(req->result, rctx->ctx, digest_size);
 
/* If we're doing an HMAC, we need to perform that on the final op */
if (rctx->final && ctx->u.sha.key_len)




[PATCH 2/6] crypto: ccp - Cleanup scatterlist usage

2014-01-06 Thread Tom Lendacky
Clean up the usage of scatterlists to make the code cleaner
and avoid extra memory allocations when they are not needed.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |6 ++-
 drivers/crypto/ccp/ccp-crypto-sha.c  |   53 --
 2 files changed, 33 insertions(+), 26 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 398832c..646c8d1 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -125,8 +125,10 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
}
-   if (sg)
+   if (sg) {
sg_mark_end(sg);
+   sg = rctx->data_sg.sgl;
+   }
 
/* Initialize the K1/K2 scatterlist */
if (final)
@@ -143,7 +145,7 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
rctx->cmd.u.aes.iv = &rctx->iv_sg;
rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
-   rctx->cmd.u.aes.src = (sg) ? rctx->data_sg.sgl : NULL;
+   rctx->cmd.u.aes.src = sg;
rctx->cmd.u.aes.src_len = rctx->hash_cnt;
rctx->cmd.u.aes.dst = NULL;
rctx->cmd.u.aes.cmac_key = cmac_key_sg;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 0571940..bf913cb 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -122,7 +122,6 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 unsigned int final)
 {
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist *sg;
unsigned int block_size =
@@ -153,35 +152,32 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
/* Initialize the context scatterlist */
sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
 
-   /* Build the data scatterlist table - allocate enough entries for all
-* possible data pieces (hmac ipad, buffer, input data)
-*/
-   sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
-   gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-   GFP_KERNEL : GFP_ATOMIC;
-   ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
-   if (ret)
-   return ret;
-
sg = NULL;
-   if (rctx->first && ctx->u.sha.key_len) {
-   rctx->hash_cnt += block_size;
-
-   sg_init_one(&rctx->pad_sg, ctx->u.sha.ipad, block_size);
-   sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
-   }
+   if (rctx->buf_count && nbytes) {
+   /* Build the data scatterlist table - allocate enough entries
+* for both data pieces (buffer and input data)
+*/
+   gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+   GFP_KERNEL : GFP_ATOMIC;
+   sg_count = sg_nents(req->src) + 1;
+   ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+   if (ret)
+   return ret;
 
-   if (rctx->buf_count) {
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
-   }
-
-   if (nbytes)
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
-
-   if (sg)
sg_mark_end(sg);
 
+   sg = rctx->data_sg.sgl;
+   } else if (rctx->buf_count) {
+   sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+
+   sg = &rctx->buf_sg;
+   } else if (nbytes) {
+   sg = req->src;
+   }
+
rctx->msg_bits += (rctx->hash_cnt << 3);/* Total in bits */
 
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -190,7 +186,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
rctx->cmd.u.sha.type = rctx->type;
rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
-   rctx->cmd.u.sha.src = (sg) ? rctx->data_sg.sgl : NULL;
+   rctx->cmd.u.sha.src = sg;
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
rctx->cmd.u.sha.final = rctx->final;
rctx->cmd.u.sha.msg_bits = rctx->msg_bits

[PATCH 5/6] crypto: ccp - Cleanup hash invocation calls

2014-01-06 Thread Tom Lendacky
Clean up the ahash digest invocations to check the init
return code and make use of the finup routine.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |2 +-
 drivers/crypto/ccp/ccp-crypto-sha.c  |8 ++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index a52b97a..8e162ad 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -198,7 +198,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
if (ret)
return ret;
 
-   return ccp_do_cmac_update(req, req->nbytes, 1);
+   return ccp_aes_cmac_finup(req);
 }
 
 static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d30f6c8..3867290 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -248,9 +248,13 @@ static int ccp_sha_finup(struct ahash_request *req)
 
 static int ccp_sha_digest(struct ahash_request *req)
 {
-   ccp_sha_init(req);
+   int ret;
 
-   return ccp_do_sha_update(req, req->nbytes, 1);
+   ret = ccp_sha_init(req);
+   if (ret)
+   return ret;
+
+   return ccp_sha_finup(req);
 }
 
 static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,




[PATCH 0/6] crypto: ccp - more code fixes/cleanup

2014-01-06 Thread Tom Lendacky
The following series implements a fix to hash length wrapping as well
as some additional fixes and cleanups (proper gfp_t type on some memory
allocations, scatterlist usage improvements, null request result field
checks and driver enabled/disabled changes).

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (6):
  crypto: ccp - Apply appropriate gfp_t type to memory allocations
  crypto: ccp - Cleanup scatterlist usage
  crypto: ccp - Check for caller result area before using it
  crypto: ccp - Change data length declarations to u64
  crypto: ccp - Cleanup hash invocation calls
  crypto: ccp - CCP device enabled/disabled changes


 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   38 +
 drivers/crypto/ccp/ccp-crypto-sha.c  |   88 ++
 drivers/crypto/ccp/ccp-crypto.h  |   10 +++
 drivers/crypto/ccp/ccp-dev.c |   15 +
 drivers/crypto/ccp/ccp-ops.c |   34 ++--
 drivers/crypto/ccp/ccp-pci.c |3 +
 include/linux/ccp.h  |   20 +--
 7 files changed, 139 insertions(+), 69 deletions(-)

-- 
Tom Lendacky



Re: [PATCH 1/5] crypto: Fully restore ahash request before completing

2014-01-03 Thread Tom Lendacky
On Monday, December 30, 2013 05:01:13 PM Herbert Xu wrote:
> On Fri, Dec 27, 2013 at 01:21:36AM +0100, Marek Vasut wrote:
> >
> > > > -   complete(data, err);
> > > > +   areq->base.complete = complete;
> > > > +   areq->base.data = data;
> > > > +
> > > > +   complete(&areq->base, err);
> > > 
> > > This looks completely bogus.  While restoring areq isn't wrong per
> > > se, calling complete with &areq->base makes no sense.  The original
> > > completion data is in the variable "data".
> > 
> > Is there some documentation for this so I can understand why this is wrong, 
> > please? I really don't quite get it, sorry. Actually, is there some 
> > documentation for writing crypto API drivers at all please ?
> 
> Well it's wrong because the completion function (req->base.complete)
> is meant to take data (req->base.data) as its first argument.  So
> giving it a pointer to req->base makes no sense.
> 

The crypto_completion_t typdef is defined as:

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

so I believe &areq->base is the proper first argument (which is actually just
the req parameter of the ahash_op_unaligned_done function).

If you are going to restore areq, you really should restore all fields that
were changed - result, base.complete, base.data - and set priv to NULL.

Since the ahash_request_priv structure is freed in ahash_op_unaligned_finish
you'll need to save the value of priv->result in order to restore areq->result
(u8 *result = priv->result; or similar).

Additionally, you should probably also fix up ahash_def_finup_done2 and
ahash_def_finup_done1.
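
Putting those points together, one possible shape of the completion
path -- an untested sketch, assuming the kzfree has been removed from
ahash_op_unaligned_finish():

    struct ahash_request *req = areq->data;
    struct ahash_request_priv *priv = req->priv;
    u8 *result = priv->result;              /* save before freeing priv */

    ahash_op_unaligned_finish(req, err);    /* copies the digest out */

    req->result = result;                   /* restore the original fields */
    req->base.complete = priv->complete;
    req->base.data = priv->data;
    req->priv = priv->priv;

    kzfree(priv);                           /* free only after restoring */

    req->base.complete(&req->base, err);    /* complete the ORIGINAL request */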

Thanks,
Tom

> Cheers,
> 



[PATCH -next] Re: randconfig build error with next-20131210, in drivers/crypto/ccp/ccp-pci.c

2013-12-20 Thread Tom Lendacky

On Thursday, December 19, 2013 11:35:09 AM Randy Dunlap wrote:
> On 12/10/13 07:34, Tom Lendacky wrote:
> > 
> > On Tuesday, December 10, 2013 07:21:36 AM Jim Davis wrote:
> >> Building with the attached random configuration file,
> >>
> >> drivers/crypto/ccp/ccp-pci.c: In function ‘ccp_get_msix_irqs’:
> >> drivers/crypto/ccp/ccp-pci.c:44:20: error: array type has incomplete
> >> element type
> >> drivers/crypto/ccp/ccp-pci.c:48:18: error: negative width in bit-field
> >> ‘’
> >> drivers/crypto/ccp/ccp-pci.c:51:2: error: implicit declaration of
> >> function ‘pci_enable_msix’ [-Werror=implicit-function-declaration]
> >> drivers/crypto/ccp/ccp-pci.c:76:2: error: implicit declaration of
> >> function ‘pci_disable_msix’ [-Werror=implicit-function-declaration]
> >> drivers/crypto/ccp/ccp-pci.c:44:20: warning: unused variable
> >> ‘msix_entry’ [-Wunused-variable]
> >> drivers/crypto/ccp/ccp-pci.c: In function ‘ccp_get_msi_irq’:
> >> drivers/crypto/ccp/ccp-pci.c:89:2: error: implicit declaration of
> >> function ‘pci_enable_msi’ [-Werror=implicit-function-declaration]
> >> drivers/crypto/ccp/ccp-pci.c:102:2: error: implicit declaration of
> >> function ‘pci_disable_msi’ [-Werror=implicit-function-declaration]
> >> cc1: some warnings being treated as errors
> >> make[3]: *** [drivers/crypto/ccp/ccp-pci.o] Error 1
> > 
> > Thanks for finding this, I'll update to the Kconfig to add PCI to
> > the 'depends on' statement.
> 
> Tom,
> 
> Can you get this patch and the HW_RANDOM patch merged into linux-next, please?
> linux-next builds are still failing for both of these reasons.

Please consider the following patch that addresses some randconfig
build errors being experienced in the linux-next tree.  This patch
has already been accepted in the cryptodev-2.6 tree with a commit
id of d5aa80952aba9071b50a74c8daf7feb1caa2fd8c.

---
From: Tom Lendacky 

crypto: ccp - CCP Kconfig fixes

Update the Kconfig to include PCI in the 'depends on'
and add 'select HW_RANDOM' to ensure the necessary PCI
and HW_RANDOM functions are available/included in the
build.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/Kconfig |2 +-
 drivers/crypto/ccp/Kconfig |1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4954d75..9dee00f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -401,7 +401,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
-   depends on X86
+   depends on X86 && PCI
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 335ed5c..7639ffc 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -2,6 +2,7 @@ config CRYPTO_DEV_CCP_DD
tristate "Cryptographic Coprocessor device driver"
depends on CRYPTO_DEV_CCP
default m
+   select HW_RANDOM
help
  Provides the interface to use the AMD Cryptographic Coprocessor
  which can be used to accelerate or offload encryption operations


> 
> thanks,
> 



Re: crypto: ccp - CCP SHA crypto API support

2013-12-11 Thread Tom Lendacky

On Wednesday, December 11, 2013 01:51:50 PM Dan Carpenter wrote:
> Hello Tom Lendacky,
> 
> The patch 0ab0a1d505ab: "crypto: ccp - CCP SHA crypto API support"
> from Nov 12, 2013, leads to the following static checker warning:
>   drivers/crypto/ccp/ccp-crypto-sha.c:182 ccp_do_sha_update()
>   warn: should 'rctx->hash_cnt << 3' be a 64 bit type?
> 
> drivers/crypto/ccp/ccp-crypto-sha.c
>180  sg_mark_end(sg);
>181
>182  rctx->msg_bits += (rctx->hash_cnt << 3);/* Total in
> bits */ ^^^
> This operation wraps if the msg is over 500MB.  I'm not sure if that's
> possible, but ->msg_bits is declared as a u64 here and in
> rctx->cmd.u.sha.msg_bits as well.

I should probably cast hash_cnt to a u64 before doing the shift to be
sure I don't lose any bits.  So that I can validate my fix, which static
checker did you run?
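
The failure mode in miniature, with a hypothetical count:

    u32 hash_cnt = 1u << 29;                /* 512MB hashed so far */
    u64 bad  = hash_cnt << 3;               /* shift done in 32 bits: 0 */
    u64 good = (u64)hash_cnt << 3;          /* 0x100000000 bits */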

Thanks,
Tom
 
> 
>183
>184  memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> 
> 
> regards,
> dan carpenter



[PATCH 3/3] crypto: ccp - Remove user triggerable pr_err calls

2013-12-10 Thread Tom Lendacky
Remove the pr_err calls that are issued during parameter
checking in some AES operations. This will eliminate the
possibility of filling up syslog through these paths.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |4 +---
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |   12 +++-
 drivers/crypto/ccp/ccp-crypto-aes.c  |   12 +++-
 3 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 5b9cd98..64dd35e 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -63,10 +63,8 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
unsigned int len, need_pad, sg_count;
int ret;
 
-   if (!ctx->u.aes.key_len) {
-   pr_err("AES key not set\n");
+   if (!ctx->u.aes.key_len)
return -EINVAL;
-   }
 
if (nbytes)
rctx->null_msg = 0;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index d100b48..0237ab5 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -125,20 +125,14 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
unsigned int unit;
int ret;
 
-   if (!ctx->u.aes.key_len) {
-   pr_err("AES key not set\n");
+   if (!ctx->u.aes.key_len)
return -EINVAL;
-   }
 
-   if (req->nbytes & (AES_BLOCK_SIZE - 1)) {
-   pr_err("AES request size is not a multiple of the block size\n");
+   if (req->nbytes & (AES_BLOCK_SIZE - 1))
return -EINVAL;
-   }
 
-   if (!req->info) {
-   pr_err("AES IV not supplied");
+   if (!req->info)
return -EINVAL;
-   }
 
for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
if (!(req->nbytes & (unit_size_map[unit].size - 1)))
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index c0befdb..e46490d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -76,24 +76,18 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
unsigned int iv_len = 0;
int ret;
 
-   if (!ctx->u.aes.key_len) {
-   pr_err("AES key not set\n");
+   if (!ctx->u.aes.key_len)
return -EINVAL;
-   }
 
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
 (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
 (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
-   (req->nbytes & (AES_BLOCK_SIZE - 1))) {
-   pr_err("AES request size is not a multiple of the block size\n");
+   (req->nbytes & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
-   }
 
if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
-   if (!req->info) {
-   pr_err("AES IV not supplied");
+   if (!req->info)
return -EINVAL;
-   }
 
memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;




[PATCH 2/3] crypto: ccp - CCP Kconfig fixes

2013-12-10 Thread Tom Lendacky
Update the Kconfig to include PCI in the 'depends on'
and add 'select HW_RANDOM' to ensure the necessary PCI
and HW_RANDOM functions are available/included in the
build.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/Kconfig |2 +-
 drivers/crypto/ccp/Kconfig |1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4954d75..9dee00f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -401,7 +401,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
-   depends on X86
+   depends on X86 && PCI
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 335ed5c..7639ffc 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -2,6 +2,7 @@ config CRYPTO_DEV_CCP_DD
tristate "Cryptographic Coprocessor device driver"
depends on CRYPTO_DEV_CCP
default m
+   select HW_RANDOM
help
  Provides the interface to use the AMD Cryptographic Coprocessor
  which can be used to accelerate or offload encryption operations




[PATCH 1/3] crypto: ccp - Fix sparse warnings in ccp-crypto-sha.c

2013-12-10 Thread Tom Lendacky
The sha initialization data generated the following sparse warnings:

   sparse: incorrect type in initializer (different base types)
  expected unsigned int
  got restricted __be32 [usertype] 

Change the initialization data type from u32 to __be32.


Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-sha.c |    8 ++++----
 drivers/crypto/ccp/ccp-crypto.h     |    2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index a6ef183..b0881df 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -324,20 +324,20 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
ccp_sha_cra_exit(tfm);
 }
 
-static const u32 sha1_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
+static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
cpu_to_be32(SHA1_H4), 0, 0, 0,
 };
 
-static const u32 sha224_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
+static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
 };
 
-static const u32 sha256_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
+static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
@@ -347,7 +347,7 @@ static const u32 sha256_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
 struct ccp_sha_def {
const char *name;
const char *drv_name;
-   const u32 *init;
+   const __be32 *init;
enum ccp_sha_type type;
u32 digest_size;
u32 block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 45f17c3..13ea6ea 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -39,7 +39,7 @@ struct ccp_crypto_ablkcipher_alg {
 struct ccp_crypto_ahash_alg {
struct list_head entry;
 
-   const u32 *init;
+   const __be32 *init;
u32 type;
u32 mode;
 


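[Aside, not part of the patch: a minimal illustration of the warning class
being fixed here.  cpu_to_be32() returns __be32, a sparse "bitwise" type
that may not be silently mixed with plain u32, so both the arrays and the
struct fields holding them must be declared __be32.  The constant is just
an example value; checked with "make C=1", only the second form is clean:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static const u32    bad  = cpu_to_be32(0x67452301); /* sparse warns */
	static const __be32 good = cpu_to_be32(0x67452301); /* clean */
]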


[PATCH 0/3] crypto: ccp - code fixes/cleanup

2013-12-10 Thread Tom Lendacky
The following series implements fixes and cleanup for some reported
errors and suggestions (sparse errors, randconfig build errors and
pr_err usage).

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (3):
  crypto: ccp - Fix sparse warnings in ccp-crypto-sha.c
  crypto: ccp - CCP Kconfig fixes
  crypto: ccp - Remove user triggerable pr_err calls


 drivers/crypto/Kconfig                   |    2 +-
 drivers/crypto/ccp/Kconfig               |    1 +
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |    4 +---
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |   12 +++---------
 drivers/crypto/ccp/ccp-crypto-aes.c      |   12 +++---------
 drivers/crypto/ccp/ccp-crypto-sha.c      |    8 ++++----
 drivers/crypto/ccp/ccp-crypto.h          |    2 +-
 7 files changed, 14 insertions(+), 27 deletions(-)

-- 
Tom Lendacky



Re: randconfig build error with next-20131210, in drivers/crypto/ccp/ccp-pci.c

2013-12-10 Thread Tom Lendacky

On Tuesday, December 10, 2013 07:21:36 AM Jim Davis wrote:
> Building with the attached random configuration file,
> 
> drivers/crypto/ccp/ccp-pci.c: In function ‘ccp_get_msix_irqs’:
> drivers/crypto/ccp/ccp-pci.c:44:20: error: array type has incomplete
> element type
> drivers/crypto/ccp/ccp-pci.c:48:18: error: negative width in bit-field
> ‘’
> drivers/crypto/ccp/ccp-pci.c:51:2: error: implicit declaration of
> function ‘pci_enable_msix’ [-Werror=implicit-function-declaration]
> drivers/crypto/ccp/ccp-pci.c:76:2: error: implicit declaration of
> function ‘pci_disable_msix’ [-Werror=implicit-function-declaration]
> drivers/crypto/ccp/ccp-pci.c:44:20: warning: unused variable
> ‘msix_entry’ [-Wunused-variable]
> drivers/crypto/ccp/ccp-pci.c: In function ‘ccp_get_msi_irq’:
> drivers/crypto/ccp/ccp-pci.c:89:2: error: implicit declaration of
> function ‘pci_enable_msi’ [-Werror=implicit-function-declaration]
> drivers/crypto/ccp/ccp-pci.c:102:2: error: implicit declaration of
> function ‘pci_disable_msi’ [-Werror=implicit-function-declaration]
> cc1: some warnings being treated as errors
> make[3]: *** [drivers/crypto/ccp/ccp-pci.o] Error 1

Thanks for finding this, I'll update the Kconfig to add PCI to
the 'depends on' statement.

Tom



Re: randconfig build error with next-20131210, in drivers/crypto/ccp

2013-12-10 Thread Tom Lendacky

On Monday, December 09, 2013 10:30:17 PM Jim Davis wrote:
> Building with the attached random configuration file,
> 
> drivers/built-in.o: In function `ccp_init':
> /home/jim/linux/drivers/crypto/ccp/ccp-dev.c:402: undefined reference
> to `hwrng_register'
> drivers/built-in.o: In function `ccp_destroy':
> /home/jim/linux/drivers/crypto/ccp/ccp-dev.c:445: undefined reference
> to `hwrng_unregister'
> make: *** [vmlinux] Error 1

Thanks for finding this, I'll update the Kconfig to select HW_RANDOM.

Tom



[PATCH] crypto: scatterwalk - Use sg_chain_ptr on chain entries

2013-12-05 Thread Tom Lendacky
Now that scatterwalk_sg_chain sets the chain pointer bit the sg_page
call in scatterwalk_sg_next hits a BUG_ON when CONFIG_DEBUG_SG is
enabled. Use sg_chain_ptr instead of sg_page on a chain entry.

Signed-off-by: Tom Lendacky 
---
 include/crypto/scatterwalk.h |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -44,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
if (sg_is_last(sg))
return NULL;
 
-   return (++sg)->length ? sg : (void *)sg_page(sg);
+   return (++sg)->length ? sg : sg_chain_ptr(sg);
 }
 
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,

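[Aside, a paraphrase of the include/linux/scatterlist.h helpers of this
era, not part of the patch, showing why sg_page() must not be used on a
chain entry: the low two bits of page_link are flag bits, and
sg_chain_ptr() masks them off before dereferencing.

	#define sg_is_chain(sg)		((sg)->page_link & 0x01)
	#define sg_is_last(sg)		((sg)->page_link & 0x02)
	#define sg_chain_ptr(sg)	\
		((struct scatterlist *) ((sg)->page_link & ~0x03))

With CONFIG_DEBUG_SG enabled, sg_page() BUG()s when bit 0 is set, which is
exactly the crash this patch avoids.]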



Re: scatterlist.h backtrace from crypto ccm module

2013-12-05 Thread Tom Lendacky

On Thursday, December 05, 2013 09:03:02 AM Josh Boyer wrote:
> Hi All,
> 
> We've had a report [1] of the backtrace below on the latest rawhide
> kernel, which is essentially Linus' tree as of yesterday.  We aren't
> carrying any patches to crypto at the moment.  It's basically hitting
> the second BUG_ON in the sg_page function:
> 
> static inline struct page *sg_page(struct scatterlist *sg)
> {
> #ifdef CONFIG_DEBUG_SG
> BUG_ON(sg->sg_magic != SG_MAGIC);
> BUG_ON(sg_is_chain(sg));
> #endif
> return (struct page *)((sg)->page_link & ~0x3);
> }
> 
> Any ideas on this one?

Looks like I introduced this when I added the chain pointer indicator
bit to fix a segfault I was seeing.  I didn't try running with
CONFIG_DEBUG_SG enabled and so I didn't hit this.  Try this patch and
see if it fixes it for you.  If it does I'll re-send it with a
signed-off-by.

diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc..2913729 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -43,7 +43,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
if (sg_is_last(sg))
return NULL;
 
-   return (++sg)->length ? sg : (void *)sg_page(sg);
+   return (++sg)->length ? sg : sg_chain_ptr(sg);
 }
 
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,

Thanks,
Tom
> 
> [1] https://bugzilla.redhat.com/show_bug.cgi?id=1038472
> 
> josh
> 
> [   89.317139] [ cut here ]
> [   89.317210] kernel BUG at include/linux/scatterlist.h:99!
> [   89.317272] invalid opcode:  [#1] SMP
> [   89.317326] Modules linked in: ccm ip6t_REJECT bnep bluetooth
> xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter
> ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6
> ip6table_mangle ip6table_security ip6table_raw ip6table_filter
> ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4
> nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw
> amd_freq_sensitivity kvm crct10dif_pclmul crc32_pclmul arc4
> crc32c_intel rtl8188ee rtl_pci rtlwifi ghash_clmulni_intel mac80211
> microcode snd_hda_codec_conexant cfg80211 joydev snd_hda_codec_hdmi
> serio_raw alx snd_hda_intel edac_core edac_mce_amd mdio k10temp
> fam15h_power snd_hda_codec snd_hwdep toshiba_acpi sparse_keymap
> snd_seq rfkill snd_seq_device wmi snd_pcm snd_page_alloc snd_timer snd
> shpchp soundcore video
> [   89.318322]  i2c_piix4 acpi_cpufreq binfmt_misc radeon i2c_algo_bit
> drm_kms_helper ttm drm i2c_core
> [   89.318441] CPU: 3 PID: 809 Comm: cryptomgr_test Not tainted
> 3.13.0-0.rc2.git3.1.fc21.x86_64 #1
> [   89.318535] Hardware name: TOSHIBA Satellite L75D-A/Larne, BIOS
> 1.10 05/16/2013
> [   89.318616] task: 88003725cd70 ti: 880192ef2000 task.ti:
> 880192ef2000
> [   89.318697] RIP: 0010:[]  []
> scatterwalk_pagedone+0xbe/0xc0
> [   89.318803] RSP: 0018:880192ef3940  EFLAGS: 00010202
> [   89.318862] RAX: 87654321 RBX: 880192ef39d0 RCX:
>  [   89.318940] RDX: 8800ac3649f1 RSI: 
> RDI: 880192ef39f0 [   89.319017] RBP: 880192ef3940 R08:
> 01a0 R09: 8800b16071c8 [   89.319094] R10: 0003
> R11:  R12: 0017 [   89.319170] R13:
> 0010 R14: 880192ef3a08 R15: 880192ef3a90 [   89.319248]
> FS:  7f2fb512b740() GS:880198c0()
> knlGS:
> [   89.319335] CS:  0010 DS:  ES:  CR0: 80050033
> [   89.319398] CR2: 0032b540e930 CR3: 01c0c000 CR4:
> 000407e0 [   89.319474] Stack:
> [   89.319499]  880192ef3950 81335a5d 880192ef3990
> 813389f3
> [   89.319594]  880185c33588 0010 880192ef3990
> 880192ef3c88
> [   89.319686]  8800b16071a0  880192ef3a70
> 81345461
> [   89.319779] Call Trace:
> [   89.319814]  [] scatterwalk_done+0x3d/0x50
> [   89.319881]  [] blkcipher_walk_done+0x83/0x230
> [   89.319952]  [] crypto_ctr_crypt+0x121/0x2b0
> [   89.320023]  [] ? aes_decrypt+0xa0/0xa0
> [   89.320087]  [] async_encrypt+0x3d/0x40
> [   89.320150]  [] ? async_encrypt+0x3d/0x40
> [   89.320218]  [] crypto_ccm_encrypt+0x2d0/0x320 [ccm]
> [   89.320293]  [] ? setkey+0xb3/0xd0
> [   89.320351]  [] __test_aead+0x445/0x1170
> [   89.320417]  [] ? __crypto_alloc_tfm+0x45/0x170
> [   89.323746]  [] ? crypto_spawn_tfm+0x45/0x80
> [   89.327078]  [] ? __crypto_alloc_tfm+0x101/0x170
> [   89.330412]  [] test_aead+0x27/0xb0
> [   89.333704]  [] alg_test_aead+0x47/0xb0
> [   89.336976]  [] alg_test+0x12f/0x390
> [   89.340233]  [] ? __schedule+0x350/0x970
> [   89.343456]  [] ? crypto_unregister_pcomp+0x20/0x20
> [   89.346687]  [] cryptomgr_test+0x41/0x50
> [   89.349915]  [] kthread+0xff/0x120
> [   89.353110]  [] ? insert_kthread_work+0x80/0x80
> [   89.356287]  [] ret_from_fork+0x7c/0xb0
> [ 

Re: [PATCH 00/10] AMD Cryptographic Coprocessor support

2013-11-20 Thread Tom Lendacky
Hi Herbert,

Is the crypto tree the right tree to go through for this support
or would you prefer I go through another tree?

Thanks,
Tom

On Tuesday, November 12, 2013 11:45:59 AM Tom Lendacky wrote:
> Resending because of typo in mailing list address...
> 
> The following series implements support for the AMD Cryptographic
> Coprocessor (CCP).  The AMD CCP provides hardware encryption, hashing
> and other related operations.
> 
> This patch series is based on the 3.12 kernel.
> 
> ---
> 
> Tom Lendacky (10):
>   crypto: authenc - Find proper IV address in ablkcipher callback
>   crypto: scatterwalk - Set the chain pointer indication bit
>   crypto: CCP device driver and interface support
>   crypto: crypto API interface to the CCP device driver
>   crypto: CCP AES crypto API support
>   crypto: CCP AES CMAC mode crypto API support
>   crypto: CCP XTS-AES crypto API support
>   crypto: CCP SHA crypto API support
>   crypto: CCP device driver build files
>   crypto: CCP maintainer information
> 
> 
>  MAINTAINERS                              |    7 +
>  crypto/authenc.c                         |    7 ++++---
>  drivers/crypto/Kconfig                   |   12 ++++++
>  drivers/crypto/Makefile                  |    1 +
>  drivers/crypto/ccp/Kconfig               |   23 ++++++++
>  drivers/crypto/ccp/Makefile              |   10 ++++
>  drivers/crypto/ccp/ccp-crypto-aes-cmac.c |  355 +++++++++
>  drivers/crypto/ccp/ccp-crypto-aes-xts.c  |  285 ++++++++
>  drivers/crypto/ccp/ccp-crypto-aes.c      |  375 ++++++++++
>  drivers/crypto/ccp/ccp-crypto-main.c     |  432 ++++++++++++
>  drivers/crypto/ccp/ccp-crypto-sha.c      |  497 +++++++++++++
>  drivers/crypto/ccp/ccp-crypto.h          |  191 ++++++
>  drivers/crypto/ccp/ccp-dev.c             |  582 ++++++++++++++++
>  drivers/crypto/ccp/ccp-dev.h             |  272 ++++++++
>  drivers/crypto/ccp/ccp-ops.c             | 2020 ++++++++++++++++++++++++++
>  drivers/crypto/ccp/ccp-pci.c             |  360 ++++++++++
>  include/crypto/scatterwalk.h             |    1 +
>  include/linux/ccp.h                      |  525 ++++++++++++++
>  18 files changed, 5952 insertions(+), 3 deletions(-)
>  create mode 100644 drivers/crypto/ccp/Kconfig
>  create mode 100644 drivers/crypto/ccp/Makefile
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-cmac.c
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-xts.c
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-aes.c
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-main.c
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-sha.c
>  create mode 100644 drivers/crypto/ccp/ccp-crypto.h
>  create mode 100644 drivers/crypto/ccp/ccp-dev.c
>  create mode 100644 drivers/crypto/ccp/ccp-dev.h
>  create mode 100644 drivers/crypto/ccp/ccp-ops.c
>  create mode 100644 drivers/crypto/ccp/ccp-pci.c
>  create mode 100644 include/linux/ccp.h
-- 
Tom

Thomas Lendacky
Advanced Micro Devices, Inc.



[PATCH 09/10] crypto: CCP device driver build files

2013-11-12 Thread Tom Lendacky
These files provide the ability to configure and build the
AMD CCP device driver and crypto API support.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/Kconfig  |   12 
 drivers/crypto/Makefile |1 +
 drivers/crypto/ccp/Kconfig  |   23 +++
 drivers/crypto/ccp/Makefile |   10 ++
 4 files changed, 46 insertions(+)
 create mode 100644 drivers/crypto/ccp/Kconfig
 create mode 100644 drivers/crypto/ccp/Makefile

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f4fd837..4954d75 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -399,4 +399,16 @@ config CRYPTO_DEV_ATMEL_SHA
  To compile this driver as a module, choose M here: the module
  will be called atmel-sha.
 
+config CRYPTO_DEV_CCP
+   bool "Support for AMD Cryptographic Coprocessor"
+   depends on X86
+   default n
+   help
+ The AMD Cryptographic Coprocessor provides hardware support
+ for encryption, hashing and related operations.
+
+if CRYPTO_DEV_CCP
+   source "drivers/crypto/ccp/Kconfig"
+endif
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index b4946dd..8a6c86a 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
new file mode 100644
index 000..335ed5c
--- /dev/null
+++ b/drivers/crypto/ccp/Kconfig
@@ -0,0 +1,23 @@
+config CRYPTO_DEV_CCP_DD
+   tristate "Cryptographic Coprocessor device driver"
+   depends on CRYPTO_DEV_CCP
+   default m
+   help
+ Provides the interface to use the AMD Cryptographic Coprocessor
+ which can be used to accelerate or offload encryption operations
+ such as SHA, AES and more. If you choose 'M' here, this module
+ will be called ccp.
+
+config CRYPTO_DEV_CCP_CRYPTO
+   tristate "Encryption and hashing acceleration support"
+   depends on CRYPTO_DEV_CCP_DD
+   default m
+   select CRYPTO_ALGAPI
+   select CRYPTO_HASH
+   select CRYPTO_BLKCIPHER
+   select CRYPTO_AUTHENC
+   help
+ Support for using the cryptographic API with the AMD Cryptographic
+ Coprocessor. This module supports acceleration and offload of SHA
+ and AES algorithms.  If you choose 'M' here, this module will be
+ called ccp_crypto.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 000..d3505a0
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
+ccp-objs := ccp-dev.o ccp-ops.o
+ccp-objs += ccp-pci.o
+
+obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
+ccp-crypto-objs := ccp-crypto-main.o \
+  ccp-crypto-aes.o \
+  ccp-crypto-aes-cmac.o \
+  ccp-crypto-aes-xts.o \
+  ccp-crypto-sha.o

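[Aside, not part of the patch: with the options above, a typical
configuration that builds the driver and its crypto layer as modules would
look like this in .config:

	CONFIG_CRYPTO_DEV_CCP=y
	CONFIG_CRYPTO_DEV_CCP_DD=m
	CONFIG_CRYPTO_DEV_CCP_CRYPTO=m

producing ccp.ko and ccp-crypto.ko per the Makefile.]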



[PATCH 10/10] crypto: CCP maintainer information

2013-11-12 Thread Tom Lendacky
Update the MAINTAINERS file for the AMD CCP device driver.

Signed-off-by: Tom Lendacky 
---
 MAINTAINERS |    7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 051e4dc..de22604 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -525,6 +525,13 @@ F: drivers/tty/serial/altera_jtaguart.c
 F: include/linux/altera_uart.h
 F: include/linux/altera_jtaguart.h
 
+AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
+M: Tom Lendacky 
+L: linux-crypto@vger.kernel.org
+S: Supported
+F: drivers/crypto/ccp/
+F: include/linux/ccp.h
+
 AMD FAM15H PROCESSOR POWER MONITORING DRIVER
 M: Andreas Herrmann 
 L: lm-sens...@lm-sensors.org




[PATCH 06/10] crypto: CCP AES CMAC mode crypto API support

2013-11-12 Thread Tom Lendacky
These routines provide crypto API support for the CMAC mode of AES
on the AMD CCP.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |  355 ++
 1 file changed, 355 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-cmac.c

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
new file mode 100644
index 000..5b9cd98
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -0,0 +1,355 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp-crypto.h"
+
+
+static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
+int ret)
+{
+   struct ahash_request *req = ahash_request_cast(async_req);
+   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+   struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+   unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+   if (ret)
+   goto e_free;
+
+   if (rctx->hash_rem) {
+   /* Save remaining data to buffer */
+   scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.aes.src,
+rctx->hash_cnt, rctx->hash_rem, 0);
+   rctx->buf_count = rctx->hash_rem;
+   } else
+   rctx->buf_count = 0;
+
+   memcpy(req->result, rctx->iv, digest_size);
+
+e_free:
+   sg_free_table(&rctx->data_sg);
+
+   return ret;
+}
+
+static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+   struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+   struct scatterlist *sg, *cmac_key_sg = NULL;
+   unsigned int block_size =
+   crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+   unsigned int len, need_pad, sg_count;
+   int ret;
+
+   if (!ctx->u.aes.key_len) {
+   pr_err("AES key not set\n");
+   return -EINVAL;
+   }
+
+   if (nbytes)
+   rctx->null_msg = 0;
+
+   if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
+   scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+0, nbytes, 0);
+   rctx->buf_count += nbytes;
+
+   return 0;
+   }
+
+   len = rctx->buf_count + nbytes;
+
+   rctx->final = final;
+   rctx->hash_cnt = final ? len : len & ~(block_size - 1);
+   rctx->hash_rem = final ?   0 : len &  (block_size - 1);
+   if (!final && (rctx->hash_cnt == len)) {
+   /* CCP can't do zero length final, so keep some data around */
+   rctx->hash_cnt -= block_size;
+   rctx->hash_rem = block_size;
+   }
+
+   if (final && (rctx->null_msg || (len & (block_size - 1
+   need_pad = 1;
+   else
+   need_pad = 0;
+
+   sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));
+
+   /* Build the data scatterlist table - allocate enough entries for all
+* possible data pieces (buffer, input data, padding)
+*/
+   sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
+   ret = sg_alloc_table(&rctx->data_sg, sg_count, GFP_KERNEL);
+   if (ret)
+   return ret;
+
+   sg = NULL;
+   if (rctx->buf_count) {
+   sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+   sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+   }
+
+   if (nbytes)
+   sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+
+   if (need_pad) {
+   int pad_length = block_size - (len & (block_size - 1));
+
+   rctx->hash_cnt += pad_length;
+
+   memset(rctx->pad, 0, sizeof(rctx->pad));
+   rctx->pad[0] = 0x80;
+   sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
+   sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
+   }
+   if (sg)
+   sg_mark_end(sg);
+
+   /* Initialize the K1/K2 scatterlist */
+   if (final)
+   cmac_key_sg = (need_pad) ? &ctx->u

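[The message is truncated in the archive above.  For orientation, a hedged
sketch, not code from the patch, of how a kernel caller would exercise the
"cmac(aes)" ahash this file registers (error handling trimmed; needs
<crypto/hash.h>, <crypto/aes.h>, <linux/scatterlist.h>):

	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128], msg[64], mac[AES_BLOCK_SIZE];
	int ret;

	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, sizeof(key));

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, msg, sizeof(msg));
	ahash_request_set_crypt(req, &sg, mac, sizeof(msg));
	ret = crypto_ahash_digest(req);	/* may return -EINPROGRESS */
]
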
[PATCH 01/10] crypto: authenc - Find proper IV address in ablkcipher callback

2013-11-12 Thread Tom Lendacky
When performing an asynchronous ablkcipher operation the authenc
completion callback routine is invoked, but it does not locate and use
the proper IV.

The callback routine, crypto_authenc_encrypt_done, is updated to use
the same method of calculating the address of the IV as is done in
crypto_authenc_encrypt function which sets up the callback.

Signed-off-by: Tom Lendacky 
---
 crypto/authenc.c |7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/crypto/authenc.c b/crypto/authenc.c
index ffce19d..528b00b 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -368,9 +368,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
if (!err) {
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-   struct ablkcipher_request *abreq = aead_request_ctx(areq);
-   u8 *iv = (u8 *)(abreq + 1) +
-crypto_ablkcipher_reqsize(ctx->enc);
+   struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
+   struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+   + ctx->reqoff);
+   u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
 
err = crypto_authenc_genicv(areq, iv, 0);
}




[PATCH 07/10] crypto: CCP XTS-AES crypto API support

2013-11-12 Thread Tom Lendacky
These routines provide crypto API support for the XTS-AES mode of AES
on the AMD CCP.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes-xts.c |  285 +++
 1 file changed, 285 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-xts.c

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
new file mode 100644
index 000..d100b48
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -0,0 +1,285 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp-crypto.h"
+
+
+struct ccp_aes_xts_def {
+   const char *name;
+   const char *drv_name;
+};
+
+static struct ccp_aes_xts_def aes_xts_algs[] = {
+   {
+   .name   = "xts(aes)",
+   .drv_name   = "xts-aes-ccp",
+   },
+};
+
+struct ccp_unit_size_map {
+   unsigned int size;
+   u32 value;
+};
+
+static struct ccp_unit_size_map unit_size_map[] = {
+   {
+   .size   = 4096,
+   .value  = CCP_XTS_AES_UNIT_SIZE_4096,
+   },
+   {
+   .size   = 2048,
+   .value  = CCP_XTS_AES_UNIT_SIZE_2048,
+   },
+   {
+   .size   = 1024,
+   .value  = CCP_XTS_AES_UNIT_SIZE_1024,
+   },
+   {
+   .size   = 512,
+   .value  = CCP_XTS_AES_UNIT_SIZE_512,
+   },
+   {
+   .size   = 256,
+   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   },
+   {
+   .size   = 128,
+   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   },
+   {
+   .size   = 64,
+   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   },
+   {
+   .size   = 32,
+   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   },
+   {
+   .size   = 16,
+   .value  = CCP_XTS_AES_UNIT_SIZE_16,
+   },
+   {
+   .size   = 1,
+   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   },
+};
+
+static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
+{
+   struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+   struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+   if (ret)
+   return ret;
+
+   memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+   return 0;
+}
+
+static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+   struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+   /* Only support 128-bit AES key with a 128-bit Tweak key,
+* otherwise use the fallback
+*/
+   switch (key_len) {
+   case AES_KEYSIZE_128 * 2:
+   memcpy(ctx->u.aes.key, key, key_len);
+   break;
+   }
+   ctx->u.aes.key_len = key_len / 2;
+   sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+   return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
+   key_len);
+}
+
+static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+unsigned int encrypt)
+{
+   struct crypto_tfm *tfm =
+   crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+   struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+   struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+   unsigned int unit;
+   int ret;
+
+   if (!ctx->u.aes.key_len) {
+   pr_err("AES key not set\n");
+   return -EINVAL;
+   }
+
+   if (req->nbytes & (AES_BLOCK_SIZE - 1)) {
+   pr_err("AES request size is not a multiple of the block size\n");
+   return -EINVAL;
+   }
+
+   if (!req->info) {
+   pr_err("AES IV not supplied");
+   return -EINVAL;
+   }
+
+   for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
+   if (!(req->nbytes & (unit_size_map[unit].size - 1)))
+   break;
+
+   if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+   (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+   /* Use the fallback to process the request for any
+* unsupported unit sizes or key sizes
+*/
+   ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
+

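[The message is truncated in the archive above.  For orientation, the
unit_size_map scan earlier picks the first (largest) entry whose size
evenly divides req->nbytes: a 4096-byte request maps to
CCP_XTS_AES_UNIT_SIZE_4096, while a 4608-byte request fails the 4096, 2048
and 1024 checks and lands on CCP_XTS_AES_UNIT_SIZE_512.  Any length whose
best match is a __LAST entry, like an unsupported key size, is handed to
the fallback ablkcipher instead of the CCP.]
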
[PATCH 00/10] AMD Cryptographic Coprocessor support

2013-11-12 Thread Tom Lendacky
Resending because of typo in mailing list address...

The following series implements support for the AMD Cryptographic
Coprocessor (CCP).  The AMD CCP provides hardware encryption, hashing
and other related operations.

This patch series is based on the 3.12 kernel.

---

Tom Lendacky (10):
  crypto: authenc - Find proper IV address in ablkcipher callback
  crypto: scatterwalk - Set the chain pointer indication bit
  crypto: CCP device driver and interface support
  crypto: crypto API interface to the CCP device driver
  crypto: CCP AES crypto API support
  crypto: CCP AES CMAC mode crypto API support
  crypto: CCP XTS-AES crypto API support
  crypto: CCP SHA crypto API support
  crypto: CCP device driver build files
  crypto: CCP maintainer information


 MAINTAINERS                              |    7 +
 crypto/authenc.c                         |    7 ++++---
 drivers/crypto/Kconfig                   |   12 ++++++
 drivers/crypto/Makefile                  |    1 +
 drivers/crypto/ccp/Kconfig               |   23 ++++++++
 drivers/crypto/ccp/Makefile              |   10 ++++
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |  355 +++++++++
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |  285 ++++++++
 drivers/crypto/ccp/ccp-crypto-aes.c      |  375 ++++++++++
 drivers/crypto/ccp/ccp-crypto-main.c     |  432 ++++++++++++
 drivers/crypto/ccp/ccp-crypto-sha.c      |  497 +++++++++++++
 drivers/crypto/ccp/ccp-crypto.h          |  191 ++++++
 drivers/crypto/ccp/ccp-dev.c             |  582 ++++++++++++++++
 drivers/crypto/ccp/ccp-dev.h             |  272 ++++++++
 drivers/crypto/ccp/ccp-ops.c             | 2020 ++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp-pci.c             |  360 ++++++++++
 include/crypto/scatterwalk.h             |    1 +
 include/linux/ccp.h                      |  525 ++++++++++++++
 18 files changed, 5952 insertions(+), 3 deletions(-)
 create mode 100644 drivers/crypto/ccp/Kconfig
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-cmac.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-xts.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto-main.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto-sha.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto.h
 create mode 100644 drivers/crypto/ccp/ccp-dev.c
 create mode 100644 drivers/crypto/ccp/ccp-dev.h
 create mode 100644 drivers/crypto/ccp/ccp-ops.c
 create mode 100644 drivers/crypto/ccp/ccp-pci.c
 create mode 100644 include/linux/ccp.h

-- 
Tom Lendacky



[PATCH 04/10] crypto: crypto API interface to the CCP device driver

2013-11-12 Thread Tom Lendacky
These routines provide the support for the interface between the crypto API
and the AMD CCP. This includes ensuring that requests associated with a
given tfm on the same cpu are processed in the order received.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-main.c |  432 ++
 drivers/crypto/ccp/ccp-crypto.h  |  191 +++
 2 files changed, 623 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-main.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto.h

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
new file mode 100644
index 000..2636f04
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -0,0 +1,432 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp-crypto.h"
+
+MODULE_AUTHOR("Tom Lendacky ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
+
+
+/* List heads for the supported algorithms */
+static LIST_HEAD(hash_algs);
+static LIST_HEAD(cipher_algs);
+
+/* For any tfm, requests for that tfm on the same CPU must be returned
+ * in the order received.  With multiple queues available, the CCP can
+ * process more than one cmd at a time.  Therefore we must maintain
+ * a cmd list to insure the proper ordering of requests on a given tfm/cpu
+ * combination.
+ */
+struct ccp_crypto_cpu_queue {
+   struct list_head cmds;
+   struct list_head *backlog;
+   unsigned int cmd_count;
+};
+#define CCP_CRYPTO_MAX_QLEN 50
+
+struct ccp_crypto_percpu_queue {
+   struct ccp_crypto_cpu_queue __percpu *cpu_queue;
+};
+static struct ccp_crypto_percpu_queue req_queue;
+
+struct ccp_crypto_cmd {
+   struct list_head entry;
+
+   struct ccp_cmd *cmd;
+
+   /* Save the crypto_tfm and crypto_async_request addresses
+* separately to avoid any reference to a possibly invalid
+* crypto_async_request structure after invoking the request
+* callback
+*/
+   struct crypto_async_request *req;
+   struct crypto_tfm *tfm;
+
+   /* Used for held command processing to determine state */
+   int ret;
+
+   int cpu;
+};
+
+struct ccp_crypto_cpu {
+   struct work_struct work;
+   struct completion completion;
+   struct ccp_crypto_cmd *crypto_cmd;
+   int err;
+};
+
+
+static inline bool ccp_crypto_success(int err)
+{
+   if (err && (err != -EINPROGRESS) && (err != -EBUSY))
+   return false;
+
+   return true;
+}
+
+/*
+ * ccp_crypto_cmd_complete must be called while running on the appropriate
+ * cpu and the caller must have done a get_cpu to disable preemption
+ */
+static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
+   struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
+{
+   struct ccp_crypto_cpu_queue *cpu_queue;
+   struct ccp_crypto_cmd *held = NULL, *tmp;
+
+   *backlog = NULL;
+
+   cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+
+   /* Held cmds will be after the current cmd in the queue so start
+* searching for a cmd with a matching tfm for submission.
+*/
+   tmp = crypto_cmd;
+   list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
+   if (crypto_cmd->tfm != tmp->tfm)
+   continue;
+   held = tmp;
+   break;
+   }
+
+   /* Process the backlog:
+*   Because cmds can be executed from any point in the cmd list
+*   special precautions have to be taken when handling the backlog.
+*/
+   if (cpu_queue->backlog != &cpu_queue->cmds) {
+   /* Skip over this cmd if it is the next backlog cmd */
+   if (cpu_queue->backlog == &crypto_cmd->entry)
+   cpu_queue->backlog = crypto_cmd->entry.next;
+
+   *backlog = container_of(cpu_queue->backlog,
+   struct ccp_crypto_cmd, entry);
+   cpu_queue->backlog = cpu_queue->backlog->next;
+
+   /* Skip over this cmd if it is now the next backlog cmd */
+   if (cpu_queue->backlog == &crypto_cmd->entry)
+   cpu_queue->backlog = crypto_cmd->entry.next;
+   }
+
+   /* Remove the cmd entry from the list of cmds */
+   cpu_queue->cmd_count--;
+   list_del(&crypto_cmd->entry);
+
+   return held;
+}
+
+static void ccp_crypto_comple

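[The message is truncated in the archive above.  For orientation, the
queue implements the standard crypto API backlog contract for its callers.
A generic sketch of that contract, not code from the patch, where "done"
is an illustrative completion that the caller's callback fills in:

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* -EINPROGRESS: queued.  -EBUSY: accepted on the backlog,
		 * possible only when CRYPTO_TFM_REQ_MAY_BACKLOG is set.
		 * The callback delivers the final status either way.
		 */
		wait_for_completion(&done);
	}
]
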
[PATCH 02/10] crypto: scatterwalk - Set the chain pointer indication bit

2013-11-12 Thread Tom Lendacky
The scatterwalk_crypto_chain function invokes the scatterwalk_sg_chain
function to chain two scatterlists, but the chain pointer indication
bit is not set.  When the resulting scatterlist is used, for example,
by sg_nents to count the number of scatterlist entries, a segfault occurs
because sg_nents does not follow the chain pointer to the chained scatterlist.

Update scatterwalk_sg_chain to set the chain pointer indication bit as is
done by the sg_chain function.

Signed-off-by: Tom Lendacky 
---
 include/crypto/scatterwalk.h |1 +
 1 file changed, 1 insertion(+)

diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc..64ebede 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
 {
sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
sg1[num - 1].page_link &= ~0x02;
+   sg1[num - 1].page_link |= 0x01;
 }
 
 static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)




[PATCH 08/10] crypto: CCP SHA crypto API support

2013-11-12 Thread Tom Lendacky
These routines provide crypto API support for SHA1, SHA224 and SHA256
on the AMD CCP.  HMAC support for these SHA modes is also provided.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-sha.c |  497 +++
 1 file changed, 497 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-sha.c

diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
new file mode 100644
index 000..44ff00a
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -0,0 +1,497 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp-crypto.h"
+
+
+struct ccp_sha_result {
+   struct completion completion;
+   int err;
+};
+
+static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
+{
+   struct ccp_sha_result *result = req->data;
+
+   if (err == -EINPROGRESS)
+   return;
+
+   result->err = err;
+   complete(&result->completion);
+}
+
+static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
+struct scatterlist *sg, unsigned int len)
+{
+   struct ccp_sha_result result;
+   struct ahash_request *req;
+   int ret;
+
+   init_completion(&result.completion);
+
+   req = ahash_request_alloc(tfm, GFP_KERNEL);
+   if (!req)
+   return -ENOMEM;
+
+   ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+  ccp_sync_hash_complete, &result);
+   ahash_request_set_crypt(req, sg, buf, len);
+
+   ret = crypto_ahash_digest(req);
+   if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+   ret = wait_for_completion_interruptible(&result.completion);
+   if (!ret)
+   ret = result.err;
+   }
+
+   ahash_request_free(req);
+
+   return ret;
+}
+
+static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
+{
+   struct ahash_request *req = ahash_request_cast(async_req);
+   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+   struct scatterlist sg[2];
+   unsigned int block_size =
+   crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+   unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+   sg_init_table(sg, ARRAY_SIZE(sg));
+   sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
+   sg_set_buf(&sg[1], req->result, digest_size);
+
+   return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
+block_size + digest_size);
+}
+
+static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
+{
+   struct ahash_request *req = ahash_request_cast(async_req);
+   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+   struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+   unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+   if (ret)
+   goto e_free;
+
+   if (rctx->hash_rem) {
+   /* Save remaining data to buffer */
+   scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.sha.src,
+rctx->hash_cnt, rctx->hash_rem, 0);
+   rctx->buf_count = rctx->hash_rem;
+   } else
+   rctx->buf_count = 0;
+
+   memcpy(req->result, rctx->ctx, digest_size);
+
+   /* If we're doing an HMAC, we need to perform that on the final op */
+   if (rctx->final && ctx->u.sha.key_len)
+   ret = ccp_sha_finish_hmac(async_req);
+
+e_free:
+   sg_free_table(&rctx->data_sg);
+
+   return ret;
+}
+
+static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
+unsigned int final)
+{
+   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+   struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+   struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+   struct scatterlist *sg;
+   unsigned int block_size =
+   crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+   unsigned int len, sg_count;
+   int ret;
+
+   if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
+   scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+0, nbytes, 0);
+   rctx->buf_count += nbytes;
+
+ 

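[The message is truncated in the archive above.  For reference,
ccp_sha_finish_hmac() computes the outer hash of the standard HMAC
construction (RFC 2104):

	HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m))

where opad and ipad are the 0x5c and 0x36 pads and || is concatenation.
The two-entry scatterlist it builds, ctx->u.sha.opad followed by the inner
digest in req->result, is exactly the input to that outer hash.]
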
[PATCH 05/10] crypto: CCP AES crypto API support

2013-11-12 Thread Tom Lendacky
These routines provide crypto API support for AES on the AMD CCP.

Support for AES modes: ECB, CBC, OFB, CFB and CTR

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/ccp-crypto-aes.c |  375 +++
 1 file changed, 375 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes.c

diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
new file mode 100644
index 000..f302a5b7
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -0,0 +1,375 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp-crypto.h"
+
+
+static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
+{
+   struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+   struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+   struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+   if (ret)
+   return ret;
+
+   if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
+   memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+   return 0;
+}
+
+static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+   struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+   struct ccp_crypto_ablkcipher_alg *alg =
+   ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+
+   switch (key_len) {
+   case AES_KEYSIZE_128:
+   ctx->u.aes.type = CCP_AES_TYPE_128;
+   break;
+   case AES_KEYSIZE_192:
+   ctx->u.aes.type = CCP_AES_TYPE_192;
+   break;
+   case AES_KEYSIZE_256:
+   ctx->u.aes.type = CCP_AES_TYPE_256;
+   break;
+   default:
+   crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+   return -EINVAL;
+   }
+   ctx->u.aes.mode = alg->mode;
+   ctx->u.aes.key_len = key_len;
+
+   memcpy(ctx->u.aes.key, key, key_len);
+   sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+   return 0;
+}
+
+static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+   struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+   struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+   struct scatterlist *iv_sg = NULL;
+   unsigned int iv_len = 0;
+   int ret;
+
+   if (!ctx->u.aes.key_len) {
+   pr_err("AES key not set\n");
+   return -EINVAL;
+   }
+
+   if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
+(ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
+(ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+   (req->nbytes & (AES_BLOCK_SIZE - 1))) {
+   pr_err("AES request size is not a multiple of the block size\n");
+   return -EINVAL;
+   }
+
+   if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
+   if (!req->info) {
+   pr_err("AES IV not supplied");
+   return -EINVAL;
+   }
+
+   memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+   iv_sg = &rctx->iv_sg;
+   iv_len = AES_BLOCK_SIZE;
+   sg_init_one(iv_sg, rctx->iv, iv_len);
+   }
+
+   memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+   INIT_LIST_HEAD(&rctx->cmd.entry);
+   rctx->cmd.engine = CCP_ENGINE_AES;
+   rctx->cmd.u.aes.type = ctx->u.aes.type;
+   rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+   rctx->cmd.u.aes.action =
+   (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
+   rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+   rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+   rctx->cmd.u.aes.iv = iv_sg;
+   rctx->cmd.u.aes.iv_len = iv_len;
+   rctx->cmd.u.aes.src = req->src;
+   rctx->cmd.u.aes.src_len = req->nbytes;
+   rctx->cmd.u.aes.dst = req->dst;
+
+   ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+   return ret;
+}
+
+static int ccp_aes_encrypt(struct ablkcipher_request *req)
+{
+   return ccp_aes_crypt(req, true);
+}
+
+static int ccp_aes_decrypt(struct ablkcipher_request *req)
+{
+   return ccp_aes_crypt(req, false);
+}
+
+static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+{
+   struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   ctx->comp

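[The archive truncates the message here.  For orientation, a hedged
sketch, not code from the patch, of driving the "cbc(aes)" ablkcipher this
file registers (error handling trimmed; needs <crypto/aes.h>,
<linux/crypto.h>, <linux/scatterlist.h>):

	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_256], iv[AES_BLOCK_SIZE], buf[256];
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, sizeof(buf));	/* in place, block aligned */
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
	ret = crypto_ablkcipher_encrypt(req);	/* may return -EINPROGRESS */
]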