[PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly [try #3]

2007-03-20 Thread David Howells
Add blkcipher accessors for using kernel data directly without the use of
scatter lists.

Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
of DMA and hardware accelerators.  A hardware accelerator may not be used to
access any arbitrary piece of kernel memory lest it not be in a DMA'able
region.  Only software algorithms may do that.

If kernel data is going to be accessed directly, then CRYPTO_ALG_DMA must, for
instance, be passed in the mask of crypto_alloc_blkcipher(), but not the type.

This is used by AF_RXRPC to do quick encryptions, where the size of the data
being encrypted or decrypted is 8 bytes or, occasionally, 16 bytes (ie: one or
two chunks only), and since these data are generally on the stack they may be
split over two pages.  Because they're so small, and because they may be
misaligned, setting up a scatter-gather list is overly expensive.  It is very
unlikely that a hardware FCrypt PCBC engine will be encountered (there is not,
as far as I know, any such thing), and even if one is encountered, the
setup/teardown costs for such small transactions will almost certainly be
prohibitive.

Encrypting and decrypting whole packets, on the other hand, is done through the
scatter-gather list interface as the amount of data is sufficient that the
expense of doing virtual address to page calculations is sufficiently small by
comparison.

Signed-Off-By: David Howells [EMAIL PROTECTED]
---

 crypto/blkcipher.c |2 +
 crypto/pcbc.c  |   62 +
 include/linux/crypto.h |  118 
 3 files changed, 181 insertions(+), 1 deletions(-)

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index b5befe8..4498b2d 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -376,6 +376,8 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm 
*tfm, u32 type, u32 mask)
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+   crt->encrypt_kernel = alg->encrypt_kernel;
+   crt->decrypt_kernel = alg->decrypt_kernel;
 
addr = (unsigned long)crypto_tfm_ctx(tfm);
addr = ALIGN(addr, align);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 5174d7f..fa76111 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -126,6 +126,36 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_encrypt_kernel(struct blkcipher_desc *desc,
+                                     u8 *dst, const u8 *src,
+                                     unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+          CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
   struct blkcipher_walk *walk,
   struct crypto_cipher *tfm,
@@ -211,6 +241,36 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_decrypt_kernel(struct blkcipher_desc *desc,
+                                     u8 *dst, const u8 *src,
+                                     unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+          CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
 {
do {
@@ -313,6 +373,8 @@ static struct crypto_instance 

[PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly [try #2]

2007-03-16 Thread David Howells
Add blkcipher accessors for using kernel data directly without the use of
scatter lists.

Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
of DMA and hardware accelerators.  A hardware accelerator may not be used to
access any arbitrary piece of kernel memory lest it not be in a DMA'able
region.  Only software algorithms may do that.

If kernel data is going to be accessed directly, then CRYPTO_ALG_DMA must, for
instance, be passed in the mask of crypto_alloc_blkcipher(), but not the type.

Signed-Off-By: David Howells [EMAIL PROTECTED]
---

 crypto/blkcipher.c |2 ++
 crypto/pcbc.c  |   62 
 include/linux/crypto.h |   53 -
 3 files changed, 116 insertions(+), 1 deletions(-)

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index b5befe8..4498b2d 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -376,6 +376,8 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm 
*tfm, u32 type, u32 mask)
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+   crt->encrypt_kernel = alg->encrypt_kernel;
+   crt->decrypt_kernel = alg->decrypt_kernel;
 
addr = (unsigned long)crypto_tfm_ctx(tfm);
addr = ALIGN(addr, align);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 5174d7f..fa76111 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -126,6 +126,36 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_encrypt_kernel(struct blkcipher_desc *desc,
+                                     u8 *dst, const u8 *src,
+                                     unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+          CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
   struct blkcipher_walk *walk,
   struct crypto_cipher *tfm,
@@ -211,6 +241,36 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_decrypt_kernel(struct blkcipher_desc *desc,
+                                     u8 *dst, const u8 *src,
+                                     unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+          CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
 {
do {
@@ -313,6 +373,8 @@ static struct crypto_instance *crypto_pcbc_alloc(void 
*param, unsigned int len)
inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+   inst->alg.cra_blkcipher.encrypt_kernel = crypto_pcbc_encrypt_kernel;
+   inst->alg.cra_blkcipher.decrypt_kernel = crypto_pcbc_decrypt_kernel;
 
 out_put_alg:
crypto_mod_put(alg);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 779aa78..ce092fe 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -40,7 +40,10 @@
 #define CRYPTO_ALG_LARVAL  0x0010
 #define CRYPTO_ALG_DEAD    0x0020
 #define CRYPTO_ALG_DYING   0x0040
-#define CRYPTO_ALG_ASYNC   0x0080
+
+#define CRYPTO_ALG_CAP_MASK  0x0180  /* capabilities mask */
+#define CRYPTO_ALG_ASYNC     0x0080  /* capable of async operation */
+#define CRYPTO_ALG_DMA       0x0100  /* capable of using of DMA */

Re: [PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly [try #2]

2007-03-16 Thread Christoph Hellwig
On Fri, Mar 16, 2007 at 12:50:16PM +, David Howells wrote:
 Add blkcipher accessors for using kernel data directly without the use of
 scatter lists.

I don't quite understand all these indirections.  What's the problem
with just having a helper that builds the scatterlist for you?

 Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
 of DMA and hardware accelerators.  A hardware accelerator may not be used to
 access any arbitrary piece of kernel memory lest it not be in a DMA'able
 region.  Only software algorithms may do that.

We allow dma access to arbitrary pieces of _dynamically_ allocated kernel
memory, and I think using the crypto subsystem on the stack is not allowed
at all.


But the even bigger question is, how does this relate to rxrpc?

 +static inline
 +void crypto_blkcipher_decrypt_kernel_iv(struct blkcipher_desc *desc,

very odd line split, please put the function name on the same line
as the qualifiers, or everything including the void on a line of its
own.  The first option seems to be the preferred style in this file.

-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly [try #2]

2007-03-16 Thread David Howells
Christoph Hellwig [EMAIL PROTECTED] wrote:

 I don't quite understand all these indirections.  What's the problem
 with just having a helper that builds the scatterlist for you?

I was trying to avoid building a scatterlist completely.  There's not much
point because the scatterlist approach involves finding out the page struct and
then kmapping it just so that the FCrypt algorithm can read 8 or 16 bytes of
data from kernel space.  Why do that if we can avoid it?  It's a waste of
processing time, and has to be done on every secure packet.

 We allow dma access to arbitrary pieces of _dynamically_ allocated kernel
 memory, and I think using the crypto subsystem on the stack is not allowed
 at all.

FCrypt is only available in software as far as I know.  For producing and
checking packet signatures, using hardware support would be a waste of time as
the size of the crunched data is so small (a single 8-byte fragment per
packet).

 But the even bigger question is, how does this relate to rxrpc?

RxRPC has security features, up to and including full packet content encryption
if you select it.

 very odd line split

It's not really odd.  The static and inline don't actually add anything to
the function template.  They're merely scope limiters / optimisation
controllers, and so make a lot of sense placed on their own line.

David
-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly [try #2]

2007-03-16 Thread Alan Cox
  very odd line split
 
 It's not really odd.  The static and inline don't actually add anything to
 the function template.  They're merely scope limiters / optimisation
 controllers, and so make a lot of sense placed on their own line.

As do many things but the goal of the coding style is consistency and
almost all other code doesn't have the static inline wasting an extra
display line.


-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly [try #2]

2007-03-16 Thread David Howells
Alan Cox [EMAIL PROTECTED] wrote:

 As do many things but the goal of the coding style is consistency and
 almost all other code doesn't have the static inline wasting an extra
 display line.

Actually it doesn't waste an extra display line.  Either the static inline is
on a line of its own, or the first argument is on a line of its own.  Either
way it uses up two lines - unless you want to split it up further and have
four lines as per Christoph's second suggestion (static inline / void /
funcname / 1st arg).

David
-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-03-08 Thread David Howells
Add blkcipher accessors for using kernel data directly without the use of
scatter lists.

Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
of DMA and hardware accelerators.  A hardware accelerator may not be used to
access any arbitrary piece of kernel memory lest it not be in a DMA'able
region.  Only software algorithms may do that.

If kernel data is going to be accessed directly, then CRYPTO_ALG_DMA must, for
instance, be passed in the mask of crypto_alloc_blkcipher(), but not the type.

Signed-Off-By: David Howells [EMAIL PROTECTED]
---

 crypto/blkcipher.c |2 ++
 crypto/pcbc.c  |   62 
 include/linux/crypto.h |   53 -
 3 files changed, 116 insertions(+), 1 deletions(-)

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index b5befe8..4498b2d 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -376,6 +376,8 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm 
*tfm, u32 type, u32 mask)
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+   crt->encrypt_kernel = alg->encrypt_kernel;
+   crt->decrypt_kernel = alg->decrypt_kernel;
 
addr = (unsigned long)crypto_tfm_ctx(tfm);
addr = ALIGN(addr, align);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 5174d7f..fa76111 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -126,6 +126,36 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_encrypt_kernel(struct blkcipher_desc *desc,
+                                     u8 *dst, const u8 *src,
+                                     unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+          CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
   struct blkcipher_walk *walk,
   struct crypto_cipher *tfm,
@@ -211,6 +241,36 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_decrypt_kernel(struct blkcipher_desc *desc,
+                                     u8 *dst, const u8 *src,
+                                     unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+          CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
 {
do {
@@ -313,6 +373,8 @@ static struct crypto_instance *crypto_pcbc_alloc(void 
*param, unsigned int len)
inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+   inst->alg.cra_blkcipher.encrypt_kernel = crypto_pcbc_encrypt_kernel;
+   inst->alg.cra_blkcipher.decrypt_kernel = crypto_pcbc_decrypt_kernel;
 
 out_put_alg:
crypto_mod_put(alg);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 779aa78..ce092fe 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -40,7 +40,10 @@
 #define CRYPTO_ALG_LARVAL  0x0010
 #define CRYPTO_ALG_DEAD    0x0020
 #define CRYPTO_ALG_DYING   0x0040
-#define CRYPTO_ALG_ASYNC   0x0080
+
+#define CRYPTO_ALG_CAP_MASK  0x0180  /* capabilities mask */
+#define CRYPTO_ALG_ASYNC     0x0080  /* capable of async operation */
+#define CRYPTO_ALG_DMA       0x0100  /* capable of using of DMA */

Re: [PATCH 1/5] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-03-08 Thread Valdis . Kletnieks
On Thu, 08 Mar 2007 22:48:29 GMT, David Howells said:
 
 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
 index 779aa78..ce092fe 100644
 --- a/include/linux/crypto.h
 +++ b/include/linux/crypto.h
 @@ -40,7 +40,10 @@
 #define CRYPTO_ALG_LARVAL  0x0010
 #define CRYPTO_ALG_DEAD    0x0020
 #define CRYPTO_ALG_DYING   0x0040
 -#define CRYPTO_ALG_ASYNC  0x0080
 +
 +#define CRYPTO_ALG_CAP_MASK  0x0180  /* capabilities mask */
 +#define CRYPTO_ALG_ASYNC 0x0080  /* capable of async 
 operation */
 +#define CRYPTO_ALG_DMA   0x0100  /* capable of 
 using of DMA */

Would it make sense to define ALG_CAP_MASK as 0xF80 or similar, to reserve
a few bits? The alternative has somebody else grabbing 0x200 for some other
purpose, and then when you want to add another capability bit, you end up
with a CAP_MASK of 0x580 - this way leads to madness and bugs


pgpLOkxmjXka5.pgp
Description: PGP signature