Re: [PATCH 1/9] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-04-03 Thread David Howells
Herbert Xu [EMAIL PROTECTED] wrote:

 Would it be possible to just use the existing scatterlist interface
 for now? We can simplify it later when things settle down.

I'll apply the attached patch for now and drop the bypass patch.  It's a bit
messy, but it does let me use the sg-list interface.

Note that it does paste stack space into sg-list elements, which I think
should be okay, and it asks the compiler for appropriately aligned bits
of stack.

David

diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index b3bd399..1eaf529 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -111,8 +111,11 @@ static void rxkad_prime_packet_security(struct 
rxrpc_connection *conn)
 {
struct rxrpc_key_payload *payload;
struct blkcipher_desc desc;
+   struct scatterlist sg[2];
struct rxrpc_crypt iv;
-   __be32 tmpbuf[4];
+   struct {
+   __be32 x[4];
+   } tmpbuf __attribute__((aligned(16))); /* must all be in same page */
 
_enter("");
 
@@ -126,16 +129,18 @@ static void rxkad_prime_packet_security(struct 
rxrpc_connection *conn)
desc.info = iv.x;
desc.flags = 0;
 
-   tmpbuf[0] = conn->epoch;
-   tmpbuf[1] = conn->cid;
-   tmpbuf[2] = 0;
-   tmpbuf[3] = htonl(conn->security_ix);
+   tmpbuf.x[0] = conn->epoch;
+   tmpbuf.x[1] = conn->cid;
+   tmpbuf.x[2] = 0;
+   tmpbuf.x[3] = htonl(conn->security_ix);
 
-   crypto_blkcipher_encrypt_kernel_iv(&desc, (void *) tmpbuf,
-  (void *) tmpbuf, sizeof(tmpbuf));
+   memset(sg, 0, sizeof(sg));
+   sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
+   sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+   crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
-   memcpy(&conn->csum_iv, &tmpbuf[2], sizeof(conn->csum_iv));
-   ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf[2]);
+   memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
+   ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);
 
_leave("");
 }
@@ -151,10 +156,11 @@ static int rxkad_secure_packet_auth(const struct 
rxrpc_call *call,
struct rxrpc_skb_priv *sp;
struct blkcipher_desc desc;
struct rxrpc_crypt iv;
+   struct scatterlist sg[2];
struct {
struct rxkad_level1_hdr hdr;
__be32  first;  /* first four bytes of data and padding */
-   } buf;
+   } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
u16 check;
 
sp = rxrpc_skb(skb);
@@ -164,8 +170,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call 
*call,
check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
data_size |= (u32) check << 16;
 
-   buf.hdr.data_size = htonl(data_size);
-   memcpy(&buf.first, sechdr + 4, sizeof(buf.first));
+   tmpbuf.hdr.data_size = htonl(data_size);
+   memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));
 
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -173,10 +179,12 @@ static int rxkad_secure_packet_auth(const struct 
rxrpc_call *call,
desc.info = iv.x;
desc.flags = 0;
 
-   crypto_blkcipher_encrypt_kernel_iv(&desc, (void *) &buf,
-  (void *) &buf, sizeof(buf));
+   memset(sg, 0, sizeof(sg));
+   sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
+   sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+   crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
-   memcpy(sechdr, &buf, sizeof(buf));
+   memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
 
_leave(" = 0");
return 0;
@@ -191,7 +199,8 @@ static int rxkad_secure_packet_encrypt(const struct 
rxrpc_call *call,
void *sechdr)
 {
const struct rxrpc_key_payload *payload;
-   struct rxkad_level2_hdr rxkhdr;
+   struct rxkad_level2_hdr rxkhdr
+   __attribute__((aligned(8))); /* must be all on one page */
struct rxrpc_skb_priv *sp;
struct blkcipher_desc desc;
struct rxrpc_crypt iv;
@@ -217,8 +226,10 @@ static int rxkad_secure_packet_encrypt(const struct 
rxrpc_call *call,
desc.info = iv.x;
desc.flags = 0;
 
-   crypto_blkcipher_encrypt_kernel_iv(&desc, (void *) sechdr,
-  (void *) &rxkhdr, sizeof(rxkhdr));
+   memset(sg, 0, sizeof(sg[0]) * 2);
+   sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr));
+   sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr));
+   crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
 
/* we want to encrypt the skbuff in-place */
nsg = skb_cow_data(skb, 0, &trailer);
@@ -246,7 +257,11 @@ static int rxkad_secure_packet(const struct rxrpc_call 
*call,
struct rxrpc_skb_priv *sp;
struct blkcipher_desc desc;
struct rxrpc_crypt iv;
-   __be32 tmpbuf[2], x;
+   struct scatterlist sg[2];
+   struct {
+   __be32 x[2];
+

Re: [PATCH 1/9] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-04-03 Thread Herbert Xu
On Tue, Apr 03, 2007 at 02:52:53PM +0100, David Howells wrote:
 
 Note that it does paste stack space into sg-list elements, which I think
 should be okay, and it asks the compiler for appropriately aligned bits
 of stack.

That'll do for now.

Thanks!
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmVHI~} [EMAIL PROTECTED]
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/9] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-04-02 Thread David Howells
Add blkcipher accessors for using kernel data directly without the use of
scatter lists.

Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
of DMA and hardware accelerators.  A hardware accelerator may not be used to
access any arbitrary piece of kernel memory lest it not be in a DMA'able
region.  Only software algorithms may do that.

If kernel data is going to be accessed directly, then CRYPTO_ALG_DMA must, for
instance, be passed in the mask of crypto_alloc_blkcipher(), but not the type.

This is used by AF_RXRPC to do quick encryptions, where the size of the data
being encrypted or decrypted is 8 bytes or, occasionally, 16 bytes (ie: one or
two chunks only), and since these data are generally on the stack they may be
split over two pages.  Because they're so small, and because they may be
misaligned, setting up a scatter-gather list is overly expensive.  It is very
unlikely that a hardware FCrypt PCBC engine will be encountered (there is not,
as far as I know, any such thing), and even if one is encountered, the
setup/teardown costs for such small transactions will almost certainly be
prohibitive.

Encrypting and decrypting whole packets, on the other hand, is done through the
scatter-gather list interface as the amount of data is sufficient that the
expense of doing virtual address to page calculations is sufficiently small by
comparison.

Signed-Off-By: David Howells [EMAIL PROTECTED]
---

 crypto/blkcipher.c |2 +
 crypto/pcbc.c  |   62 +
 include/linux/crypto.h |  118 
 3 files changed, 181 insertions(+), 1 deletions(-)

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index b5befe8..4498b2d 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -376,6 +376,8 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm 
*tfm, u32 type, u32 mask)
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+   crt->encrypt_kernel = alg->encrypt_kernel;
+   crt->decrypt_kernel = alg->decrypt_kernel;
 
addr = (unsigned long)crypto_tfm_ctx(tfm);
addr = ALIGN(addr, align);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 5174d7f..fa76111 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -126,6 +126,36 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_encrypt_kernel(struct blkcipher_desc *desc,
+ u8 *dst, const u8 *src,
+ unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+  CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
   struct blkcipher_walk *walk,
   struct crypto_cipher *tfm,
@@ -211,6 +241,36 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
return err;
 }
 
+static int crypto_pcbc_decrypt_kernel(struct blkcipher_desc *desc,
+ u8 *dst, const u8 *src,
+ unsigned int nbytes)
+{
+   struct blkcipher_walk walk;
+   struct crypto_blkcipher *tfm = desc->tfm;
+   struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+   struct crypto_cipher *child = ctx->child;
+   void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+   BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) &
+   CRYPTO_ALG_DMA);
+
+   if (nbytes == 0)
+   return 0;
+
+   memset(&walk, 0, sizeof(walk));
+   walk.src.virt.addr = (u8 *) src;
+   walk.dst.virt.addr = (u8 *) dst;
+   walk.nbytes = nbytes;
+   walk.total = nbytes;
+   walk.iv = desc->info;
+
+   if (walk.src.virt.addr == walk.dst.virt.addr)
+   nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child, xor);
+   else
+   nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child, xor);
+   return 0;
+}
+
 static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
 {
do {
@@ -313,6 +373,8 @@ static struct crypto_instance 

Re: [PATCH 1/9] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-04-02 Thread David Miller
From: David Howells [EMAIL PROTECTED]
Date: Mon, 02 Apr 2007 23:44:58 +0100

 Add blkcipher accessors for using kernel data directly without the
 use of scatter lists.

 Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
 of DMA and hardware accelerators.  A hardware accelerator may not be used to
 access any arbitrary piece of kernel memory lest it not be in a DMA'able
 region.  Only software algorithms may do that.

I'll let Herbert, the crypto layer maintainer, decide on this one.
I can put it into net-2.6.22 in order to make the RXRPC integration
easier if Herbert is OK with this.

Please be sure to CC: Herbert and/or the crypto lists on any future
crypto layer patches David, thanks.
-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/9] AF_RXRPC: Add blkcipher accessors for using kernel data directly

2007-04-02 Thread Herbert Xu
On Mon, Apr 02, 2007 at 08:22:15PM -0700, David Miller wrote:
 
 I'll let Herbert, the crypto layer maintainer, decide on this one.
 I can put it into net-2.6.22 in order to make the RXRPC integration
 easier if Herbert is OK with this.
 
 Please be sure to CC: Herbert and/or the crypto lists on any future
 crypto layer patches David, thanks.

I'm OK with the concept but this is going to conflict with the async
stuff that I'm about to push into cryptodev-2.6.

Would it be possible to just use the existing scatterlist interface
for now? We can simplify it later when things settle down.

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmVHI~} [EMAIL PROTECTED]
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
-
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html