[patch 1/3] crypto: padlock-aes: work around Nano CPU errata in ECB mode

2009-06-09 Thread Chuck Ebbert
From: Chuck Ebbert <cebb...@redhat.com>
crypto: padlock-aes: work around Nano CPU errata in ECB mode

The VIA Nano processor has a bug that makes it prefetch extra data
during encryption operations, causing spurious page faults. Extend
existing workarounds for ECB mode to copy the data to a temporary
buffer to avoid the problem.

Signed-off-by: Chuck Ebbert <cebb...@redhat.com>
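
For readers unfamiliar with the trick, here is a minimal standalone sketch
of the bounce-buffer pattern used below (plain C; the names and the 8-block
over-fetch window are illustrative assumptions, not the kernel code itself):

    #include <string.h>

    #define PAGE_SIZE   4096UL
    #define BLOCK       16u    /* AES block size */
    #define MAX_BLOCKS  8u     /* assumed worst-case over-fetch window */

    /* Stand-in for a hardware primitive that may read past its input. */
    static void crypt_blocks(const unsigned char *in, unsigned char *out,
                             unsigned int blocks)
    {
        memcpy(out, in, blocks * BLOCK);    /* placeholder transform */
    }

    /* If the input ends too close to a page boundary, crypt a stack copy
     * instead, so the hardware's extra reads hit mapped memory. */
    static void crypt_safe(const unsigned char *in, unsigned char *out,
                           unsigned int blocks)    /* blocks <= MAX_BLOCKS */
    {
        unsigned char tmp[MAX_BLOCKS * BLOCK];

        if (((unsigned long)in & (PAGE_SIZE - 1)) + MAX_BLOCKS * BLOCK
            > PAGE_SIZE) {
            memcpy(tmp, in, blocks * BLOCK);
            in = tmp;
        }
        crypt_blocks(in, out, blocks);
    }

    int main(void)
    {
        unsigned char in[2 * BLOCK] = { 0 }, out[2 * BLOCK];

        crypt_safe(in, out, 2u);
        return 0;
    }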

--- work-2.6.29.4.orig/drivers/crypto/padlock-aes.c
+++ work-2.6.29.4/drivers/crypto/padlock-aes.c
@@ -18,9 +18,17 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <asm/processor.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
+/* number of data blocks actually fetched for each xcrypt insn */
+static unsigned int ecb_fetch_blocks = 2;
+static unsigned int cbc_fetch_blocks = 1;
+
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
 /* Control word. */
 struct cword {
unsigned int __attribute__ ((__packed__))
@@ -169,54 +177,59 @@ static inline void padlock_store_cword(s
  */
 
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
- struct cword *control_word)
+ struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(1));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   struct cword *cword, int count)
 {
-	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-   memcpy(tmp, in, AES_BLOCK_SIZE);
-   padlock_xcrypt(tmp, out, key, cword);
+   memcpy(tmp, in, count * AES_BLOCK_SIZE);
+   padlock_xcrypt(tmp, out, key, cword, count);
 }
 
 static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-struct cword *cword)
+struct cword *cword, int count)
 {
-	/* padlock_xcrypt requires at least two blocks of data. */
-	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-		       (PAGE_SIZE - 1)))) {
-		aes_crypt_copy(in, out, key, cword);
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes >
+		     PAGE_SIZE)) {
+		aes_crypt_copy(in, out, key, cword, count);
return;
}
 
-   padlock_xcrypt(in, out, key, cword);
+   padlock_xcrypt(in, out, key, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
  void *control_word, u32 count)
 {
-   if (count == 1) {
-   aes_crypt(input, output, key, control_word);
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
+	if (count < ecb_fetch_blocks) {
+   aes_crypt(input, output, key, control_word, count);
return;
}
 
-	asm volatile ("test $1, %%cl;"
-		      "je 1f;"
-		      "lea -1(%%ecx), %%eax;"
-		      "mov $1, %%ecx;"
-		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
-		      "mov %%eax, %%ecx;"
-		      "1:"
-		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+			      : "+S"(input), "+D"(output)
+			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count)
-		      : "ax");
+		      : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -236,7 +249,7 @@ static void aes_encrypt(struct crypto_tf
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -248,7 +261,7 @@ static void aes_decrypt(struct crypto_tf
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
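
The subtle part of the new padlock_xcrypt_ecb() is the remainder split:
the odd blocks (count modulo the fetch size) are crypted first, while the
rest of the caller's buffer still follows them in memory, so the hardware's
over-fetch stays inside mapped data. A small standalone illustration
(hypothetical helper; assumes the fetch size is a power of two, as the
driver does):

    #include <stdio.h>

    /* Mirrors the split in padlock_xcrypt_ecb(): remainder first,
     * then the multiple-of-fetch-size bulk. */
    static void split(unsigned int count, unsigned int fetch_blocks)
    {
        unsigned int initial = count & (fetch_blocks - 1);

        printf("xcrypt #1: %u block(s), xcrypt #2: %u block(s)\n",
               initial, count - initial);
    }

    int main(void)
    {
        split(5, 2);    /* -> xcrypt #1: 1, xcrypt #2: 4 */
        split(8, 2);    /* -> 0 and 8; the driver skips the first xcrypt */
        return 0;
    }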

[patch 2/3] crypto: padlock-aes: work around Nano CPU errata in CBC mode

2009-06-09 Thread Chuck Ebbert
From: Chuck Ebbert <cebb...@redhat.com>
crypto: padlock-aes: work around Nano CPU errata in CBC mode

Extend the previous workarounds for the prefetch bug to cover CBC mode,
and clean up the code a bit.

Signed-off-by: Chuck Ebbert <cebb...@redhat.com>
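
As background for the helpers below: in CBC mode the chaining value after a
run is simply the last ciphertext block, which the xcrypt unit leaves in the
IV register; that is why the new helpers return a u8 pointer. A standalone
software sketch of that contract (illustrative names and a placeholder block
cipher, not the driver code):

    #include <string.h>

    #define BLOCK 16u

    /* Placeholder single-block cipher so the sketch is self-contained. */
    static void encrypt_block(const unsigned char in[BLOCK],
                              unsigned char out[BLOCK])
    {
        memcpy(out, in, BLOCK);    /* identity stand-in for AES */
    }

    /* CBC-encrypt 'blocks' blocks; returns a pointer to the updated IV,
     * i.e. the last ciphertext block, mirroring rep_xcrypt_cbc(). */
    static unsigned char *cbc_encrypt(const unsigned char *in,
                                      unsigned char *out,
                                      unsigned char *iv, unsigned int blocks)
    {
        unsigned int i, j;

        for (i = 0; i < blocks; i++) {
            unsigned char x[BLOCK];

            for (j = 0; j < BLOCK; j++)
                x[j] = in[i * BLOCK + j] ^ iv[j];
            encrypt_block(x, &out[i * BLOCK]);
            iv = &out[i * BLOCK];    /* next IV = this ciphertext block */
        }
        return iv;
    }

    int main(void)
    {
        unsigned char iv[BLOCK] = { 0 }, in[4 * BLOCK] = { 0 }, out[4 * BLOCK];
        unsigned char *niv = cbc_encrypt(in, out, iv, 4u);

        (void)niv;    /* would seed the next call on the same stream */
        return 0;
    }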

--- work-2.6.29.4.orig/drivers/crypto/padlock-aes.c
+++ work-2.6.29.4/drivers/crypto/padlock-aes.c
@@ -22,11 +22,16 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
-/* number of data blocks actually fetched for each xcrypt insn */
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
 static unsigned int ecb_fetch_blocks = 2;
-static unsigned int cbc_fetch_blocks = 1;
-
+#define MAX_ECB_FETCH_BLOCKS (8)
 #define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
 #define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
 
 /* Control word. */
@@ -176,7 +181,7 @@ static inline void padlock_store_cword(s
  * should be used only inside the irq_ts_save/restore() context
  */
 
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
@@ -184,32 +189,65 @@ static inline void padlock_xcrypt(const 
 		      : "+S"(input), "+D"(output)
 		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+u8 *iv, struct cword *control_word, int count)
+{
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+   return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
   struct cword *cword, int count)
 {
 	/*
 	 * Padlock prefetches extra data so we must provide mapped input buffers.
 	 * Assume there are at least 16 bytes of stack already in use.
 	 */
-	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
+	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
memcpy(tmp, in, count * AES_BLOCK_SIZE);
-   padlock_xcrypt(tmp, out, key, cword, count);
+   rep_xcrypt_ecb(tmp, out, key, cword, count);
 }
 
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+  u8 *iv, struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+   memcpy(tmp, in, count * AES_BLOCK_SIZE);
+   return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
+}
+
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
 struct cword *cword, int count)
 {
/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
 * We could avoid some copying here but it's probably not worth it.
 */
 	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes >
 		     PAGE_SIZE)) {
-   aes_crypt_copy(in, out, key, cword, count);
+   ecb_crypt_copy(in, out, key, cword, count);
return;
}
 
-   padlock_xcrypt(in, out, key, cword, count);
+   rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+   u8 *iv, struct cword *cword, int count)
+{
+   /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes >
+		     PAGE_SIZE))
+   return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+   return rep_xcrypt_cbc(in, out, key, iv, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -218,7 +256,7 @@ static inline void padlock_xcrypt_ecb(co
 	u32 initial = count & (ecb_fetch_blocks - 1);
 
 	if (count < ecb_fetch_blocks) {
-   aes_crypt(input, output, key, control_word, count);
+   ecb_crypt(input, output, key, control_word, count);
return;
}
 
@@ -235,10 +273,19 @@ static inline void padlock_xcrypt_ecb(co
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 u8 *iv, void *control_word, u32 count)
 {
-   /* rep xcryptcbc */
-   asm volatile 

[patch 3/3] crypto: padlock-aes: enable on 64-bit kernels

2009-06-09 Thread Chuck Ebbert
From: Sebastian Andrzej Siewior <sebast...@breakpoint.cc>
crypto: padlock-aes: enable on 64-bit kernels

The only required change now is using the right push/pop instruction
on x86-64. Taken from the original patch by Sebastian Andrzej Siewior.
(Added a dependency on X86.)

Signed-off-by: Sebastian Andrzej Siewior <sebast...@breakpoint.cc>
Signed-off-by: Chuck Ebbert <cebb...@redhat.com>

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
 
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
-	depends on X86_32 && !UML
+	depends on X86 && !UML
select CRYPTO_ALGAPI
help
  Some VIA processors come with an integrated crypto engine
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3f0fdd1..ddd27c7 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
int cpu = raw_smp_processor_id();
 
if (cword != per_cpu(last_cword, cpu))
+#ifndef CONFIG_X86_64
 		asm volatile ("pushfl; popfl");
+#else
+		asm volatile ("pushfq; popfq");
+#endif
 }
 
 static inline void padlock_store_cword(struct cword *cword)
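
For context: the flags round-trip is there because the PadLock unit keeps an
internal "key loaded" flag that is cleared by any write to EFLAGS, so the
push/pop forces a key and control word reload on the next xcrypt (that
description is an assumption taken from the PadLock programming docs). On
x86-64 the l-suffixed pair doesn't assemble, hence pushfq/popfq. A hedged
standalone sketch of the same dispatch:

    /* Sketch only: force the PadLock unit to reload key material by
     * writing the flags register; width differs between 32- and 64-bit. */
    static inline void flags_roundtrip(void)
    {
    #ifdef __x86_64__
        asm volatile("pushfq; popfq");
    #else
        asm volatile("pushfl; popfl");
    #endif
    }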


Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano

2009-06-09 Thread Chuck Ebbert
On Tue, 9 Jun 2009 10:35:33 -0400
Chuck Ebbert <cebb...@redhat.com> wrote:

> The VIA Nano has a bug that makes the padlock unit fetch extra data
> during encryption operations. Add workarounds for that, and enable
> the driver on x86_64.
> 
> 1/3 Fix ECB encryption mode
> 2/3 Fix CBC mode, clean up code
> 3/3 Enable building for 64-bit kernels

Forgot to mention, I have 64-bit working on a Samsung NC20 with LVM
encrypted disks using AES. The cryptomgr self-tests pass too. Before
the prefetch fixes it was oopsing near page boundaries when trying to
decrypt disk blocks.



Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano

2009-06-09 Thread Sebastian Andrzej Siewior
* Chuck Ebbert | 2009-06-09 10:35:33 [-0400]:

>The VIA Nano has a bug that makes the padlock unit fetch extra data
>during encryption operations. Add workarounds for that, and enable
>the driver on x86_64.

Nice. The x86_64 padlock support will make it into mainline in the next
merge window, so I'm kindly asking you to rebase it against Herbert's
cryptodev tree [0].

I guess the bug can trigger on 32-bit if you boot the affected 64-bit CPU
in 32-bit mode? I'm not sure whether it is better to send these patches
via the stable tree _or_ to deactivate the padlock on the affected CPUs.

[0]
git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git

Sebastian


Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano

2009-06-09 Thread Harald Welte
On Tue, Jun 09, 2009 at 10:35:33AM -0400, Chuck Ebbert wrote:
> The VIA Nano has a bug that makes the padlock unit fetch extra data
> during encryption operations. Add workarounds for that, and enable
> the driver on x86_64.

Thanks!

Where did you get the errata from, and what kind of document / revision is it?
I have not seen that document so far.

I've asked Centaur to confirm the errata, and also inquired whether there
will be a microcode update or a new stepping to fix it.  If so, we should
add proper checks to the workaround to make sure it's only enabled when
we need it.

Out of curiosity: Only the ACE (crypto) is affected, not PHE (hashing)?

I'll also put some actual benchmarking on my todo list, to determine how
much impact this bug has.  I expect that especially for storage encryption,
many crypto operations will be page aligned and thus need the extra memcpy :(

p.s.: the patch to enable padlock on x86_64 is already in the crypto-dev tree,
as far as I know.  Interestingly, I have not observed this problem so far,
despite running dm-crypt on a nano for quite some time. 

Cheers,
-- 
- Harald Welte <haraldwe...@viatech.com>          http://linux.via.com.tw/

VIA Free and Open Source Software Liaison