Every field in struct aesni_xts_ctx is a byte array sized to hold a
struct crypto_aes_ctx. Each field can therefore be redefined with that
struct type directly, instead of the opaque byte array.

Once that is done, the address of struct aesni_xts_ctx only needs to be
aligned once, up front. This also simplifies the runtime alignment code.

Redefine struct aesni_xts_ctx, and align its address up front. Refactor
the common alignment code into a helper, and then remove the
now-unneeded per-field alignment of the old byte arrays.

Suggested-by: Eric Biggers <ebigg...@kernel.org>
Signed-off-by: Chang S. Bae <chang.seok....@intel.com>
Cc: Herbert Xu <herb...@gondor.apana.org.au>
Cc: "David S. Miller" <da...@davemloft.net>
Cc: Eric Biggers <ebigg...@kernel.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: x...@kernel.org
Cc: linux-cry...@vger.kernel.org
Cc: linux-ker...@vger.kernel.org
---
Changes from v6:
* Add as a new patch. (Eric Biggers)

This cleanup is best applied before the preparatory AES-NI code rework.
---
 arch/x86/crypto/aesni-intel_glue.c | 38 +++++++++++++++++-------------
 1 file changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index a5b0cb3efeba..97a1629b84c4 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -61,8 +61,8 @@ struct generic_gcmaes_ctx {
 };
 
 struct aesni_xts_ctx {
-       u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
-       u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+       struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
+       struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
 };
 
 #define GCM_BLOCK_LEN 16
@@ -219,14 +219,20 @@ generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
 }
 #endif
 
+static inline unsigned long aes_align_addr(unsigned long addr)
+{
+       return (crypto_tfm_ctx_alignment() >= AESNI_ALIGN) ?
+              ALIGN(addr, 1) : ALIGN(addr, AESNI_ALIGN);
+}
+
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
-       unsigned long addr = (unsigned long)raw_ctx;
-       unsigned long align = AESNI_ALIGN;
+       return (struct crypto_aes_ctx *)aes_align_addr((unsigned long)raw_ctx);
+}
 
-       if (align <= crypto_tfm_ctx_alignment())
-               align = 1;
-       return (struct crypto_aes_ctx *)ALIGN(addr, align);
+static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
+{
+       return (struct aesni_xts_ctx *)aes_align_addr((unsigned long)crypto_skcipher_ctx(tfm));
 }
 
 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
@@ -883,7 +889,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
 {
-       struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
        int err;
 
        err = xts_verify_key(tfm, key, keylen);
@@ -893,20 +899,20 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
        keylen /= 2;
 
        /* first half of xts-key is for crypt */
-       err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+       err = aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->crypt_ctx,
                                 key, keylen);
        if (err)
                return err;
 
        /* second half of xts-key is for tweak */
-       return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+       return aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->tweak_ctx,
                                  key + keylen, keylen);
 }
 
 static int xts_crypt(struct skcipher_request *req, bool encrypt)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
        int tail = req->cryptlen % AES_BLOCK_SIZE;
        struct skcipher_request subreq;
        struct skcipher_walk walk;
@@ -942,7 +948,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
        kernel_fpu_begin();
 
        /* calculate first value of T */
-       aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
+       aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
 
        while (walk.nbytes > 0) {
                int nbytes = walk.nbytes;
@@ -951,11 +957,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                        nbytes &= ~(AES_BLOCK_SIZE - 1);
 
                if (encrypt)
-                       aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+                       aesni_xts_encrypt(&ctx->crypt_ctx,
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          nbytes, walk.iv);
                else
-                       aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+                       aesni_xts_decrypt(&ctx->crypt_ctx,
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          nbytes, walk.iv);
                kernel_fpu_end();
@@ -983,11 +989,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 
                kernel_fpu_begin();
                if (encrypt)
-                       aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+                       aesni_xts_encrypt(&ctx->crypt_ctx,
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          walk.nbytes, walk.iv);
                else
-                       aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+                       aesni_xts_decrypt(&ctx->crypt_ctx,
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          walk.nbytes, walk.iv);
                kernel_fpu_end();
-- 
2.17.1

--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to