On Fri, Feb 1, 2019 at 8:52 AM Eric Biggers <ebigg...@kernel.org> wrote:
> From: Eric Biggers <ebigg...@google.com>
>
> The x86 AEGIS implementations all fail the improved AEAD tests because
> they produce the wrong result with some data layouts.  The issue is that
> they assume that if the skcipher_walk API gives 'nbytes' not aligned to
> the walksize (a.k.a. walk.stride), then it is the end of the data.  In
> fact, this can happen before the end.
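>
> Concretely, a correct loop (a sketch of the pattern applied below,
> with BLOCK_SIZE standing in for the per-variant AEGIS*_BLOCK_SIZE)
> keeps consuming full blocks for as long as the walk provides at least
> one, returns any partial block to skcipher_walk_done(), and treats a
> short chunk as the tail only once fewer than BLOCK_SIZE bytes remain:
>
>         while (walk->nbytes >= BLOCK_SIZE) {
>                 /* A partial block here need not be the end of the
>                  * data; hand the remainder back to the walk. */
>                 ops->crypt_blocks(state,
>                                   round_down(walk->nbytes, BLOCK_SIZE),
>                                   walk->src.virt.addr, walk->dst.virt.addr);
>                 skcipher_walk_done(walk, walk->nbytes % BLOCK_SIZE);
>         }
>         if (walk->nbytes) {
>                 /* Fewer than BLOCK_SIZE bytes remain: this really is
>                  * the tail. */
>                 ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
>                                 walk->dst.virt.addr);
>                 skcipher_walk_done(walk, 0);
>         }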
>
> Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
> incorrectly sleep in the skcipher_walk_*() functions while preemption
> has been disabled by kernel_fpu_begin().
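>
> The latter is fixed by initializing the walk before entering the FPU
> section and passing atomic=true, so that the skcipher_walk_*() calls
> made while preemption is disabled never sleep (sketch, mirroring the
> change below):
>
>         ops->skcipher_walk_init(&walk, req, true);  /* atomic=true */
>
>         kernel_fpu_begin();
>         /* init, process AD, process crypt data, finalize ... */
>         kernel_fpu_end();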
>
> Fix these bugs.
>
> Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
> Cc: <sta...@vger.kernel.org> # v4.18+
> Cc: Ondrej Mosnacek <omosn...@redhat.com>
> Signed-off-by: Eric Biggers <ebigg...@google.com>

Reviewed-by: Ondrej Mosnacek <omosn...@redhat.com>

> ---
>  arch/x86/crypto/aegis128-aesni-glue.c  | 38 ++++++++++----------------
>  arch/x86/crypto/aegis128l-aesni-glue.c | 38 ++++++++++----------------
>  arch/x86/crypto/aegis256-aesni-glue.c  | 38 ++++++++++----------------
>  3 files changed, 45 insertions(+), 69 deletions(-)
>
> diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
> index 2a356b948720e..3ea71b8718135 100644
> --- a/arch/x86/crypto/aegis128-aesni-glue.c
> +++ b/arch/x86/crypto/aegis128-aesni-glue.c
> @@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
>  }
>
>  static void crypto_aegis128_aesni_process_crypt(
> -               struct aegis_state *state, struct aead_request *req,
> +               struct aegis_state *state, struct skcipher_walk *walk,
>                 const struct aegis_crypt_ops *ops)
>  {
> -       struct skcipher_walk walk;
> -       u8 *src, *dst;
> -       unsigned int chunksize, base;
> -
> -       ops->skcipher_walk_init(&walk, req, false);
> -
> -       while (walk.nbytes) {
> -               src = walk.src.virt.addr;
> -               dst = walk.dst.virt.addr;
> -               chunksize = walk.nbytes;
> -
> -               ops->crypt_blocks(state, chunksize, src, dst);
> -
> -               base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
> -               src += base;
> -               dst += base;
> -               chunksize &= AEGIS128_BLOCK_SIZE - 1;
> -
> -               if (chunksize > 0)
> -                       ops->crypt_tail(state, chunksize, src, dst);
> +       while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
> +               ops->crypt_blocks(state,
> +                                 round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
> +                                 walk->src.virt.addr, walk->dst.virt.addr);
> +               skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
> +       }
>
> -               skcipher_walk_done(&walk, 0);
> +       if (walk->nbytes) {
> +               ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
> +                               walk->dst.virt.addr);
> +               skcipher_walk_done(walk, 0);
>         }
>  }
>
> @@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
>  {
>         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>         struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
> +       struct skcipher_walk walk;
>         struct aegis_state state;
>
> +       ops->skcipher_walk_init(&walk, req, true);
> +
>         kernel_fpu_begin();
>
>         crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
>         crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
> -       crypto_aegis128_aesni_process_crypt(&state, req, ops);
> +       crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
>         crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
>
>         kernel_fpu_end();
> diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
> index dbe8bb980da15..1b1b39c66c5e2 100644
> --- a/arch/x86/crypto/aegis128l-aesni-glue.c
> +++ b/arch/x86/crypto/aegis128l-aesni-glue.c
> @@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
>  }
>
>  static void crypto_aegis128l_aesni_process_crypt(
> -               struct aegis_state *state, struct aead_request *req,
> +               struct aegis_state *state, struct skcipher_walk *walk,
>                 const struct aegis_crypt_ops *ops)
>  {
> -       struct skcipher_walk walk;
> -       u8 *src, *dst;
> -       unsigned int chunksize, base;
> -
> -       ops->skcipher_walk_init(&walk, req, false);
> -
> -       while (walk.nbytes) {
> -               src = walk.src.virt.addr;
> -               dst = walk.dst.virt.addr;
> -               chunksize = walk.nbytes;
> -
> -               ops->crypt_blocks(state, chunksize, src, dst);
> -
> -               base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
> -               src += base;
> -               dst += base;
> -               chunksize &= AEGIS128L_BLOCK_SIZE - 1;
> -
> -               if (chunksize > 0)
> -                       ops->crypt_tail(state, chunksize, src, dst);
> +       while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
> +               ops->crypt_blocks(state, round_down(walk->nbytes,
> +                                                   AEGIS128L_BLOCK_SIZE),
> +                                 walk->src.virt.addr, walk->dst.virt.addr);
> +               skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
> +       }
>
> -               skcipher_walk_done(&walk, 0);
> +       if (walk->nbytes) {
> +               ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
> +                               walk->dst.virt.addr);
> +               skcipher_walk_done(walk, 0);
>         }
>  }
>
> @@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
>  {
>         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>         struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
> +       struct skcipher_walk walk;
>         struct aegis_state state;
>
> +       ops->skcipher_walk_init(&walk, req, true);
> +
>         kernel_fpu_begin();
>
>         crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
>         crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
> -       crypto_aegis128l_aesni_process_crypt(&state, req, ops);
> +       crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
>         crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
>
>         kernel_fpu_end();
> diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
> index 8bebda2de92fe..6227ca3220a05 100644
> --- a/arch/x86/crypto/aegis256-aesni-glue.c
> +++ b/arch/x86/crypto/aegis256-aesni-glue.c
> @@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
>  }
>
>  static void crypto_aegis256_aesni_process_crypt(
> -               struct aegis_state *state, struct aead_request *req,
> +               struct aegis_state *state, struct skcipher_walk *walk,
>                 const struct aegis_crypt_ops *ops)
>  {
> -       struct skcipher_walk walk;
> -       u8 *src, *dst;
> -       unsigned int chunksize, base;
> -
> -       ops->skcipher_walk_init(&walk, req, false);
> -
> -       while (walk.nbytes) {
> -               src = walk.src.virt.addr;
> -               dst = walk.dst.virt.addr;
> -               chunksize = walk.nbytes;
> -
> -               ops->crypt_blocks(state, chunksize, src, dst);
> -
> -               base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
> -               src += base;
> -               dst += base;
> -               chunksize &= AEGIS256_BLOCK_SIZE - 1;
> -
> -               if (chunksize > 0)
> -                       ops->crypt_tail(state, chunksize, src, dst);
> +       while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
> +               ops->crypt_blocks(state,
> +                                 round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
> +                                 walk->src.virt.addr, walk->dst.virt.addr);
> +               skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
> +       }
>
> -               skcipher_walk_done(&walk, 0);
> +       if (walk->nbytes) {
> +               ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
> +                               walk->dst.virt.addr);
> +               skcipher_walk_done(walk, 0);
>         }
>  }
>
> @@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
>  {
>         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>         struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
> +       struct skcipher_walk walk;
>         struct aegis_state state;
>
> +       ops->skcipher_walk_init(&walk, req, true);
> +
>         kernel_fpu_begin();
>
>         crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
>         crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
> -       crypto_aegis256_aesni_process_crypt(&state, req, ops);
> +       crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
>         crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
>
>         kernel_fpu_end();
> --
> 2.20.1
>


-- 
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.
