From: Eric Biggers <ebigg...@google.com>

The generic AEGIS implementations all fail the improved AEAD tests
because they produce the wrong result with some data layouts.  The
issue is that they assume that if the skcipher_walk API gives 'nbytes'
not aligned to the walksize (a.k.a. walk.stride), then it is the end of
the data.  In fact, a misaligned step can also occur in the middle of
the data.  Fix all three implementations to process only whole stride
units until the final step.
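
Concretely, each step must process a multiple of walk.stride unless it
is the last step, and any remainder must be handed back to
skcipher_walk_done() so that the walk re-presents it in the next step.
A minimal sketch of the corrected loop (crypt_chunk stands in for each
variant's chunk handler):

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* Not the last step: keep only whole stride units. */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		crypt_chunk(state, walk.dst.virt.addr,
			    walk.src.virt.addr, nbytes);

		/* Hand the unprocessed tail back to the walk. */
		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}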

Fixes: f606a88e5823 ("crypto: aegis - Add generic AEGIS AEAD implementations")
Cc: <sta...@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosn...@redhat.com>
Signed-off-by: Eric Biggers <ebigg...@google.com>
---
 crypto/aegis128.c  | 14 +++++++-------
 crypto/aegis128l.c | 14 +++++++-------
 crypto/aegis256.c  | 14 +++++++-------
 3 files changed, 21 insertions(+), 21 deletions(-)
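
For context, the old loop (identical in all three variants, shown
condensed here) handed whatever 'nbytes' the walk returned straight to
crypt_chunk() and reported zero bytes left over:

	while (walk.nbytes) {
		/* Buggy: assumes a short step only occurs at the end. */
		ops->crypt_chunk(state, walk.dst.virt.addr,
				 walk.src.virt.addr, walk.nbytes);
		skcipher_walk_done(&walk, 0);
	}

When a step ends on a non-stride boundary before the end of the data,
crypt_chunk() treats the short chunk as the final partial block, so the
resulting state diverges from a single-shot computation.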

diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 96e078a8a00a1..3718a83413032 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -286,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
                                          const struct aegis128_ops *ops)
 {
        struct skcipher_walk walk;
-       u8 *src, *dst;
-       unsigned int chunksize;
 
        ops->skcipher_walk_init(&walk, req, false);
 
        while (walk.nbytes) {
-               src = walk.src.virt.addr;
-               dst = walk.dst.virt.addr;
-               chunksize = walk.nbytes;
+               unsigned int nbytes = walk.nbytes;
 
-               ops->crypt_chunk(state, dst, src, chunksize);
+               if (nbytes < walk.total)
+                       nbytes = round_down(nbytes, walk.stride);
 
-               skcipher_walk_done(&walk, 0);
+               ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+                                nbytes);
+
+               skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }
 }
 
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index a210e779b911b..275a8616d71bd 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -349,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
                                           const struct aegis128l_ops *ops)
 {
        struct skcipher_walk walk;
-       u8 *src, *dst;
-       unsigned int chunksize;
 
        ops->skcipher_walk_init(&walk, req, false);
 
        while (walk.nbytes) {
-               src = walk.src.virt.addr;
-               dst = walk.dst.virt.addr;
-               chunksize = walk.nbytes;
+               unsigned int nbytes = walk.nbytes;
 
-               ops->crypt_chunk(state, dst, src, chunksize);
+               if (nbytes < walk.total)
+                       nbytes = round_down(nbytes, walk.stride);
 
-               skcipher_walk_done(&walk, 0);
+               ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+                                nbytes);
+
+               skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }
 }
 
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index 49882a28e93e9..ecd6b7f34a2d2 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -299,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
                                          const struct aegis256_ops *ops)
 {
        struct skcipher_walk walk;
-       u8 *src, *dst;
-       unsigned int chunksize;
 
        ops->skcipher_walk_init(&walk, req, false);
 
        while (walk.nbytes) {
-               src = walk.src.virt.addr;
-               dst = walk.dst.virt.addr;
-               chunksize = walk.nbytes;
+               unsigned int nbytes = walk.nbytes;
 
-               ops->crypt_chunk(state, dst, src, chunksize);
+               if (nbytes < walk.total)
+                       nbytes = round_down(nbytes, walk.stride);
 
-               skcipher_walk_done(&walk, 0);
+               ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+                                nbytes);
+
+               skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }
 }
 
-- 
2.20.1
