* [PATCH v2 01/15] crypto: aegis - fix handling chunked inputs
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable, Ondrej Mosnacek
From: Eric Biggers <ebiggers@google.com>
The generic AEGIS implementations all fail the improved AEAD tests
because they produce the wrong result with some data layouts. The issue
is that they assume that if the skcipher_walk API gives 'nbytes' not
aligned to the walksize (a.k.a. walk.stride), then it is the end of the
data. In fact, this can happen before the end. Fix them.
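For reference, here is the fixed walk loop in isolation (a sketch using the same names as the diff below; a partial step can occur mid-walk, e.g. when a scatterlist element ends part-way through a stride, so only the truly final step may be shorter than the stride):

	/*
	 * Mid-walk steps are rounded down to whole strides; the remainder
	 * is returned to skcipher_walk_done() so it is revisited on the
	 * next iteration instead of being treated as the final chunk.
	 */
	ops->skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)	/* not the end of the data */
			nbytes = round_down(nbytes, walk.stride);

		ops->crypt_chunk(state, walk.dst.virt.addr,
				 walk.src.virt.addr, nbytes);

		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}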
Fixes: f606a88e5823 ("crypto: aegis - Add generic AEGIS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/aegis128.c | 14 +++++++-------
crypto/aegis128l.c | 14 +++++++-------
crypto/aegis256.c | 14 +++++++-------
3 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 96e078a8a00a1..3718a83413032 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -286,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
const struct aegis128_ops *ops)
{
struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
+ unsigned int nbytes = walk.nbytes;
- ops->crypt_chunk(state, dst, src, chunksize);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- skcipher_walk_done(&walk, 0);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index a210e779b911b..275a8616d71bd 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -349,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
const struct aegis128l_ops *ops)
{
struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
+ unsigned int nbytes = walk.nbytes;
- ops->crypt_chunk(state, dst, src, chunksize);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- skcipher_walk_done(&walk, 0);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index 49882a28e93e9..ecd6b7f34a2d2 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -299,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
const struct aegis256_ops *ops)
{
struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
+ unsigned int nbytes = walk.nbytes;
- ops->crypt_chunk(state, dst, src, chunksize);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- skcipher_walk_done(&walk, 0);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
--
2.20.1
* [PATCH v2 02/15] crypto: morus - fix handling chunked inputs
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable, Ondrej Mosnacek
From: Eric Biggers <ebiggers@google.com>
The generic MORUS implementations all fail the improved AEAD tests
because they produce the wrong result with some data layouts. The issue
is that they assume that if the skcipher_walk API gives 'nbytes' not
aligned to the walksize (a.k.a. walk.stride), then it is the end of the
data. In fact, this can happen before the end. Fix them.
Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/morus1280.c | 13 +++++++------
crypto/morus640.c | 13 +++++++------
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 78ba09db7328c..0747732d5b78a 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -362,18 +362,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
const struct morus1280_ops *ops)
{
struct skcipher_walk walk;
- u8 *dst;
- const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
+ unsigned int nbytes = walk.nbytes;
- ops->crypt_chunk(state, dst, src, walk.nbytes);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- skcipher_walk_done(&walk, 0);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 5cf530139b271..1617a1eb8be13 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -361,18 +361,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
const struct morus640_ops *ops)
{
struct skcipher_walk walk;
- u8 *dst;
- const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
+ unsigned int nbytes = walk.nbytes;
- ops->crypt_chunk(state, dst, src, walk.nbytes);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- skcipher_walk_done(&walk, 0);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
--
2.20.1
* [PATCH v2 03/15] crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable, Ondrej Mosnacek
From: Eric Biggers <ebiggers@google.com>
The x86 AEGIS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts. The issue is that
they assume that if the skcipher_walk API gives 'nbytes' not aligned to
the walksize (a.k.a. walk.stride), then it is the end of the data. In
fact, this can happen before the end.
Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
incorrectly sleep in the skcipher_walk_*() functions while preemption
has been disabled by kernel_fpu_begin().
Fix these bugs.
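A condensed sketch of the fixed control flow (BLOCK_SIZE stands in for the per-variant AEGIS128/AEGIS128L/AEGIS256 block size; everything else follows the diffs below):

	struct skcipher_walk walk;

	/* Walk setup may sleep, so do it before kernel_fpu_begin();
	 * atomic=true then keeps skcipher_walk_done() from sleeping
	 * while preemption is disabled. */
	ops->skcipher_walk_init(&walk, req, true);

	kernel_fpu_begin();

	while (walk.nbytes >= BLOCK_SIZE) {
		/* Pass only whole blocks to the SIMD code; hand any
		 * remainder back to the walk. */
		ops->crypt_blocks(state,
				  round_down(walk.nbytes, BLOCK_SIZE),
				  walk.src.virt.addr, walk.dst.virt.addr);
		skcipher_walk_done(&walk, walk.nbytes % BLOCK_SIZE);
	}

	if (walk.nbytes) {	/* final partial block */
		ops->crypt_tail(state, walk.nbytes, walk.src.virt.addr,
				walk.dst.virt.addr);
		skcipher_walk_done(&walk, 0);
	}

	kernel_fpu_end();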
Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
arch/x86/crypto/aegis128-aesni-glue.c | 38 ++++++++++----------------
arch/x86/crypto/aegis128l-aesni-glue.c | 38 ++++++++++----------------
arch/x86/crypto/aegis256-aesni-glue.c | 38 ++++++++++----------------
3 files changed, 45 insertions(+), 69 deletions(-)
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 2a356b948720e..3ea71b8718135 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
}
static void crypto_aegis128_aesni_process_crypt(
- struct aegis_state *state, struct aead_request *req,
+ struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize, base;
-
- ops->skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops->crypt_blocks(state, chunksize, src, dst);
-
- base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
- src += base;
- dst += base;
- chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops->crypt_tail(state, chunksize, src, dst);
+ while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+ ops->crypt_blocks(state,
+ round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+ walk->src.virt.addr, walk->dst.virt.addr);
+ skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ walk->dst.virt.addr);
+ skcipher_walk_done(walk, 0);
}
}
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+ struct skcipher_walk walk;
struct aegis_state state;
+ ops->skcipher_walk_init(&walk, req, true);
+
kernel_fpu_begin();
crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis128_aesni_process_crypt(&state, req, ops);
+ crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index dbe8bb980da15..1b1b39c66c5e2 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
}
static void crypto_aegis128l_aesni_process_crypt(
- struct aegis_state *state, struct aead_request *req,
+ struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize, base;
-
- ops->skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops->crypt_blocks(state, chunksize, src, dst);
-
- base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
- src += base;
- dst += base;
- chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops->crypt_tail(state, chunksize, src, dst);
+ while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+ ops->crypt_blocks(state, round_down(walk->nbytes,
+ AEGIS128L_BLOCK_SIZE),
+ walk->src.virt.addr, walk->dst.virt.addr);
+ skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ walk->dst.virt.addr);
+ skcipher_walk_done(walk, 0);
}
}
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+ struct skcipher_walk walk;
struct aegis_state state;
+ ops->skcipher_walk_init(&walk, req, true);
+
kernel_fpu_begin();
crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+ crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 8bebda2de92fe..6227ca3220a05 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
}
static void crypto_aegis256_aesni_process_crypt(
- struct aegis_state *state, struct aead_request *req,
+ struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize, base;
-
- ops->skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops->crypt_blocks(state, chunksize, src, dst);
-
- base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
- src += base;
- dst += base;
- chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops->crypt_tail(state, chunksize, src, dst);
+ while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+ ops->crypt_blocks(state,
+ round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+ walk->src.virt.addr, walk->dst.virt.addr);
+ skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ walk->dst.virt.addr);
+ skcipher_walk_done(walk, 0);
}
}
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+ struct skcipher_walk walk;
struct aegis_state state;
+ ops->skcipher_walk_init(&walk, req, true);
+
kernel_fpu_begin();
crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis256_aesni_process_crypt(&state, req, ops);
+ crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
--
2.20.1
* [PATCH v2 04/15] crypto: x86/morus - fix handling chunked inputs and MAY_SLEEP
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable, Ondrej Mosnacek
From: Eric Biggers <ebiggers@google.com>
The x86 MORUS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts. The issue is that
they assume that if the skcipher_walk API gives 'nbytes' not aligned to
the walksize (a.k.a. walk.stride), then it is the end of the data. In
fact, this can happen before the end.
Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
incorrectly sleep in the skcipher_walk_*() functions while preemption
has been disabled by kernel_fpu_begin().
Fix these bugs.
Fixes: 56e8e57fc3a7 ("crypto: morus - Add common SIMD glue code for MORUS")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
arch/x86/crypto/morus1280_glue.c | 40 +++++++++++++-------------------
arch/x86/crypto/morus640_glue.c | 39 ++++++++++++-------------------
2 files changed, 31 insertions(+), 48 deletions(-)
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 0dccdda1eb3a1..7e600f8bcdad8 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
struct morus1280_ops ops,
- struct aead_request *req)
+ struct skcipher_walk *walk)
{
- struct skcipher_walk walk;
- u8 *cursor_src, *cursor_dst;
- unsigned int chunksize, base;
-
- ops.skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- cursor_src = walk.src.virt.addr;
- cursor_dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
- base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
- cursor_src += base;
- cursor_dst += base;
- chunksize &= MORUS1280_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops.crypt_tail(state, cursor_src, cursor_dst,
- chunksize);
+ while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
+ ops.crypt_blocks(state, walk->src.virt.addr,
+ walk->dst.virt.addr,
+ round_down(walk->nbytes,
+ MORUS1280_BLOCK_SIZE));
+ skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+ walk->nbytes);
+ skcipher_walk_done(walk, 0);
}
}
@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
struct morus1280_state state;
+ struct skcipher_walk walk;
+
+ ops.skcipher_walk_init(&walk, req, true);
kernel_fpu_begin();
ctx->ops->init(&state, &ctx->key, req->iv);
crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
- crypto_morus1280_glue_process_crypt(&state, ops, req);
+ crypto_morus1280_glue_process_crypt(&state, ops, &walk);
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index 7b58fe4d9bd1a..cb3a817320160 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
struct morus640_ops ops,
- struct aead_request *req)
+ struct skcipher_walk *walk)
{
- struct skcipher_walk walk;
- u8 *cursor_src, *cursor_dst;
- unsigned int chunksize, base;
-
- ops.skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- cursor_src = walk.src.virt.addr;
- cursor_dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
- base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
- cursor_src += base;
- cursor_dst += base;
- chunksize &= MORUS640_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops.crypt_tail(state, cursor_src, cursor_dst,
- chunksize);
+ while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
+ ops.crypt_blocks(state, walk->src.virt.addr,
+ walk->dst.virt.addr,
+ round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
+ skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+ walk->nbytes);
+ skcipher_walk_done(walk, 0);
}
}
@@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
struct morus640_state state;
+ struct skcipher_walk walk;
+
+ ops.skcipher_walk_init(&walk, req, true);
kernel_fpu_begin();
ctx->ops->init(&state, &ctx->key, req->iv);
crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
- crypto_morus640_glue_process_crypt(&state, ops, req);
+ crypto_morus640_glue_process_crypt(&state, ops, &walk);
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
--
2.20.1
* [PATCH v2 05/15] crypto: x86/aesni-gcm - fix crash on empty plaintext
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable, Dave Watson
From: Eric Biggers <ebiggers@google.com>
gcmaes_crypt_by_sg() dereferences the NULL pointer returned by
scatterwalk_ffwd() when encrypting an empty plaintext and the source
scatterlist ends immediately after the associated data.
Fix it by only fast-forwarding to the src/dst data scatterlists if the
data length is nonzero.
This bug is reproduced by the "rfc4543(gcm(aes))" test vectors when run
with the new AEAD test manager.
Fixes: e845520707f8 ("crypto: aesni - Update aesni-intel_glue to use scatter/gather")
Cc: <stable@vger.kernel.org> # v4.17+
Cc: Dave Watson <davejwatson@fb.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
arch/x86/crypto/aesni-intel_glue.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 9b5ccde3ef315..1e3d2102033a0 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -813,11 +813,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
}
- src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
- scatterwalk_start(&src_sg_walk, src_sg);
- if (req->src != req->dst) {
- dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
- scatterwalk_start(&dst_sg_walk, dst_sg);
+ if (left) {
+ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+ scatterwalk_start(&src_sg_walk, src_sg);
+ if (req->src != req->dst) {
+ dst_sg = scatterwalk_ffwd(dst_start, req->dst,
+ req->assoclen);
+ scatterwalk_start(&dst_sg_walk, dst_sg);
+ }
}
kernel_fpu_begin();
--
2.20.1
* [PATCH v2 06/15] crypto: ahash - fix another early termination in hash walk
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable
From: Eric Biggers <ebiggers@google.com>
Hash algorithms with an alignmask set, e.g. "xcbc(aes-aesni)" and
"michael_mic", fail the improved hash tests because they sometimes
produce the wrong digest. The bug is that in the case where a
scatterlist element crosses pages, not all the data is actually hashed
because the scatterlist walk terminates too early. This happens because
the 'nbytes' variable in crypto_hash_walk_done() is assigned the number
of bytes remaining in the page, then later interpreted as the number of
bytes remaining in the scatterlist element. Fix it.
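To see the two quantities diverge with hypothetical numbers (a standalone userspace sketch, not kernel code):

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	int main(void)
	{
		unsigned int entrylen = 2000;	/* left in the sg entry */
		unsigned int offset = 4000;	/* offset within the page */

		/* Bytes hashable before the page boundary: */
		unsigned int in_page = entrylen < PAGE_SIZE - offset ?
				       entrylen : PAGE_SIZE - offset;

		/* Prints "in page: 96, still in entry: 1904".  The old
		 * code reused one 'nbytes' for both quantities, so the
		 * 1904 bytes in the next page were never hashed. */
		printf("in page: %u, still in entry: %u\n",
		       in_page, entrylen - in_page);
		return 0;
	}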
Fixes: 900a081f6912 ("crypto: ahash - Fix early termination in hash walk")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/ahash.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ca0d3e281fefb..81e2767e2164e 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
- unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
- if (nbytes && walk->offset & alignmask && !err) {
- walk->offset = ALIGN(walk->offset, alignmask + 1);
- nbytes = min(nbytes,
- ((unsigned int)(PAGE_SIZE)) - walk->offset);
- walk->entrylen -= nbytes;
+ if (walk->entrylen && (walk->offset & alignmask) && !err) {
+ unsigned int nbytes;
+ walk->offset = ALIGN(walk->offset, alignmask + 1);
+ nbytes = min(walk->entrylen,
+ (unsigned int)(PAGE_SIZE - walk->offset));
if (nbytes) {
+ walk->entrylen -= nbytes;
walk->data += walk->offset;
return nbytes;
}
@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (err)
return err;
- if (nbytes) {
+ if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
--
2.20.1
* [PATCH v2 07/15] crypto: arm64/aes-neonbs - fix returning final keystream block
From: Eric Biggers @ 2019-02-01 7:51 UTC
To: linux-crypto, Herbert Xu; +Cc: linux-kernel, stable, Ard Biesheuvel
From: Eric Biggers <ebiggers@google.com>
The arm64 NEON bit-sliced implementation of AES-CTR fails the improved
skcipher tests because it sometimes produces the wrong ciphertext. The
bug is that the final keystream block isn't returned from the assembly
code when the number of non-final blocks is zero. This can happen if
the input data ends a few bytes after a page boundary. In this case the
last bytes get "encrypted" by XOR'ing them with uninitialized memory.
Fix the assembly code to return the final keystream block when needed.
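For context, a CTR tail is produced by XOR'ing the remaining bytes with the final keystream block, so that block must reach the caller even when zero full blocks precede it; a generic C illustration (not the NEON code itself):

	typedef unsigned char u8;

	/* XOR the sub-block tail with the last keystream block.  If the
	 * assembly never stores that block, 'keystream' here would hold
	 * uninitialized memory -- the symptom the tests caught. */
	static void ctr_xor_tail(u8 *dst, const u8 *src,
				 const u8 *keystream, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			dst[i] = src[i] ^ keystream[i];
	}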
Fixes: 88a3f582bea9 ("crypto: arm64/aes - don't use IV buffer to return final keystream block")
Cc: <stable@vger.kernel.org> # v4.11+
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
arch/arm64/crypto/aes-neonbs-core.S | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index e613a87f8b53f..8432c8d0dea66 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
8: next_ctr v0
st1 {v0.16b}, [x24]
- cbz x23, 0f
+ cbz x23, .Lctr_done
cond_yield_neon 98b
b 99b
-0: frame_pop
+.Lctr_done:
+ frame_pop
ret
/*
* If we are handling the tail of the input (x6 != NULL), return the
* final keystream block back to the caller.
*/
+0: cbz x25, 8b
+ st1 {v0.16b}, [x25]
+ b 8b
1: cbz x25, 8b
st1 {v1.16b}, [x25]
b 8b
--
2.20.1
* Re: [PATCH v2 02/15] crypto: morus - fix handling chunked inputs
From: Ondrej Mosnacek @ 2019-02-05 9:30 UTC
To: Eric Biggers; +Cc: linux-crypto, Herbert Xu, Linux kernel mailing list, stable
On Fri, Feb 1, 2019 at 8:52 AM Eric Biggers <ebiggers@kernel.org> wrote:
> From: Eric Biggers <ebiggers@google.com>
>
> The generic MORUS implementations all fail the improved AEAD tests
> because they produce the wrong result with some data layouts. The issue
> is that they assume that if the skcipher_walk API gives 'nbytes' not
> aligned to the walksize (a.k.a. walk.stride), then it is the end of the
> data. In fact, this can happen before the end. Fix them.
>
> Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
> Cc: <stable@vger.kernel.org> # v4.18+
> Cc: Ondrej Mosnacek <omosnace@redhat.com>
> Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
--
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.
* Re: [PATCH v2 01/15] crypto: aegis - fix handling chunked inputs
From: Ondrej Mosnacek @ 2019-02-05 9:31 UTC
To: Eric Biggers; +Cc: linux-crypto, Herbert Xu, Linux kernel mailing list, stable
On Fri, Feb 1, 2019 at 8:52 AM Eric Biggers <ebiggers@kernel.org> wrote:
> From: Eric Biggers <ebiggers@google.com>
>
> The generic AEGIS implementations all fail the improved AEAD tests
> because they produce the wrong result with some data layouts. The issue
> is that they assume that if the skcipher_walk API gives 'nbytes' not
> aligned to the walksize (a.k.a. walk.stride), then it is the end of the
> data. In fact, this can happen before the end. Fix them.
>
> Fixes: f606a88e5823 ("crypto: aegis - Add generic AEGIS AEAD implementations")
> Cc: <stable@vger.kernel.org> # v4.18+
> Cc: Ondrej Mosnacek <omosnace@redhat.com>
> Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
--
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.
* Re: [PATCH v2 03/15] crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP
From: Ondrej Mosnacek @ 2019-02-05 9:31 UTC
To: Eric Biggers; +Cc: linux-crypto, Herbert Xu, Linux kernel mailing list, stable
On Fri, Feb 1, 2019 at 8:52 AM Eric Biggers <ebiggers@kernel.org> wrote:
> From: Eric Biggers <ebiggers@google.com>
>
> The x86 AEGIS implementations all fail the improved AEAD tests because
> they produce the wrong result with some data layouts. The issue is that
> they assume that if the skcipher_walk API gives 'nbytes' not aligned to
> the walksize (a.k.a. walk.stride), then it is the end of the data. In
> fact, this can happen before the end.
>
> Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
> incorrectly sleep in the skcipher_walk_*() functions while preemption
> has been disabled by kernel_fpu_begin().
>
> Fix these bugs.
>
> Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
> Cc: <stable@vger.kernel.org> # v4.18+
> Cc: Ondrej Mosnacek <omosnace@redhat.com>
> Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
--
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.
* Re: [PATCH v2 04/15] crypto: x86/morus - fix handling chunked inputs and MAY_SLEEP
From: Ondrej Mosnacek @ 2019-02-05 9:32 UTC
To: Eric Biggers; +Cc: linux-crypto, Herbert Xu, Linux kernel mailing list, stable
On Fri, Feb 1, 2019 at 8:52 AM Eric Biggers <ebiggers@kernel.org> wrote:
> From: Eric Biggers <ebiggers@google.com>
>
> The x86 MORUS implementations all fail the improved AEAD tests because
> they produce the wrong result with some data layouts. The issue is that
> they assume that if the skcipher_walk API gives 'nbytes' not aligned to
> the walksize (a.k.a. walk.stride), then it is the end of the data. In
> fact, this can happen before the end.
>
> Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
> incorrectly sleep in the skcipher_walk_*() functions while preemption
> has been disabled by kernel_fpu_begin().
>
> Fix these bugs.
>
> Fixes: 56e8e57fc3a7 ("crypto: morus - Add common SIMD glue code for MORUS")
> Cc: <stable@vger.kernel.org> # v4.18+
> Cc: Ondrej Mosnacek <omosnace@redhat.com>
> Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
--
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.