* [RFC/RFT PATCH 01/18] crypto: x86/poly1305 - fix overflow during partial reduction
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable, Martin Willi, Jason A. Donenfeld

From: Eric Biggers <ebiggers@google.com>

The x86_64 implementation of Poly1305 produces the wrong result on some
inputs because poly1305_4block_avx2() incorrectly assumes that when
partially reducing the accumulator, the bits carried from limb 'd4' to
limb 'h0' fit in a 32-bit integer.  This is true for poly1305-generic
which processes only one block at a time.  However, it's not true for
the AVX2 implementation, which processes 4 blocks at a time and
therefore can produce intermediate limbs about 4x larger.

Fix it by making the relevant calculations use 64-bit arithmetic rather
than 32-bit.  Note that most of the carries already used 64-bit
arithmetic, but the d4 -> h0 carry was different for some reason.

To be safe I also made the same change to the corresponding SSE2 code,
though that only operates on 1 or 2 blocks at a time.  I don't think
it's really needed for poly1305_block_sse2(), but it doesn't hurt
because it's already x86_64 code.  It *might* be needed for
poly1305_2block_sse2(), but overflows aren't easy to reproduce there.
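
To see why the 32-bit assumption breaks, here is a back-of-the-envelope
check, written as a standalone userspace C sketch.  It uses loose
illustrative bounds for 2^26-radix limbs (each h and r limb below
2^26), not the exact bounds tracked by the assembly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Every product h_i * r_j is below 2^52, and d4 sums five of
	 * them; the AVX2 code accumulates roughly 4x more per step.
	 */
	uint64_t limb = UINT64_C(1) << 26;
	uint64_t d4_1blk = 5 * limb * limb;	/* ~2^54: one block */
	uint64_t d4_4blk = 4 * d4_1blk;		/* ~2^56: four blocks */

	/* The carry in question: h0 += (d4 >> 26) * 5 */
	uint64_t carry1 = (d4_1blk >> 26) * 5;
	uint64_t carry4 = (d4_4blk >> 26) * 5;

	printf("1-block carry 0x%llx fits in 32 bits: %d\n",
	       (unsigned long long)carry1, carry1 <= 0xffffffffULL);
	printf("4-block carry 0x%llx fits in 32 bits: %d\n",
	       (unsigned long long)carry4, carry4 <= 0xffffffffULL);
	return 0;
}

This prints that the single-block carry still fits in 32 bits while the
4-block carry does not, which is exactly the assumption the 32-bit
lea/add got wrong.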

This bug was originally detected by my patches that improve testmgr to
fuzz algorithms against their generic implementation.  Also add a test
vector which reproduces the bug directly (in the AVX2 case).

Fixes: b1ccc8f4b631 ("crypto: poly1305 - Add a four block AVX2 variant for x86_64")
Fixes: c70f4abef07a ("crypto: poly1305 - Add a SSE2 SIMD variant for x86_64")
Cc: <stable@vger.kernel.org> # v4.3+
Cc: Martin Willi <martin@strongswan.org>
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/x86/crypto/poly1305-avx2-x86_64.S | 14 +++++---
 arch/x86/crypto/poly1305-sse2-x86_64.S | 22 ++++++++-----
 crypto/testmgr.h                       | 44 +++++++++++++++++++++++++-
 3 files changed, 67 insertions(+), 13 deletions(-)

diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index 3b6e70d085da8..8457cdd47f751 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
 	vpaddq		t2,t1,t1
 	vmovq		t1x,d4
 
+	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+	# integers.  It's true in a single-block implementation, but not here.
+
 	# d1 += d0 >> 26
 	mov		d0,%rax
 	shr		$26,%rax
@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index e6add74d78a59..6f0be7a869641 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
@@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
 	paddq		t2,t1
 	movq		t1,d4
 
+	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+	# integers.  It's true in a single-block implementation, but not here.
+
 	# d1 += d0 >> 26
 	mov		d0,%rax
 	shr		$26,%rax
@@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index f267633cf13ac..d18a37629f053 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -5634,7 +5634,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
 		.psize		= 80,
 		.digest		= "\x13\x00\x00\x00\x00\x00\x00\x00"
 				  "\x00\x00\x00\x00\x00\x00\x00\x00",
-	},
+	}, { /* Regression test for overflow in AVX2 implementation */
+		.plaintext	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff",
+		.psize		= 300,
+		.digest		= "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+				  "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+	}
 };
 
 /* NHPoly1305 test vectors from https://github.com/google/adiantum */
-- 
2.21.0



* [RFC/RFT PATCH 02/18] crypto: crct10dif-generic - fix use via crypto_shash_digest()
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable, Tim Chen

From: Eric Biggers <ebiggers@google.com>

The ->digest() method of crct10dif-generic reads the current CRC value
from the shash_desc context.  But this value is uninitialized, causing
crypto_shash_digest() to compute the wrong result.  Fix it.

Probably this wasn't noticed before because lib/crc-t10dif.c only uses
crypto_shash_update(), not crypto_shash_digest().  Likewise,
crypto_shash_digest() is not yet tested by the crypto self-tests,
because those only test the ahash API, which reaches shash solely
through init/update/final.
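
For reference, a minimal sketch of the one-shot path that exposes the
bug (the helper name is hypothetical; it assumes a "crct10dif" shash
tfm was already allocated with crypto_alloc_shash()):

#include <crypto/hash.h>

static int crct10dif_oneshot(struct crypto_shash *tfm, const u8 *data,
			     unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	/*
	 * No crypto_shash_init() happens on this path, so ->digest()
	 * runs against an uninitialized desc context and must start
	 * from CRC 0 itself instead of reading ctx->crc.
	 */
	return crypto_shash_digest(desc, data, len, out);
}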

This bug was detected by my patches that improve testmgr to fuzz
algorithms against their generic implementation.

Fixes: 2d31e518a428 ("crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework")
Cc: <stable@vger.kernel.org> # v3.11+
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/crct10dif_generic.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29dc6fc8..d08048ae55527 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
-- 
2.21.0



* [RFC/RFT PATCH 03/18] crypto: x86/crct10dif-pcl - fix use via crypto_shash_digest()
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable, Tim Chen

From: Eric Biggers <ebiggers@google.com>

The ->digest() method of crct10dif-pclmul reads the current CRC value
from the shash_desc context.  But this value is uninitialized, causing
crypto_shash_digest() to compute the wrong result.  Fix it.

Probably this wasn't noticed before because lib/crc-t10dif.c only uses
crypto_shash_update(), not crypto_shash_digest().  Likewise,
crypto_shash_digest() is not yet tested by the crypto self-tests,
because those only test the ahash API, which reaches shash solely
through init/update/final.

Fixes: 0b95a7f85718 ("crypto: crct10dif - Glue code to cast accelerated CRCT10DIF assembly as a crypto transform")
Cc: <stable@vger.kernel.org> # v3.11+
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/x86/crypto/crct10dif-pclmul_glue.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 64f17d2d8b67a..3c81e15b0873f 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -71,15 +71,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
 	if (len >= 16 && crypto_simd_usable()) {
 		kernel_fpu_begin();
-		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
 		kernel_fpu_end();
 	} else
-		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -88,15 +87,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
-- 
2.21.0



* [RFC/RFT PATCH 04/18] crypto: skcipher - restore default skcipher_walk::iv on error
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable

From: Eric Biggers <ebiggers@google.com>

When the user-provided IV buffer is not aligned to the algorithm's
alignmask, skcipher_walk_virt() allocates an aligned buffer and copies
the IV into it.  However, skcipher_walk_virt() can fail after that
point, and in this case the buffer will be freed.

This causes a use-after-free read in callers that read from walk->iv
unconditionally, e.g. the LRW template.  For example, this can be
reproduced by trying to encrypt fewer than 16 bytes using "lrw(aes)".
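
For illustration, a simplified sketch of the problematic caller
pattern, modeled loosely on the LRW template (the function is
hypothetical, not the actual lrw.c code):

#include <crypto/internal/skcipher.h>
#include <asm/unaligned.h>

static int demo_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	u64 tweak;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	/*
	 * Unconditional read of walk.iv: if skcipher_walk_virt()
	 * failed after copying the IV into an aligned bounce buffer,
	 * walk.iv pointed at freed memory here before this fix.
	 */
	tweak = get_unaligned((u64 *)walk.iv);

	while (walk.nbytes) {
		/* ... process walk.src/walk.dst using 'tweak' ... */
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}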

Fix it by making skcipher_walk_first() reset walk->iv to walk->oiv if
skcipher_walk_next() fails.

This addresses a similar problem as commit 2b4f27c36bcd ("crypto:
skcipher - set walk.iv for zero-length inputs"), but that didn't
consider the case where skcipher_walk_virt() fails after copying the IV.

This bug was detected by my patches that improve testmgr to fuzz
algorithms against their generic implementation, when the extra
self-tests were run on a KASAN-enabled kernel.

Fixes: b286d8b1a690 ("crypto: skcipher - Add skcipher walk interface")
Cc: <stable@vger.kernel.org> # v4.10+
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/skcipher.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bcf13d95f54ac..0f639f018047d 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,19 +426,27 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
+	int err;
+
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
 	walk->buffer = NULL;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
-		int err = skcipher_copy_iv(walk);
+		err = skcipher_copy_iv(walk);
 		if (err)
 			return err;
 	}
 
 	walk->page = NULL;
 
-	return skcipher_walk_next(walk);
+	err = skcipher_walk_next(walk);
+
+	/* On error, don't leave 'walk->iv' pointing to freed buffer. */
+	if (unlikely(err))
+		walk->iv = walk->oiv;
+
+	return err;
 }
 
 static int skcipher_walk_skcipher(struct skcipher_walk *walk,
-- 
2.21.0



* [RFC/RFT PATCH 05/18] crypto: skcipher - don't WARN on unprocessed data after slow walk step
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable

From: Eric Biggers <ebiggers@google.com>

skcipher_walk_done() assumes it's a bug if, after the "slow" path is
executed where the next chunk of data is processed via a bounce buffer,
the algorithm says it didn't process all bytes.  Thus it WARNs on this.

However, this can happen legitimately when the message needs to be
evenly divisible into "blocks" but isn't, and the algorithm has a
'walksize' greater than the block size.  For example, ecb-aes-neonbs
sets 'walksize' to 128 bytes and only supports messages evenly divisible
into 16-byte blocks.  If, say, 17 message bytes remain but they straddle
scatterlist elements, the skcipher_walk code will take the "slow" path
and pass the algorithm all 17 bytes in the bounce buffer.  But the
algorithm will only be able to process 16 bytes, triggering the WARN.
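
A hypothetical way to hit this from a test, sketched below.  It assumes
'req' is already set up for an algorithm like "ecb(aes)" backed by
ecb-aes-neonbs, and that 'buf' and 'iv' are valid buffers:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int straddle_test(struct skcipher_request *req, u8 *buf, u8 *iv)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], buf, 9);	/* first 9 of 17 bytes */
	sg_set_buf(&sg[1], buf + 9, 8);	/* rest; final block straddles */
	skcipher_request_set_crypt(req, sg, sg, 17, iv);

	/* Expected to fail with -EINVAL (message not evenly divisible
	 * into blocks), without triggering the WARN removed here.
	 */
	return crypto_skcipher_encrypt(req);
}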

Fix this by just removing the WARN_ON().  Returning -EINVAL, as the code
already does, is the right behavior.

This bug was detected by my patches that improve testmgr to fuzz
algorithms against their generic implementation.

Fixes: b286d8b1a690 ("crypto: skcipher - Add skcipher walk interface")
Cc: <stable@vger.kernel.org> # v4.10+
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/skcipher.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0f639f018047d..85836be4f274f 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -131,8 +131,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (WARN_ON(err)) {
-			/* unexpected case; didn't process all bytes */
+		if (err) {
+			/*
+			 * Didn't process all bytes.  Either the algorithm is
+			 * broken, or this was the last step and it turned out
+			 * the message wasn't evenly divisible into blocks but
+			 * the algorithm requires it.
+			 */
 			err = -EINVAL;
 			goto finish;
 		}
-- 
2.21.0



* [RFC/RFT PATCH 06/18] crypto: chacha20poly1305 - set cra_name correctly
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable, Martin Willi

From: Eric Biggers <ebiggers@google.com>

If the rfc7539 template is instantiated with specific implementations,
e.g. "rfc7539(chacha20-generic,poly1305-generic)" rather than
"rfc7539(chacha20,poly1305)", then the implementation names end up
included in the instance's cra_name.  This is incorrect because it then
prevents users from allocating "rfc7539(chacha20,poly1305)" when the
highest-priority implementations of chacha20 and poly1305 were selected.
Also, the self-tests aren't run on an instance allocated in this way.

Fix it by setting the instance's cra_name from the underlying
algorithms' actual cra_names, rather than from the requested names.
This matches what other templates do.
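
As a hedged sketch of the user-visible effect (kernel-side caller;
error handling omitted, function name hypothetical):

#include <crypto/aead.h>

static void rfc7539_name_demo(void)
{
	struct crypto_aead *a, *b;

	/* Instantiates with explicit drivers; before this fix the
	 * resulting instance's cra_name was
	 * "rfc7539(chacha20-generic,poly1305-generic)".
	 */
	a = crypto_alloc_aead("rfc7539(chacha20-generic,poly1305-generic)",
			      0, 0);

	/* With the fix, that instance's cra_name is
	 * "rfc7539(chacha20,poly1305)", so this lookup can find and
	 * reuse it instead of colliding with it.
	 */
	b = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);

	crypto_free_aead(b);
	crypto_free_aead(a);
}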

Fixes: 71ebc4d1b27d ("crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539")
Cc: <stable@vger.kernel.org> # v4.2+
Cc: Martin Willi <martin@strongswan.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/chacha20poly1305.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ed2e12e26dd80..279d816ab51dd 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -645,8 +645,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "%s(%s,%s)", name, chacha_name,
-		     poly_name) >= CRYPTO_MAX_ALG_NAME)
+		     "%s(%s,%s)", name, chacha->base.cra_name,
+		     poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->base.cra_driver_name,
-- 
2.21.0



* [RFC/RFT PATCH 07/18] crypto: gcm - fix incompatibility between "gcm" and "gcm_base"
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable

From: Eric Biggers <ebiggers@google.com>

GCM instances can be created by either the "gcm" template, which only
allows choosing the block cipher, e.g. "gcm(aes)"; or by "gcm_base",
which allows choosing the ctr and ghash implementations, e.g.
"gcm_base(ctr(aes-generic),ghash-generic)".

However, a "gcm_base" instance prevents a "gcm" instance from being
registered using the same implementations.  Nor will the instance be
found by lookups of "gcm".  This can be used as a denial of service.
Moreover, "gcm_base" instances are never tested by the crypto
self-tests, even if there are compatible "gcm" tests.

The root cause of these problems is that instances of the two templates
use different cra_names.  Therefore, fix these problems by making
"gcm_base" instances set the same cra_name as "gcm" instances, e.g.
"gcm(aes)" instead of "gcm_base(ctr(aes-generic),ghash-generic)".

This requires extracting the block cipher name from the name of the ctr
algorithm, which means starting to require that the stream cipher really
be ctr and not something else.  But it would be pretty bizarre if anyone
was actually relying on being able to use a non-ctr stream cipher here.
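
To illustrate the name rewriting, here is a standalone userspace C
sketch of the same string logic (the kernel code below operates on
ctr->base.cra_name and inst->alg.base.cra_name instead):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ctr_name = "ctr(aes)";	/* the ctr spawn's cra_name */
	char full_name[64];

	if (strncmp(ctr_name, "ctr(", 4) != 0)
		return 1;	/* reject non-CTR stream ciphers */

	/* Reuse everything after "ctr(", including the trailing ')' */
	snprintf(full_name, sizeof(full_name), "gcm(%s", ctr_name + 4);
	printf("%s\n", full_name);	/* prints: gcm(aes) */
	return 0;
}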

Fixes: d00aa19b507b ("[CRYPTO] gcm: Allow block cipher parameter")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/gcm.c | 30 ++++++++----------------------
 1 file changed, 8 insertions(+), 22 deletions(-)

diff --git a/crypto/gcm.c b/crypto/gcm.c
index e1a11f529d257..12ff5ed569272 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
 
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *ghash_name)
 {
@@ -650,24 +649,23 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 
 	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
 
-	/* We only support 16-byte blocks. */
+	/* Must be CTR mode with 16-byte blocks. */
 	err = -EINVAL;
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16)
 		goto out_put_ctr;
 
-	/* Not a stream cipher? */
-	if (ctr->base.cra_blocksize != 1)
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;
 
-	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
 		     ghash_alg->cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = (ghash->base.cra_flags |
 				    ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +707,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -719,12 +716,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	    CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, "ghash");
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
 }
 
 static int crypto_gcm_base_create(struct crypto_template *tmpl,
@@ -732,7 +724,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 {
 	const char *ctr_name;
 	const char *ghash_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
@@ -742,12 +733,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 	if (IS_ERR(ghash_name))
 		return PTR_ERR(ghash_name);
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
-		     ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, ghash_name);
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
 }
 
 static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
-- 
2.21.0



* [RFC/RFT PATCH 08/18] crypto: ccm - fix incompatibility between "ccm" and "ccm_base"
From: Eric Biggers @ 2019-03-31 20:04 UTC
  To: linux-crypto; +Cc: stable

From: Eric Biggers <ebiggers@google.com>

[This is essentially the same as the corresponding patch to GCM.]

CCM instances can be created by either the "ccm" template, which only
allows choosing the block cipher, e.g. "ccm(aes)"; or by "ccm_base",
which allows choosing the ctr and cbcmac implementations, e.g.
"ccm_base(ctr(aes-generic),cbcmac(aes-generic))".

However, a "ccm_base" instance prevents a "ccm" instance from being
registered using the same implementations.  Nor will the instance be
found by lookups of "ccm".  This can be used as a denial of service.
Moreover, "ccm_base" instances are never tested by the crypto
self-tests, even if there are compatible "ccm" tests.

The root cause of these problems is that instances of the two templates
use different cra_names.  Therefore, fix these problems by making
"ccm_base" instances set the same cra_name as "ccm" instances, e.g.
"ccm(aes)" instead of "ccm_base(ctr(aes-generic),cbcmac(aes-generic))".

This requires extracting the block cipher name from the name of the ctr
algorithm, which means starting to require that the stream cipher really
be ctr and not something else.  But it would be pretty bizarre if anyone
was actually relying on being able to use a non-ctr stream cipher here.

Fixes: 4a49b499dfa0 ("[CRYPTO] ccm: Added CCM mode")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/ccm.c | 38 ++++++++++++--------------------------
 1 file changed, 12 insertions(+), 26 deletions(-)

diff --git a/crypto/ccm.c b/crypto/ccm.c
index 50df8f001c1c9..3bb49776450b7 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
 
 static int crypto_ccm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *mac_name)
 {
@@ -509,23 +508,22 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 
 	ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
 
-	/* Not a stream cipher? */
+	/* Must be CTR mode with 16-byte blocks. */
 	err = -EINVAL;
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16)
 		goto err_drop_ctr;
 
-	/* We want the real thing! */
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;
 
-	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "ccm_base(%s,%s)", ctr->base.cra_driver_name,
 		     mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (mac->base.cra_priority +
 				       ctr->base.cra_priority) / 2;
@@ -567,7 +565,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
 	char mac_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -581,35 +578,24 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					mac_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }
 
 static int crypto_ccm_base_create(struct crypto_template *tmpl,
 				  struct rtattr **tb)
 {
 	const char *ctr_name;
-	const char *cipher_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
+	const char *mac_name;
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
 		return PTR_ERR(ctr_name);
 
-	cipher_name = crypto_attr_alg_name(tb[2]);
-	if (IS_ERR(cipher_name))
-		return PTR_ERR(cipher_name);
-
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
-		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
+	mac_name = crypto_attr_alg_name(tb[2]);
+	if (IS_ERR(mac_name))
+		return PTR_ERR(mac_name);
 
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					cipher_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }
 
 static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
-- 
2.21.0



* Re: [RFC/RFT PATCH 01/18] crypto: x86/poly1305 - fix overflow during partial reduction
From: Martin Willi @ 2019-04-01  7:52 UTC
  To: Eric Biggers, linux-crypto; +Cc: stable, Jason A. Donenfeld

Hi,

> The x86_64 implementation of Poly1305 produces the wrong result on
> some inputs because poly1305_4block_avx2() incorrectly assumes that
> when partially reducing the accumulator, the bits carried from limb
> 'd4' to limb 'h0' fit in a 32-bit integer.

> [...] This bug was originally detected by my patches that improve
> testmgr to fuzz algorithms against their generic implementation. 

Thanks Eric. This shows how valuable your continued work on the crypto
testing code is, and how useful such a (common) testing infrastructure
can be.

Reviewed-by: Martin Willi <martin@strongswan.org>



* Re: [RFC/RFT PATCH 06/18] crypto: chacha20poly1305 - set cra_name correctly
From: Martin Willi @ 2019-04-01  7:57 UTC
  To: Eric Biggers, linux-crypto; +Cc: stable


> If the rfc7539 template is instantiated with specific
> implementations, e.g. "rfc7539(chacha20-generic,poly1305-generic)"
> rather than "rfc7539(chacha20,poly1305)", then the implementation
> names end up included in the instance's cra_name.  This is incorrect
> [...]

Reviewed-by: Martin Willi <martin@strongswan.org>

Thanks,
Martin



* Re: [RFC/RFT PATCH 07/18] crypto: gcm - fix incompatibility between "gcm" and "gcm_base"
From: Eric Biggers @ 2019-04-01 15:56 UTC
  To: linux-crypto; +Cc: stable

On Sun, Mar 31, 2019 at 01:04:17PM -0700, Eric Biggers wrote:
> From: Eric Biggers <ebiggers@google.com>
> 
> GCM instances can be created by either the "gcm" template, which only
> allows choosing the block cipher, e.g. "gcm(aes)"; or by "gcm_base",
> which allows choosing the ctr and ghash implementations, e.g.
> "gcm_base(ctr(aes-generic),ghash-generic)".
> 
> [...]

Oops, I think there's a bug here: we also need to check that the hash algorithm
passed to gcm_base is really "ghash".  Similarly, in the next patch, ccm_base
requires "cbcmac".

- Eric


* Re: [RFC/RFT PATCH 08/18] crypto: ccm - fix incompatibility between "ccm" and "ccm_base"
From: Sasha Levin @ 2019-04-02 15:48 UTC
  To: Sasha Levin, Eric Biggers, linux-crypto
  Cc: stable

Hi,

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag,
fixing commit: 4a49b499dfa0 [CRYPTO] ccm: Added CCM mode.

The bot has tested the following trees: v5.0.5, v4.19.32, v4.14.109, v4.9.166, v4.4.177, v3.18.137.

v5.0.5: Build OK!
v4.19.32: Build OK!
v4.14.109: Build OK!
v4.9.166: Failed to apply! Possible dependencies:
    f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")

v4.4.177: Failed to apply! Possible dependencies:
    464b93a3c7d1 ("crypto: ccm - Use skcipher")
    f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")

v3.18.137: Failed to apply! Possible dependencies:
    2c221ad39424 ("crypto: ccm - Use crypto_aead_set_reqsize helper")
    464b93a3c7d1 ("crypto: ccm - Use skcipher")
    81c4c35eb61a ("crypto: ccm - Convert to new AEAD interface")
    f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")


How should we proceed with this patch?

--
Thanks,
Sasha


* Re: [RFC/RFT PATCH 04/18] crypto: skcipher - restore default skcipher_walk::iv on error
From: Herbert Xu @ 2019-04-08  6:23 UTC
  To: Eric Biggers; +Cc: linux-crypto, stable

Eric Biggers <ebiggers@kernel.org> wrote:
> From: Eric Biggers <ebiggers@google.com>
> 
> When the user-provided IV buffer is not aligned to the algorithm's
> alignmask, skcipher_walk_virt() allocates an aligned buffer and copies
> the IV into it.  However, skcipher_walk_virt() can fail after that
> point, and in this case the buffer will be freed.
> 
> This causes a use-after-free read in callers that read from walk->iv
> unconditionally, e.g. the LRW template.  For example, this can be
> reproduced by trying to encrypt fewer than 16 bytes using "lrw(aes)".

This looks like a bug in LRW.  Relying on walk->iv to be set to
anything after a failed skcipher_walk_virt call is wrong.  So we
should fix it there instead.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


* Re: [RFC/RFT PATCH 04/18] crypto: skcipher - restore default skcipher_walk::iv on error
From: Eric Biggers @ 2019-04-08 17:27 UTC
  To: Herbert Xu; +Cc: linux-crypto, stable

On Mon, Apr 08, 2019 at 02:23:54PM +0800, Herbert Xu wrote:
> Eric Biggers <ebiggers@kernel.org> wrote:
> > From: Eric Biggers <ebiggers@google.com>
> > 
> > When the user-provided IV buffer is not aligned to the algorithm's
> > alignmask, skcipher_walk_virt() allocates an aligned buffer and copies
> > the IV into it.  However, skcipher_walk_virt() can fail after that
> > point, and in this case the buffer will be freed.
> > 
> > This causes a use-after-free read in callers that read from walk->iv
> > unconditionally, e.g. the LRW template.  For example, this can be
> > reproduced by trying to encrypt fewer than 16 bytes using "lrw(aes)".
> 
> This looks like a bug in LRW.  Relying on walk->iv to be set to
> anything after a failed skcipher_walk_virt call is wrong.  So we
> should fix it there instead.
> 
> Cheers,
> -- 

It's not just LRW though.  It's actually 7 places:

	arch/arm/crypto/aes-neonbs-glue.c
	arch/arm/crypto/chacha-neon-glue.c
	arch/arm64/crypto/aes-neonbs-glue.c
	arch/arm64/crypto/chacha-neon-glue.c
	crypto/chacha-generic.c
	crypto/lrw.c
	crypto/salsa20-generic.c

Do you prefer that all those be updated?

- Eric


* Re: [RFC/RFT PATCH 04/18] crypto: skcipher - restore default skcipher_walk::iv on error
From: Herbert Xu @ 2019-04-09  6:37 UTC
  To: Eric Biggers; +Cc: linux-crypto, stable

On Mon, Apr 08, 2019 at 10:27:37AM -0700, Eric Biggers wrote:
>
> It's not just LRW though.  It's actually 7 places:
> 
> 	arch/arm/crypto/aes-neonbs-glue.c
> 	arch/arm/crypto/chacha-neon-glue.c
> 	arch/arm64/crypto/aes-neonbs-glue.c
> 	arch/arm64/crypto/chacha-neon-glue.c
> 	crypto/chacha-generic.c
> 	crypto/lrw.c
> 	crypto/salsa20-generic.c
> 
> Do you prefer that all those be updated?

We have to because if memory allocation fails then walk->iv will
just be the original IV.  If you can use the original IV then you
might as well just do that.

I just checked chacha-generic and it is fine as it's using the
original IV and not walk->iv (the difference is that one may be
unaligned while the other is guaranteed to be aligned).

arm*/chacha-neon-glue.c are also correct.

salsa20 is indeed broken but the fix is trivial since it's already
using unaligned loads.

arm/aes-neonbs-glue seems easily fixed as it can simply use the
unaligned original IV since it's going through the crypto API
again.

arm64/aes-neonbs-glue I'm not quite sure as it's calling into
assembly so depending on whether that wants aligned/unaligned
this can either use the original IV or check for errors and
abort if necessary.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
