Linux cryptographic layer development
 help / color / mirror / Atom feed
* [PATCH] crypto: ctr - Convert from skcipher to lskcipher
@ 2026-05-10 23:09 Alexandre Knecht
  2026-05-10 23:32 ` Eric Biggers
  0 siblings, 1 reply; 5+ messages in thread
From: Alexandre Knecht @ 2026-05-10 23:09 UTC (permalink / raw)
  To: herbert, David S . Miller
  Cc: ebiggers, linux-crypto, linux-kernel, bpf, Alexandre Knecht

Replace the existing skcipher CTR template with an lskcipher version,
following the pattern established by the CBC conversion (705b52fef3c7).

This enables BPF programs using the bpf_crypto kfuncs to use CTR mode
ciphers like ctr(aes), which previously failed because
crypto_alloc_lskcipher() could not find an lskcipher implementation.
ECB and CBC already have lskcipher support; CTR was the missing piece.

The rfc3686 template remains as an skcipher and continues to work
through the automatic lskcipher-to-skcipher bridge.

Tested with NIST SP 800-38A test vectors (AES-128/192/256-CTR),
partial block handling, and rfc3686 compatibility. Kernel self-tests
pass on instantiation (selftest: passed in /proc/crypto).

Signed-off-by: Alexandre Knecht <knecht.alexandre@gmail.com>
Assisted-by: Claude:claude-opus-4-6 checkpatch
---
 crypto/ctr.c | 143 +++++++++++++++++++--------------------------------
 1 file changed, 54 insertions(+), 89 deletions(-)

diff --git a/crypto/ctr.c b/crypto/ctr.c
index a388f0ceb3a0..5fceaf47bedc 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -7,7 +7,6 @@
 
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
-#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -25,139 +24,105 @@ struct crypto_rfc3686_req_ctx {
 	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
 };
 
-static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
-				   struct crypto_cipher *tfm)
+static int crypto_ctr_crypt_segment(struct crypto_lskcipher *cipher,
+				    const u8 *src, u8 *dst, unsigned int nbytes,
+				    u8 *iv)
 {
-	unsigned int bsize = crypto_cipher_blocksize(tfm);
-	unsigned long alignmask = crypto_cipher_alignmask(tfm);
-	u8 *ctrblk = walk->iv;
-	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
-	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
-	const u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	unsigned int nbytes = walk->nbytes;
-
-	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
-	crypto_xor_cpy(dst, keystream, src, nbytes);
-
-	crypto_inc(ctrblk, bsize);
-}
+	unsigned int bsize = crypto_lskcipher_blocksize(cipher);
 
-static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
-				    struct crypto_cipher *tfm)
-{
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		   crypto_cipher_alg(tfm)->cia_encrypt;
-	unsigned int bsize = crypto_cipher_blocksize(tfm);
-	u8 *ctrblk = walk->iv;
-	const u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	unsigned int nbytes = walk->nbytes;
-
-	do {
-		/* create keystream */
-		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
+	while (nbytes >= bsize) {
+		/* Encrypt counter block to produce keystream */
+		crypto_lskcipher_encrypt(cipher, iv, dst, bsize, NULL);
 		crypto_xor(dst, src, bsize);
-
-		/* increment counter in counterblock */
-		crypto_inc(ctrblk, bsize);
+		crypto_inc(iv, bsize);  /* Increment counter */
 
 		src += bsize;
 		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
+		nbytes -= bsize;
+	}
 
 	return nbytes;
 }
 
-static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
-				    struct crypto_cipher *tfm)
+static int crypto_ctr_crypt_inplace(struct crypto_lskcipher *cipher,
+				    u8 *dst, unsigned int nbytes, u8 *iv)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		   crypto_cipher_alg(tfm)->cia_encrypt;
-	unsigned int bsize = crypto_cipher_blocksize(tfm);
-	unsigned long alignmask = crypto_cipher_alignmask(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *ctrblk = walk->iv;
-	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
-	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
-
-	do {
-		/* create keystream */
-		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
-		crypto_xor(dst, keystream, bsize);
+	unsigned int bsize = crypto_lskcipher_blocksize(cipher);
+	u8 keystream[MAX_CIPHER_BLOCKSIZE];
 
-		/* increment counter in counterblock */
-		crypto_inc(ctrblk, bsize);
+	while (nbytes >= bsize) {
+		/* Encrypt counter block to produce keystream */
+		crypto_lskcipher_encrypt(cipher, iv, keystream, bsize, NULL);
+		crypto_xor(dst, keystream, bsize);
+		crypto_inc(iv, bsize);  /* Increment counter */
 
 		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
+		nbytes -= bsize;
+	}
 
+	memzero_explicit(keystream, sizeof(keystream));
 	return nbytes;
 }
 
-static int crypto_ctr_crypt(struct skcipher_request *req)
+static int crypto_ctr_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+			    u8 *dst, unsigned int len, u8 *iv, u32 flags)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
-	const unsigned int bsize = crypto_cipher_blocksize(cipher);
-	struct skcipher_walk walk;
+	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher *cipher = *ctx;
+	unsigned int bsize = crypto_lskcipher_blocksize(cipher);
+	bool final = flags & CRYPTO_LSKCIPHER_FLAG_FINAL;
 	unsigned int nbytes;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
 
-	while (walk.nbytes >= bsize) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
-		else
-			nbytes = crypto_ctr_crypt_segment(&walk, cipher);
-
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-
-	if (walk.nbytes) {
-		crypto_ctr_crypt_final(&walk, cipher);
-		err = skcipher_walk_done(&walk, 0);
+	if (src == dst)
+		nbytes = crypto_ctr_crypt_inplace(cipher, dst, len, iv);
+	else
+		nbytes = crypto_ctr_crypt_segment(cipher, src, dst, len, iv);
+
+	/* Handle final partial block. */
+	if (nbytes && final) {
+		u8 keystream[MAX_CIPHER_BLOCKSIZE];
+
+		crypto_lskcipher_encrypt(cipher, iv, keystream, bsize, NULL);
+		crypto_xor_cpy(dst + len - nbytes, src + len - nbytes,
+			       keystream, nbytes);
+		crypto_inc(iv, bsize);
+		memzero_explicit(keystream, sizeof(keystream));
+		nbytes = 0;
 	}
 
-	return err;
+	return nbytes;
 }
 
 static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct skcipher_instance *inst;
-	struct crypto_alg *alg;
+	struct lskcipher_instance *inst;
 	int err;
 
-	inst = skcipher_alloc_instance_simple(tmpl, tb);
+	inst = lskcipher_alloc_instance_simple(tmpl, tb);
 	if (IS_ERR(inst))
 		return PTR_ERR(inst);
 
-	alg = skcipher_ialg_simple(inst);
-
 	/* Block size must be >= 4 bytes. */
 	err = -EINVAL;
-	if (alg->cra_blocksize < 4)
+	if (inst->alg.co.base.cra_blocksize < 4)
 		goto out_free_inst;
 
 	/* If this is false we'd fail the alignment of crypto_inc. */
-	if (alg->cra_blocksize % 4)
+	if (inst->alg.co.base.cra_blocksize % 4)
 		goto out_free_inst;
 
-	/* CTR mode is a stream cipher. */
-	inst->alg.base.cra_blocksize = 1;
-
 	/*
-	 * To simplify the implementation, configure the skcipher walk to only
-	 * give a partial block at the very end, never earlier.
+	 * CTR mode is a stream cipher.  Set chunksize to the underlying
+	 * cipher block size so partial blocks only occur at the end.
 	 */
-	inst->alg.chunksize = alg->cra_blocksize;
+	inst->alg.co.chunksize = inst->alg.co.base.cra_blocksize;
+	inst->alg.co.base.cra_blocksize = 1;
 
+	/* CTR encrypt and decrypt are the same XOR-based operation. */
 	inst->alg.encrypt = crypto_ctr_crypt;
 	inst->alg.decrypt = crypto_ctr_crypt;
 
-	err = skcipher_register_instance(tmpl, inst);
+	err = lskcipher_register_instance(tmpl, inst);
 	if (err) {
 out_free_inst:
 		inst->free(inst);
-- 
2.51.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] crypto: ctr - Convert from skcipher to lskcipher
  2026-05-10 23:09 [PATCH] crypto: ctr - Convert from skcipher to lskcipher Alexandre Knecht
@ 2026-05-10 23:32 ` Eric Biggers
  2026-05-10 23:44   ` Eric Biggers
  0 siblings, 1 reply; 5+ messages in thread
From: Eric Biggers @ 2026-05-10 23:32 UTC (permalink / raw)
  To: Alexandre Knecht
  Cc: herbert, David S . Miller, linux-crypto, linux-kernel, bpf

On Mon, May 11, 2026 at 01:09:01AM +0200, Alexandre Knecht wrote:
> Replace the existing skcipher CTR template with an lskcipher version,
> following the pattern established by the CBC conversion (705b52fef3c7).
> 
> This enables BPF programs using the bpf_crypto kfuncs to use CTR mode
> ciphers like ctr(aes), which previously failed because
> crypto_alloc_lskcipher() could not find an lskcipher implementation.
> ECB and CBC already have lskcipher support; CTR was the missing piece.
> 
> The rfc3686 template remains as an skcipher and continues to work
> through the automatic lskcipher-to-skcipher bridge.
> 
> Tested with NIST SP 800-38A test vectors (AES-128/192/256-CTR),
> partial block handling, and rfc3686 compatibility. Kernel self-tests
> pass on instantiation (selftest: passed in /proc/crypto).
> 
> Signed-off-by: Alexandre Knecht <knecht.alexandre@gmail.com>
> Assisted-by: Claude:claude-opus-4-6 checkpatch

I'm confused.  Why was that BPF crypto feature even added with ECB mode
as the only supported encryption mode?  Who is using that, and why?

CTR isn't necessarily much better, either.

What is the use case for the BPF crypto?  The first step should be to
decide what *specific* algorithm(s) it needs.  It doesn't seem like that
has ever happened, and I'm not sure this patch helps much.

That needs to be done anyway.  But that would also be helpful for a
potential future switch to lib/crypto/, which would avoid all the weird
issues with lskcipher etc.

- Eric

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] crypto: ctr - Convert from skcipher to lskcipher
  2026-05-10 23:32 ` Eric Biggers
@ 2026-05-10 23:44   ` Eric Biggers
  2026-05-11  0:02     ` Alexandre Knecht
  0 siblings, 1 reply; 5+ messages in thread
From: Eric Biggers @ 2026-05-10 23:44 UTC (permalink / raw)
  To: Alexandre Knecht
  Cc: herbert, David S . Miller, linux-crypto, linux-kernel, bpf

On Sun, May 10, 2026 at 04:32:39PM -0700, Eric Biggers wrote:
> On Mon, May 11, 2026 at 01:09:01AM +0200, Alexandre Knecht wrote:
> > Replace the existing skcipher CTR template with an lskcipher version,
> > following the pattern established by the CBC conversion (705b52fef3c7).
> > 
> > This enables BPF programs using the bpf_crypto kfuncs to use CTR mode
> > ciphers like ctr(aes), which previously failed because
> > crypto_alloc_lskcipher() could not find an lskcipher implementation.
> > ECB and CBC already have lskcipher support; CTR was the missing piece.
> > 
> > The rfc3686 template remains as an skcipher and continues to work
> > through the automatic lskcipher-to-skcipher bridge.
> > 
> > Tested with NIST SP 800-38A test vectors (AES-128/192/256-CTR),
> > partial block handling, and rfc3686 compatibility. Kernel self-tests
> > pass on instantiation (selftest: passed in /proc/crypto).
> > 
> > Signed-off-by: Alexandre Knecht <knecht.alexandre@gmail.com>
> > Assisted-by: Claude:claude-opus-4-6 checkpatch
> 
> I'm confused.  Why was that BPF crypto feature even added with ECB mode
> as the only supported encryption mode?  Who is using that, and why?
> 
> CTR isn't necessarily much better, either.
> 
> What is the use case for the BPF crypto?  The first step should be to
> decide what *specific* algorithm(s) it needs.  It doesn't seem like that
> has ever happened, and I'm not sure this patch helps much.
> 
> That needs to be done anyway.  But that would also be helpful for a
> potential future switch to lib/crypto/, which would avoid all the weird
> issues with lskcipher etc.

Also note that lskcipher doesn't provide access to the accelerated AES
mode implementations.  Indeed, almost nothing is supported by lskcipher.
The fact that you found something to be missing isn't surprising.

I think "lskcipher" is kind of a dead end, to be honest.  It's not clear
why it got added.  The path forwards is to get the AES encryption modes
added to lib/crypto/ and to just use that instead.

- Eric

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] crypto: ctr - Convert from skcipher to lskcipher
  2026-05-10 23:44   ` Eric Biggers
@ 2026-05-11  0:02     ` Alexandre Knecht
  2026-05-11  0:19       ` Eric Biggers
  0 siblings, 1 reply; 5+ messages in thread
From: Alexandre Knecht @ 2026-05-11  0:02 UTC (permalink / raw)
  To: Eric Biggers; +Cc: herbert, David S . Miller, linux-crypto, linux-kernel, bpf

Le lun. 11 mai 2026 à 01:44, Eric Biggers <ebiggers@kernel.org> a écrit :
> Also note that lskcipher doesn't provide access to the accelerated AES
> mode implementations.  Indeed, almost nothing is supported by lskcipher.
> The fact that you found something to be missing isn't surprising.
>
> I think "lskcipher" is kind of a dead end, to be honest.  It's not clear
> why it got added.  The path forwards is to get the AES encryption modes
> added to lib/crypto/ and to just use that instead.
>
> - Eric

Hi Eric,

Thanks for the review — you're asking the right questions.

I'm developing a VXLAN/EVPN-based CNI for Kubernetes (releasing in the
coming months), and the goal is to implement datapath encryption for
overlay traffic in a zero-trust datacenter model. The encryption
happens in BPF programs attached via TC on the VXLAN device (encrypt
inner frames on egress, decrypt on ingress).

The algorithm I actually need is AES-GCM (authenticated encryption of
VXLAN inner frames, with the outer headers as AAD). When I looked at
bpf_crypto, I found that:

1. Only lskcipher ("skcipher" type) was implemented
2. ecb(aes) was the only usable algorithm
3. AEAD support was designed for (authsize field exists in
 bpf_crypto_params, setauthsize in bpf_crypto_type) but never
 implemented
4. ctr(aes) wasn't available as lskcipher either

I looked at Herbert's history converting ECB and CBC to lskcipher and
assumed that was the path forward for CTR. But you're right, the
real goal is AEAD, not CTR. CTR alone doesn't give me integrity.

Your point about lib/crypto/ is interesting. If there's a path to
expose AES-GCM (or the building blocks) as direct library calls that
BPF programs in TC/XDP could use (avoiding the template/instance
machinery and getting hardware acceleration) that would be ideal for
this use case.

What would that look like? Is there existing lib/crypto/ work for
AES-GCM that could be wired up to BPF, or would that need to be
built?

Thanks,
Alexandre

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] crypto: ctr - Convert from skcipher to lskcipher
  2026-05-11  0:02     ` Alexandre Knecht
@ 2026-05-11  0:19       ` Eric Biggers
  0 siblings, 0 replies; 5+ messages in thread
From: Eric Biggers @ 2026-05-11  0:19 UTC (permalink / raw)
  To: Alexandre Knecht
  Cc: herbert, David S . Miller, linux-crypto, linux-kernel, bpf

On Mon, May 11, 2026 at 02:02:22AM +0200, Alexandre Knecht wrote:
> Le lun. 11 mai 2026 à 01:44, Eric Biggers <ebiggers@kernel.org> a écrit :
> > Also note that lskcipher doesn't provide access to the accelerated AES
> > mode implementations.  Indeed, almost nothing is supported by lskcipher.
> > The fact that you found something to be missing isn't surprising.
> >
> > I think "lskcipher" is kind of a dead end, to be honest.  It's not clear
> > why it got added.  The path forwards is to get the AES encryption modes
> > added to lib/crypto/ and to just use that instead.
> >
> > - Eric
> 
> Hi Eric,
> 
> Thanks for the review — you're asking the right questions.
> 
> I'm developing a VXLAN/EVPN-based CNI for Kubernetes (releasing in the
> coming months), and the goal is to implement datapath encryption for
> overlay traffic in a zero-trust datacenter model. The encryption
> happens in BPF programs attached via TC on the VXLAN device (encrypt
> inner frames on egress, decrypt on ingress).
> 
> The algorithm I actually need is AES-GCM (authenticated encryption of
> VXLAN inner frames, with the outer headers as AAD). When I looked at
> bpf_crypto, I found that:
> 
> 1. Only lskcipher ("skcipher" type) was implemented
> 2. ecb(aes) was the only usable algorithm
> 3. AEAD support was designed for (authsize field exists in
>  bpf_crypto_params, setauthsize in bpf_crypto_type) but never
>  implemented
> 4. ctr(aes) wasn't available as lskcipher either
> 
> I looked at Herbert's history converting ECB and CBC to lskcipher and
> assumed that was the path forward for CTR. But you're right, the
> real goal is AEAD, not CTR. CTR alone doesn't give me integrity.
> 
> Your point about lib/crypto/ is interesting. If there's a path to
> expose AES-GCM (or the building blocks) as direct library calls that
> BPF programs in TC/XDP could use (avoiding the template/instance
> machinery and getting hardware acceleration) that would be ideal for
> this use case.
> 
> What would that look like? Is there existing lib/crypto/ work for
> AES-GCM that could be wired up to BPF, or would that need to be
> built?

Sure, it makes sense that AES-GCM is what you actually need.  There's
actually a lot of demand for AES-GCM in lib/crypto/, and I've been
working on it.

There's already an existing AES-GCM lib/crypto/ API (see
include/crypto/gcm.h), and I optimized it a bit in 7.0 and 7.1.  For
example, it now uses the architecture-optimized single-block AES code.

You might be able to go ahead and use that right now.

However, it currently supports only one-shot computation, and it doesn't
yet take advantage of the fully optimized AES-GCM assembly code that
interleaves the AES and GHASH computations.  I'm planning to address
both of those limitations soon.

Anyway, that seems like the clear way forward.  The lskcipher thing
seems like a dead end to me.

- Eric

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2026-05-11  0:19 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-10 23:09 [PATCH] crypto: ctr - Convert from skcipher to lskcipher Alexandre Knecht
2026-05-10 23:32 ` Eric Biggers
2026-05-10 23:44   ` Eric Biggers
2026-05-11  0:02     ` Alexandre Knecht
2026-05-11  0:19       ` Eric Biggers

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox