From: Eric Biggers <ebiggers@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>,
	"Jason A . Donenfeld" <Jason@zx2c4.com>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	linux-arm-kernel@lists.infradead.org, x86@kernel.org,
	Eric Biggers <ebiggers@kernel.org>
Subject: [PATCH 07/12] crypto: adiantum - Use scatter_walk API instead of sg_miter
Date: Wed, 10 Dec 2025 17:18:39 -0800
Message-ID: <20251211011846.8179-8-ebiggers@kernel.org>
In-Reply-To: <20251211011846.8179-1-ebiggers@kernel.org>

Make adiantum_hash_message() use the scatter_walk API instead of
sg_miter.  scatter_walk is a bit simpler and also more efficient.  For
example, unlike sg_miter, scatter_walk doesn't require that the number
of scatterlist entries be calculated up-front.
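
For reference, the resulting access pattern (a minimal sketch based on the
calls used in the diff below; process_chunk() is only a hypothetical
stand-in for the per-chunk work) looks like:

	struct scatter_walk walk;

	scatterwalk_start(&walk, sgl);
	while (len) {
		/* Map the next contiguous chunk, at most 'len' bytes. */
		unsigned int n = scatterwalk_next(&walk, len);

		process_chunk(walk.addr, n);	/* hypothetical consumer */
		scatterwalk_done_src(&walk, n);	/* unmap; data was only read */
		len -= n;
	}

No up-front sg_nents() pass and no explicit start/stop of a mapping
iterator is needed; the walk simply advances until 'len' is consumed.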

Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 crypto/adiantum.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index bbe519fbd739..519e95228ad8 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -367,30 +367,27 @@ static void nhpoly1305_final(struct nhpoly1305_ctx *ctx,
  * evaluated as a polynomial in GF(2^{130}-5), like in the Poly1305 MAC.  Note
  * that the polynomial evaluation by itself would suffice to achieve the ε-∆U
  * property; NH is used for performance since it's much faster than Poly1305.
  */
 static void adiantum_hash_message(struct skcipher_request *req,
-				  struct scatterlist *sgl, unsigned int nents,
-				  le128 *out)
+				  struct scatterlist *sgl, le128 *out)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct sg_mapping_iter miter;
-	unsigned int i, n;
+	unsigned int len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	struct scatter_walk walk;
 
 	nhpoly1305_init(&rctx->u.hash_ctx);
+	scatterwalk_start(&walk, sgl);
+	while (len) {
+		unsigned int n = scatterwalk_next(&walk, len);
 
-	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
-	for (i = 0; i < bulk_len; i += n) {
-		sg_miter_next(&miter);
-		n = min_t(unsigned int, miter.length, bulk_len - i);
-		nhpoly1305_update(&rctx->u.hash_ctx, tctx, miter.addr, n);
+		nhpoly1305_update(&rctx->u.hash_ctx, tctx, walk.addr, n);
+		scatterwalk_done_src(&walk, n);
+		len -= n;
 	}
-	sg_miter_stop(&miter);
-
 	nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
 }
 
 /* Continue Adiantum encryption/decryption after the stream cipher step */
 static int adiantum_finish(struct skcipher_request *req)
@@ -398,11 +395,10 @@ static int adiantum_finish(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
 	struct scatterlist *dst = req->dst;
-	const unsigned int dst_nents = sg_nents(dst);
 	le128 digest;
 
 	/* If decrypting, decrypt C_M with the block cipher to get P_M */
 	if (!rctx->enc)
 		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
@@ -412,11 +408,12 @@ static int adiantum_finish(struct skcipher_request *req)
 	 * Second hash step
 	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
 	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
 	 */
 	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
-	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
+	if (dst->length >= req->cryptlen &&
+	    dst->offset + req->cryptlen <= PAGE_SIZE) {
 		/* Fast path for single-page destination */
 		struct page *page = sg_page(dst);
 		void *virt = kmap_local_page(page) + dst->offset;
 
 		nhpoly1305_init(&rctx->u.hash_ctx);
@@ -426,11 +423,11 @@ static int adiantum_finish(struct skcipher_request *req)
 		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
 		flush_dcache_page(page);
 		kunmap_local(virt);
 	} else {
 		/* Slow path that works for any destination scatterlist */
-		adiantum_hash_message(req, dst, dst_nents, &digest);
+		adiantum_hash_message(req, dst, &digest);
 		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
 		scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
 					 bulk_len, sizeof(le128), 1);
 	}
 	return 0;
@@ -451,11 +448,10 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
 	struct scatterlist *src = req->src;
-	const unsigned int src_nents = sg_nents(src);
 	unsigned int stream_len;
 	le128 digest;
 
 	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
 		return -EINVAL;
@@ -466,22 +462,23 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	 * First hash step
 	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
 	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
 	 */
 	adiantum_hash_header(req);
-	if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
+	if (src->length >= req->cryptlen &&
+	    src->offset + req->cryptlen <= PAGE_SIZE) {
 		/* Fast path for single-page source */
 		void *virt = kmap_local_page(sg_page(src)) + src->offset;
 
 		nhpoly1305_init(&rctx->u.hash_ctx);
 		nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
 		nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
 		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
 		kunmap_local(virt);
 	} else {
 		/* Slow path that works for any source scatterlist */
-		adiantum_hash_message(req, src, src_nents, &digest);
+		adiantum_hash_message(req, src, &digest);
 		scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
 					 bulk_len, sizeof(le128), 0);
 	}
 	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
 	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
-- 
2.52.0



Thread overview: 16+ messages
2025-12-11  1:18 [PATCH 00/12] NH library and Adiantum cleanup Eric Biggers
2025-12-11  1:18 ` [PATCH 01/12] lib/crypto: nh: Add NH library Eric Biggers
2025-12-11  1:18 ` [PATCH 02/12] lib/crypto: tests: Add KUnit tests for NH Eric Biggers
2025-12-11  1:18 ` [PATCH 03/12] lib/crypto: arm/nh: Migrate optimized code into library Eric Biggers
2025-12-11  1:18 ` [PATCH 04/12] lib/crypto: arm64/nh: " Eric Biggers
2025-12-11  1:18 ` [PATCH 05/12] lib/crypto: x86/nh: " Eric Biggers
2025-12-11  1:18 ` [PATCH 06/12] crypto: adiantum - Convert to use NH library Eric Biggers
2025-12-11  1:18 ` Eric Biggers [this message]
2025-12-11  1:18 ` [PATCH 08/12] crypto: adiantum - Use memcpy_{to,from}_sglist() Eric Biggers
2025-12-11  3:02   ` Herbert Xu
2025-12-11  1:18 ` [PATCH 09/12] crypto: adiantum - Drop support for asynchronous xchacha ciphers Eric Biggers
2025-12-11  1:18 ` [PATCH 10/12] crypto: nhpoly1305 - Remove crypto_shash support Eric Biggers
2025-12-11  3:02   ` Herbert Xu
2025-12-11  1:18 ` [PATCH 11/12] crypto: testmgr - Remove nhpoly1305 tests Eric Biggers
2025-12-11  3:03   ` Herbert Xu
2025-12-11  1:18 ` [PATCH 12/12] fscrypt: Drop obsolete recommendation to enable optimized NHPoly1305 Eric Biggers
