netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Eric Biggers <ebiggers@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, netdev@vger.kernel.org
Subject: [PATCH v3 17/19] crypto: skcipher - use the new scatterwalk functions
Date: Wed, 19 Feb 2025 10:23:39 -0800	[thread overview]
Message-ID: <20250219182341.43961-18-ebiggers@kernel.org> (raw)
In-Reply-To: <20250219182341.43961-1-ebiggers@kernel.org>

From: Eric Biggers <ebiggers@google.com>

Convert skcipher_walk to use the new scatterwalk functions.

This includes a few changes to exactly where the different parts of the
iteration happen.  For example, the dcache flush that previously happened
in scatterwalk_done() now happens in scatterwalk_done_dst() or in
memcpy_to_scatterwalk().  Advancing to the next sg entry now happens
just-in-time in scatterwalk_clamp() instead of in scatterwalk_done().

Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/skcipher.c | 51 ++++++++++++++++++-----------------------------
 1 file changed, 19 insertions(+), 32 deletions(-)

diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 33508d001f361..0a78a96d8583d 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -47,20 +47,10 @@ static inline void skcipher_map_src(struct skcipher_walk *walk)
 static inline void skcipher_map_dst(struct skcipher_walk *walk)
 {
 	walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
-	scatterwalk_unmap(walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
-	scatterwalk_unmap(walk->dst.virt.addr);
-}
-
 static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
 {
 	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 }
 
@@ -68,18 +58,10 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
 	struct crypto_alg *alg)
 {
 	return container_of(alg, struct skcipher_alg, base);
 }
 
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
-	u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
-
-	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
-	return 0;
-}
-
 /**
  * skcipher_walk_done() - finish one step of a skcipher_walk
  * @walk: the skcipher_walk
  * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
  *	 or a -errno value to terminate the walk due to an error
@@ -110,44 +92,45 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res)
 	}
 
 	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
 				    SKCIPHER_WALK_COPY |
 				    SKCIPHER_WALK_DIFF)))) {
-unmap_src:
-		skcipher_unmap_src(walk);
+		scatterwalk_advance(&walk->in, n);
 	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
-		skcipher_unmap_dst(walk);
-		goto unmap_src;
+		scatterwalk_unmap(walk->src.virt.addr);
+		scatterwalk_advance(&walk->in, n);
 	} else if (walk->flags & SKCIPHER_WALK_COPY) {
+		scatterwalk_advance(&walk->in, n);
 		skcipher_map_dst(walk);
 		memcpy(walk->dst.virt.addr, walk->page, n);
-		skcipher_unmap_dst(walk);
 	} else { /* SKCIPHER_WALK_SLOW */
 		if (res > 0) {
 			/*
 			 * Didn't process all bytes.  Either the algorithm is
 			 * broken, or this was the last step and it turned out
 			 * the message wasn't evenly divisible into blocks but
 			 * the algorithm requires it.
 			 */
 			res = -EINVAL;
 			total = 0;
-		} else
-			n = skcipher_done_slow(walk, n);
+		} else {
+			u8 *buf = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
+
+			memcpy_to_scatterwalk(&walk->out, buf, n);
+		}
+		goto dst_done;
 	}
 
+	scatterwalk_done_dst(&walk->out, walk->dst.virt.addr, n);
+dst_done:
+
 	if (res > 0)
 		res = 0;
 
 	walk->total = total;
 	walk->nbytes = 0;
 
-	scatterwalk_advance(&walk->in, n);
-	scatterwalk_advance(&walk->out, n);
-	scatterwalk_done(&walk->in, 0, total);
-	scatterwalk_done(&walk->out, 1, total);
-
 	if (total) {
 		if (walk->flags & SKCIPHER_WALK_SLEEP)
 			cond_resched();
 		walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
 				 SKCIPHER_WALK_DIFF);
@@ -190,11 +173,11 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
 		walk->buffer = buffer;
 	}
 	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
 	walk->src.virt.addr = walk->dst.virt.addr;
 
-	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+	memcpy_from_scatterwalk(walk->src.virt.addr, &walk->in, bsize);
 
 	walk->nbytes = bsize;
 	walk->flags |= SKCIPHER_WALK_SLOW;
 
 	return 0;
@@ -204,11 +187,15 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
 {
 	u8 *tmp = walk->page;
 
 	skcipher_map_src(walk);
 	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
-	skcipher_unmap_src(walk);
+	scatterwalk_unmap(walk->src.virt.addr);
+	/*
+	 * walk->in is advanced later when the number of bytes actually
+	 * processed (which might be less than walk->nbytes) is known.
+	 */
 
 	walk->src.virt.addr = tmp;
 	walk->dst.virt.addr = tmp;
 	return 0;
 }
-- 
2.48.1


  parent reply	other threads:[~2025-02-19 18:24 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-02-19 18:23 [PATCH v3 00/19] crypto: scatterlist handling improvements Eric Biggers
2025-02-19 18:23 ` [PATCH v3 01/19] crypto: scatterwalk - move to next sg entry just in time Eric Biggers
2025-02-19 18:23 ` [PATCH v3 02/19] crypto: scatterwalk - add new functions for skipping data Eric Biggers
2025-02-19 18:23 ` [PATCH v3 03/19] crypto: scatterwalk - add new functions for iterating through data Eric Biggers
2025-03-02  6:28   ` Herbert Xu
2025-03-02 20:21     ` Eric Biggers
2025-03-03  2:35       ` Herbert Xu
2025-02-19 18:23 ` [PATCH v3 04/19] crypto: scatterwalk - add new functions for copying data Eric Biggers
2025-03-02  6:40   ` Herbert Xu
2025-03-02 21:37     ` Eric Biggers
2025-03-03  2:39       ` Herbert Xu
2025-02-19 18:23 ` [PATCH v3 05/19] crypto: scatterwalk - add scatterwalk_get_sglist() Eric Biggers
2025-02-19 18:23 ` [PATCH v3 06/19] crypto: skcipher - use scatterwalk_start_at_pos() Eric Biggers
2025-02-19 18:23 ` [PATCH v3 07/19] crypto: aegis - use the new scatterwalk functions Eric Biggers
2025-02-19 18:23 ` [PATCH v3 08/19] crypto: arm/ghash " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 09/19] crypto: arm64 " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 10/19] crypto: nx " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 11/19] crypto: s390/aes-gcm " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 12/19] crypto: s5p-sss " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 13/19] crypto: stm32 " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 14/19] crypto: x86/aes-gcm " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 15/19] crypto: x86/aegis " Eric Biggers
2025-02-19 18:23 ` [PATCH v3 16/19] net/tls: " Eric Biggers
2025-02-19 18:23 ` Eric Biggers [this message]
2025-02-19 18:23 ` [PATCH v3 18/19] crypto: scatterwalk - remove obsolete functions Eric Biggers
2025-02-19 18:23 ` [PATCH v3 19/19] crypto: scatterwalk - don't split at page boundaries when !HIGHMEM Eric Biggers
2025-03-02  8:11 ` [PATCH v3 00/19] crypto: scatterlist handling improvements Herbert Xu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250219182341.43961-18-ebiggers@kernel.org \
    --to=ebiggers@kernel.org \
    --cc=linux-crypto@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=netdev@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).