From: Ard Biesheuvel <ardb+git@google.com>
To: linux-arm-kernel@lists.infradead.org
Cc: linux-crypto@vger.kernel.org, linux-kernel@vger.kernel.org,
herbert@gondor.apana.org.au, linux@armlinux.org.uk,
Ard Biesheuvel <ardb@kernel.org>, Marc Zyngier <maz@kernel.org>,
Will Deacon <will@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Kees Cook <keescook@chromium.org>,
Catalin Marinas <catalin.marinas@arm.com>,
Mark Brown <broonie@kernel.org>,
Eric Biggers <ebiggers@kernel.org>
Subject: [PATCH v2 10/20] lib/crypto: Switch ARM and arm64 to 'ksimd' scoped guard API
Date: Wed, 1 Oct 2025 23:02:12 +0200
Message-ID: <20251001210201.838686-32-ardb+git@google.com>
In-Reply-To: <20251001210201.838686-22-ardb+git@google.com>

From: Ard Biesheuvel <ardb@kernel.org>

Before modifying the prototypes of kernel_neon_begin() and
kernel_neon_end() to accommodate kernel mode FP/SIMD state buffers
allocated on the stack, move arm64 to the new 'ksimd' scoped guard API,
which encapsulates the calls to those functions.
For symmetry, do the same for 32-bit ARM too.
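
For reference, a minimal sketch of what the guard could look like,
assuming it is built on the scoped guard helpers in <linux/cleanup.h>
(the guard itself is introduced earlier in this series; the exact
definition below is illustrative, not lifted from that patch):

  #include <linux/cleanup.h>
  #include <asm/neon.h>

  /*
   * Sketch: a zero-argument guard that brackets the guarded scope
   * with the existing begin/end calls. kernel_neon_end() runs on
   * every exit from the scope, because the guard variable carries
   * a __cleanup() attribute.
   */
  DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())

  #define scoped_ksimd()	scoped_guard(ksimd)

With that in place, each begin/end pair collapses into a single
statement or block scope, as the hunks below show.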
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
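Note for reviewers: scoped_guard() expands to a for loop that runs its
body exactly once, so the guarded statement is an ordinary nested scope
and assignments made inside it stay visible afterwards. That is what
lets the arm64 sha1/sha256/sha512 hunks below hoist the remainder count
out of the guarded region, along these lines (sketch, not part of the
patch):

  size_t rem;

  scoped_ksimd()
  	rem = __sha1_ce_transform(state, data, nblocks);

  /* NEON is disabled again here; 'rem' remains valid */
  data += (nblocks - rem) * SHA1_BLOCK_SIZE;
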
lib/crypto/arm/chacha-glue.c | 6 ++----
lib/crypto/arm/poly1305-glue.c | 6 ++----
lib/crypto/arm/sha1.h | 13 ++++++-------
lib/crypto/arm/sha256.h | 14 +++++++-------
lib/crypto/arm/sha512.h | 6 +++---
lib/crypto/arm64/chacha-neon-glue.c | 11 ++++-------
lib/crypto/arm64/poly1305-glue.c | 6 ++----
lib/crypto/arm64/sha1.h | 7 +++----
lib/crypto/arm64/sha256.h | 15 +++++++--------
lib/crypto/arm64/sha512.h | 8 ++++----
10 files changed, 40 insertions(+), 52 deletions(-)

diff --git a/lib/crypto/arm/chacha-glue.c b/lib/crypto/arm/chacha-glue.c
index 88ec96415283..9c2e8d5edf20 100644
--- a/lib/crypto/arm/chacha-glue.c
+++ b/lib/crypto/arm/chacha-glue.c
@@ -14,7 +14,6 @@
#include <asm/cputype.h>
#include <asm/hwcap.h>
-#include <asm/neon.h>
#include <asm/simd.h>
asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
@@ -90,9 +89,8 @@ void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
do {
unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
- kernel_neon_begin();
- chacha_doneon(state, dst, src, todo, nrounds);
- kernel_neon_end();
+ scoped_ksimd()
+ chacha_doneon(state, dst, src, todo, nrounds);
bytes -= todo;
src += todo;
diff --git a/lib/crypto/arm/poly1305-glue.c b/lib/crypto/arm/poly1305-glue.c
index 2d86c78af883..3e4624477e9f 100644
--- a/lib/crypto/arm/poly1305-glue.c
+++ b/lib/crypto/arm/poly1305-glue.c
@@ -6,7 +6,6 @@
*/
#include <asm/hwcap.h>
-#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
@@ -39,9 +38,8 @@ void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
do {
unsigned int todo = min_t(unsigned int, len, SZ_4K);
- kernel_neon_begin();
- poly1305_blocks_neon(state, src, todo, padbit);
- kernel_neon_end();
+ scoped_ksimd()
+ poly1305_blocks_neon(state, src, todo, padbit);
len -= todo;
src += todo;
diff --git a/lib/crypto/arm/sha1.h b/lib/crypto/arm/sha1.h
index fa1e92419000..a4296ffefd05 100644
--- a/lib/crypto/arm/sha1.h
+++ b/lib/crypto/arm/sha1.h
@@ -4,7 +4,6 @@
*
* Copyright 2025 Google LLC
*/
-#include <asm/neon.h>
#include <asm/simd.h>
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@@ -22,12 +21,12 @@ static void sha1_blocks(struct sha1_block_state *state,
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && likely(may_use_simd())) {
- kernel_neon_begin();
- if (static_branch_likely(&have_ce))
- sha1_ce_transform(state, data, nblocks);
- else
- sha1_transform_neon(state, data, nblocks);
- kernel_neon_end();
+ scoped_ksimd() {
+ if (static_branch_likely(&have_ce))
+ sha1_ce_transform(state, data, nblocks);
+ else
+ sha1_transform_neon(state, data, nblocks);
+ }
} else {
sha1_block_data_order(state, data, nblocks);
}
diff --git a/lib/crypto/arm/sha256.h b/lib/crypto/arm/sha256.h
index da75cbdc51d4..df861cc5b9ff 100644
--- a/lib/crypto/arm/sha256.h
+++ b/lib/crypto/arm/sha256.h
@@ -4,7 +4,7 @@
*
* Copyright 2025 Google LLC
*/
-#include <asm/neon.h>
+#include <asm/simd.h>
#include <crypto/internal/simd.h>
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
@@ -22,12 +22,12 @@ static void sha256_blocks(struct sha256_block_state *state,
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && crypto_simd_usable()) {
- kernel_neon_begin();
- if (static_branch_likely(&have_ce))
- sha256_ce_transform(state, data, nblocks);
- else
- sha256_block_data_order_neon(state, data, nblocks);
- kernel_neon_end();
+ scoped_ksimd() {
+ if (static_branch_likely(&have_ce))
+ sha256_ce_transform(state, data, nblocks);
+ else
+ sha256_block_data_order_neon(state, data, nblocks);
+ }
} else {
sha256_block_data_order(state, data, nblocks);
}
diff --git a/lib/crypto/arm/sha512.h b/lib/crypto/arm/sha512.h
index f147b6490d6c..35b80e7e7db7 100644
--- a/lib/crypto/arm/sha512.h
+++ b/lib/crypto/arm/sha512.h
@@ -6,6 +6,7 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
#include <crypto/internal/simd.h>
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@@ -20,9 +21,8 @@ static void sha512_blocks(struct sha512_block_state *state,
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
static_branch_likely(&have_neon) && likely(crypto_simd_usable())) {
- kernel_neon_begin();
- sha512_block_data_order_neon(state, data, nblocks);
- kernel_neon_end();
+ scoped_ksimd()
+ sha512_block_data_order_neon(state, data, nblocks);
} else {
sha512_block_data_order(state, data, nblocks);
}
diff --git a/lib/crypto/arm64/chacha-neon-glue.c b/lib/crypto/arm64/chacha-neon-glue.c
index d0188f974ca5..a3d109f0ce1e 100644
--- a/lib/crypto/arm64/chacha-neon-glue.c
+++ b/lib/crypto/arm64/chacha-neon-glue.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <asm/hwcap.h>
-#include <asm/neon.h>
#include <asm/simd.h>
asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
@@ -67,9 +66,8 @@ void hchacha_block_arch(const struct chacha_state *state,
if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
hchacha_block_generic(state, out, nrounds);
} else {
- kernel_neon_begin();
- hchacha_block_neon(state, out, nrounds);
- kernel_neon_end();
+ scoped_ksimd()
+ hchacha_block_neon(state, out, nrounds);
}
}
EXPORT_SYMBOL(hchacha_block_arch);
@@ -84,9 +82,8 @@ void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
do {
unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
- kernel_neon_begin();
- chacha_doneon(state, dst, src, todo, nrounds);
- kernel_neon_end();
+ scoped_ksimd()
+ chacha_doneon(state, dst, src, todo, nrounds);
bytes -= todo;
src += todo;
diff --git a/lib/crypto/arm64/poly1305-glue.c b/lib/crypto/arm64/poly1305-glue.c
index 31aea21ce42f..c83ce7d835d9 100644
--- a/lib/crypto/arm64/poly1305-glue.c
+++ b/lib/crypto/arm64/poly1305-glue.c
@@ -6,7 +6,6 @@
*/
#include <asm/hwcap.h>
-#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
@@ -38,9 +37,8 @@ void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
do {
unsigned int todo = min_t(unsigned int, len, SZ_4K);
- kernel_neon_begin();
- poly1305_blocks_neon(state, src, todo, padbit);
- kernel_neon_end();
+ scoped_ksimd()
+ poly1305_blocks_neon(state, src, todo, padbit);
len -= todo;
src += todo;
diff --git a/lib/crypto/arm64/sha1.h b/lib/crypto/arm64/sha1.h
index f822563538cc..3d0da0045fed 100644
--- a/lib/crypto/arm64/sha1.h
+++ b/lib/crypto/arm64/sha1.h
@@ -4,7 +4,6 @@
*
* Copyright 2025 Google LLC
*/
-#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/cpufeature.h>
@@ -20,9 +19,9 @@ static void sha1_blocks(struct sha1_block_state *state,
do {
size_t rem;
- kernel_neon_begin();
- rem = __sha1_ce_transform(state, data, nblocks);
- kernel_neon_end();
+ scoped_ksimd()
+ rem = __sha1_ce_transform(state, data, nblocks);
+
data += (nblocks - rem) * SHA1_BLOCK_SIZE;
nblocks = rem;
} while (nblocks);
diff --git a/lib/crypto/arm64/sha256.h b/lib/crypto/arm64/sha256.h
index a211966c124a..0a9f9d70bb43 100644
--- a/lib/crypto/arm64/sha256.h
+++ b/lib/crypto/arm64/sha256.h
@@ -4,7 +4,7 @@
*
* Copyright 2025 Google LLC
*/
-#include <asm/neon.h>
+#include <asm/simd.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
@@ -27,17 +27,16 @@ static void sha256_blocks(struct sha256_block_state *state,
do {
size_t rem;
- kernel_neon_begin();
- rem = __sha256_ce_transform(state,
- data, nblocks);
- kernel_neon_end();
+ scoped_ksimd()
+ rem = __sha256_ce_transform(state, data,
+ nblocks);
+
data += (nblocks - rem) * SHA256_BLOCK_SIZE;
nblocks = rem;
} while (nblocks);
} else {
- kernel_neon_begin();
- sha256_block_neon(state, data, nblocks);
- kernel_neon_end();
+ scoped_ksimd()
+ sha256_block_neon(state, data, nblocks);
}
} else {
sha256_block_data_order(state, data, nblocks);
diff --git a/lib/crypto/arm64/sha512.h b/lib/crypto/arm64/sha512.h
index 6abb40b467f2..1b6c3974d553 100644
--- a/lib/crypto/arm64/sha512.h
+++ b/lib/crypto/arm64/sha512.h
@@ -5,7 +5,7 @@
* Copyright 2025 Google LLC
*/
-#include <asm/neon.h>
+#include <asm/simd.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
@@ -25,9 +25,9 @@ static void sha512_blocks(struct sha512_block_state *state,
do {
size_t rem;
- kernel_neon_begin();
- rem = __sha512_ce_transform(state, data, nblocks);
- kernel_neon_end();
+ scoped_ksimd()
+ rem = __sha512_ce_transform(state, data, nblocks);
+
data += (nblocks - rem) * SHA512_BLOCK_SIZE;
nblocks = rem;
} while (nblocks);
--
2.51.0.618.g983fd99d29-goog