[PATCH] lib/crypto: sha256: Use underlying functions instead of crypto_simd_usable()
From: Eric Biggers @ 2025-07-31 22:35 UTC
To: linux-crypto
Cc: Ard Biesheuvel, Jason A. Donenfeld, linux-arm-kernel, x86, Eric Biggers

Since sha256_kunit tests the fallback code paths without using
crypto_simd_disabled_for_test, make the SHA-256 code just use the
underlying may_use_simd() and irq_fpu_usable() functions directly
instead of crypto_simd_usable(). This eliminates an unnecessary layer.
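
For reference, crypto_simd_usable() is essentially just the arch-specific
check plus a knob that the crypto self-tests use to force the fallback
path; on x86, may_use_simd() is in turn just irq_fpu_usable(). Roughly
(a paraphrased sketch, not the verbatim <crypto/internal/simd.h>):

	/*
	 * Paraphrased sketch of the layer being removed; not verbatim.
	 * The test-only override is irrelevant here, since sha256_kunit
	 * exercises the fallback paths directly.
	 */
	static inline bool crypto_simd_usable(void)
	{
		return may_use_simd() && !crypto_simd_disabled_for_test;
	}
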
While doing this, also add likely() annotations, and fix a minor
inconsistency where the static keys in the sha256.h files were in a
different place than in the corresponding sha1.h and sha512.h files.

Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
lib/crypto/arm/sha256.h | 10 +++++-----
lib/crypto/arm64/sha256.h | 10 +++++-----
lib/crypto/riscv/sha256.h | 8 ++++----
lib/crypto/x86/sha256.h | 3 +--
4 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/lib/crypto/arm/sha256.h b/lib/crypto/arm/sha256.h
index da75cbdc51d41..eab713e650f33 100644
--- a/lib/crypto/arm/sha256.h
+++ b/lib/crypto/arm/sha256.h
@@ -3,27 +3,27 @@
* SHA-256 optimized for ARM
*
* Copyright 2025 Google LLC
*/
#include <asm/neon.h>
-#include <crypto/internal/simd.h>
+#include <asm/simd.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
asmlinkage void sha256_block_data_order_neon(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
asmlinkage void sha256_ce_transform(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
-
static void sha256_blocks(struct sha256_block_state *state,
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- static_branch_likely(&have_neon) && crypto_simd_usable()) {
+ static_branch_likely(&have_neon) && likely(may_use_simd())) {
kernel_neon_begin();
if (static_branch_likely(&have_ce))
sha256_ce_transform(state, data, nblocks);
else
sha256_block_data_order_neon(state, data, nblocks);
diff --git a/lib/crypto/arm64/sha256.h b/lib/crypto/arm64/sha256.h
index a211966c124a9..d95f1077c32bd 100644
--- a/lib/crypto/arm64/sha256.h
+++ b/lib/crypto/arm64/sha256.h
@@ -3,28 +3,28 @@
* SHA-256 optimized for ARM64
*
* Copyright 2025 Google LLC
*/
#include <asm/neon.h>
-#include <crypto/internal/simd.h>
+#include <asm/simd.h>
#include <linux/cpufeature.h>
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
+
asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
-
static void sha256_blocks(struct sha256_block_state *state,
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- static_branch_likely(&have_neon) && crypto_simd_usable()) {
+ static_branch_likely(&have_neon) && likely(may_use_simd())) {
if (static_branch_likely(&have_ce)) {
do {
size_t rem;
kernel_neon_begin();
diff --git a/lib/crypto/riscv/sha256.h b/lib/crypto/riscv/sha256.h
index c0f79c18f1199..f36f68d2e88cc 100644
--- a/lib/crypto/riscv/sha256.h
+++ b/lib/crypto/riscv/sha256.h
@@ -7,23 +7,23 @@
*
* Copyright (C) 2023 SiFive, Inc.
* Author: Jerry Shih <jerry.shih@sifive.com>
*/
+#include <asm/simd.h>
#include <asm/vector.h>
-#include <crypto/internal/simd.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
asmlinkage void
sha256_transform_zvknha_or_zvknhb_zvkb(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
-
static void sha256_blocks(struct sha256_block_state *state,
const u8 *data, size_t nblocks)
{
- if (static_branch_likely(&have_extensions) && crypto_simd_usable()) {
+ if (static_branch_likely(&have_extensions) && likely(may_use_simd())) {
kernel_vector_begin();
sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
kernel_vector_end();
} else {
sha256_blocks_generic(state, data, nblocks);
diff --git a/lib/crypto/x86/sha256.h b/lib/crypto/x86/sha256.h
index 669bc06538b67..c852396ef3190 100644
--- a/lib/crypto/x86/sha256.h
+++ b/lib/crypto/x86/sha256.h
@@ -3,22 +3,21 @@
* SHA-256 optimized for x86_64
*
* Copyright 2025 Google LLC
*/
#include <asm/fpu/api.h>
-#include <crypto/internal/simd.h>
#include <linux/static_call.h>
DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_blocks_generic);
#define DEFINE_X86_SHA256_FN(c_fn, asm_fn) \
asmlinkage void asm_fn(struct sha256_block_state *state, \
const u8 *data, size_t nblocks); \
static void c_fn(struct sha256_block_state *state, const u8 *data, \
size_t nblocks) \
{ \
- if (likely(crypto_simd_usable())) { \
+ if (likely(irq_fpu_usable())) { \
kernel_fpu_begin(); \
asm_fn(state, data, nblocks); \
kernel_fpu_end(); \
} else { \
sha256_blocks_generic(state, data, nblocks); \
base-commit: d6084bb815c453de27af8071a23163a711586a6c
--
2.50.1
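
[Aside: the x86 hunk above shows only part of DEFINE_X86_SHA256_FN(). For
illustration, an instantiation plus the static_call dispatch would look
roughly like the sketch below; the concrete function names are
assumptions, not taken from this hunk.]

	/* Illustrative sketch; names are assumptions, not from this patch. */
	DEFINE_X86_SHA256_FN(sha256_blocks_ni, sha256_ni_transform);

	static void sha256_blocks(struct sha256_block_state *state,
				  const u8 *data, size_t nblocks)
	{
		/* Dispatch to whichever implementation was selected at init. */
		static_call(sha256_blocks_x86)(state, data, nblocks);
	}
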
Re: [PATCH] lib/crypto: sha256: Use underlying functions instead of crypto_simd_usable()
From: Eric Biggers @ 2025-08-11 17:58 UTC
To: linux-crypto; +Cc: Ard Biesheuvel, Jason A. Donenfeld, linux-arm-kernel, x86

On Thu, Jul 31, 2025 at 03:35:10PM -0700, Eric Biggers wrote:
> Since sha256_kunit tests the fallback code paths without using
> crypto_simd_disabled_for_test, make the SHA-256 code just use the
> underlying may_use_simd() and irq_fpu_usable() functions directly
> instead of crypto_simd_usable(). This eliminates an unnecessary layer.
>
> While doing this, also add likely() annotations, and fix a minor
> inconsistency where the static keys in the sha256.h files were in a
> different place than in the corresponding sha1.h and sha512.h files.
>
> Signed-off-by: Eric Biggers <ebiggers@kernel.org>
> ---
> lib/crypto/arm/sha256.h | 10 +++++-----
> lib/crypto/arm64/sha256.h | 10 +++++-----
> lib/crypto/riscv/sha256.h | 8 ++++----
> lib/crypto/x86/sha256.h | 3 +--
> 4 files changed, 15 insertions(+), 16 deletions(-)
Applied to https://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux.git/log/?h=libcrypto-next
- Eric