From: Christoph Hellwig <hch@lst.de>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Richard Henderson <richard.henderson@linaro.org>,
Matt Turner <mattst88@gmail.com>,
Magnus Lindholm <linmag7@gmail.com>,
Russell King <linux@armlinux.org.uk>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
Huacai Chen <chenhuacai@kernel.org>,
WANG Xuerui <kernel@xen0n.name>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
"Christophe Leroy (CS GROUP)" <chleroy@kernel.org>,
Paul Walmsley <pjw@kernel.org>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Alexandre Ghiti <alex@ghiti.fr>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
"David S. Miller" <davem@davemloft.net>,
Andreas Larsson <andreas@gaisler.com>,
Richard Weinberger <richard@nod.at>,
Anton Ivanov <anton.ivanov@cambridgegreys.com>,
Johannes Berg <johannes@sipsolutions.net>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
Herbert Xu <herbert@gondor.apana.org.au>,
Dan Williams <dan.j.williams@intel.com>, Chris Mason <clm@fb.com>,
David Sterba <dsterba@suse.com>, Arnd Bergmann <arnd@arndb.de>,
Song Liu <song@kernel.org>, Yu Kuai <yukuai@fnnas.com>,
Li Nan <linan122@huawei.com>, "Theodore Ts'o" <tytso@mit.edu>,
"Jason A. Donenfeld" <Jason@zx2c4.com>,
linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, loongarch@lists.linux.dev,
linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
linux-s390@vger.kernel.org, sparclinux@vger.kernel.org,
linux-um@lists.infradead.org, linux-crypto@vger.kernel.org,
linux-btrfs@vger.kernel.org, linux-arch@vger.kernel.org,
linux-raid@vger.kernel.org
Subject: [PATCH 06/26] xor: cleanup registration and probing
Date: Tue, 24 Mar 2026 07:21:42 +0100 [thread overview]
Message-ID: <20260324062211.3216301-7-hch@lst.de> (raw)
In-Reply-To: <20260324062211.3216301-1-hch@lst.de>
Originally, the XOR code benchmarked all algorithms at load time, but
it has since then been hacked multiple times to allow forcing an
algorithm, and then commit 524ccdbdfb52 ("crypto: xor - defer load time
benchmark to a later time") changed the logic to a two-step process
of registration and benchmarking, but only when built-in.
Rework this, so that the XOR_TRY_TEMPLATES macro magic now always just
deals with adding the templates to the list, and benchmarking is always
done in a second pass; for modular builds from module_init, and for the
built-in case using a separate init call level.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/xor/xor-core.c | 98 ++++++++++++++++++++---------------------
1 file changed, 48 insertions(+), 50 deletions(-)
diff --git a/lib/raid/xor/xor-core.c b/lib/raid/xor/xor-core.c
index edb4e498da60..88667a89b75b 100644
--- a/lib/raid/xor/xor-core.c
+++ b/lib/raid/xor/xor-core.c
@@ -52,29 +52,14 @@ EXPORT_SYMBOL(xor_blocks);
/* Set of all registered templates. */
static struct xor_block_template *__initdata template_list;
+static bool __initdata xor_forced = false;
-#ifndef MODULE
static void __init do_xor_register(struct xor_block_template *tmpl)
{
tmpl->next = template_list;
template_list = tmpl;
}
-static int __init register_xor_blocks(void)
-{
- active_template = XOR_SELECT_TEMPLATE(NULL);
-
- if (!active_template) {
-#define xor_speed do_xor_register
- // register all the templates and pick the first as the default
- XOR_TRY_TEMPLATES;
-#undef xor_speed
- active_template = template_list;
- }
- return 0;
-}
-#endif
-
#define BENCH_SIZE 4096
#define REPS 800U
@@ -85,9 +70,6 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
unsigned long reps;
ktime_t min, start, t0;
- tmpl->next = template_list;
- template_list = tmpl;
-
preempt_disable();
reps = 0;
@@ -111,63 +93,79 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
}
-static int __init
-calibrate_xor_blocks(void)
+static int __init calibrate_xor_blocks(void)
{
void *b1, *b2;
struct xor_block_template *f, *fastest;
- fastest = XOR_SELECT_TEMPLATE(NULL);
-
- if (fastest) {
- printk(KERN_INFO "xor: automatically using best "
- "checksumming function %-10s\n",
- fastest->name);
- goto out;
- }
+ if (xor_forced)
+ return 0;
b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (!b1) {
- printk(KERN_WARNING "xor: Yikes! No memory available.\n");
+ pr_warn("xor: Yikes! No memory available.\n");
return -ENOMEM;
}
b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
- /*
- * If this arch/cpu has a short-circuited selection, don't loop through
- * all the possible functions, just test the best one
- */
-
-#define xor_speed(templ) do_xor_speed((templ), b1, b2)
-
- printk(KERN_INFO "xor: measuring software checksum speed\n");
- template_list = NULL;
- XOR_TRY_TEMPLATES;
+ pr_info("xor: measuring software checksum speed\n");
fastest = template_list;
- for (f = fastest; f; f = f->next)
+ for (f = template_list; f; f = f->next) {
+ do_xor_speed(f, b1, b2);
if (f->speed > fastest->speed)
fastest = f;
-
+ }
+ active_template = fastest;
pr_info("xor: using function: %s (%d MB/sec)\n",
fastest->name, fastest->speed);
+ free_pages((unsigned long)b1, 2);
+ return 0;
+}
+
+static int __init xor_init(void)
+{
+ /*
+ * If this arch/cpu has a short-circuited selection, don't loop through
+ * all the possible functions, just use the best one.
+ */
+ active_template = XOR_SELECT_TEMPLATE(NULL);
+ if (active_template) {
+ pr_info("xor: automatically using best checksumming function %-10s\n",
+ active_template->name);
+ xor_forced = true;
+ return 0;
+ }
+
+#define xor_speed do_xor_register
+ XOR_TRY_TEMPLATES;
#undef xor_speed
- free_pages((unsigned long)b1, 2);
-out:
- active_template = fastest;
+#ifdef MODULE
+ return calibrate_xor_blocks();
+#else
+ /*
+ * Pick the first template as the temporary default until calibration
+ * happens.
+ */
+ active_template = template_list;
return 0;
+#endif
}
-static __exit void xor_exit(void) { }
+static __exit void xor_exit(void)
+{
+}
MODULE_DESCRIPTION("RAID-5 checksumming functions");
MODULE_LICENSE("GPL");
+/*
+ * When built-in we must register the default template before md, but we don't
+ * want calibration to run that early as that would delay the boot process.
+ */
#ifndef MODULE
-/* when built-in xor.o must initialize before drivers/md/md.o */
-core_initcall(register_xor_blocks);
+__initcall(calibrate_xor_blocks);
#endif
-
-module_init(calibrate_xor_blocks);
+core_initcall(xor_init);
module_exit(xor_exit);
--
2.47.3
next prev parent reply other threads:[~2026-03-24 6:23 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-24 6:21 cleanup the RAID5 XOR library v3 Christoph Hellwig
2026-03-24 6:21 ` [PATCH 01/26] xor: assert that xor_blocks is not from preemptible user context Christoph Hellwig
2026-03-24 6:21 ` [PATCH 02/26] arm/xor: remove in_interrupt() handling Christoph Hellwig
2026-03-24 6:21 ` [PATCH 03/26] um/xor: cleanup xor.h Christoph Hellwig
2026-03-24 6:21 ` [PATCH 04/26] xor: move to lib/raid/ Christoph Hellwig
2026-03-24 6:21 ` [PATCH 05/26] xor: small cleanups Christoph Hellwig
2026-03-24 6:21 ` Christoph Hellwig [this message]
2026-03-24 6:21 ` [PATCH 07/26] xor: split xor.h Christoph Hellwig
2026-03-24 6:21 ` [PATCH 08/26] xor: remove macro abuse for XOR implementation registrations Christoph Hellwig
2026-03-24 6:21 ` [PATCH 09/26] xor: move generic implementations out of asm-generic/xor.h Christoph Hellwig
2026-03-24 6:21 ` [PATCH 10/26] alpha: move the XOR code to lib/raid/ Christoph Hellwig
2026-03-24 6:21 ` [PATCH 11/26] arm: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 12/26] arm64: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 13/26] loongarch: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 14/26] powerpc: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 15/26] riscv: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 16/26] sparc: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 17/26] s390: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 18/26] x86: " Christoph Hellwig
2026-03-24 6:21 ` [PATCH 19/26] xor: avoid indirect calls for arm64-optimized ops Christoph Hellwig
2026-03-24 6:21 ` [PATCH 20/26] xor: make xor.ko self-contained in lib/raid/ Christoph Hellwig
2026-03-24 6:21 ` [PATCH 21/26] xor: add a better public API Christoph Hellwig
2026-03-24 6:21 ` [PATCH 22/26] async_xor: use xor_gen Christoph Hellwig
2026-03-24 6:21 ` [PATCH 23/26] btrfs: " Christoph Hellwig
2026-03-24 6:22 ` [PATCH 24/26] xor: pass the entire operation to the low-level ops Christoph Hellwig
2026-03-24 6:22 ` [PATCH 25/26] xor: use static_call for xor_gen Christoph Hellwig
2026-03-24 6:22 ` [PATCH 26/26] xor: add a kunit test case Christoph Hellwig
2026-03-24 12:59 ` cleanup the RAID5 XOR library v3 Andrew Morton
2026-03-24 16:42 ` Andrew Morton
2026-03-25 19:39 ` Eric Biggers
2026-03-26 5:18 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260324062211.3216301-7-hch@lst.de \
--to=hch@lst.de \
--cc=Jason@zx2c4.com \
--cc=agordeev@linux.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=alex@ghiti.fr \
--cc=andreas@gaisler.com \
--cc=anton.ivanov@cambridgegreys.com \
--cc=aou@eecs.berkeley.edu \
--cc=ardb@kernel.org \
--cc=arnd@arndb.de \
--cc=borntraeger@linux.ibm.com \
--cc=bp@alien8.de \
--cc=catalin.marinas@arm.com \
--cc=chenhuacai@kernel.org \
--cc=chleroy@kernel.org \
--cc=clm@fb.com \
--cc=dan.j.williams@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=dsterba@suse.com \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=herbert@gondor.apana.org.au \
--cc=hpa@zytor.com \
--cc=johannes@sipsolutions.net \
--cc=kernel@xen0n.name \
--cc=linan122@huawei.com \
--cc=linmag7@gmail.com \
--cc=linux-alpha@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-btrfs@vger.kernel.org \
--cc=linux-crypto@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
--cc=linux-riscv@lists.infradead.org \
--cc=linux-s390@vger.kernel.org \
--cc=linux-um@lists.infradead.org \
--cc=linux@armlinux.org.uk \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=loongarch@lists.linux.dev \
--cc=maddy@linux.ibm.com \
--cc=mattst88@gmail.com \
--cc=mingo@redhat.com \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=palmer@dabbelt.com \
--cc=pjw@kernel.org \
--cc=richard.henderson@linaro.org \
--cc=richard@nod.at \
--cc=song@kernel.org \
--cc=sparclinux@vger.kernel.org \
--cc=svens@linux.ibm.com \
--cc=tglx@kernel.org \
--cc=tytso@mit.edu \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yukuai@fnnas.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox