public inbox for linux-btrfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoph Hellwig <hch@lst.de>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Richard Henderson <richard.henderson@linaro.org>,
	Matt Turner <mattst88@gmail.com>,
	Magnus Lindholm <linmag7@gmail.com>,
	Russell King <linux@armlinux.org.uk>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	WANG Xuerui <kernel@xen0n.name>,
	Madhavan Srinivasan <maddy@linux.ibm.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Nicholas Piggin <npiggin@gmail.com>,
	"Christophe Leroy (CS GROUP)" <chleroy@kernel.org>,
	Paul Walmsley <pjw@kernel.org>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	Alexandre Ghiti <alex@ghiti.fr>,
	Heiko Carstens <hca@linux.ibm.com>,
	Vasily Gorbik <gor@linux.ibm.com>,
	Alexander Gordeev <agordeev@linux.ibm.com>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Sven Schnelle <svens@linux.ibm.com>,
	"David S. Miller" <davem@davemloft.net>,
	Andreas Larsson <andreas@gaisler.com>,
	Richard Weinberger <richard@nod.at>,
	Anton Ivanov <anton.ivanov@cambridgegreys.com>,
	Johannes Berg <johannes@sipsolutions.net>,
	Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
	Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Dan Williams <dan.j.williams@intel.com>, Chris Mason <clm@fb.com>,
	David Sterba <dsterba@suse.com>, Arnd Bergmann <arnd@arndb.de>,
	Song Liu <song@kernel.org>, Yu Kuai <yukuai@fnnas.com>,
	Li Nan <linan122@huawei.com>,
	linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, loongarch@lists.linux.dev,
	linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
	linux-s390@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-um@lists.infradead.org, linux-crypto@vger.kernel.org,
	linux-btrfs@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-raid@vger.kernel.org
Subject: [PATCH 06/25] xor: cleanup registration and probing
Date: Thu, 26 Feb 2026 07:10:18 -0800	[thread overview]
Message-ID: <20260226151106.144735-7-hch@lst.de> (raw)
In-Reply-To: <20260226151106.144735-1-hch@lst.de>

Originally, the XOR code benchmarked all algorithms at load time, but
it has since then been hacked multiple times to allow forcing an
algorithm, and then commit 524ccdbdfb52 ("crypto: xor - defer load time
benchmark to a later time") changed the logic to a two-step process
of registration and benchmarking, but only when built-in.

Rework this, so that the XOR_TRY_TEMPLATES macro magic now always just
deals with adding the templates to the list, and benchmarking is always
done in a second pass; for modular builds from module_init, and for the
built-in case using a separate init call level.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 lib/raid/xor/xor-core.c | 98 ++++++++++++++++++++---------------------
 1 file changed, 48 insertions(+), 50 deletions(-)

diff --git a/lib/raid/xor/xor-core.c b/lib/raid/xor/xor-core.c
index 28aa654c288d..a2c529d7b7c2 100644
--- a/lib/raid/xor/xor-core.c
+++ b/lib/raid/xor/xor-core.c
@@ -52,29 +52,14 @@ EXPORT_SYMBOL(xor_blocks);
 
 /* Set of all registered templates.  */
 static struct xor_block_template *__initdata template_list;
+static int __initdata xor_forced = false;
 
-#ifndef MODULE
 static void __init do_xor_register(struct xor_block_template *tmpl)
 {
 	tmpl->next = template_list;
 	template_list = tmpl;
 }
 
-static int __init register_xor_blocks(void)
-{
-	active_template = XOR_SELECT_TEMPLATE(NULL);
-
-	if (!active_template) {
-#define xor_speed	do_xor_register
-		// register all the templates and pick the first as the default
-		XOR_TRY_TEMPLATES;
-#undef xor_speed
-		active_template = template_list;
-	}
-	return 0;
-}
-#endif
-
 #define BENCH_SIZE	4096
 #define REPS		800U
 
@@ -85,9 +70,6 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
 	unsigned long reps;
 	ktime_t min, start, t0;
 
-	tmpl->next = template_list;
-	template_list = tmpl;
-
 	preempt_disable();
 
 	reps = 0;
@@ -111,63 +93,79 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
 	pr_info("   %-16s: %5d MB/sec\n", tmpl->name, speed);
 }
 
-static int __init
-calibrate_xor_blocks(void)
+static int __init calibrate_xor_blocks(void)
 {
 	void *b1, *b2;
 	struct xor_block_template *f, *fastest;
 
-	fastest = XOR_SELECT_TEMPLATE(NULL);
-
-	if (fastest) {
-		printk(KERN_INFO "xor: automatically using best "
-				 "checksumming function   %-10s\n",
-		       fastest->name);
-		goto out;
-	}
+	if (xor_forced)
+		return 0;
 
 	b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
 	if (!b1) {
-		printk(KERN_WARNING "xor: Yikes!  No memory available.\n");
+		pr_info("xor: Yikes!  No memory available.\n");
 		return -ENOMEM;
 	}
 	b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
 
-	/*
-	 * If this arch/cpu has a short-circuited selection, don't loop through
-	 * all the possible functions, just test the best one
-	 */
-
-#define xor_speed(templ)	do_xor_speed((templ), b1, b2)
-
-	printk(KERN_INFO "xor: measuring software checksum speed\n");
-	template_list = NULL;
-	XOR_TRY_TEMPLATES;
+	pr_info("xor: measuring software checksum speed\n");
 	fastest = template_list;
-	for (f = fastest; f; f = f->next)
+	for (f = template_list; f; f = f->next) {
+		do_xor_speed(f, b1, b2);
 		if (f->speed > fastest->speed)
 			fastest = f;
-
+	}
+	active_template = fastest;
 	pr_info("xor: using function: %s (%d MB/sec)\n",
 	       fastest->name, fastest->speed);
 
+	free_pages((unsigned long)b1, 2);
+	return 0;
+}
+
+static int __init xor_init(void)
+{
+	/*
+	 * If this arch/cpu has a short-circuited selection, don't loop through
+	 * all the possible functions, just use the best one.
+	 */
+	active_template = XOR_SELECT_TEMPLATE(NULL);
+	if (active_template) {
+		pr_info("xor: automatically using best checksumming function   %-10s\n",
+			active_template->name);
+		xor_forced = true;
+		return 0;
+	}
+
+#define xor_speed	do_xor_register
+	XOR_TRY_TEMPLATES;
 #undef xor_speed
 
-	free_pages((unsigned long)b1, 2);
-out:
-	active_template = fastest;
+#ifdef MODULE
+	return calibrate_xor_blocks();
+#else
+	/*
+	 * Pick the first template as the temporary default until calibration
+	 * happens.
+	 */
+	active_template = template_list;
 	return 0;
+#endif
 }
 
-static __exit void xor_exit(void) { }
+static __exit void xor_exit(void)
+{
+}
 
 MODULE_DESCRIPTION("RAID-5 checksumming functions");
 MODULE_LICENSE("GPL");
 
+/*
+ * When built-in we must register the default template before md, but we don't
+ * want calibration to run that early as that would delay the boot process.
+ */
 #ifndef MODULE
-/* when built-in xor.o must initialize before drivers/md/md.o */
-core_initcall(register_xor_blocks);
+__initcall(calibrate_xor_blocks);
 #endif
-
-module_init(calibrate_xor_blocks);
+core_initcall(xor_init);
 module_exit(xor_exit);
-- 
2.47.3


  parent reply	other threads:[~2026-02-26 15:11 UTC|newest]

Thread overview: 71+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-26 15:10 cleanup the RAID5 XOR library Christoph Hellwig
2026-02-26 15:10 ` [PATCH 01/25] xor: assert that xor_blocks is not called from interrupt context Christoph Hellwig
2026-02-27 14:24   ` Peter Zijlstra
2026-03-03 16:00     ` Christoph Hellwig
2026-03-03 19:55       ` Eric Biggers
2026-03-04 14:51         ` Christoph Hellwig
2026-03-04 15:15           ` Peter Zijlstra
2026-03-04 15:42             ` Christoph Hellwig
2026-03-04 15:01         ` Heiko Carstens
2026-03-04 15:06           ` Christoph Hellwig
2026-03-04 15:08           ` Heiko Carstens
2026-02-26 15:10 ` [PATCH 02/25] arm/xor: remove in_interrupt() handling Christoph Hellwig
2026-02-26 15:10 ` [PATCH 03/25] um/xor: don't override XOR_SELECT_TEMPLATE Christoph Hellwig
2026-02-26 21:45   ` Richard Weinberger
2026-02-26 22:00     ` hch
2026-02-27  7:39     ` Johannes Berg
2026-02-28  4:30   ` Eric Biggers
2026-03-02  7:38     ` Johannes Berg
2026-02-26 15:10 ` [PATCH 04/25] xor: move to lib/raid/ Christoph Hellwig
2026-02-28  4:35   ` Eric Biggers
2026-03-03 16:01     ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 05/25] xor: small cleanups Christoph Hellwig
2026-02-26 15:10 ` Christoph Hellwig [this message]
2026-02-28  4:41   ` [PATCH 06/25] xor: cleanup registration and probing Eric Biggers
2026-02-26 15:10 ` [PATCH 07/25] xor: split xor.h Christoph Hellwig
2026-02-28  4:43   ` Eric Biggers
2026-03-03 16:03     ` Christoph Hellwig
2026-03-03 16:15       ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 08/25] xor: remove macro abuse for XOR implementation registrations Christoph Hellwig
2026-02-26 15:10 ` [PATCH 09/25] xor: move generic implementations out of asm-generic/xor.h Christoph Hellwig
2026-02-26 15:40   ` Arnd Bergmann
2026-02-28  7:15   ` Eric Biggers
2026-03-03 16:09     ` Christoph Hellwig
2026-03-10 14:00       ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 10/25] alpha: move the XOR code to lib/raid/ Christoph Hellwig
2026-02-26 15:10 ` [PATCH 11/25] arm: " Christoph Hellwig
2026-02-26 15:10 ` [PATCH 12/25] arm64: " Christoph Hellwig
2026-02-26 15:10 ` [PATCH 13/25] loongarch: " Christoph Hellwig
2026-02-26 15:10 ` [PATCH 14/25] powerpc: " Christoph Hellwig
2026-02-26 15:10 ` [PATCH 15/25] riscv: " Christoph Hellwig
2026-02-28  5:37   ` Eric Biggers
2026-02-26 15:10 ` [PATCH 16/25] sparc: " Christoph Hellwig
2026-02-28  5:47   ` Eric Biggers
2026-03-03 16:04     ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 17/25] s390: " Christoph Hellwig
2026-02-27  9:09   ` Heiko Carstens
2026-02-27 14:13     ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 18/25] x86: " Christoph Hellwig
2026-02-27 14:30   ` Peter Zijlstra
2026-02-27 23:55     ` Eric Biggers
2026-02-28 10:31       ` Peter Zijlstra
2026-03-03 16:05         ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 19/25] xor: avoid indirect calls for arm64-optimized ops Christoph Hellwig
2026-02-26 15:10 ` [PATCH 20/25] xor: make xor.ko self-contained in lib/raid/ Christoph Hellwig
2026-02-28  6:42   ` Eric Biggers
2026-03-03 16:06     ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 21/25] xor: add a better public API Christoph Hellwig
2026-02-28  6:50   ` Eric Biggers
2026-03-03 16:07     ` Christoph Hellwig
2026-03-10  6:58     ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 22/25] async_xor: use xor_gen Christoph Hellwig
2026-02-28  6:55   ` Eric Biggers
2026-02-26 15:10 ` [PATCH 23/25] btrfs: " Christoph Hellwig
2026-02-26 15:10 ` [PATCH 24/25] xor: pass the entire operation to the low-level ops Christoph Hellwig
2026-02-28  6:58   ` Eric Biggers
2026-03-03 16:09     ` Christoph Hellwig
2026-02-26 15:10 ` [PATCH 25/25] xor: use static_call for xor_gen Christoph Hellwig
2026-02-27 14:36   ` Peter Zijlstra
2026-02-26 18:20 ` cleanup the RAID5 XOR library Andrew Morton
2026-02-28  7:35 ` Eric Biggers
2026-03-03 16:11   ` Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260226151106.144735-7-hch@lst.de \
    --to=hch@lst.de \
    --cc=agordeev@linux.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=alex@ghiti.fr \
    --cc=andreas@gaisler.com \
    --cc=anton.ivanov@cambridgegreys.com \
    --cc=aou@eecs.berkeley.edu \
    --cc=arnd@arndb.de \
    --cc=borntraeger@linux.ibm.com \
    --cc=bp@alien8.de \
    --cc=catalin.marinas@arm.com \
    --cc=chenhuacai@kernel.org \
    --cc=chleroy@kernel.org \
    --cc=clm@fb.com \
    --cc=dan.j.williams@intel.com \
    --cc=dave.hansen@linux.intel.com \
    --cc=davem@davemloft.net \
    --cc=dsterba@suse.com \
    --cc=gor@linux.ibm.com \
    --cc=hca@linux.ibm.com \
    --cc=herbert@gondor.apana.org.au \
    --cc=hpa@zytor.com \
    --cc=johannes@sipsolutions.net \
    --cc=kernel@xen0n.name \
    --cc=linan122@huawei.com \
    --cc=linmag7@gmail.com \
    --cc=linux-alpha@vger.kernel.org \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-crypto@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-raid@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=linux-um@lists.infradead.org \
    --cc=linux@armlinux.org.uk \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=loongarch@lists.linux.dev \
    --cc=maddy@linux.ibm.com \
    --cc=mattst88@gmail.com \
    --cc=mingo@redhat.com \
    --cc=mpe@ellerman.id.au \
    --cc=npiggin@gmail.com \
    --cc=palmer@dabbelt.com \
    --cc=pjw@kernel.org \
    --cc=richard.henderson@linaro.org \
    --cc=richard@nod.at \
    --cc=song@kernel.org \
    --cc=sparclinux@vger.kernel.org \
    --cc=svens@linux.ibm.com \
    --cc=tglx@kernel.org \
    --cc=will@kernel.org \
    --cc=x86@kernel.org \
    --cc=yukuai@fnnas.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox