From: Christoph Hellwig <hch@lst.de>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Richard Henderson <richard.henderson@linaro.org>,
Matt Turner <mattst88@gmail.com>,
Magnus Lindholm <linmag7@gmail.com>,
Russell King <linux@armlinux.org.uk>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
Huacai Chen <chenhuacai@kernel.org>,
WANG Xuerui <kernel@xen0n.name>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
"Christophe Leroy (CS GROUP)" <chleroy@kernel.org>,
Paul Walmsley <pjw@kernel.org>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Alexandre Ghiti <alex@ghiti.fr>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
"David S. Miller" <davem@davemloft.net>,
Andreas Larsson <andreas@gaisler.com>,
Richard Weinberger <richard@nod.at>,
Anton Ivanov <anton.ivanov@cambridgegreys.com>,
Johannes Berg <johannes@sipsolutions.net>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
Herbert Xu <herbert@gondor.apana.org.au>,
Dan Williams <dan.j.williams@intel.com>, Chris Mason <clm@fb.com>,
David Sterba <dsterba@suse.com>, Arnd Bergmann <arnd@arndb.de>,
Song Liu <song@kernel.org>, Yu Kuai <yukuai@fnnas.com>,
Li Nan <linan122@huawei.com>, "Theodore Ts'o" <tytso@mit.edu>,
"Jason A. Donenfeld" <Jason@zx2c4.com>,
linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, loongarch@lists.linux.dev,
linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
linux-s390@vger.kernel.org, sparclinux@vger.kernel.org,
linux-um@lists.infradead.org, linux-crypto@vger.kernel.org,
linux-btrfs@vger.kernel.org, linux-arch@vger.kernel.org,
linux-raid@vger.kernel.org
Subject: [PATCH 19/26] xor: avoid indirect calls for arm64-optimized ops
Date: Tue, 24 Mar 2026 07:21:55 +0100
Message-ID: <20260324062211.3216301-20-hch@lst.de>
In-Reply-To: <20260324062211.3216301-1-hch@lst.de>
Remove the inner xor_block_template, and instead provide two separate
actual templates that call into the neon-enabled compilation unit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
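(Illustrative note, not part of the commit message: the XOR_TEMPLATE()
macro added below generates one scoped_ksimd() wrapper per source count
plus the xor_block_template instance.  For the two-source NEON case it
expands roughly to the following sketch; the real expansion also covers
the do_3/do_4/do_5 variants:)

	static void
	xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
		   const unsigned long * __restrict p2)
	{
		/* enter kernel-mode SIMD around the NEON body */
		scoped_ksimd()
			__xor_neon_2(bytes, p1, p2);
	}

	struct xor_block_template xor_block_neon = {
		.name = "neon",
		.do_2 = xor_neon_2,
		/* ... do_3/do_4/do_5 wrappers generated the same way */
	};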
arch/arm64/include/asm/xor.h | 13 ++--
lib/raid/xor/arm64/xor-neon-glue.c | 95 +++++++++++++++---------------
lib/raid/xor/arm64/xor-neon.c | 73 +++++++++--------------
lib/raid/xor/arm64/xor-neon.h | 30 ++++++++++
4 files changed, 114 insertions(+), 97 deletions(-)
create mode 100644 lib/raid/xor/arm64/xor-neon.h
diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h
index 81718f010761..4782c760bcac 100644
--- a/arch/arm64/include/asm/xor.h
+++ b/arch/arm64/include/asm/xor.h
@@ -7,15 +7,18 @@
#include <asm-generic/xor.h>
#include <asm/simd.h>
-extern struct xor_block_template xor_block_arm64;
-void __init xor_neon_init(void);
+extern struct xor_block_template xor_block_neon;
+extern struct xor_block_template xor_block_eor3;
#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
- xor_neon_init();
xor_register(&xor_block_8regs);
xor_register(&xor_block_32regs);
- if (cpu_has_neon())
- xor_register(&xor_block_arm64);
+ if (cpu_has_neon()) {
+ if (cpu_have_named_feature(SHA3))
+ xor_register(&xor_block_eor3);
+ else
+ xor_register(&xor_block_neon);
+ }
}
diff --git a/lib/raid/xor/arm64/xor-neon-glue.c b/lib/raid/xor/arm64/xor-neon-glue.c
index 067a2095659a..08c3e3573388 100644
--- a/lib/raid/xor/arm64/xor-neon-glue.c
+++ b/lib/raid/xor/arm64/xor-neon-glue.c
@@ -7,51 +7,54 @@
#include <linux/raid/xor_impl.h>
#include <asm/simd.h>
#include <asm/xor.h>
+#include "xor-neon.h"
-extern struct xor_block_template const xor_block_inner_neon;
-
-static void
-xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
-{
- scoped_ksimd()
- xor_block_inner_neon.do_2(bytes, p1, p2);
-}
-
-static void
-xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
-{
- scoped_ksimd()
- xor_block_inner_neon.do_3(bytes, p1, p2, p3);
-}
-
-static void
-xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
-{
- scoped_ksimd()
- xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
-}
-
-static void
-xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
-{
- scoped_ksimd()
- xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
-}
-
-struct xor_block_template xor_block_arm64 = {
- .name = "arm64_neon",
- .do_2 = xor_neon_2,
- .do_3 = xor_neon_3,
- .do_4 = xor_neon_4,
- .do_5 = xor_neon_5
+#define XOR_TEMPLATE(_name) \
+static void \
+xor_##_name##_2(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2) \
+{ \
+ scoped_ksimd() \
+ __xor_##_name##_2(bytes, p1, p2); \
+} \
+ \
+static void \
+xor_##_name##_3(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3) \
+{ \
+ scoped_ksimd() \
+ __xor_##_name##_3(bytes, p1, p2, p3); \
+} \
+ \
+static void \
+xor_##_name##_4(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3, \
+ const unsigned long * __restrict p4) \
+{ \
+ scoped_ksimd() \
+ __xor_##_name##_4(bytes, p1, p2, p3, p4); \
+} \
+ \
+static void \
+xor_##_name##_5(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3, \
+ const unsigned long * __restrict p4, \
+ const unsigned long * __restrict p5) \
+{ \
+ scoped_ksimd() \
+ __xor_##_name##_5(bytes, p1, p2, p3, p4, p5); \
+} \
+ \
+struct xor_block_template xor_block_##_name = { \
+ .name = __stringify(_name), \
+ .do_2 = xor_##_name##_2, \
+ .do_3 = xor_##_name##_3, \
+ .do_4 = xor_##_name##_4, \
+ .do_5 = xor_##_name##_5 \
};
+
+XOR_TEMPLATE(neon);
+XOR_TEMPLATE(eor3);
diff --git a/lib/raid/xor/arm64/xor-neon.c b/lib/raid/xor/arm64/xor-neon.c
index 8d2d185090db..61194c292917 100644
--- a/lib/raid/xor/arm64/xor-neon.c
+++ b/lib/raid/xor/arm64/xor-neon.c
@@ -8,9 +8,10 @@
#include <linux/cache.h>
#include <asm/neon-intrinsics.h>
#include <asm/xor.h>
+#include "xor-neon.h"
-static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
+void __xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -36,9 +37,9 @@ static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
+void __xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -72,10 +73,10 @@ static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
+void __xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -117,11 +118,11 @@ static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
+void __xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -171,14 +172,6 @@ static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-struct xor_block_template xor_block_inner_neon __ro_after_init = {
- .name = "__inner_neon__",
- .do_2 = xor_arm64_neon_2,
- .do_3 = xor_arm64_neon_3,
- .do_4 = xor_arm64_neon_4,
- .do_5 = xor_arm64_neon_5,
-};
-
static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r)
{
uint64x2_t res;
@@ -189,10 +182,9 @@ static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r)
return res;
}
-static void xor_arm64_eor3_3(unsigned long bytes,
- unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
+void __xor_eor3_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -224,11 +216,10 @@ static void xor_arm64_eor3_3(unsigned long bytes,
} while (--lines > 0);
}
-static void xor_arm64_eor3_4(unsigned long bytes,
- unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
+void __xor_eor3_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -268,12 +259,11 @@ static void xor_arm64_eor3_4(unsigned long bytes,
} while (--lines > 0);
}
-static void xor_arm64_eor3_5(unsigned long bytes,
- unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
+void __xor_eor3_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -314,12 +304,3 @@ static void xor_arm64_eor3_5(unsigned long bytes,
dp5 += 8;
} while (--lines > 0);
}
-
-void __init xor_neon_init(void)
-{
- if (cpu_have_named_feature(SHA3)) {
- xor_block_inner_neon.do_3 = xor_arm64_eor3_3;
- xor_block_inner_neon.do_4 = xor_arm64_eor3_4;
- xor_block_inner_neon.do_5 = xor_arm64_eor3_5;
- }
-}
diff --git a/lib/raid/xor/arm64/xor-neon.h b/lib/raid/xor/arm64/xor-neon.h
new file mode 100644
index 000000000000..cec0ac846fea
--- /dev/null
+++ b/lib/raid/xor/arm64/xor-neon.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+void __xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void __xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
+
+#define __xor_eor3_2 __xor_neon_2
+void __xor_eor3_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void __xor_eor3_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_eor3_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
--
2.47.3