* [PATCH 01/17] raid6: turn the userspace test harness into a kunit test
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 02/17] raid6: remove __KERNEL__ ifdefs Christoph Hellwig
` (15 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Currently the raid6 code can be compiled as userspace code to run the
test suite. Convert that to be a kunit case with minimal changes to
avoid mutating global state so that we can drop this requirement.
Note that this is not a good kunit test case yet and will need a lot more
work, but that is deferred until the raid6 code is moved to its new
place, which is easier if the userspace makefile doesn't need adjustments
for the new location first.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
include/linux/raid/pq.h | 3 -
lib/Kconfig | 11 +++
lib/raid6/Makefile | 2 +-
lib/raid6/algos.c | 5 +-
lib/raid6/recov.c | 34 ---------
lib/raid6/test/Makefile | 155 +-------------------------------------
lib/raid6/test/test.c | 161 +++++++++++++++++++++-------------------
7 files changed, 103 insertions(+), 268 deletions(-)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 2467b3be15c9..08c5995ea980 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -144,7 +144,6 @@ extern const struct raid6_calls raid6_neonx8;
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
extern const struct raid6_recov_calls *const raid6_recov_algos[];
-int raid6_select_algo(void);
/* Return values from chk_syndrome */
#define RAID6_OK 0
@@ -165,8 +164,6 @@ extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
void **ptrs);
extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
void **ptrs);
-void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
- void **ptrs);
/* Some definitions to allow code to be compiled for testing in userspace */
#ifndef __KERNEL__
diff --git a/lib/Kconfig b/lib/Kconfig
index 5be57adcd454..716247fedaf0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -11,6 +11,17 @@ menu "Library routines"
config RAID6_PQ
tristate
+config RAID6_PQ_KUNIT_TEST
+ tristate "KUnit tests for raid6 PQ functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on RAID6_PQ
+ default KUNIT_ALL_TESTS
+ help
+ Unit tests for the RAID6 PQ library functions.
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
config RAID6_PQ_BENCHMARK
bool "Automatically choose fastest RAID6 PQ functions"
depends on RAID6_PQ
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 5be0a4e60ab1..6fd048c127b6 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
+obj-$(CONFIG_RAID6_PQ) += raid6_pq.o test/
raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
int8.o
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 799e0e5eac26..5a9f4882e18d 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/gfp.h>
#endif
+#include <kunit/visibility.h>
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
@@ -86,6 +87,7 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_intx1,
NULL
};
+EXPORT_SYMBOL_IF_KUNIT(raid6_algos);
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);
@@ -119,6 +121,7 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
&raid6_recov_intx1,
NULL
};
+EXPORT_SYMBOL_IF_KUNIT(raid6_recov_algos);
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2 4
@@ -239,7 +242,7 @@ static inline const struct raid6_calls *raid6_choose_gen(
/* Try to pick the best algorithm */
/* This code uses the gfmul table as convenient data set to abuse */
-int __init raid6_select_algo(void)
+static int __init raid6_select_algo(void)
{
const int disks = RAID6_TEST_DISKS;
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index b5e47c008b41..8d113196632e 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -99,37 +99,3 @@ const struct raid6_recov_calls raid6_recov_intx1 = {
.name = "intx1",
.priority = 0,
};
-
-#ifndef __KERNEL__
-/* Testing only */
-
-/* Recover two failed blocks. */
-void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
-{
- if ( faila > failb ) {
- int tmp = faila;
- faila = failb;
- failb = tmp;
- }
-
- if ( failb == disks-1 ) {
- if ( faila == disks-2 ) {
- /* P+Q failure. Just rebuild the syndrome. */
- raid6_call.gen_syndrome(disks, bytes, ptrs);
- } else {
- /* data+Q failure. Reconstruct data from P,
- then rebuild syndrome. */
- /* NOT IMPLEMENTED - equivalent to RAID-5 */
- }
- } else {
- if ( failb == disks-2 ) {
- /* data+P failure. */
- raid6_datap_recov(disks, bytes, faila, ptrs);
- } else {
- /* data+data failure. */
- raid6_2data_recov(disks, bytes, faila, failb, ptrs);
- }
- }
-}
-
-#endif
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 09bbe2b14cce..268b085af4d3 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -1,156 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-#
-# This is a simple Makefile to test some of the RAID-6 code
-# from userspace.
-#
-pound := \#
-
-# Adjust as desired
-CC = gcc
-OPTFLAGS = -O2
-CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
-LD = ld
-AWK = awk -f
-AR = ar
-RANLIB = ranlib
-OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
-
-ARCH := $(shell uname -m 2>/dev/null | sed -e /s/i.86/i386/)
-ifeq ($(ARCH),i386)
- CFLAGS += -DCONFIG_X86_32
- IS_X86 = yes
-endif
-ifeq ($(ARCH),x86_64)
- CFLAGS += -DCONFIG_X86_64
- IS_X86 = yes
-endif
-
-ifeq ($(ARCH),arm)
- CFLAGS += -I../../../arch/arm/include -mfpu=neon
- HAS_NEON = yes
-endif
-ifeq ($(ARCH),aarch64)
- CFLAGS += -I../../../arch/arm64/include
- HAS_NEON = yes
-endif
-
-ifeq ($(findstring riscv,$(ARCH)),riscv)
- CFLAGS += -I../../../arch/riscv/include -DCONFIG_RISCV=1
- HAS_RVV = yes
-endif
-
-ifeq ($(findstring ppc,$(ARCH)),ppc)
- CFLAGS += -I../../../arch/powerpc/include
- HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
- gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
-endif
-
-ifeq ($(ARCH),loongarch64)
- CFLAGS += -I../../../arch/loongarch/include -DCONFIG_LOONGARCH=1
- CFLAGS += $(shell echo 'vld $$vr0, $$zero, 0' | \
- gcc -c -x assembler - >/dev/null 2>&1 && \
- rm ./-.o && echo -DCONFIG_CPU_HAS_LSX=1)
- CFLAGS += $(shell echo 'xvld $$xr0, $$zero, 0' | \
- gcc -c -x assembler - >/dev/null 2>&1 && \
- rm ./-.o && echo -DCONFIG_CPU_HAS_LASX=1)
-endif
-
-ifeq ($(IS_X86),yes)
- OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
- CFLAGS += -DCONFIG_X86
-else ifeq ($(HAS_NEON),yes)
- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
- CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
-else ifeq ($(HAS_ALTIVEC),yes)
- CFLAGS += -DCONFIG_ALTIVEC
- OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
- vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
-else ifeq ($(ARCH),loongarch64)
- OBJS += loongarch_simd.o recov_loongarch_simd.o
-else ifeq ($(HAS_RVV),yes)
- OBJS += rvv.o recov_rvv.o
- CFLAGS += -DCONFIG_RISCV_ISA_V=1
-endif
-
-.c.o:
- $(CC) $(CFLAGS) -c -o $@ $<
-
-%.c: ../%.c
- cp -f $< $@
-
-%.uc: ../%.uc
- cp -f $< $@
-
-all: raid6.a raid6test
-
-raid6.a: $(OBJS)
- rm -f $@
- $(AR) cq $@ $^
- $(RANLIB) $@
-
-raid6test: test.c raid6.a
- $(CC) $(CFLAGS) -o raid6test $^
-
-neon1.c: neon.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < neon.uc > $@
-
-neon2.c: neon.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < neon.uc > $@
-
-neon4.c: neon.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < neon.uc > $@
-
-neon8.c: neon.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < neon.uc > $@
-
-altivec1.c: altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < altivec.uc > $@
-
-altivec2.c: altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < altivec.uc > $@
-
-altivec4.c: altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < altivec.uc > $@
-
-altivec8.c: altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@
-
-vpermxor1.c: vpermxor.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@
-
-vpermxor2.c: vpermxor.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@
-
-vpermxor4.c: vpermxor.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@
-
-vpermxor8.c: vpermxor.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@
-
-int1.c: int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < int.uc > $@
-
-int2.c: int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < int.uc > $@
-
-int4.c: int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < int.uc > $@
-
-int8.c: int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < int.uc > $@
-
-int16.c: int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=16 < int.uc > $@
-
-int32.c: int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=32 < int.uc > $@
-
-tables.c: mktables
- ./mktables > tables.c
-
-clean:
- rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test
-
-spotless: clean
- rm -f *~
+obj-$(CONFIG_RAID6_PQ_KUNIT_TEST) += test.o
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
index 841a55242aba..97e036b19049 100644
--- a/lib/raid6/test/test.c
+++ b/lib/raid6/test/test.c
@@ -1,43 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * raid6test.c
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
- * Test RAID-6 recovery with various algorithms
+ * Test RAID-6 recovery algorithms.
*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
+#include <kunit/test.h>
+#include <linux/prandom.h>
#include <linux/raid/pq.h>
-#define NDISKS 16 /* Including P and Q */
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+#define RAID6_KUNIT_SEED 42
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+#define NDISKS 16 /* Including P and Q */
-char *dataptrs[NDISKS];
-char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+static struct rnd_state rng;
+static void *dataptrs[NDISKS];
+static char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+static char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+static char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static void makedata(int start, int stop)
{
- int i, j;
+ int i;
for (i = start; i <= stop; i++) {
- for (j = 0; j < PAGE_SIZE; j++)
- data[i][j] = rand();
-
+ prandom_bytes_state(&rng, data[i], PAGE_SIZE);
dataptrs[i] = data[i];
}
}
-static char disk_type(int d)
+static char member_type(int d)
{
switch (d) {
case NDISKS-2:
@@ -49,104 +43,121 @@ static char disk_type(int d)
}
}
-static int test_disks(int i, int j)
+static void test_disks(struct kunit *test, const struct raid6_calls *calls,
+ const struct raid6_recov_calls *ra, int faila, int failb)
{
- int erra, errb;
-
memset(recovi, 0xf0, PAGE_SIZE);
memset(recovj, 0xba, PAGE_SIZE);
- dataptrs[i] = recovi;
- dataptrs[j] = recovj;
-
- raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
-
- erra = memcmp(data[i], recovi, PAGE_SIZE);
- errb = memcmp(data[j], recovj, PAGE_SIZE);
-
- if (i < NDISKS-2 && j == NDISKS-1) {
- /* We don't implement the DQ failure scenario, since it's
- equivalent to a RAID-5 failure (XOR, then recompute Q) */
- erra = errb = 0;
+ dataptrs[faila] = recovi;
+ dataptrs[failb] = recovj;
+
+ if (faila > failb)
+ swap(faila, failb);
+
+ if (failb == NDISKS - 1) {
+ /*
+ * We don't implement the data+Q failure scenario, since it
+ * is equivalent to a RAID-5 failure (XOR, then recompute Q).
+ */
+ if (faila != NDISKS - 2)
+ goto skip;
+
+ /* P+Q failure. Just rebuild the syndrome. */
+ calls->gen_syndrome(NDISKS, PAGE_SIZE, dataptrs);
+ } else if (failb == NDISKS - 2) {
+ /* data+P failure. */
+ ra->datap(NDISKS, PAGE_SIZE, faila, dataptrs);
} else {
- printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
- raid6_call.name,
- i, disk_type(i),
- j, disk_type(j),
- (!erra && !errb) ? "OK" :
- !erra ? "ERRB" :
- !errb ? "ERRA" : "ERRAB");
+ /* data+data failure. */
+ ra->data2(NDISKS, PAGE_SIZE, faila, failb, dataptrs);
}
- dataptrs[i] = data[i];
- dataptrs[j] = data[j];
-
- return erra || errb;
+ KUNIT_EXPECT_MEMEQ_MSG(test, data[faila], recovi, PAGE_SIZE,
+ "algo=%-8s/%-8s faila miscompared: %3d[%c] (failb=%3d[%c])\n",
+ calls->name, ra->name,
+ faila, member_type(faila),
+ failb, member_type(failb));
+ KUNIT_EXPECT_MEMEQ_MSG(test, data[failb], recovj, PAGE_SIZE,
+ "algo=%-8s/%-8s failb miscompared: %3d[%c] (faila=%3d[%c])\n",
+ calls->name, ra->name,
+ failb, member_type(failb),
+ faila, member_type(faila));
+
+skip:
+ dataptrs[faila] = data[faila];
+ dataptrs[failb] = data[failb];
}
-int main(int argc, char *argv[])
+static void raid6_test(struct kunit *test)
{
const struct raid6_calls *const *algo;
const struct raid6_recov_calls *const *ra;
int i, j, p1, p2;
- int err = 0;
-
- makedata(0, NDISKS-1);
for (ra = raid6_recov_algos; *ra; ra++) {
if ((*ra)->valid && !(*ra)->valid())
continue;
- raid6_2data_recov = (*ra)->data2;
- raid6_datap_recov = (*ra)->datap;
-
- printf("using recovery %s\n", (*ra)->name);
-
for (algo = raid6_algos; *algo; algo++) {
- if ((*algo)->valid && !(*algo)->valid())
- continue;
+ const struct raid6_calls *calls = *algo;
- raid6_call = **algo;
+ if (calls->valid && !calls->valid())
+ continue;
/* Nuke syndromes */
- memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
+ memset(data[NDISKS - 2], 0xee, PAGE_SIZE);
+ memset(data[NDISKS - 1], 0xee, PAGE_SIZE);
/* Generate assumed good syndrome */
- raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+ calls->gen_syndrome(NDISKS, PAGE_SIZE,
(void **)&dataptrs);
for (i = 0; i < NDISKS-1; i++)
for (j = i+1; j < NDISKS; j++)
- err += test_disks(i, j);
+ test_disks(test, calls, *ra, i, j);
- if (!raid6_call.xor_syndrome)
+ if (!calls->xor_syndrome)
continue;
for (p1 = 0; p1 < NDISKS-2; p1++)
for (p2 = p1; p2 < NDISKS-2; p2++) {
/* Simulate rmw run */
- raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ calls->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
(void **)&dataptrs);
makedata(p1, p2);
- raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ calls->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
(void **)&dataptrs);
for (i = 0; i < NDISKS-1; i++)
for (j = i+1; j < NDISKS; j++)
- err += test_disks(i, j);
+ test_disks(test, calls,
+ *ra, i, j);
}
}
- printf("\n");
}
+}
- printf("\n");
- /* Pick the best algorithm test */
- raid6_select_algo();
-
- if (err)
- printf("\n*** ERRORS FOUND ***\n");
+static struct kunit_case raid6_test_cases[] = {
+ KUNIT_CASE(raid6_test),
+ {},
+};
- return err;
+static int raid6_suite_init(struct kunit_suite *suite)
+{
+ prandom_seed_state(&rng, RAID6_KUNIT_SEED);
+ makedata(0, NDISKS - 1);
+ return 0;
}
+
+static struct kunit_suite raid6_test_suite = {
+ .name = "raid6",
+ .test_cases = raid6_test_cases,
+ .suite_init = raid6_suite_init,
+};
+kunit_test_suite(raid6_test_suite);
+
+MODULE_DESCRIPTION("Unit test for the XOR library functions");
+MODULE_LICENSE("GPL");
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 02/17] raid6: remove __KERNEL__ ifdefs
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
2026-03-24 6:40 ` [PATCH 01/17] raid6: turn the userspace test harness into a kunit test Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-25 15:13 ` H. Peter Anvin
2026-03-24 6:40 ` [PATCH 03/17] raid6: move to lib/raid/ Christoph Hellwig
` (14 subsequent siblings)
16 siblings, 1 reply; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
With the test code ported to kernel space, none of this is required.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
include/linux/raid/pq.h | 90 --------------------------------
lib/raid6/algos.c | 12 -----
lib/raid6/altivec.uc | 10 +---
lib/raid6/avx2.c | 2 +-
lib/raid6/avx512.c | 2 +-
lib/raid6/loongarch.h | 38 --------------
lib/raid6/loongarch_simd.c | 3 +-
lib/raid6/mktables.c | 14 -----
lib/raid6/mmx.c | 2 +-
lib/raid6/neon.c | 6 ---
lib/raid6/recov_avx2.c | 2 +-
lib/raid6/recov_avx512.c | 2 +-
lib/raid6/recov_loongarch_simd.c | 3 +-
lib/raid6/recov_neon.c | 6 ---
lib/raid6/recov_ssse3.c | 2 +-
lib/raid6/rvv.h | 11 +---
lib/raid6/sse1.c | 2 +-
lib/raid6/sse2.c | 2 +-
lib/raid6/vpermxor.uc | 7 ---
lib/raid6/x86.h | 75 --------------------------
20 files changed, 15 insertions(+), 276 deletions(-)
delete mode 100644 lib/raid6/loongarch.h
delete mode 100644 lib/raid6/x86.h
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 08c5995ea980..d26788fada58 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -8,8 +8,6 @@
#ifndef LINUX_RAID_RAID6_H
#define LINUX_RAID_RAID6_H
-#ifdef __KERNEL__
-
#include <linux/blkdev.h>
#include <linux/mm.h>
@@ -19,59 +17,6 @@ static inline void *raid6_get_zero_page(void)
return page_address(ZERO_PAGE(0));
}
-#else /* ! __KERNEL__ */
-/* Used for testing in user space */
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stddef.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-/* Not standard, but glibc defines it */
-#define BITS_PER_LONG __WORDSIZE
-
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-
-#ifndef PAGE_SIZE
-# define PAGE_SIZE 4096
-#endif
-#ifndef PAGE_SHIFT
-# define PAGE_SHIFT 12
-#endif
-extern const char raid6_empty_zero_page[PAGE_SIZE];
-
-#define __init
-#define __exit
-#ifndef __attribute_const__
-# define __attribute_const__ __attribute__((const))
-#endif
-#define noinline __attribute__((noinline))
-
-#define preempt_enable()
-#define preempt_disable()
-#define cpu_has_feature(x) 1
-#define enable_kernel_altivec()
-#define disable_kernel_altivec()
-
-#undef EXPORT_SYMBOL
-#define EXPORT_SYMBOL(sym)
-#undef EXPORT_SYMBOL_GPL
-#define EXPORT_SYMBOL_GPL(sym)
-#define MODULE_LICENSE(licence)
-#define MODULE_DESCRIPTION(desc)
-#define subsys_initcall(x)
-#define module_exit(x)
-
-#define IS_ENABLED(x) (x)
-#define CONFIG_RAID6_PQ_BENCHMARK 1
-#endif /* __KERNEL__ */
-
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
@@ -165,39 +110,4 @@ extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
void **ptrs);
-/* Some definitions to allow code to be compiled for testing in userspace */
-#ifndef __KERNEL__
-
-# define jiffies raid6_jiffies()
-# define printk printf
-# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
-# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
-# define GFP_KERNEL 0
-# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
- PROT_READ|PROT_WRITE, \
- MAP_PRIVATE|MAP_ANONYMOUS,\
- 0, 0))
-# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y))
-
-static inline void cpu_relax(void)
-{
- /* Nothing */
-}
-
-#undef HZ
-#define HZ 1000
-static inline uint32_t raid6_jiffies(void)
-{
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return tv.tv_sec*1000 + tv.tv_usec/1000;
-}
-
-static inline void *raid6_get_zero_page(void)
-{
- return raid6_empty_zero_page;
-}
-
-#endif /* ! __KERNEL__ */
-
#endif /* LINUX_RAID_RAID6_H */
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 5a9f4882e18d..985c60bb00a4 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -12,13 +12,8 @@
*/
#include <linux/raid/pq.h>
-#ifndef __KERNEL__
-#include <sys/mman.h>
-#include <stdio.h>
-#else
#include <linux/module.h>
#include <linux/gfp.h>
-#endif
#include <kunit/visibility.h>
struct raid6_calls raid6_call;
@@ -123,14 +118,7 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
};
EXPORT_SYMBOL_IF_KUNIT(raid6_recov_algos);
-#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2 4
-#else
-/* Need more time to be stable in userspace */
-#define RAID6_TIME_JIFFIES_LG2 9
-#define time_before(x, y) ((x) < (y))
-#endif
-
#define RAID6_TEST_DISKS 8
#define RAID6_TEST_DISKS_ORDER 3
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index d20ed0d11411..2c59963e58f9 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -27,10 +27,8 @@
#ifdef CONFIG_ALTIVEC
#include <altivec.h>
-#ifdef __KERNEL__
-# include <asm/cputable.h>
-# include <asm/switch_to.h>
-#endif /* __KERNEL__ */
+#include <asm/cputable.h>
+#include <asm/switch_to.h>
/*
* This is the C data type to use. We use a vector of
@@ -113,11 +111,7 @@ int raid6_have_altivec(void);
int raid6_have_altivec(void)
{
/* This assumes either all CPUs have Altivec or none does */
-# ifdef __KERNEL__
return cpu_has_feature(CPU_FTR_ALTIVEC);
-# else
- return 1;
-# endif
}
#endif
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 059024234dce..a1a5213918af 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -14,7 +14,7 @@
*/
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
static const struct raid6_avx2_constants {
u64 x1d[4];
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
index 009bd0adeebf..874998bcd7d7 100644
--- a/lib/raid6/avx512.c
+++ b/lib/raid6/avx512.c
@@ -18,7 +18,7 @@
*/
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
static const struct raid6_avx512_constants {
u64 x1d[8];
diff --git a/lib/raid6/loongarch.h b/lib/raid6/loongarch.h
deleted file mode 100644
index acfc33ce7056..000000000000
--- a/lib/raid6/loongarch.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
- *
- * raid6/loongarch.h
- *
- * Definitions common to LoongArch RAID-6 code only
- */
-
-#ifndef _LIB_RAID6_LOONGARCH_H
-#define _LIB_RAID6_LOONGARCH_H
-
-#ifdef __KERNEL__
-
-#include <asm/cpu-features.h>
-#include <asm/fpu.h>
-
-#else /* for user-space testing */
-
-#include <sys/auxv.h>
-
-/* have to supply these defines for glibc 2.37- and musl */
-#ifndef HWCAP_LOONGARCH_LSX
-#define HWCAP_LOONGARCH_LSX (1 << 4)
-#endif
-#ifndef HWCAP_LOONGARCH_LASX
-#define HWCAP_LOONGARCH_LASX (1 << 5)
-#endif
-
-#define kernel_fpu_begin()
-#define kernel_fpu_end()
-
-#define cpu_has_lsx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LSX)
-#define cpu_has_lasx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LASX)
-
-#endif /* __KERNEL__ */
-
-#endif /* _LIB_RAID6_LOONGARCH_H */
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid6/loongarch_simd.c
index aa5d9f924ca3..72f4d92d4876 100644
--- a/lib/raid6/loongarch_simd.c
+++ b/lib/raid6/loongarch_simd.c
@@ -10,7 +10,8 @@
*/
#include <linux/raid/pq.h>
-#include "loongarch.h"
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
/*
* The vector algorithms are currently priority 0, which means the generic
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 3be03793237c..3de1dbf6846c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -56,9 +56,7 @@ int main(int argc, char *argv[])
uint8_t v;
uint8_t exptbl[256], invtbl[256];
- printf("#ifdef __KERNEL__\n");
printf("#include <linux/export.h>\n");
- printf("#endif\n");
printf("#include <linux/raid/pq.h>\n");
/* Compute multiplication table */
@@ -76,9 +74,7 @@ int main(int argc, char *argv[])
printf("\t},\n");
}
printf("};\n");
- printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfmul);\n");
- printf("#endif\n");
/* Compute vector multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
@@ -101,9 +97,7 @@ int main(int argc, char *argv[])
printf("\t},\n");
}
printf("};\n");
- printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_vgfmul);\n");
- printf("#endif\n");
/* Compute power-of-2 table (exponent) */
v = 1;
@@ -120,9 +114,7 @@ int main(int argc, char *argv[])
}
}
printf("};\n");
- printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
- printf("#endif\n");
/* Compute log-of-2 table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
@@ -140,9 +132,7 @@ int main(int argc, char *argv[])
}
}
printf("};\n");
- printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gflog);\n");
- printf("#endif\n");
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
@@ -155,9 +145,7 @@ int main(int argc, char *argv[])
}
}
printf("};\n");
- printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfinv);\n");
- printf("#endif\n");
/* Compute inv(2^x + 1) (exponent-xor-inverse) table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
@@ -169,9 +157,7 @@ int main(int argc, char *argv[])
(j == 7) ? '\n' : ' ');
}
printf("};\n");
- printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfexi);\n");
- printf("#endif\n");
return 0;
}
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
index 3a5bf53a297b..e411f0cfbd95 100644
--- a/lib/raid6/mmx.c
+++ b/lib/raid6/mmx.c
@@ -14,7 +14,7 @@
#ifdef CONFIG_X86_32
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
/* Shared with raid6/sse1.c */
const struct raid6_mmx_constants {
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index 6d9474ce6da9..47b8bb0afc65 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -6,13 +6,7 @@
*/
#include <linux/raid/pq.h>
-
-#ifdef __KERNEL__
#include <asm/simd.h>
-#else
-#define scoped_ksimd()
-#define cpu_has_neon() (1)
-#endif
/*
* There are 2 reasons these wrappers are kept in a separate compilation unit
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
index 97d598d2535c..19fbd9c4dce6 100644
--- a/lib/raid6/recov_avx2.c
+++ b/lib/raid6/recov_avx2.c
@@ -5,7 +5,7 @@
*/
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
static int raid6_has_avx2(void)
{
diff --git a/lib/raid6/recov_avx512.c b/lib/raid6/recov_avx512.c
index 7986120ca444..143f4976b2ad 100644
--- a/lib/raid6/recov_avx512.c
+++ b/lib/raid6/recov_avx512.c
@@ -7,7 +7,7 @@
*/
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
static int raid6_has_avx512(void)
{
diff --git a/lib/raid6/recov_loongarch_simd.c b/lib/raid6/recov_loongarch_simd.c
index 93dc515997a1..eb3a1e79f01f 100644
--- a/lib/raid6/recov_loongarch_simd.c
+++ b/lib/raid6/recov_loongarch_simd.c
@@ -11,7 +11,8 @@
*/
#include <linux/raid/pq.h>
-#include "loongarch.h"
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
/*
* Unlike with the syndrome calculation algorithms, there's no boot-time
diff --git a/lib/raid6/recov_neon.c b/lib/raid6/recov_neon.c
index 9d99aeabd31a..13d5df718c15 100644
--- a/lib/raid6/recov_neon.c
+++ b/lib/raid6/recov_neon.c
@@ -5,14 +5,8 @@
*/
#include <linux/raid/pq.h>
-
-#ifdef __KERNEL__
#include <asm/simd.h>
#include "neon.h"
-#else
-#define scoped_ksimd()
-#define cpu_has_neon() (1)
-#endif
static int raid6_has_neon(void)
{
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 2e849185c32b..146cdbf465bd 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -4,7 +4,7 @@
*/
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
static int raid6_has_ssse3(void)
{
diff --git a/lib/raid6/rvv.h b/lib/raid6/rvv.h
index 6d0708a2c8a4..b0a71b375962 100644
--- a/lib/raid6/rvv.h
+++ b/lib/raid6/rvv.h
@@ -7,17 +7,8 @@
* Definitions for RISC-V RAID-6 code
*/
-#ifdef __KERNEL__
-#include <asm/vector.h>
-#else
-#define kernel_vector_begin()
-#define kernel_vector_end()
-#include <sys/auxv.h>
-#include <asm/hwcap.h>
-#define has_vector() (getauxval(AT_HWCAP) & COMPAT_HWCAP_ISA_V)
-#endif
-
#include <linux/raid/pq.h>
+#include <asm/vector.h>
static int rvv_has_vector(void)
{
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c
index 692fa3a93bf0..794d5cfa0306 100644
--- a/lib/raid6/sse1.c
+++ b/lib/raid6/sse1.c
@@ -19,7 +19,7 @@
#ifdef CONFIG_X86_32
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
/* Defined in raid6/mmx.c */
extern const struct raid6_mmx_constants {
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 2930220249c9..f9edf8a8d1c4 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -13,7 +13,7 @@
*/
#include <linux/raid/pq.h>
-#include "x86.h"
+#include <asm/fpu/api.h>
static const struct raid6_sse_constants {
u64 x1d[2];
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
index 1bfb127fbfe8..a8e76b1c956e 100644
--- a/lib/raid6/vpermxor.uc
+++ b/lib/raid6/vpermxor.uc
@@ -25,10 +25,8 @@
#include <altivec.h>
#include <asm/ppc-opcode.h>
-#ifdef __KERNEL__
#include <asm/cputable.h>
#include <asm/switch_to.h>
-#endif
typedef vector unsigned char unative_t;
#define NSIZE sizeof(unative_t)
@@ -85,13 +83,8 @@ int raid6_have_altivec_vpermxor(void);
int raid6_have_altivec_vpermxor(void)
{
/* Check if arch has both altivec and the vpermxor instructions */
-# ifdef __KERNEL__
return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
cpu_has_feature(CPU_FTR_ARCH_207S));
-# else
- return 1;
-#endif
-
}
#endif
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h
deleted file mode 100644
index 9a6ff37115e7..000000000000
--- a/lib/raid6/x86.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* ----------------------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6/x86.h
- *
- * Definitions common to x86 and x86-64 RAID-6 code only
- */
-
-#ifndef LINUX_RAID_RAID6X86_H
-#define LINUX_RAID_RAID6X86_H
-
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
-#ifdef __KERNEL__ /* Real code */
-
-#include <asm/fpu/api.h>
-
-#else /* Dummy code for user space testing */
-
-static inline void kernel_fpu_begin(void)
-{
-}
-
-static inline void kernel_fpu_end(void)
-{
-}
-
-#define __aligned(x) __attribute__((aligned(x)))
-
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
- * (fast save and restore) */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ (9*32+17) /* AVX-512 DQ (Double/Quad granular)
- * Instructions
- */
-#define X86_FEATURE_AVX512BW (9*32+30) /* AVX-512 BW (Byte/Word granular)
- * Instructions
- */
-#define X86_FEATURE_AVX512VL (9*32+31) /* AVX-512 VL (128/256 Vector Length)
- * Extensions
- */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-
-/* Should work well enough on modern CPUs for testing */
-static inline int boot_cpu_has(int flag)
-{
- u32 eax, ebx, ecx, edx;
-
- eax = (flag & 0x100) ? 7 :
- (flag & 0x20) ? 0x80000001 : 1;
- ecx = 0;
-
- asm volatile("cpuid"
- : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx));
-
- return ((flag & 0x100 ? ebx :
- (flag & 0x80) ? ecx : edx) >> (flag & 31)) & 1;
-}
-
-#endif /* ndef __KERNEL__ */
-
-#endif
-#endif
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH 02/17] raid6: remove __KERNEL__ ifdefs
2026-03-24 6:40 ` [PATCH 02/17] raid6: remove __KERNEL__ ifdefs Christoph Hellwig
@ 2026-03-25 15:13 ` H. Peter Anvin
2026-03-25 16:13 ` H. Peter Anvin
0 siblings, 1 reply; 22+ messages in thread
From: H. Peter Anvin @ 2026-03-25 15:13 UTC (permalink / raw)
To: Christoph Hellwig, Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, Herbert Xu, Dan Williams, Chris Mason,
David Sterba, Arnd Bergmann, Song Liu, Yu Kuai, Li Nan,
linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
On 2026-03-23 23:40, Christoph Hellwig wrote:
> With the test code ported to kernel space, none of this is required.
I really *really* don't like this.
The ability of running in user space is really useful when it comes to
developing new code for new platforms, which happens often enough for this code.
-hpa
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 02/17] raid6: remove __KERNEL__ ifdefs
2026-03-25 15:13 ` H. Peter Anvin
@ 2026-03-25 16:13 ` H. Peter Anvin
2026-03-25 19:58 ` Eric Biggers
0 siblings, 1 reply; 22+ messages in thread
From: H. Peter Anvin @ 2026-03-25 16:13 UTC (permalink / raw)
To: Christoph Hellwig, Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, Herbert Xu, Dan Williams, Chris Mason,
David Sterba, Arnd Bergmann, Song Liu, Yu Kuai, Li Nan,
linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
On 2026-03-25 08:13, H. Peter Anvin wrote:
> On 2026-03-23 23:40, Christoph Hellwig wrote:
>> With the test code ported to kernel space, none of this is required.
>
> I really *really* don't like this.
>
> The ability of running in user space is really useful when it comes to
> developing new code for new platforms, which happens often enough for this code.
>
That being said -- and I do say this as the original author of this code --
this should be reduced to the maximum extent possible to a (minimal) set of
#ifndef __KERNEL__, which should be localized as much as possible. The actual
user space components, even such a thing as a simple #include, should be moved
to a separate user space header.
But pretty please do leave the ability to debug the algorithms in user space.
This is hard code to write and debug; it is not just about regression testing.
-hpa
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 02/17] raid6: remove __KERNEL__ ifdefs
2026-03-25 16:13 ` H. Peter Anvin
@ 2026-03-25 19:58 ` Eric Biggers
2026-03-26 5:25 ` Christoph Hellwig
0 siblings, 1 reply; 22+ messages in thread
From: Eric Biggers @ 2026-03-25 19:58 UTC (permalink / raw)
To: H. Peter Anvin
Cc: Christoph Hellwig, Andrew Morton, Catalin Marinas, Will Deacon,
Ard Biesheuvel, Huacai Chen, WANG Xuerui, Madhavan Srinivasan,
Michael Ellerman, Nicholas Piggin, Christophe Leroy (CS GROUP),
Paul Walmsley, Palmer Dabbelt, Albert Ou, Alexandre Ghiti,
Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Christian Borntraeger, Sven Schnelle, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, x86, Herbert Xu,
Dan Williams, Chris Mason, David Sterba, Arnd Bergmann, Song Liu,
Yu Kuai, Li Nan, linux-kernel, linux-arm-kernel, loongarch,
linuxppc-dev, linux-riscv, linux-s390, linux-crypto, linux-btrfs,
linux-arch, linux-raid
On Wed, Mar 25, 2026 at 09:13:16AM -0700, H. Peter Anvin wrote:
> On 2026-03-25 08:13, H. Peter Anvin wrote:
> > On 2026-03-23 23:40, Christoph Hellwig wrote:
> >> With the test code ported to kernel space, none of this is required.
> >
> > I really *really* don't like this.
> >
> > The ability of running in user space is really useful when it comes to
> > developing new code for new platforms, which happens often enough for this code.
> >
>
> That being said -- and I do say this as the original author of this code --
> this should be reduced to the maximum extent possible to a (minimal) set of
> #ifndef __KERNEL__, which should be localized as much as possible. The actual
> user space components, even such a thing as a simple #include, should be moved
> to a separate user space header.
>
> But pretty please do leave the ability to debug the algorithms in user space.
> This is hard code to write and debug; it is not just about regression testing.
While I do like developing code in userspace as well, I've been doing it
less over time as the kernel's tests and benchmarks have been improved.
Running the KUnit tests is pretty straightforward and fast.
The issues with providing userspace build support in the tree are that:
- It has to be maintained.
- It's fundamentally a bit of a hack that is used only by developers
(who always have the option of doing something locally that is
tailored to the specific function they're working on)
- It diverts effort from the kernel's actual test and benchmark.
So while the faster iteration speed that userspace development enables
is definitely nice, I do think we should be cautious with committing to
maintain it in the kernel tree. If it's causing problems for the
ongoing refactoring, dropping it for now seems reasonable to me.
I would suggest adding a benchmark to the KUnit test similar to the
crypto and CRC ones, though.
- Eric
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 02/17] raid6: remove __KERNEL__ ifdefs
2026-03-25 19:58 ` Eric Biggers
@ 2026-03-26 5:25 ` Christoph Hellwig
0 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-26 5:25 UTC (permalink / raw)
To: Eric Biggers
Cc: H. Peter Anvin, Christoph Hellwig, Andrew Morton, Catalin Marinas,
Will Deacon, Ard Biesheuvel, Huacai Chen, WANG Xuerui,
Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy (CS GROUP), Paul Walmsley, Palmer Dabbelt,
Albert Ou, Alexandre Ghiti, Heiko Carstens, Vasily Gorbik,
Alexander Gordeev, Christian Borntraeger, Sven Schnelle,
Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
Herbert Xu, Dan Williams, Chris Mason, David Sterba,
Arnd Bergmann, Song Liu, Yu Kuai, Li Nan, linux-kernel,
linux-arm-kernel, loongarch, linuxppc-dev, linux-riscv,
linux-s390, linux-crypto, linux-btrfs, linux-arch, linux-raid
On Wed, Mar 25, 2026 at 12:58:21PM -0700, Eric Biggers wrote:
> While I do like developing code in userspace as well, I've been doing it
> less over time as the kernel's tests and benchmarks have been improved.
> Running the KUnit tests is pretty straightforward and fast.
Yes. I would have totally subscribed to hpa's position when he initially
wrote the code, but 20+ years later things look different. In fact these
days I often write code intended for userspace in the kernel first to
benefit from lockdep primarily, but also other checkers that are in theory
available in userspace but not as easy to use. Now of course lockdep doesn't
really matter for the algorithms here, but the rest still stands.
I also find the point of developing new code for new platforms
interesting: in this decade we had two new platforms added: loongarch
and riscv and all other changes were to the wiring up and not the
algorithms. And of those riscv only had the compile in userspace
support added 8 months after the algorithm, so it doesn't really look
like development was aided by it. We also plan to add new optimized
code, and getting the library in shape and dropping the hard to
maintain userspace code is actually prep work for making that not
painful.
>
> The issues with providing userspace build support in the tree are that:
>
> - It has to be maintained.
> - It's fundamentally a bit of a hack that is used only by developers
> (who always have the option of doing something locally that is
> tailored to the specific function they're working on)
> - It diverts effort from the kernel's actual test and benchmark.
>
> So while the faster iteration speed that userspace development enables
> is definitely nice, I do think we should be cautious with committing to
> maintain it in the kernel tree. If it's causing problems for the
> ongoing refactoring, dropping it for now seems reasonable to me.
>
> I would suggest adding a benchmark to the KUnit test similar to the
> crypto and CRC ones, though.
The code already has a benchmark used for runtime selection, although
that could be improved on and run for bigger data sets from kunit.
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH 03/17] raid6: move to lib/raid/
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
2026-03-24 6:40 ` [PATCH 01/17] raid6: turn the userspace test harness into a kunit test Christoph Hellwig
2026-03-24 6:40 ` [PATCH 02/17] raid6: remove __KERNEL__ ifdefs Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 04/17] raid6: remove unused defines in pq.h Christoph Hellwig
` (13 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Move the raid6 code to live in lib/raid/ with the XOR code, and change
the internal organization so that each architecture has a subdirectory
similar to the CRC, crypto and XOR libraries, and fix up the Makefile to
only build files actually needed.
Also move the kunit test case from the historic test/ subdirectory to
tests/ and use the normal naming scheme for it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/Kconfig | 22 ----
lib/Makefile | 1 -
lib/raid/Kconfig | 22 ++++
lib/raid/Makefile | 2 +-
lib/{ => raid}/raid6/.gitignore | 0
lib/raid/raid6/Makefile | 118 ++++++++++++++++++
lib/{ => raid}/raid6/algos.c | 0
lib/{raid6 => raid/raid6/arm}/neon.c | 0
lib/{raid6 => raid/raid6/arm}/neon.h | 0
lib/{raid6 => raid/raid6/arm}/neon.uc | 0
lib/{raid6 => raid/raid6/arm}/recov_neon.c | 0
.../raid6/arm}/recov_neon_inner.c | 0
lib/{ => raid}/raid6/int.uc | 0
.../raid6/loongarch}/loongarch_simd.c | 0
.../raid6/loongarch}/recov_loongarch_simd.c | 0
lib/{ => raid}/raid6/mktables.c | 0
lib/{raid6 => raid/raid6/powerpc}/altivec.uc | 4 -
lib/{raid6 => raid/raid6/powerpc}/vpermxor.uc | 3 -
lib/{ => raid}/raid6/recov.c | 0
lib/{raid6 => raid/raid6/riscv}/recov_rvv.c | 0
lib/{raid6 => raid/raid6/riscv}/rvv.c | 0
lib/{raid6 => raid/raid6/riscv}/rvv.h | 0
lib/{raid6 => raid/raid6/s390}/recov_s390xc.c | 0
lib/{raid6 => raid/raid6/s390}/s390vx.uc | 0
lib/raid/raid6/tests/Makefile | 3 +
.../test.c => raid/raid6/tests/raid6_kunit.c} | 0
lib/{ => raid}/raid6/unroll.awk | 0
lib/{raid6 => raid/raid6/x86}/avx2.c | 0
lib/{raid6 => raid/raid6/x86}/avx512.c | 0
lib/{raid6 => raid/raid6/x86}/mmx.c | 4 -
lib/{raid6 => raid/raid6/x86}/recov_avx2.c | 0
lib/{raid6 => raid/raid6/x86}/recov_avx512.c | 0
lib/{raid6 => raid/raid6/x86}/recov_ssse3.c | 0
lib/{raid6 => raid/raid6/x86}/sse1.c | 4 -
lib/{raid6 => raid/raid6/x86}/sse2.c | 0
lib/raid6/Makefile | 83 ------------
lib/raid6/test/.gitignore | 3 -
lib/raid6/test/Makefile | 3 -
38 files changed, 144 insertions(+), 128 deletions(-)
rename lib/{ => raid}/raid6/.gitignore (100%)
create mode 100644 lib/raid/raid6/Makefile
rename lib/{ => raid}/raid6/algos.c (100%)
rename lib/{raid6 => raid/raid6/arm}/neon.c (100%)
rename lib/{raid6 => raid/raid6/arm}/neon.h (100%)
rename lib/{raid6 => raid/raid6/arm}/neon.uc (100%)
rename lib/{raid6 => raid/raid6/arm}/recov_neon.c (100%)
rename lib/{raid6 => raid/raid6/arm}/recov_neon_inner.c (100%)
rename lib/{ => raid}/raid6/int.uc (100%)
rename lib/{raid6 => raid/raid6/loongarch}/loongarch_simd.c (100%)
rename lib/{raid6 => raid/raid6/loongarch}/recov_loongarch_simd.c (100%)
rename lib/{ => raid}/raid6/mktables.c (100%)
rename lib/{raid6 => raid/raid6/powerpc}/altivec.uc (98%)
rename lib/{raid6 => raid/raid6/powerpc}/vpermxor.uc (98%)
rename lib/{ => raid}/raid6/recov.c (100%)
rename lib/{raid6 => raid/raid6/riscv}/recov_rvv.c (100%)
rename lib/{raid6 => raid/raid6/riscv}/rvv.c (100%)
rename lib/{raid6 => raid/raid6/riscv}/rvv.h (100%)
rename lib/{raid6 => raid/raid6/s390}/recov_s390xc.c (100%)
rename lib/{raid6 => raid/raid6/s390}/s390vx.uc (100%)
create mode 100644 lib/raid/raid6/tests/Makefile
rename lib/{raid6/test/test.c => raid/raid6/tests/raid6_kunit.c} (100%)
rename lib/{ => raid}/raid6/unroll.awk (100%)
rename lib/{raid6 => raid/raid6/x86}/avx2.c (100%)
rename lib/{raid6 => raid/raid6/x86}/avx512.c (100%)
rename lib/{raid6 => raid/raid6/x86}/mmx.c (99%)
rename lib/{raid6 => raid/raid6/x86}/recov_avx2.c (100%)
rename lib/{raid6 => raid/raid6/x86}/recov_avx512.c (100%)
rename lib/{raid6 => raid/raid6/x86}/recov_ssse3.c (100%)
rename lib/{raid6 => raid/raid6/x86}/sse1.c (99%)
rename lib/{raid6 => raid/raid6/x86}/sse2.c (100%)
delete mode 100644 lib/raid6/Makefile
delete mode 100644 lib/raid6/test/.gitignore
delete mode 100644 lib/raid6/test/Makefile
diff --git a/lib/Kconfig b/lib/Kconfig
index 716247fedaf0..67cc485484aa 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -8,28 +8,6 @@ config BINARY_PRINTF
menu "Library routines"
-config RAID6_PQ
- tristate
-
-config RAID6_PQ_KUNIT_TEST
- tristate "KUnit tests for raid6 PQ functions" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on RAID6_PQ
- default KUNIT_ALL_TESTS
- help
- Unit tests for the RAID6 PQ library functions.
-
- This is intended to help people writing architecture-specific
- optimized versions. If unsure, say N.
-
-config RAID6_PQ_BENCHMARK
- bool "Automatically choose fastest RAID6 PQ functions"
- depends on RAID6_PQ
- default y
- help
- Benchmark all available RAID6 PQ functions on init and choose the
- fastest one.
-
config LINEAR_RANGES
tristate
diff --git a/lib/Makefile b/lib/Makefile
index 84da412a044f..b5275d922662 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -166,7 +166,6 @@ obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
obj-$(CONFIG_XZ_DEC) += xz/
-obj-$(CONFIG_RAID6_PQ) += raid6/
lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
diff --git a/lib/raid/Kconfig b/lib/raid/Kconfig
index 1fc4b00e0d71..9bea599d66da 100644
--- a/lib/raid/Kconfig
+++ b/lib/raid/Kconfig
@@ -28,3 +28,25 @@ config XOR_KUNIT_TEST
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+
+config RAID6_PQ
+ tristate
+
+config RAID6_PQ_KUNIT_TEST
+ tristate "KUnit tests for raid6 PQ functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on RAID6_PQ
+ default KUNIT_ALL_TESTS
+ help
+ Unit tests for the RAID6 PQ library functions.
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
+config RAID6_PQ_BENCHMARK
+ bool "Automatically choose fastest RAID6 PQ functions"
+ depends on RAID6_PQ
+ default y
+ help
+ Benchmark all available RAID6 PQ functions on init and choose the
+ fastest one.
diff --git a/lib/raid/Makefile b/lib/raid/Makefile
index 3540fe846dc4..6fc5eeb53df0 100644
--- a/lib/raid/Makefile
+++ b/lib/raid/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += xor/
+obj-y += xor/ raid6/
diff --git a/lib/raid6/.gitignore b/lib/raid/raid6/.gitignore
similarity index 100%
rename from lib/raid6/.gitignore
rename to lib/raid/raid6/.gitignore
diff --git a/lib/raid/raid6/Makefile b/lib/raid/raid6/Makefile
new file mode 100644
index 000000000000..f155eba06fe3
--- /dev/null
+++ b/lib/raid/raid6/Makefile
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_RAID6_PQ) += raid6_pq.o tests/
+
+raid6_pq-y += algos.o tables.o
+
+# generic integer generation and recovery implementation
+raid6_pq-y += int1.o int2.o int4.o int8.o
+raid6_pq-y += recov.o
+
+# architecture-specific generation and recovery implementations:
+raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += arm/neon.o \
+ arm/neon1.o \
+ arm/neon2.o \
+ arm/neon4.o \
+ arm/neon8.o \
+ arm/recov_neon.o \
+ arm/recov_neon_inner.o
+raid6_pq-$(CONFIG_LOONGARCH) += loongarch/loongarch_simd.o \
+ loongarch/recov_loongarch_simd.o
+raid6_pq-$(CONFIG_ALTIVEC) += powerpc/altivec1.o \
+ powerpc/altivec2.o \
+ powerpc/altivec4.o \
+ powerpc/altivec8.o \
+ powerpc/vpermxor1.o \
+ powerpc/vpermxor2.o \
+ powerpc/vpermxor4.o \
+ powerpc/vpermxor8.o
+raid6_pq-$(CONFIG_RISCV_ISA_V) += riscv/rvv.o \
+ riscv/recov_rvv.o
+raid6_pq-$(CONFIG_S390) += s390/s390vx8.o \
+ s390/recov_s390xc.o
+ifeq ($(CONFIG_X86),y)
+raid6_pq-$(CONFIG_X86_32) += x86/mmx.o \
+ x86/sse1.o
+endif
+raid6_pq-$(CONFIG_X86) += x86/sse2.o \
+ x86/avx2.o \
+ x86/avx512.o \
+ x86/recov_ssse3.o \
+ x86/recov_avx2.o \
+ x86/recov_avx512.o
+
+hostprogs += mktables
+
+ifeq ($(CONFIG_ALTIVEC),y)
+altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+# Enable <altivec.h>
+altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this
+# https://llvm.org/pr31177
+CFLAGS_REMOVE_powerpc/altivec1.o += -msoft-float
+CFLAGS_REMOVE_powerpc/altivec2.o += -msoft-float
+CFLAGS_REMOVE_powerpc/altivec4.o += -msoft-float
+CFLAGS_REMOVE_powerpc/altivec8.o += -msoft-float
+CFLAGS_REMOVE_powerpc/vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_powerpc/vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_powerpc/vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_powerpc/vpermxor8.o += -msoft-float
+endif
+endif
+
+quiet_cmd_unroll = UNROLL $@
+ cmd_unroll = $(AWK) -v N=$* -f $(src)/unroll.awk < $< > $@
+
+targets += int1.c int2.c int4.c int8.c
+$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_arm/neon1.o += $(CC_FLAGS_FPU)
+CFLAGS_arm/neon2.o += $(CC_FLAGS_FPU)
+CFLAGS_arm/neon4.o += $(CC_FLAGS_FPU)
+CFLAGS_arm/neon8.o += $(CC_FLAGS_FPU)
+CFLAGS_arm/recov_neon_inner.o += $(CC_FLAGS_FPU)
+CFLAGS_REMOVE_arm/neon1.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_arm/neon2.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_arm/neon4.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_arm/neon8.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_arm/recov_neon_inner.o += $(CC_FLAGS_NO_FPU)
+targets += arm/neon1.c arm/neon2.c arm/neon4.c arm/neon8.c
+$(obj)/arm/neon%.c: $(src)/arm/neon.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_powerpc/altivec1.o += $(altivec_flags)
+CFLAGS_powerpc/altivec2.o += $(altivec_flags)
+CFLAGS_powerpc/altivec4.o += $(altivec_flags)
+CFLAGS_powerpc/altivec8.o += $(altivec_flags)
+targets += powerpc/altivec1.c \
+ powerpc/altivec2.c \
+ powerpc/altivec4.c \
+ powerpc/altivec8.c
+$(obj)/powerpc/altivec%.c: $(src)/powerpc/altivec.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_powerpc/vpermxor1.o += $(altivec_flags)
+CFLAGS_powerpc/vpermxor2.o += $(altivec_flags)
+CFLAGS_powerpc/vpermxor4.o += $(altivec_flags)
+CFLAGS_powerpc/vpermxor8.o += $(altivec_flags)
+targets += powerpc/vpermxor1.c \
+ powerpc/vpermxor2.c \
+ powerpc/vpermxor4.c \
+ powerpc/vpermxor8.c
+$(obj)/powerpc/vpermxor%.c: $(src)/powerpc/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+targets += s390/s390vx8.c
+$(obj)/s390/s390vx%.c: $(src)/s390/s390vx.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+quiet_cmd_mktable = TABLE $@
+ cmd_mktable = $(obj)/mktables > $@
+
+targets += tables.c
+$(obj)/tables.c: $(obj)/mktables FORCE
+ $(call if_changed,mktable)
diff --git a/lib/raid6/algos.c b/lib/raid/raid6/algos.c
similarity index 100%
rename from lib/raid6/algos.c
rename to lib/raid/raid6/algos.c
diff --git a/lib/raid6/neon.c b/lib/raid/raid6/arm/neon.c
similarity index 100%
rename from lib/raid6/neon.c
rename to lib/raid/raid6/arm/neon.c
diff --git a/lib/raid6/neon.h b/lib/raid/raid6/arm/neon.h
similarity index 100%
rename from lib/raid6/neon.h
rename to lib/raid/raid6/arm/neon.h
diff --git a/lib/raid6/neon.uc b/lib/raid/raid6/arm/neon.uc
similarity index 100%
rename from lib/raid6/neon.uc
rename to lib/raid/raid6/arm/neon.uc
diff --git a/lib/raid6/recov_neon.c b/lib/raid/raid6/arm/recov_neon.c
similarity index 100%
rename from lib/raid6/recov_neon.c
rename to lib/raid/raid6/arm/recov_neon.c
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid/raid6/arm/recov_neon_inner.c
similarity index 100%
rename from lib/raid6/recov_neon_inner.c
rename to lib/raid/raid6/arm/recov_neon_inner.c
diff --git a/lib/raid6/int.uc b/lib/raid/raid6/int.uc
similarity index 100%
rename from lib/raid6/int.uc
rename to lib/raid/raid6/int.uc
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid/raid6/loongarch/loongarch_simd.c
similarity index 100%
rename from lib/raid6/loongarch_simd.c
rename to lib/raid/raid6/loongarch/loongarch_simd.c
diff --git a/lib/raid6/recov_loongarch_simd.c b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
similarity index 100%
rename from lib/raid6/recov_loongarch_simd.c
rename to lib/raid/raid6/loongarch/recov_loongarch_simd.c
diff --git a/lib/raid6/mktables.c b/lib/raid/raid6/mktables.c
similarity index 100%
rename from lib/raid6/mktables.c
rename to lib/raid/raid6/mktables.c
diff --git a/lib/raid6/altivec.uc b/lib/raid/raid6/powerpc/altivec.uc
similarity index 98%
rename from lib/raid6/altivec.uc
rename to lib/raid/raid6/powerpc/altivec.uc
index 2c59963e58f9..130d3d3dd42c 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid/raid6/powerpc/altivec.uc
@@ -24,8 +24,6 @@
#include <linux/raid/pq.h>
-#ifdef CONFIG_ALTIVEC
-
#include <altivec.h>
#include <asm/cputable.h>
#include <asm/switch_to.h>
@@ -122,5 +120,3 @@ const struct raid6_calls raid6_altivec$# = {
"altivecx$#",
0
};
-
-#endif /* CONFIG_ALTIVEC */
diff --git a/lib/raid6/vpermxor.uc b/lib/raid/raid6/powerpc/vpermxor.uc
similarity index 98%
rename from lib/raid6/vpermxor.uc
rename to lib/raid/raid6/powerpc/vpermxor.uc
index a8e76b1c956e..595f20aaf4cf 100644
--- a/lib/raid6/vpermxor.uc
+++ b/lib/raid/raid6/powerpc/vpermxor.uc
@@ -21,8 +21,6 @@
*/
#include <linux/raid/pq.h>
-#ifdef CONFIG_ALTIVEC
-
#include <altivec.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
@@ -95,4 +93,3 @@ const struct raid6_calls raid6_vpermxor$# = {
"vpermxor$#",
0
};
-#endif
diff --git a/lib/raid6/recov.c b/lib/raid/raid6/recov.c
similarity index 100%
rename from lib/raid6/recov.c
rename to lib/raid/raid6/recov.c
diff --git a/lib/raid6/recov_rvv.c b/lib/raid/raid6/riscv/recov_rvv.c
similarity index 100%
rename from lib/raid6/recov_rvv.c
rename to lib/raid/raid6/riscv/recov_rvv.c
diff --git a/lib/raid6/rvv.c b/lib/raid/raid6/riscv/rvv.c
similarity index 100%
rename from lib/raid6/rvv.c
rename to lib/raid/raid6/riscv/rvv.c
diff --git a/lib/raid6/rvv.h b/lib/raid/raid6/riscv/rvv.h
similarity index 100%
rename from lib/raid6/rvv.h
rename to lib/raid/raid6/riscv/rvv.h
diff --git a/lib/raid6/recov_s390xc.c b/lib/raid/raid6/s390/recov_s390xc.c
similarity index 100%
rename from lib/raid6/recov_s390xc.c
rename to lib/raid/raid6/s390/recov_s390xc.c
diff --git a/lib/raid6/s390vx.uc b/lib/raid/raid6/s390/s390vx.uc
similarity index 100%
rename from lib/raid6/s390vx.uc
rename to lib/raid/raid6/s390/s390vx.uc
diff --git a/lib/raid/raid6/tests/Makefile b/lib/raid/raid6/tests/Makefile
new file mode 100644
index 000000000000..87a001b22847
--- /dev/null
+++ b/lib/raid/raid6/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_RAID6_PQ_KUNIT_TEST) += raid6_kunit.o
diff --git a/lib/raid6/test/test.c b/lib/raid/raid6/tests/raid6_kunit.c
similarity index 100%
rename from lib/raid6/test/test.c
rename to lib/raid/raid6/tests/raid6_kunit.c
diff --git a/lib/raid6/unroll.awk b/lib/raid/raid6/unroll.awk
similarity index 100%
rename from lib/raid6/unroll.awk
rename to lib/raid/raid6/unroll.awk
diff --git a/lib/raid6/avx2.c b/lib/raid/raid6/x86/avx2.c
similarity index 100%
rename from lib/raid6/avx2.c
rename to lib/raid/raid6/x86/avx2.c
diff --git a/lib/raid6/avx512.c b/lib/raid/raid6/x86/avx512.c
similarity index 100%
rename from lib/raid6/avx512.c
rename to lib/raid/raid6/x86/avx512.c
diff --git a/lib/raid6/mmx.c b/lib/raid/raid6/x86/mmx.c
similarity index 99%
rename from lib/raid6/mmx.c
rename to lib/raid/raid6/x86/mmx.c
index e411f0cfbd95..7e9810669347 100644
--- a/lib/raid6/mmx.c
+++ b/lib/raid/raid6/x86/mmx.c
@@ -11,8 +11,6 @@
* MMX implementation of RAID-6 syndrome functions
*/
-#ifdef CONFIG_X86_32
-
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
@@ -135,5 +133,3 @@ const struct raid6_calls raid6_mmxx2 = {
"mmxx2",
0
};
-
-#endif
diff --git a/lib/raid6/recov_avx2.c b/lib/raid/raid6/x86/recov_avx2.c
similarity index 100%
rename from lib/raid6/recov_avx2.c
rename to lib/raid/raid6/x86/recov_avx2.c
diff --git a/lib/raid6/recov_avx512.c b/lib/raid/raid6/x86/recov_avx512.c
similarity index 100%
rename from lib/raid6/recov_avx512.c
rename to lib/raid/raid6/x86/recov_avx512.c
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid/raid6/x86/recov_ssse3.c
similarity index 100%
rename from lib/raid6/recov_ssse3.c
rename to lib/raid/raid6/x86/recov_ssse3.c
diff --git a/lib/raid6/sse1.c b/lib/raid/raid6/x86/sse1.c
similarity index 99%
rename from lib/raid6/sse1.c
rename to lib/raid/raid6/x86/sse1.c
index 794d5cfa0306..deecdd72ceec 100644
--- a/lib/raid6/sse1.c
+++ b/lib/raid/raid6/x86/sse1.c
@@ -16,8 +16,6 @@
* worthwhile as a separate implementation.
*/
-#ifdef CONFIG_X86_32
-
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
@@ -155,5 +153,3 @@ const struct raid6_calls raid6_sse1x2 = {
"sse1x2",
1 /* Has cache hints */
};
-
-#endif
diff --git a/lib/raid6/sse2.c b/lib/raid/raid6/x86/sse2.c
similarity index 100%
rename from lib/raid6/sse2.c
rename to lib/raid/raid6/x86/sse2.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
deleted file mode 100644
index 6fd048c127b6..000000000000
--- a/lib/raid6/Makefile
+++ /dev/null
@@ -1,83 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_RAID6_PQ) += raid6_pq.o test/
-
-raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
- int8.o
-
-raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
-raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
- vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
-raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
-raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
-raid6_pq-$(CONFIG_LOONGARCH) += loongarch_simd.o recov_loongarch_simd.o
-raid6_pq-$(CONFIG_RISCV_ISA_V) += rvv.o recov_rvv.o
-
-hostprogs += mktables
-
-ifeq ($(CONFIG_ALTIVEC),y)
-altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
-# Enable <altivec.h>
-altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
-
-ifdef CONFIG_CC_IS_CLANG
-# clang ppc port does not yet support -maltivec when -msoft-float is
-# enabled. A future release of clang will resolve this
-# https://llvm.org/pr31177
-CFLAGS_REMOVE_altivec1.o += -msoft-float
-CFLAGS_REMOVE_altivec2.o += -msoft-float
-CFLAGS_REMOVE_altivec4.o += -msoft-float
-CFLAGS_REMOVE_altivec8.o += -msoft-float
-CFLAGS_REMOVE_vpermxor1.o += -msoft-float
-CFLAGS_REMOVE_vpermxor2.o += -msoft-float
-CFLAGS_REMOVE_vpermxor4.o += -msoft-float
-CFLAGS_REMOVE_vpermxor8.o += -msoft-float
-endif
-endif
-
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -v N=$* -f $(src)/unroll.awk < $< > $@
-
-targets += int1.c int2.c int4.c int8.c
-$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_altivec1.o += $(altivec_flags)
-CFLAGS_altivec2.o += $(altivec_flags)
-CFLAGS_altivec4.o += $(altivec_flags)
-CFLAGS_altivec8.o += $(altivec_flags)
-targets += altivec1.c altivec2.c altivec4.c altivec8.c
-$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_vpermxor1.o += $(altivec_flags)
-CFLAGS_vpermxor2.o += $(altivec_flags)
-CFLAGS_vpermxor4.o += $(altivec_flags)
-CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
-$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_neon1.o += $(CC_FLAGS_FPU)
-CFLAGS_neon2.o += $(CC_FLAGS_FPU)
-CFLAGS_neon4.o += $(CC_FLAGS_FPU)
-CFLAGS_neon8.o += $(CC_FLAGS_FPU)
-CFLAGS_recov_neon_inner.o += $(CC_FLAGS_FPU)
-CFLAGS_REMOVE_neon1.o += $(CC_FLAGS_NO_FPU)
-CFLAGS_REMOVE_neon2.o += $(CC_FLAGS_NO_FPU)
-CFLAGS_REMOVE_neon4.o += $(CC_FLAGS_NO_FPU)
-CFLAGS_REMOVE_neon8.o += $(CC_FLAGS_NO_FPU)
-CFLAGS_REMOVE_recov_neon_inner.o += $(CC_FLAGS_NO_FPU)
-targets += neon1.c neon2.c neon4.c neon8.c
-$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += s390vx8.c
-$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-quiet_cmd_mktable = TABLE $@
- cmd_mktable = $(obj)/mktables > $@
-
-targets += tables.c
-$(obj)/tables.c: $(obj)/mktables FORCE
- $(call if_changed,mktable)
diff --git a/lib/raid6/test/.gitignore b/lib/raid6/test/.gitignore
deleted file mode 100644
index 1b68a77f348f..000000000000
--- a/lib/raid6/test/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/int.uc
-/neon.uc
-/raid6test
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
deleted file mode 100644
index 268b085af4d3..000000000000
--- a/lib/raid6/test/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_RAID6_PQ_KUNIT_TEST) += test.o
--
2.47.3
^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 04/17] raid6: remove unused defines in pq.h
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (2 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 03/17] raid6: move to lib/raid/ Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 05/17] raid6: remove raid6_get_zero_page Christoph Hellwig
` (12 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
These are not used anywhere in the kernel.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
include/linux/raid/pq.h | 6 ------
1 file changed, 6 deletions(-)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d26788fada58..5e7e743b83f5 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -90,12 +90,6 @@ extern const struct raid6_calls raid6_neonx8;
extern const struct raid6_calls * const raid6_algos[];
extern const struct raid6_recov_calls *const raid6_recov_algos[];
-/* Return values from chk_syndrome */
-#define RAID6_OK 0
-#define RAID6_P_BAD 1
-#define RAID6_Q_BAD 2
-#define RAID6_PQ_BAD 3
-
/* Galois field tables */
extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 05/17] raid6: remove raid6_get_zero_page
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (3 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 04/17] raid6: remove unused defines in pq.h Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 06/17] raid6: use named initializers for struct raid6_calls Christoph Hellwig
` (11 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Just open code it as in other places in the kernel.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
crypto/async_tx/async_pq.c | 2 +-
crypto/async_tx/async_raid6_recov.c | 4 ++--
include/linux/raid/pq.h | 6 ------
lib/raid/raid6/arm/recov_neon.c | 6 +++---
lib/raid/raid6/loongarch/recov_loongarch_simd.c | 12 ++++++------
lib/raid/raid6/recov.c | 6 +++---
lib/raid/raid6/riscv/recov_rvv.c | 6 +++---
lib/raid/raid6/s390/recov_s390xc.c | 6 +++---
lib/raid/raid6/x86/recov_avx2.c | 6 +++---
lib/raid/raid6/x86/recov_avx512.c | 6 +++---
lib/raid/raid6/x86/recov_ssse3.c | 6 +++---
11 files changed, 30 insertions(+), 36 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 9e4bb7fbde25..0ce6f07b4e0d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -119,7 +119,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
for (i = 0; i < disks; i++) {
if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
- srcs[i] = raid6_get_zero_page();
+ srcs[i] = page_address(ZERO_PAGE(0));
} else {
srcs[i] = page_address(blocks[i]) + offsets[i];
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 539ea5b378dc..f2dc6af6e6a7 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -414,7 +414,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
- ptrs[i] = raid6_get_zero_page();
+ ptrs[i] = page_address(ZERO_PAGE(0));
else
ptrs[i] = page_address(blocks[i]) + offs[i];
@@ -497,7 +497,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
- ptrs[i] = raid6_get_zero_page();
+ ptrs[i] = page_address(ZERO_PAGE(0));
else
ptrs[i] = page_address(blocks[i]) + offs[i];
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 5e7e743b83f5..f27a866c287f 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -11,12 +11,6 @@
#include <linux/blkdev.h>
#include <linux/mm.h>
-/* This should be const but the raid6 code is too convoluted for that. */
-static inline void *raid6_get_zero_page(void)
-{
- return page_address(ZERO_PAGE(0));
-}
-
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
diff --git a/lib/raid/raid6/arm/recov_neon.c b/lib/raid/raid6/arm/recov_neon.c
index 13d5df718c15..461bdb98b5cc 100644
--- a/lib/raid/raid6/arm/recov_neon.c
+++ b/lib/raid/raid6/arm/recov_neon.c
@@ -29,10 +29,10 @@ static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -66,7 +66,7 @@ static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/loongarch/recov_loongarch_simd.c b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
index eb3a1e79f01f..4d4563209647 100644
--- a/lib/raid/raid6/loongarch/recov_loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
@@ -43,10 +43,10 @@ static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -198,7 +198,7 @@ static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -317,10 +317,10 @@ static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -437,7 +437,7 @@ static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/recov.c b/lib/raid/raid6/recov.c
index 8d113196632e..211e1df28963 100644
--- a/lib/raid/raid6/recov.c
+++ b/lib/raid/raid6/recov.c
@@ -31,10 +31,10 @@ static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -72,7 +72,7 @@ static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/riscv/recov_rvv.c b/lib/raid/raid6/riscv/recov_rvv.c
index 40c393206b6a..f77d9c430687 100644
--- a/lib/raid/raid6/riscv/recov_rvv.c
+++ b/lib/raid/raid6/riscv/recov_rvv.c
@@ -158,10 +158,10 @@ static void raid6_2data_recov_rvv(int disks, size_t bytes, int faila,
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -196,7 +196,7 @@ static void raid6_datap_recov_rvv(int disks, size_t bytes, int faila,
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/s390/recov_s390xc.c b/lib/raid/raid6/s390/recov_s390xc.c
index 487018f81192..0f32217b7123 100644
--- a/lib/raid/raid6/s390/recov_s390xc.c
+++ b/lib/raid/raid6/s390/recov_s390xc.c
@@ -34,10 +34,10 @@ static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -81,7 +81,7 @@ static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/x86/recov_avx2.c b/lib/raid/raid6/x86/recov_avx2.c
index 19fbd9c4dce6..325310c81e1c 100644
--- a/lib/raid/raid6/x86/recov_avx2.c
+++ b/lib/raid/raid6/x86/recov_avx2.c
@@ -28,10 +28,10 @@ static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -196,7 +196,7 @@ static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/x86/recov_avx512.c b/lib/raid/raid6/x86/recov_avx512.c
index 143f4976b2ad..08de77fcb8bd 100644
--- a/lib/raid/raid6/x86/recov_avx512.c
+++ b/lib/raid/raid6/x86/recov_avx512.c
@@ -37,10 +37,10 @@ static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
*/
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -238,7 +238,7 @@ static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
*/
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
diff --git a/lib/raid/raid6/x86/recov_ssse3.c b/lib/raid/raid6/x86/recov_ssse3.c
index 146cdbf465bd..002bef1e0847 100644
--- a/lib/raid/raid6/x86/recov_ssse3.c
+++ b/lib/raid/raid6/x86/recov_ssse3.c
@@ -30,10 +30,10 @@ static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
- ptrs[failb] = raid6_get_zero_page();
+ ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
@@ -203,7 +203,7 @@ static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
- ptrs[faila] = raid6_get_zero_page();
+ ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 06/17] raid6: use named initializers for struct raid6_calls
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (4 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 05/17] raid6: remove raid6_get_zero_page Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 07/17] raid6: improve the public interface Christoph Hellwig
` (10 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/arm/neon.c | 9 +++----
lib/raid/raid6/int.uc | 8 +++---
lib/raid/raid6/loongarch/loongarch_simd.c | 18 ++++++-------
lib/raid/raid6/powerpc/altivec.uc | 8 +++---
lib/raid/raid6/powerpc/vpermxor.uc | 8 +++---
lib/raid/raid6/riscv/rvv.h | 9 +++----
lib/raid/raid6/s390/s390vx.uc | 10 +++----
lib/raid/raid6/x86/avx2.c | 33 ++++++++++++-----------
lib/raid/raid6/x86/avx512.c | 33 ++++++++++++-----------
lib/raid/raid6/x86/mmx.c | 16 +++++------
lib/raid/raid6/x86/sse1.c | 18 ++++++-------
lib/raid/raid6/x86/sse2.c | 30 ++++++++++-----------
12 files changed, 95 insertions(+), 105 deletions(-)
diff --git a/lib/raid/raid6/arm/neon.c b/lib/raid/raid6/arm/neon.c
index 47b8bb0afc65..c21da59ab48f 100644
--- a/lib/raid/raid6/arm/neon.c
+++ b/lib/raid/raid6/arm/neon.c
@@ -40,11 +40,10 @@
start, stop, (unsigned long)bytes, ptrs);\
} \
struct raid6_calls const raid6_neonx ## _n = { \
- raid6_neon ## _n ## _gen_syndrome, \
- raid6_neon ## _n ## _xor_syndrome, \
- raid6_have_neon, \
- "neonx" #_n, \
- 0 \
+ .gen_syndrome = raid6_neon ## _n ## _gen_syndrome, \
+ .xor_syndrome = raid6_neon ## _n ## _xor_syndrome, \
+ .valid = raid6_have_neon, \
+ .name = "neonx" #_n, \
}
static int raid6_have_neon(void)
diff --git a/lib/raid/raid6/int.uc b/lib/raid/raid6/int.uc
index 1ba56c3fa482..4f5f2869e21e 100644
--- a/lib/raid/raid6/int.uc
+++ b/lib/raid/raid6/int.uc
@@ -139,9 +139,7 @@ static void raid6_int$#_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_intx$# = {
- raid6_int$#_gen_syndrome,
- raid6_int$#_xor_syndrome,
- NULL, /* always valid */
- "int" NSTRING "x$#",
- 0
+ .gen_syndrome = raid6_int$#_gen_syndrome,
+ .xor_syndrome = raid6_int$#_xor_syndrome,
+ .name = "int" NSTRING "x$#",
};
diff --git a/lib/raid/raid6/loongarch/loongarch_simd.c b/lib/raid/raid6/loongarch/loongarch_simd.c
index 72f4d92d4876..1b4cd1512d05 100644
--- a/lib/raid/raid6/loongarch/loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/loongarch_simd.c
@@ -244,11 +244,10 @@ static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_lsx = {
- raid6_lsx_gen_syndrome,
- raid6_lsx_xor_syndrome,
- raid6_has_lsx,
- "lsx",
- .priority = 0 /* see the comment near the top of the file for reason */
+ .gen_syndrome = raid6_lsx_gen_syndrome,
+ .xor_syndrome = raid6_lsx_xor_syndrome,
+ .valid = raid6_has_lsx,
+ .name = "lsx",
};
#undef NSIZE
@@ -413,11 +412,10 @@ static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_lasx = {
- raid6_lasx_gen_syndrome,
- raid6_lasx_xor_syndrome,
- raid6_has_lasx,
- "lasx",
- .priority = 0 /* see the comment near the top of the file for reason */
+ .gen_syndrome = raid6_lasx_gen_syndrome,
+ .xor_syndrome = raid6_lasx_xor_syndrome,
+ .valid = raid6_has_lasx,
+ .name = "lasx",
};
#undef NSIZE
#endif /* CONFIG_CPU_HAS_LASX */
diff --git a/lib/raid/raid6/powerpc/altivec.uc b/lib/raid/raid6/powerpc/altivec.uc
index 130d3d3dd42c..084ead768ddb 100644
--- a/lib/raid/raid6/powerpc/altivec.uc
+++ b/lib/raid/raid6/powerpc/altivec.uc
@@ -114,9 +114,7 @@ int raid6_have_altivec(void)
#endif
const struct raid6_calls raid6_altivec$# = {
- raid6_altivec$#_gen_syndrome,
- NULL, /* XOR not yet implemented */
- raid6_have_altivec,
- "altivecx$#",
- 0
+ .gen_syndrome = raid6_altivec$#_gen_syndrome,
+ .valid = raid6_have_altivec,
+ .name = "altivecx$#",
};
diff --git a/lib/raid/raid6/powerpc/vpermxor.uc b/lib/raid/raid6/powerpc/vpermxor.uc
index 595f20aaf4cf..bb2c3a316ae8 100644
--- a/lib/raid/raid6/powerpc/vpermxor.uc
+++ b/lib/raid/raid6/powerpc/vpermxor.uc
@@ -87,9 +87,7 @@ int raid6_have_altivec_vpermxor(void)
#endif
const struct raid6_calls raid6_vpermxor$# = {
- raid6_vpermxor$#_gen_syndrome,
- NULL,
- raid6_have_altivec_vpermxor,
- "vpermxor$#",
- 0
+ .gen_syndrome = raid6_vpermxor$#_gen_syndrome,
+ .valid = raid6_have_altivec_vpermxor,
+ .name = "vpermxor$#",
};
diff --git a/lib/raid/raid6/riscv/rvv.h b/lib/raid/raid6/riscv/rvv.h
index b0a71b375962..0d430a4c5f08 100644
--- a/lib/raid/raid6/riscv/rvv.h
+++ b/lib/raid/raid6/riscv/rvv.h
@@ -39,9 +39,8 @@ static int rvv_has_vector(void)
kernel_vector_end(); \
} \
struct raid6_calls const raid6_rvvx ## _n = { \
- raid6_rvv ## _n ## _gen_syndrome, \
- raid6_rvv ## _n ## _xor_syndrome, \
- rvv_has_vector, \
- "rvvx" #_n, \
- 0 \
+ .gen_syndrome = raid6_rvv ## _n ## _gen_syndrome, \
+ .xor_syndrome = raid6_rvv ## _n ## _xor_syndrome, \
+ .valid = rvv_has_vector, \
+ .name = "rvvx" #_n, \
}
diff --git a/lib/raid/raid6/s390/s390vx.uc b/lib/raid/raid6/s390/s390vx.uc
index 8aa53eb2f395..97c5d5d9dcf9 100644
--- a/lib/raid/raid6/s390/s390vx.uc
+++ b/lib/raid/raid6/s390/s390vx.uc
@@ -127,9 +127,9 @@ static int raid6_s390vx$#_valid(void)
}
const struct raid6_calls raid6_s390vx$# = {
- raid6_s390vx$#_gen_syndrome,
- raid6_s390vx$#_xor_syndrome,
- raid6_s390vx$#_valid,
- "vx128x$#",
- 1
+ .gen_syndrome = raid6_s390vx$#_gen_syndrome,
+ .xor_syndrome = raid6_s390vx$#_xor_syndrome,
+ .valid = raid6_s390vx$#_valid,
+ .name = "vx128x$#",
+ .priority = 1,
};
diff --git a/lib/raid/raid6/x86/avx2.c b/lib/raid/raid6/x86/avx2.c
index a1a5213918af..aab8b624c635 100644
--- a/lib/raid/raid6/x86/avx2.c
+++ b/lib/raid/raid6/x86/avx2.c
@@ -128,11 +128,12 @@ static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_avx2x1 = {
- raid6_avx21_gen_syndrome,
- raid6_avx21_xor_syndrome,
- raid6_have_avx2,
- "avx2x1",
- .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
+ .gen_syndrome = raid6_avx21_gen_syndrome,
+ .xor_syndrome = raid6_avx21_xor_syndrome,
+ .valid = raid6_have_avx2,
+ .name = "avx2x1",
+ /* Prefer AVX2 over priority 1 (SSE2 and others) */
+ .priority = 2,
};
/*
@@ -258,11 +259,12 @@ static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_avx2x2 = {
- raid6_avx22_gen_syndrome,
- raid6_avx22_xor_syndrome,
- raid6_have_avx2,
- "avx2x2",
- .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
+ .gen_syndrome = raid6_avx22_gen_syndrome,
+ .xor_syndrome = raid6_avx22_xor_syndrome,
+ .valid = raid6_have_avx2,
+ .name = "avx2x2",
+ /* Prefer AVX2 over priority 1 (SSE2 and others) */
+ .priority = 2,
};
#ifdef CONFIG_X86_64
@@ -461,10 +463,11 @@ static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_avx2x4 = {
- raid6_avx24_gen_syndrome,
- raid6_avx24_xor_syndrome,
- raid6_have_avx2,
- "avx2x4",
- .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
+ .gen_syndrome = raid6_avx24_gen_syndrome,
+ .xor_syndrome = raid6_avx24_xor_syndrome,
+ .valid = raid6_have_avx2,
+ .name = "avx2x4",
+ /* Prefer AVX2 over priority 1 (SSE2 and others) */
+ .priority = 2,
};
#endif /* CONFIG_X86_64 */
diff --git a/lib/raid/raid6/x86/avx512.c b/lib/raid/raid6/x86/avx512.c
index 874998bcd7d7..47636b16632f 100644
--- a/lib/raid/raid6/x86/avx512.c
+++ b/lib/raid/raid6/x86/avx512.c
@@ -156,11 +156,12 @@ static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_avx512x1 = {
- raid6_avx5121_gen_syndrome,
- raid6_avx5121_xor_syndrome,
- raid6_have_avx512,
- "avx512x1",
- .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
+ .gen_syndrome = raid6_avx5121_gen_syndrome,
+ .xor_syndrome = raid6_avx5121_xor_syndrome,
+ .valid = raid6_have_avx512,
+ .name = "avx512x1",
+ /* Prefer AVX512 over priority 1 (SSE2 and others) */
+ .priority = 2,
};
/*
@@ -313,11 +314,12 @@ static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_avx512x2 = {
- raid6_avx5122_gen_syndrome,
- raid6_avx5122_xor_syndrome,
- raid6_have_avx512,
- "avx512x2",
- .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
+ .gen_syndrome = raid6_avx5122_gen_syndrome,
+ .xor_syndrome = raid6_avx5122_xor_syndrome,
+ .valid = raid6_have_avx512,
+ .name = "avx512x2",
+ /* Prefer AVX512 over priority 1 (SSE2 and others) */
+ .priority = 2,
};
#ifdef CONFIG_X86_64
@@ -551,10 +553,11 @@ static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
kernel_fpu_end();
}
const struct raid6_calls raid6_avx512x4 = {
- raid6_avx5124_gen_syndrome,
- raid6_avx5124_xor_syndrome,
- raid6_have_avx512,
- "avx512x4",
- .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
+ .gen_syndrome = raid6_avx5124_gen_syndrome,
+ .xor_syndrome = raid6_avx5124_xor_syndrome,
+ .valid = raid6_have_avx512,
+ .name = "avx512x4",
+ /* Prefer AVX512 over priority 1 (SSE2 and others) */
+ .priority = 2,
};
#endif
diff --git a/lib/raid/raid6/x86/mmx.c b/lib/raid/raid6/x86/mmx.c
index 7e9810669347..22b9fdaa705f 100644
--- a/lib/raid/raid6/x86/mmx.c
+++ b/lib/raid/raid6/x86/mmx.c
@@ -68,11 +68,9 @@ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
const struct raid6_calls raid6_mmxx1 = {
- raid6_mmx1_gen_syndrome,
- NULL, /* XOR not yet implemented */
- raid6_have_mmx,
- "mmxx1",
- 0
+ .gen_syndrome = raid6_mmx1_gen_syndrome,
+ .valid = raid6_have_mmx,
+ .name = "mmxx1",
};
/*
@@ -127,9 +125,7 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
const struct raid6_calls raid6_mmxx2 = {
- raid6_mmx2_gen_syndrome,
- NULL, /* XOR not yet implemented */
- raid6_have_mmx,
- "mmxx2",
- 0
+ .gen_syndrome = raid6_mmx2_gen_syndrome,
+ .valid = raid6_have_mmx,
+ .name = "mmxx2",
};
diff --git a/lib/raid/raid6/x86/sse1.c b/lib/raid/raid6/x86/sse1.c
index deecdd72ceec..fad214a430d8 100644
--- a/lib/raid/raid6/x86/sse1.c
+++ b/lib/raid/raid6/x86/sse1.c
@@ -84,11 +84,10 @@ static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
const struct raid6_calls raid6_sse1x1 = {
- raid6_sse11_gen_syndrome,
- NULL, /* XOR not yet implemented */
- raid6_have_sse1_or_mmxext,
- "sse1x1",
- 1 /* Has cache hints */
+ .gen_syndrome = raid6_sse11_gen_syndrome,
+ .valid = raid6_have_sse1_or_mmxext,
+ .name = "sse1x1",
+ .priority = 1, /* Has cache hints */
};
/*
@@ -147,9 +146,8 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
const struct raid6_calls raid6_sse1x2 = {
- raid6_sse12_gen_syndrome,
- NULL, /* XOR not yet implemented */
- raid6_have_sse1_or_mmxext,
- "sse1x2",
- 1 /* Has cache hints */
+ .gen_syndrome = raid6_sse12_gen_syndrome,
+ .valid = raid6_have_sse1_or_mmxext,
+ .name = "sse1x2",
+ .priority = 1, /* Has cache hints */
};
diff --git a/lib/raid/raid6/x86/sse2.c b/lib/raid/raid6/x86/sse2.c
index f9edf8a8d1c4..1b28e858a1d4 100644
--- a/lib/raid/raid6/x86/sse2.c
+++ b/lib/raid/raid6/x86/sse2.c
@@ -133,11 +133,11 @@ static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_sse2x1 = {
- raid6_sse21_gen_syndrome,
- raid6_sse21_xor_syndrome,
- raid6_have_sse2,
- "sse2x1",
- 1 /* Has cache hints */
+ .gen_syndrome = raid6_sse21_gen_syndrome,
+ .xor_syndrome = raid6_sse21_xor_syndrome,
+ .valid = raid6_have_sse2,
+ .name = "sse2x1",
+ .priority = 1, /* Has cache hints */
};
/*
@@ -263,11 +263,11 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
}
const struct raid6_calls raid6_sse2x2 = {
- raid6_sse22_gen_syndrome,
- raid6_sse22_xor_syndrome,
- raid6_have_sse2,
- "sse2x2",
- 1 /* Has cache hints */
+ .gen_syndrome = raid6_sse22_gen_syndrome,
+ .xor_syndrome = raid6_sse22_xor_syndrome,
+ .valid = raid6_have_sse2,
+ .name = "sse2x2",
+ .priority = 1, /* Has cache hints */
};
#ifdef CONFIG_X86_64
@@ -470,11 +470,11 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_sse2x4 = {
- raid6_sse24_gen_syndrome,
- raid6_sse24_xor_syndrome,
- raid6_have_sse2,
- "sse2x4",
- 1 /* Has cache hints */
+ .gen_syndrome = raid6_sse24_gen_syndrome,
+ .xor_syndrome = raid6_sse24_xor_syndrome,
+ .valid = raid6_have_sse2,
+ .name = "sse2x4",
+ .priority = 1, /* Has cache hints */
};
#endif /* CONFIG_X86_64 */
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 07/17] raid6: improve the public interface
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (5 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 06/17] raid6: use named initializers for struct raid6_calls Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 08/17] raid6: hide internals Christoph Hellwig
` (9 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Stop directly calling into function pointers from users of the RAID6 PQ
API, and provide exported functions with proper documentation and
API guarantees, with asserts where applicable, instead.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
Documentation/crypto/async-tx-api.rst | 4 +-
crypto/async_tx/async_pq.c | 6 +-
crypto/async_tx/async_raid6_recov.c | 4 +-
drivers/md/raid5.c | 4 +-
fs/btrfs/raid56.c | 8 +-
include/linux/raid/pq.h | 19 +--
lib/raid/raid6/algos.c | 137 +++++++++++++++++-
lib/raid/raid6/arm/recov_neon.c | 4 +-
.../raid6/loongarch/recov_loongarch_simd.c | 8 +-
lib/raid/raid6/recov.c | 4 +-
lib/raid/raid6/riscv/recov_rvv.c | 4 +-
lib/raid/raid6/s390/recov_s390xc.c | 4 +-
lib/raid/raid6/x86/recov_avx2.c | 4 +-
lib/raid/raid6/x86/recov_avx512.c | 4 +-
lib/raid/raid6/x86/recov_ssse3.c | 4 +-
15 files changed, 170 insertions(+), 48 deletions(-)
diff --git a/Documentation/crypto/async-tx-api.rst b/Documentation/crypto/async-tx-api.rst
index f88a7809385e..49fcfc66314a 100644
--- a/Documentation/crypto/async-tx-api.rst
+++ b/Documentation/crypto/async-tx-api.rst
@@ -82,9 +82,9 @@ xor_val xor a series of source buffers and set a flag if the
pq generate the p+q (raid6 syndrome) from a series of source buffers
pq_val validate that a p and or q buffer are in sync with a given series of
sources
-datap (raid6_datap_recov) recover a raid6 data block and the p block
+datap (raid6_recov_datap) recover a raid6 data block and the p block
from the given sources
-2data (raid6_2data_recov) recover 2 raid6 data blocks from the given
+2data (raid6_recov_2data) recover 2 raid6 data blocks from the given
sources
======== ====================================================================
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 0ce6f07b4e0d..f3574f80d1df 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -131,11 +131,11 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
}
}
if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
- BUG_ON(!raid6_call.xor_syndrome);
+ BUG_ON(!raid6_can_xor_syndrome());
if (start >= 0)
- raid6_call.xor_syndrome(disks, start, stop, len, srcs);
+ raid6_xor_syndrome(disks, start, stop, len, srcs);
} else
- raid6_call.gen_syndrome(disks, len, srcs);
+ raid6_gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index f2dc6af6e6a7..305ea1421a3e 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -418,7 +418,7 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
else
ptrs[i] = page_address(blocks[i]) + offs[i];
- raid6_2data_recov(disks, bytes, faila, failb, ptrs);
+ raid6_recov_2data(disks, bytes, faila, failb, ptrs);
async_tx_sync_epilog(submit);
@@ -501,7 +501,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
else
ptrs[i] = page_address(blocks[i]) + offs[i];
- raid6_datap_recov(disks, bytes, faila, ptrs);
+ raid6_recov_datap(disks, bytes, faila, ptrs);
async_tx_sync_epilog(submit);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a8e8d431071b..13943e2b3d51 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6944,7 +6944,7 @@ raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
if (kstrtoul(page, 10, &new))
return -EINVAL;
- if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+ if (new != PARITY_DISABLE_RMW && !raid6_can_xor_syndrome())
return -EINVAL;
if (new != PARITY_DISABLE_RMW &&
@@ -7635,7 +7635,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->level = mddev->new_level;
if (conf->level == 6) {
conf->max_degraded = 2;
- if (raid6_call.xor_syndrome)
+ if (raid6_can_xor_syndrome())
conf->rmw_level = PARITY_ENABLE_RMW;
else
conf->rmw_level = PARITY_DISABLE_RMW;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index ad091c56ce77..4ab3cccb11f1 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1410,7 +1410,7 @@ static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int
rbio_qstripe_paddr(rbio, sector_nr, step_nr));
assert_rbio(rbio);
- raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ raid6_gen_syndrome(rbio->real_stripes, step, pointers);
} else {
/* raid5 */
memcpy(pointers[rbio->nr_data], pointers[0], step);
@@ -1992,10 +1992,10 @@ static void recover_vertical_step(struct btrfs_raid_bio *rbio,
}
if (failb == rbio->real_stripes - 2) {
- raid6_datap_recov(rbio->real_stripes, step,
+ raid6_recov_datap(rbio->real_stripes, step,
faila, pointers);
} else {
- raid6_2data_recov(rbio->real_stripes, step,
+ raid6_recov_2data(rbio->real_stripes, step,
faila, failb, pointers);
}
} else {
@@ -2649,7 +2649,7 @@ static bool verify_one_parity_step(struct btrfs_raid_bio *rbio,
if (has_qstripe) {
assert_rbio(rbio);
/* RAID6, call the library function to fill in our P/Q. */
- raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ raid6_gen_syndrome(rbio->real_stripes, step, pointers);
} else {
/* RAID5. */
memcpy(pointers[nr_data], pointers[0], step);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f27a866c287f..425a227591c0 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -11,6 +11,16 @@
#include <linux/blkdev.h>
#include <linux/mm.h>
+void raid6_gen_syndrome(int disks, size_t bytes, void **ptrs);
+void raid6_xor_syndrome(int disks, int start, int stop, size_t bytes,
+ void **ptrs);
+bool raid6_can_xor_syndrome(void);
+
+void raid6_recov_2data(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+void raid6_recov_datap(int disks, size_t bytes, int faila,
+ void **ptrs);
+
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
@@ -20,9 +30,6 @@ struct raid6_calls {
int priority; /* Relative priority ranking if non-zero */
};
-/* Selected algorithm */
-extern struct raid6_calls raid6_call;
-
/* Various routine sets */
extern const struct raid6_calls raid6_intx1;
extern const struct raid6_calls raid6_intx2;
@@ -92,10 +99,4 @@ extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
-/* Recovery routines */
-extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
- void **ptrs);
-extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
- void **ptrs);
-
#endif /* LINUX_RAID_RAID6_H */
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index 985c60bb00a4..4958c58e3f69 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -16,8 +16,83 @@
#include <linux/gfp.h>
#include <kunit/visibility.h>
-struct raid6_calls raid6_call;
-EXPORT_SYMBOL_GPL(raid6_call);
+static const struct raid6_recov_calls *raid6_recov_algo;
+
+/* Selected algorithm */
+static struct raid6_calls raid6_call;
+
+/**
+ * raid6_gen_syndrome - generate RAID6 P/Q parity
+ * @disks: number of "disks" to operate on including parity
+ * @bytes: length in bytes of each vector
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Generate @bytes worth of RAID6 P and Q parity in @ptrs[@disks - 2] and
+ * @ptrs[@disks - 1] respectively from the memory pointed to by @ptrs[0] to
+ * @ptrs[@disks - 3].
+ *
+ * @disks must be at least 3, and the memory pointed to by each member of @ptrs
+ * must be at least 64-byte aligned. @bytes must be non-zero and a multiple of
+ * 512.
+ *
+ * See https://kernel.org/pub/linux/kernel/people/hpa/raid6.pdf for underlying
+ * algorithm.
+ */
+void raid6_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_gen_syndrome);
+
+/**
+ * raid6_xor_syndrome - update RAID6 P/Q parity
+ * @disks: number of "disks" to operate on including parity
+ * @start: first index into @ptrs to update
+ * @stop: last index into @ptrs to update
+ * @bytes: length in bytes of each vector
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Update @bytes worth of RAID6 P and Q parity in @ptrs[@disks - 2] and
+ * @ptrs[@disks - 1] respectively for the memory pointed to by
+ * @ptrs[@start..@stop].
+ *
+ * This is used to update parity in place using the following sequence:
+ *
+ * 1) call raid6_xor_syndrome(disks, start, stop, ...) for the existing data.
+ * 2) update the data in @ptrs[@start..@stop].
+ * 3) call raid6_xor_syndrome(disks, start, stop, ...) for the new data.
+ *
+ * Data between @start and @stop that is not changed should be filled
+ * with a pointer to the kernel zero page.
+ *
+ * @disks must be at least 3, and the memory pointed to by each member of @ptrs
+ * must be at least 64-byte aligned. @bytes must be non-zero and a multiple of
+ * 512. @stop must be larger than or equal to @start.
+ */
+void raid6_xor_syndrome(int disks, int start, int stop, size_t bytes,
+ void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+ WARN_ON_ONCE(stop < start);
+
+ raid6_call.xor_syndrome(disks, start, stop, bytes, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_xor_syndrome);
+
+/**
+ * raid6_can_xor_syndrome - check if raid6_xor_syndrome() can be used
+ *
+ * Returns %true if raid6_xor_syndrome() can be used, else %false.
+ */
+bool raid6_can_xor_syndrome(void)
+{
+ return !!raid6_call.xor_syndrome;
+}
+EXPORT_SYMBOL_GPL(raid6_can_xor_syndrome);
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
@@ -84,11 +159,58 @@ const struct raid6_calls * const raid6_algos[] = {
};
EXPORT_SYMBOL_IF_KUNIT(raid6_algos);
-void (*raid6_2data_recov)(int, size_t, int, int, void **);
-EXPORT_SYMBOL_GPL(raid6_2data_recov);
+/**
+ * raid6_recov_2data - recover two missing data disks
+ * @disks: number of "disks" to operate on including parity
+ * @bytes: length in bytes of each vector
+ * @faila: first failed data disk index
+ * @failb: second failed data disk index
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Rebuild @bytes of missing data in @ptrs[@faila] and @ptrs[@failb] from the
+ * data in the remaining disks and the two parities pointed to by the other
+ * indices between 0 and @disks - 1 in @ptrs. @disks includes the data disks
+ * and the two parities. @faila must be smaller than @failb.
+ *
+ * Memory pointed to by each pointer in @ptrs must be page aligned and is
+ * limited to %PAGE_SIZE.
+ */
+void raid6_recov_2data(int disks, size_t bytes, int faila, int failb,
+ void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+ WARN_ON_ONCE(bytes > PAGE_SIZE);
+ WARN_ON_ONCE(failb <= faila);
+
+ raid6_recov_algo->data2(disks, bytes, faila, failb, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_recov_2data);
+
+/**
+ * raid6_recov_datap - recover a missing data disk and missing P-parity
+ * @disks: number of "disks" to operate on including parity
+ * @bytes: length in bytes of each vector
+ * @faila: failed data disk index
+ * @ptrs: @disks size array of memory pointers
+ *
+ * Rebuild @bytes of missing data in @ptrs[@faila] and the missing P-parity in
+ * @ptrs[@disks - 2] from the data in the remaining disks and the Q-parity
+ * pointed to by the other indices between 0 and @disks - 1 in @ptrs. @disks
+ * includes the data disks and the two parities.
+ *
+ * Memory pointed to by each pointer in @ptrs must be page aligned and is
+ * limited to %PAGE_SIZE.
+ */
+void raid6_recov_datap(int disks, size_t bytes, int faila, void **ptrs)
+{
+ lockdep_assert_preemption_enabled();
+ WARN_ON_ONCE(bytes & 511);
+ WARN_ON_ONCE(bytes > PAGE_SIZE);
-void (*raid6_datap_recov)(int, size_t, int, void **);
-EXPORT_SYMBOL_GPL(raid6_datap_recov);
+ raid6_recov_algo->datap(disks, bytes, faila, ptrs);
+}
+EXPORT_SYMBOL_GPL(raid6_recov_datap);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
@@ -133,8 +255,7 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
best = *algo;
if (best) {
- raid6_2data_recov = best->data2;
- raid6_datap_recov = best->datap;
+ raid6_recov_algo = best;
pr_info("raid6: using %s recovery algorithm\n", best->name);
} else
diff --git a/lib/raid/raid6/arm/recov_neon.c b/lib/raid/raid6/arm/recov_neon.c
index 461bdb98b5cc..21c26da6290a 100644
--- a/lib/raid/raid6/arm/recov_neon.c
+++ b/lib/raid/raid6/arm/recov_neon.c
@@ -35,7 +35,7 @@ static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -69,7 +69,7 @@ static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/loongarch/recov_loongarch_simd.c b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
index 4d4563209647..7d4d349322b3 100644
--- a/lib/raid/raid6/loongarch/recov_loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
@@ -49,7 +49,7 @@ static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -201,7 +201,7 @@ static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
@@ -323,7 +323,7 @@ static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -440,7 +440,7 @@ static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/recov.c b/lib/raid/raid6/recov.c
index 211e1df28963..cc7e4dc1eaa6 100644
--- a/lib/raid/raid6/recov.c
+++ b/lib/raid/raid6/recov.c
@@ -37,7 +37,7 @@ static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -75,7 +75,7 @@ static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/riscv/recov_rvv.c b/lib/raid/raid6/riscv/recov_rvv.c
index f77d9c430687..3ff39826e33f 100644
--- a/lib/raid/raid6/riscv/recov_rvv.c
+++ b/lib/raid/raid6/riscv/recov_rvv.c
@@ -164,7 +164,7 @@ static void raid6_2data_recov_rvv(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -199,7 +199,7 @@ static void raid6_datap_recov_rvv(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks - 1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/s390/recov_s390xc.c b/lib/raid/raid6/s390/recov_s390xc.c
index 0f32217b7123..2bc4c85174de 100644
--- a/lib/raid/raid6/s390/recov_s390xc.c
+++ b/lib/raid/raid6/s390/recov_s390xc.c
@@ -40,7 +40,7 @@ static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -84,7 +84,7 @@ static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/x86/recov_avx2.c b/lib/raid/raid6/x86/recov_avx2.c
index 325310c81e1c..bef82a38d8eb 100644
--- a/lib/raid/raid6/x86/recov_avx2.c
+++ b/lib/raid/raid6/x86/recov_avx2.c
@@ -34,7 +34,7 @@ static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -199,7 +199,7 @@ static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/x86/recov_avx512.c b/lib/raid/raid6/x86/recov_avx512.c
index 08de77fcb8bd..06c70e771eaa 100644
--- a/lib/raid/raid6/x86/recov_avx512.c
+++ b/lib/raid/raid6/x86/recov_avx512.c
@@ -43,7 +43,7 @@ static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -241,7 +241,7 @@ static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
diff --git a/lib/raid/raid6/x86/recov_ssse3.c b/lib/raid/raid6/x86/recov_ssse3.c
index 002bef1e0847..5ca7d56f23d8 100644
--- a/lib/raid/raid6/x86/recov_ssse3.c
+++ b/lib/raid/raid6/x86/recov_ssse3.c
@@ -36,7 +36,7 @@ static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
ptrs[failb] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
@@ -206,7 +206,7 @@ static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
ptrs[faila] = page_address(ZERO_PAGE(0));
ptrs[disks-1] = dq;
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ raid6_gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 08/17] raid6: hide internals
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (6 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 07/17] raid6: improve the public interface Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 09/17] raid6: rework the init helpers Christoph Hellwig
` (8 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Split out two new headers from the public pq.h:
- lib/raid/raid6/algos.h contains the algorithm lists private to
lib/raid/raid6
- include/linux/raid/pq_tables.h contains the tables also used by
async_tx providers.
The public include/linux/pq.h is now limited to the public interface for
the consumers of the RAID6 PQ API.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
crypto/async_tx/async_pq.c | 1 +
crypto/async_tx/async_raid6_recov.c | 1 +
drivers/dma/bcm-sba-raid.c | 1 +
include/linux/raid/pq.h | 96 ++-----------------
include/linux/raid/pq_tables.h | 19 ++++
lib/raid/raid6/Makefile | 2 +
lib/raid/raid6/algos.c | 3 +-
lib/raid/raid6/algos.h | 82 ++++++++++++++++
lib/raid/raid6/arm/neon.c | 2 +-
lib/raid/raid6/arm/recov_neon.c | 2 +
lib/raid/raid6/int.uc | 2 +-
lib/raid/raid6/loongarch/loongarch_simd.c | 2 +-
.../raid6/loongarch/recov_loongarch_simd.c | 2 +
lib/raid/raid6/mktables.c | 2 +-
lib/raid/raid6/powerpc/altivec.uc | 2 +-
lib/raid/raid6/powerpc/vpermxor.uc | 2 +-
lib/raid/raid6/recov.c | 2 +
lib/raid/raid6/riscv/recov_rvv.c | 2 +
lib/raid/raid6/riscv/rvv.h | 2 +-
lib/raid/raid6/s390/recov_s390xc.c | 2 +
lib/raid/raid6/s390/s390vx.uc | 2 +-
lib/raid/raid6/tests/raid6_kunit.c | 2 +-
lib/raid/raid6/x86/avx2.c | 3 +-
lib/raid/raid6/x86/avx512.c | 3 +-
lib/raid/raid6/x86/mmx.c | 3 +-
lib/raid/raid6/x86/recov_avx2.c | 2 +
lib/raid/raid6/x86/recov_avx512.c | 2 +
lib/raid/raid6/x86/recov_ssse3.c | 2 +
lib/raid/raid6/x86/sse1.c | 3 +-
lib/raid/raid6/x86/sse2.c | 3 +-
30 files changed, 151 insertions(+), 103 deletions(-)
create mode 100644 include/linux/raid/pq_tables.h
create mode 100644 lib/raid/raid6/algos.h
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f3574f80d1df..27f99349e310 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
+#include <linux/raid/pq_tables.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 305ea1421a3e..e53870d84bc5 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
+#include <linux/raid/pq_tables.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index ed037fa883f6..0de03611252e 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -40,6 +40,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>
+#include <linux/raid/pq_tables.h>
#include "dmaengine.h"
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 425a227591c0..1b4da8b98828 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -1,15 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* -*- linux-c -*- ------------------------------------------------------- *
+/*
+ * Copyright 2003 H. Peter Anvin - All Rights Reserved
*
- * Copyright 2003 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
-#ifndef LINUX_RAID_RAID6_H
-#define LINUX_RAID_RAID6_H
+ * Public interface to the RAID6 P/Q calculation and recovery library.
+ */
+#ifndef LINUX_RAID_PQ_H
+#define LINUX_RAID_PQ_H
-#include <linux/blkdev.h>
-#include <linux/mm.h>
+#include <linux/types.h>
void raid6_gen_syndrome(int disks, size_t bytes, void **ptrs);
void raid6_xor_syndrome(int disks, int start, int stop, size_t bytes,
@@ -21,82 +19,4 @@ void raid6_recov_2data(int disks, size_t bytes, int faila, int failb,
void raid6_recov_datap(int disks, size_t bytes, int faila,
void **ptrs);
-/* Routine choices */
-struct raid6_calls {
- void (*gen_syndrome)(int, size_t, void **);
- void (*xor_syndrome)(int, int, int, size_t, void **);
- int (*valid)(void); /* Returns 1 if this routine set is usable */
- const char *name; /* Name of this routine set */
- int priority; /* Relative priority ranking if non-zero */
-};
-
-/* Various routine sets */
-extern const struct raid6_calls raid6_intx1;
-extern const struct raid6_calls raid6_intx2;
-extern const struct raid6_calls raid6_intx4;
-extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_mmxx1;
-extern const struct raid6_calls raid6_mmxx2;
-extern const struct raid6_calls raid6_sse1x1;
-extern const struct raid6_calls raid6_sse1x2;
-extern const struct raid6_calls raid6_sse2x1;
-extern const struct raid6_calls raid6_sse2x2;
-extern const struct raid6_calls raid6_sse2x4;
-extern const struct raid6_calls raid6_altivec1;
-extern const struct raid6_calls raid6_altivec2;
-extern const struct raid6_calls raid6_altivec4;
-extern const struct raid6_calls raid6_altivec8;
-extern const struct raid6_calls raid6_avx2x1;
-extern const struct raid6_calls raid6_avx2x2;
-extern const struct raid6_calls raid6_avx2x4;
-extern const struct raid6_calls raid6_avx512x1;
-extern const struct raid6_calls raid6_avx512x2;
-extern const struct raid6_calls raid6_avx512x4;
-extern const struct raid6_calls raid6_s390vx8;
-extern const struct raid6_calls raid6_vpermxor1;
-extern const struct raid6_calls raid6_vpermxor2;
-extern const struct raid6_calls raid6_vpermxor4;
-extern const struct raid6_calls raid6_vpermxor8;
-extern const struct raid6_calls raid6_lsx;
-extern const struct raid6_calls raid6_lasx;
-extern const struct raid6_calls raid6_rvvx1;
-extern const struct raid6_calls raid6_rvvx2;
-extern const struct raid6_calls raid6_rvvx4;
-extern const struct raid6_calls raid6_rvvx8;
-
-struct raid6_recov_calls {
- void (*data2)(int, size_t, int, int, void **);
- void (*datap)(int, size_t, int, void **);
- int (*valid)(void);
- const char *name;
- int priority;
-};
-
-extern const struct raid6_recov_calls raid6_recov_intx1;
-extern const struct raid6_recov_calls raid6_recov_ssse3;
-extern const struct raid6_recov_calls raid6_recov_avx2;
-extern const struct raid6_recov_calls raid6_recov_avx512;
-extern const struct raid6_recov_calls raid6_recov_s390xc;
-extern const struct raid6_recov_calls raid6_recov_neon;
-extern const struct raid6_recov_calls raid6_recov_lsx;
-extern const struct raid6_recov_calls raid6_recov_lasx;
-extern const struct raid6_recov_calls raid6_recov_rvv;
-
-extern const struct raid6_calls raid6_neonx1;
-extern const struct raid6_calls raid6_neonx2;
-extern const struct raid6_calls raid6_neonx4;
-extern const struct raid6_calls raid6_neonx8;
-
-/* Algorithm list */
-extern const struct raid6_calls * const raid6_algos[];
-extern const struct raid6_recov_calls *const raid6_recov_algos[];
-
-/* Galois field tables */
-extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
-extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
-extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
-extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
-extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
-extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
-
-#endif /* LINUX_RAID_RAID6_H */
+#endif /* LINUX_RAID_PQ_H */
diff --git a/include/linux/raid/pq_tables.h b/include/linux/raid/pq_tables.h
new file mode 100644
index 000000000000..7b1ebe675677
--- /dev/null
+++ b/include/linux/raid/pq_tables.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2003 H. Peter Anvin - All Rights Reserved
+ *
+ * Galois field tables for the Linux RAID6 P/Q parity algorithm.
+ */
+#ifndef _LINUX_RAID_PQ_TABLES_H
+#define _LINUX_RAID_PQ_TABLES_H
+
+#include <linux/types.h>
+
+extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
+extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
+extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
+extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
+
+#endif /* _LINUX_RAID_PQ_TABLES_H */
diff --git a/lib/raid/raid6/Makefile b/lib/raid/raid6/Makefile
index f155eba06fe3..fc46c3e61c82 100644
--- a/lib/raid/raid6/Makefile
+++ b/lib/raid/raid6/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I $(src)
+
obj-$(CONFIG_RAID6_PQ) += raid6_pq.o tests/
raid6_pq-y += algos.o tables.o
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index 4958c58e3f69..406efdcc03b8 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -11,10 +11,11 @@
* Algorithm list and algorithm selection for RAID-6
*/
-#include <linux/raid/pq.h>
#include <linux/module.h>
#include <linux/gfp.h>
+#include <linux/raid/pq.h>
#include <kunit/visibility.h>
+#include "algos.h"
static const struct raid6_recov_calls *raid6_recov_algo;
diff --git a/lib/raid/raid6/algos.h b/lib/raid/raid6/algos.h
new file mode 100644
index 000000000000..e5f1098d2179
--- /dev/null
+++ b/lib/raid/raid6/algos.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2003 H. Peter Anvin - All Rights Reserved
+ */
+#ifndef _PQ_IMPL_H
+#define _PQ_IMPL_H
+
+#include <linux/raid/pq_tables.h>
+
+/* Routine choices */
+struct raid6_calls {
+ const char *name;
+ void (*gen_syndrome)(int disks, size_t bytes, void **ptrs);
+ void (*xor_syndrome)(int disks, int start, int stop, size_t bytes,
+ void **ptrs);
+ int (*valid)(void); /* Returns 1 if this routine set is usable */
+ int priority; /* Relative priority ranking if non-zero */
+};
+
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+extern const struct raid6_calls raid6_avx2x1;
+extern const struct raid6_calls raid6_avx2x2;
+extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_avx512x1;
+extern const struct raid6_calls raid6_avx512x2;
+extern const struct raid6_calls raid6_avx512x4;
+extern const struct raid6_calls raid6_s390vx8;
+extern const struct raid6_calls raid6_vpermxor1;
+extern const struct raid6_calls raid6_vpermxor2;
+extern const struct raid6_calls raid6_vpermxor4;
+extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
+extern const struct raid6_calls raid6_rvvx1;
+extern const struct raid6_calls raid6_rvvx2;
+extern const struct raid6_calls raid6_rvvx4;
+extern const struct raid6_calls raid6_rvvx8;
+
+struct raid6_recov_calls {
+ const char *name;
+ void (*data2)(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+ void (*datap)(int disks, size_t bytes, int faila, void **ptrs);
+ int (*valid)(void);
+ int priority;
+};
+
+extern const struct raid6_recov_calls raid6_recov_intx1;
+extern const struct raid6_recov_calls raid6_recov_ssse3;
+extern const struct raid6_recov_calls raid6_recov_avx2;
+extern const struct raid6_recov_calls raid6_recov_avx512;
+extern const struct raid6_recov_calls raid6_recov_s390xc;
+extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
+extern const struct raid6_recov_calls raid6_recov_rvv;
+
+extern const struct raid6_calls raid6_neonx1;
+extern const struct raid6_calls raid6_neonx2;
+extern const struct raid6_calls raid6_neonx4;
+extern const struct raid6_calls raid6_neonx8;
+
+/* Algorithm list */
+extern const struct raid6_calls * const raid6_algos[];
+extern const struct raid6_recov_calls *const raid6_recov_algos[];
+
+#endif /* _PQ_IMPL_H */
diff --git a/lib/raid/raid6/arm/neon.c b/lib/raid/raid6/arm/neon.c
index c21da59ab48f..bd4ec4c86ee8 100644
--- a/lib/raid/raid6/arm/neon.c
+++ b/lib/raid/raid6/arm/neon.c
@@ -5,8 +5,8 @@
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
-#include <linux/raid/pq.h>
#include <asm/simd.h>
+#include "algos.h"
/*
* There are 2 reasons these wrappers are kept in a separate compilation unit
diff --git a/lib/raid/raid6/arm/recov_neon.c b/lib/raid/raid6/arm/recov_neon.c
index 21c26da6290a..2a2afbd9ead5 100644
--- a/lib/raid/raid6/arm/recov_neon.c
+++ b/lib/raid/raid6/arm/recov_neon.c
@@ -4,8 +4,10 @@
* Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
#include <asm/simd.h>
+#include "algos.h"
#include "neon.h"
static int raid6_has_neon(void)
diff --git a/lib/raid/raid6/int.uc b/lib/raid/raid6/int.uc
index 4f5f2869e21e..e63bd5a9c2ed 100644
--- a/lib/raid/raid6/int.uc
+++ b/lib/raid/raid6/int.uc
@@ -18,7 +18,7 @@
* This file is postprocessed using unroll.awk
*/
-#include <linux/raid/pq.h>
+#include "algos.h"
/*
* This is the C data type to use
diff --git a/lib/raid/raid6/loongarch/loongarch_simd.c b/lib/raid/raid6/loongarch/loongarch_simd.c
index 1b4cd1512d05..f77d11ce676e 100644
--- a/lib/raid/raid6/loongarch/loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/loongarch_simd.c
@@ -9,9 +9,9 @@
* Copyright 2002-2004 H. Peter Anvin
*/
-#include <linux/raid/pq.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include "algos.h"
/*
* The vector algorithms are currently priority 0, which means the generic
diff --git a/lib/raid/raid6/loongarch/recov_loongarch_simd.c b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
index 7d4d349322b3..0bbdc8b5c2e7 100644
--- a/lib/raid/raid6/loongarch/recov_loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
@@ -10,9 +10,11 @@
* Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include "algos.h"
/*
* Unlike with the syndrome calculation algorithms, there's no boot-time
diff --git a/lib/raid/raid6/mktables.c b/lib/raid/raid6/mktables.c
index 3de1dbf6846c..97a17493bbd8 100644
--- a/lib/raid/raid6/mktables.c
+++ b/lib/raid/raid6/mktables.c
@@ -57,7 +57,7 @@ int main(int argc, char *argv[])
uint8_t exptbl[256], invtbl[256];
printf("#include <linux/export.h>\n");
- printf("#include <linux/raid/pq.h>\n");
+ printf("#include \"algos.h\"\n");
/* Compute multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
diff --git a/lib/raid/raid6/powerpc/altivec.uc b/lib/raid/raid6/powerpc/altivec.uc
index 084ead768ddb..eb4a448cc88e 100644
--- a/lib/raid/raid6/powerpc/altivec.uc
+++ b/lib/raid/raid6/powerpc/altivec.uc
@@ -22,7 +22,7 @@
* bracked this with preempt_disable/enable or in a lock)
*/
-#include <linux/raid/pq.h>
+#include "algos.h"
#include <altivec.h>
#include <asm/cputable.h>
diff --git a/lib/raid/raid6/powerpc/vpermxor.uc b/lib/raid/raid6/powerpc/vpermxor.uc
index bb2c3a316ae8..ec61f30bec11 100644
--- a/lib/raid/raid6/powerpc/vpermxor.uc
+++ b/lib/raid/raid6/powerpc/vpermxor.uc
@@ -20,11 +20,11 @@
* This instruction was introduced in POWER8 - ISA v2.07.
*/
-#include <linux/raid/pq.h>
#include <altivec.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/switch_to.h>
+#include "algos.h"
typedef vector unsigned char unative_t;
#define NSIZE sizeof(unative_t)
diff --git a/lib/raid/raid6/recov.c b/lib/raid/raid6/recov.c
index cc7e4dc1eaa6..735ab4013771 100644
--- a/lib/raid/raid6/recov.c
+++ b/lib/raid/raid6/recov.c
@@ -13,7 +13,9 @@
* the syndrome.)
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
+#include "algos.h"
/* Recover two failed data blocks. */
static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
diff --git a/lib/raid/raid6/riscv/recov_rvv.c b/lib/raid/raid6/riscv/recov_rvv.c
index 3ff39826e33f..02120d245e22 100644
--- a/lib/raid/raid6/riscv/recov_rvv.c
+++ b/lib/raid/raid6/riscv/recov_rvv.c
@@ -4,7 +4,9 @@
* Author: Chunyan Zhang <zhangchunyan@iscas.ac.cn>
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
+#include "algos.h"
#include "rvv.h"
static void __raid6_2data_recov_rvv(int bytes, u8 *p, u8 *q, u8 *dp,
diff --git a/lib/raid/raid6/riscv/rvv.h b/lib/raid/raid6/riscv/rvv.h
index 0d430a4c5f08..c293130d798b 100644
--- a/lib/raid/raid6/riscv/rvv.h
+++ b/lib/raid/raid6/riscv/rvv.h
@@ -7,8 +7,8 @@
* Definitions for RISC-V RAID-6 code
*/
-#include <linux/raid/pq.h>
#include <asm/vector.h>
+#include "algos.h"
static int rvv_has_vector(void)
{
diff --git a/lib/raid/raid6/s390/recov_s390xc.c b/lib/raid/raid6/s390/recov_s390xc.c
index 2bc4c85174de..e7b3409f21e2 100644
--- a/lib/raid/raid6/s390/recov_s390xc.c
+++ b/lib/raid/raid6/s390/recov_s390xc.c
@@ -6,7 +6,9 @@
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
+#include "algos.h"
static inline void xor_block(u8 *p1, u8 *p2)
{
diff --git a/lib/raid/raid6/s390/s390vx.uc b/lib/raid/raid6/s390/s390vx.uc
index 97c5d5d9dcf9..aba3515eacac 100644
--- a/lib/raid/raid6/s390/s390vx.uc
+++ b/lib/raid/raid6/s390/s390vx.uc
@@ -12,8 +12,8 @@
*/
#include <linux/cpufeature.h>
-#include <linux/raid/pq.h>
#include <asm/fpu.h>
+#include "algos.h"
#define NSIZE 16
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index 97e036b19049..1a6168096456 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -7,7 +7,7 @@
#include <kunit/test.h>
#include <linux/prandom.h>
-#include <linux/raid/pq.h>
+#include "../algos.h"
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/lib/raid/raid6/x86/avx2.c b/lib/raid/raid6/x86/avx2.c
index aab8b624c635..0bf831799082 100644
--- a/lib/raid/raid6/x86/avx2.c
+++ b/lib/raid/raid6/x86/avx2.c
@@ -13,8 +13,9 @@
*
*/
-#include <linux/raid/pq.h>
+#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
+#include "algos.h"
static const struct raid6_avx2_constants {
u64 x1d[4];
diff --git a/lib/raid/raid6/x86/avx512.c b/lib/raid/raid6/x86/avx512.c
index 47636b16632f..98ed42fb0a46 100644
--- a/lib/raid/raid6/x86/avx512.c
+++ b/lib/raid/raid6/x86/avx512.c
@@ -17,8 +17,9 @@
*
*/
-#include <linux/raid/pq.h>
+#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
+#include "algos.h"
static const struct raid6_avx512_constants {
u64 x1d[8];
diff --git a/lib/raid/raid6/x86/mmx.c b/lib/raid/raid6/x86/mmx.c
index 22b9fdaa705f..052d9f010bfe 100644
--- a/lib/raid/raid6/x86/mmx.c
+++ b/lib/raid/raid6/x86/mmx.c
@@ -11,8 +11,9 @@
* MMX implementation of RAID-6 syndrome functions
*/
-#include <linux/raid/pq.h>
+#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
+#include "algos.h"
/* Shared with raid6/sse1.c */
const struct raid6_mmx_constants {
diff --git a/lib/raid/raid6/x86/recov_avx2.c b/lib/raid/raid6/x86/recov_avx2.c
index bef82a38d8eb..06c6e05763bc 100644
--- a/lib/raid/raid6/x86/recov_avx2.c
+++ b/lib/raid/raid6/x86/recov_avx2.c
@@ -4,8 +4,10 @@
* Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
+#include "algos.h"
static int raid6_has_avx2(void)
{
diff --git a/lib/raid/raid6/x86/recov_avx512.c b/lib/raid/raid6/x86/recov_avx512.c
index 06c70e771eaa..850bb962b514 100644
--- a/lib/raid/raid6/x86/recov_avx512.c
+++ b/lib/raid/raid6/x86/recov_avx512.c
@@ -6,8 +6,10 @@
* Author: Megha Dey <megha.dey@linux.intel.com>
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
+#include "algos.h"
static int raid6_has_avx512(void)
{
diff --git a/lib/raid/raid6/x86/recov_ssse3.c b/lib/raid/raid6/x86/recov_ssse3.c
index 5ca7d56f23d8..95589c33003a 100644
--- a/lib/raid/raid6/x86/recov_ssse3.c
+++ b/lib/raid/raid6/x86/recov_ssse3.c
@@ -3,8 +3,10 @@
* Copyright (C) 2012 Intel Corporation
*/
+#include <linux/mm.h>
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
+#include "algos.h"
static int raid6_has_ssse3(void)
{
diff --git a/lib/raid/raid6/x86/sse1.c b/lib/raid/raid6/x86/sse1.c
index fad214a430d8..7004255a0bb1 100644
--- a/lib/raid/raid6/x86/sse1.c
+++ b/lib/raid/raid6/x86/sse1.c
@@ -16,8 +16,9 @@
* worthwhile as a separate implementation.
*/
-#include <linux/raid/pq.h>
+#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
+#include "algos.h"
/* Defined in raid6/mmx.c */
extern const struct raid6_mmx_constants {
diff --git a/lib/raid/raid6/x86/sse2.c b/lib/raid/raid6/x86/sse2.c
index 1b28e858a1d4..f30be4ee14d0 100644
--- a/lib/raid/raid6/x86/sse2.c
+++ b/lib/raid/raid6/x86/sse2.c
@@ -12,8 +12,9 @@
*
*/
-#include <linux/raid/pq.h>
+#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
+#include "algos.h"
static const struct raid6_sse_constants {
u64 x1d[2];
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 09/17] raid6: rework the init helpers
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (7 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 08/17] raid6: hide internals Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 10/17] raid6: use static_call for gen_syndrom and xor_syndrom Christoph Hellwig
` (7 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Match the xor version with two initcalls for the built-in case to delay
calibration. This prepares for adding non-calibration init code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/Kconfig | 11 +
lib/raid/raid6/Makefile | 4 +
lib/raid/raid6/algos.c | 309 ++++++++----------
lib/raid/raid6/algos.h | 67 +---
lib/raid/raid6/arm/neon.c | 6 -
lib/raid/raid6/arm/pq_arch.h | 22 ++
lib/raid/raid6/arm/recov_neon.c | 7 -
lib/raid/raid6/loongarch/loongarch_simd.c | 12 -
lib/raid/raid6/loongarch/pq_arch.h | 23 ++
.../raid6/loongarch/recov_loongarch_simd.c | 14 -
lib/raid/raid6/powerpc/altivec.uc | 10 -
lib/raid/raid6/powerpc/pq_arch.h | 31 ++
lib/raid/raid6/powerpc/vpermxor.uc | 11 -
lib/raid/raid6/recov.c | 2 -
lib/raid/raid6/riscv/pq_arch.h | 21 ++
lib/raid/raid6/riscv/recov_rvv.c | 2 -
lib/raid/raid6/riscv/rvv.h | 6 -
lib/raid/raid6/s390/pq_arch.h | 15 +
lib/raid/raid6/s390/recov_s390xc.c | 2 -
lib/raid/raid6/s390/s390vx.uc | 7 -
lib/raid/raid6/tests/raid6_kunit.c | 23 +-
lib/raid/raid6/x86/avx2.c | 14 -
lib/raid/raid6/x86/avx512.c | 19 --
lib/raid/raid6/x86/mmx.c | 8 -
lib/raid/raid6/x86/pq_arch.h | 96 ++++++
lib/raid/raid6/x86/recov_avx2.c | 8 -
lib/raid/raid6/x86/recov_avx512.c | 12 -
lib/raid/raid6/x86/recov_ssse3.c | 9 -
lib/raid/raid6/x86/sse1.c | 12 -
lib/raid/raid6/x86/sse2.c | 15 -
30 files changed, 389 insertions(+), 409 deletions(-)
create mode 100644 lib/raid/raid6/arm/pq_arch.h
create mode 100644 lib/raid/raid6/loongarch/pq_arch.h
create mode 100644 lib/raid/raid6/powerpc/pq_arch.h
create mode 100644 lib/raid/raid6/riscv/pq_arch.h
create mode 100644 lib/raid/raid6/s390/pq_arch.h
create mode 100644 lib/raid/raid6/x86/pq_arch.h
diff --git a/lib/raid/Kconfig b/lib/raid/Kconfig
index 9bea599d66da..6795cfefb43b 100644
--- a/lib/raid/Kconfig
+++ b/lib/raid/Kconfig
@@ -32,6 +32,17 @@ config XOR_KUNIT_TEST
config RAID6_PQ
tristate
+# selected by architectures that provide an optimized PQ implementation
+config RAID6_PQ_ARCH
+ depends on RAID6_PQ
+ default y if CONFIG_KERNEL_MODE_NEON # arm32/arm64
+ default y if LOONGARCH
+ default y if ALTIVEC # powerpc
+ default y if RISCV_ISA_V
+ default y if S390
+ default y if X86
+ bool
+
config RAID6_PQ_KUNIT_TEST
tristate "KUnit tests for raid6 PQ functions" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/raid/raid6/Makefile b/lib/raid/raid6/Makefile
index fc46c3e61c82..db2844a314b0 100644
--- a/lib/raid/raid6/Makefile
+++ b/lib/raid/raid6/Makefile
@@ -2,6 +2,10 @@
ccflags-y += -I $(src)
+ifeq ($(CONFIG_RAID6_PQ_ARCH),y)
+CFLAGS_algos.o += -I$(src)/$(SRCARCH)
+endif
+
obj-$(CONFIG_RAID6_PQ) += raid6_pq.o tests/
raid6_pq-y += algos.o tables.o
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index 406efdcc03b8..f9e8a8752e2d 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -17,6 +17,9 @@
#include <kunit/visibility.h>
#include "algos.h"
+#define RAID6_MAX_ALGOS 16
+static const struct raid6_calls *raid6_algos[RAID6_MAX_ALGOS];
+static unsigned int raid6_nr_algos;
static const struct raid6_recov_calls *raid6_recov_algo;
/* Selected algorithm */
@@ -95,71 +98,6 @@ bool raid6_can_xor_syndrome(void)
}
EXPORT_SYMBOL_GPL(raid6_can_xor_syndrome);
-const struct raid6_calls * const raid6_algos[] = {
-#if defined(__i386__) && !defined(__arch_um__)
- &raid6_avx512x2,
- &raid6_avx512x1,
- &raid6_avx2x2,
- &raid6_avx2x1,
- &raid6_sse2x2,
- &raid6_sse2x1,
- &raid6_sse1x2,
- &raid6_sse1x1,
- &raid6_mmxx2,
- &raid6_mmxx1,
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
- &raid6_avx512x4,
- &raid6_avx512x2,
- &raid6_avx512x1,
- &raid6_avx2x4,
- &raid6_avx2x2,
- &raid6_avx2x1,
- &raid6_sse2x4,
- &raid6_sse2x2,
- &raid6_sse2x1,
-#endif
-#ifdef CONFIG_ALTIVEC
- &raid6_vpermxor8,
- &raid6_vpermxor4,
- &raid6_vpermxor2,
- &raid6_vpermxor1,
- &raid6_altivec8,
- &raid6_altivec4,
- &raid6_altivec2,
- &raid6_altivec1,
-#endif
-#if defined(CONFIG_S390)
- &raid6_s390vx8,
-#endif
-#ifdef CONFIG_KERNEL_MODE_NEON
- &raid6_neonx8,
- &raid6_neonx4,
- &raid6_neonx2,
- &raid6_neonx1,
-#endif
-#ifdef CONFIG_LOONGARCH
-#ifdef CONFIG_CPU_HAS_LASX
- &raid6_lasx,
-#endif
-#ifdef CONFIG_CPU_HAS_LSX
- &raid6_lsx,
-#endif
-#endif
-#ifdef CONFIG_RISCV_ISA_V
- &raid6_rvvx1,
- &raid6_rvvx2,
- &raid6_rvvx4,
- &raid6_rvvx8,
-#endif
- &raid6_intx8,
- &raid6_intx4,
- &raid6_intx2,
- &raid6_intx1,
- NULL
-};
-EXPORT_SYMBOL_IF_KUNIT(raid6_algos);
-
/**
* raid6_recov_2data - recover two missing data disks
* @disks: number of "disks" to operate on including parity
@@ -213,119 +151,57 @@ void raid6_recov_datap(int disks, size_t bytes, int faila, void **ptrs)
}
EXPORT_SYMBOL_GPL(raid6_recov_datap);
-const struct raid6_recov_calls *const raid6_recov_algos[] = {
-#ifdef CONFIG_X86
- &raid6_recov_avx512,
- &raid6_recov_avx2,
- &raid6_recov_ssse3,
-#endif
-#ifdef CONFIG_S390
- &raid6_recov_s390xc,
-#endif
-#if defined(CONFIG_KERNEL_MODE_NEON)
- &raid6_recov_neon,
-#endif
-#ifdef CONFIG_LOONGARCH
-#ifdef CONFIG_CPU_HAS_LASX
- &raid6_recov_lasx,
-#endif
-#ifdef CONFIG_CPU_HAS_LSX
- &raid6_recov_lsx,
-#endif
-#endif
-#ifdef CONFIG_RISCV_ISA_V
- &raid6_recov_rvv,
-#endif
- &raid6_recov_intx1,
- NULL
-};
-EXPORT_SYMBOL_IF_KUNIT(raid6_recov_algos);
-
#define RAID6_TIME_JIFFIES_LG2 4
#define RAID6_TEST_DISKS 8
#define RAID6_TEST_DISKS_ORDER 3
-static inline const struct raid6_recov_calls *raid6_choose_recov(void)
+static int raid6_choose_gen(void *(*const dptrs)[RAID6_TEST_DISKS],
+ const int disks)
{
- const struct raid6_recov_calls *const *algo;
- const struct raid6_recov_calls *best;
-
- for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
- if (!best || (*algo)->priority > best->priority)
- if (!(*algo)->valid || (*algo)->valid())
- best = *algo;
+ /* work on the second half of the disks */
+ int start = (disks >> 1) - 1, stop = disks - 3;
+ const struct raid6_calls *best = NULL;
+ unsigned long bestgenperf = 0;
+ unsigned int i;
- if (best) {
- raid6_recov_algo = best;
+ for (i = 0; i < raid6_nr_algos; i++) {
+ const struct raid6_calls *algo = raid6_algos[i];
+ unsigned long perf = 0, j0, j1;
- pr_info("raid6: using %s recovery algorithm\n", best->name);
- } else
- pr_err("raid6: Yikes! No recovery algorithm found!\n");
-
- return best;
-}
+ preempt_disable();
+ j0 = jiffies;
+ while ((j1 = jiffies) == j0)
+ cpu_relax();
+ while (time_before(jiffies,
+ j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
+ algo->gen_syndrome(disks, PAGE_SIZE, *dptrs);
+ perf++;
+ }
+ preempt_enable();
-static inline const struct raid6_calls *raid6_choose_gen(
- void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
-{
- unsigned long perf, bestgenperf, j0, j1;
- int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
- const struct raid6_calls *const *algo;
- const struct raid6_calls *best;
-
- for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
- if (!best || (*algo)->priority >= best->priority) {
- if ((*algo)->valid && !(*algo)->valid())
- continue;
-
- if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
- best = *algo;
- break;
- }
-
- perf = 0;
-
- preempt_disable();
- j0 = jiffies;
- while ((j1 = jiffies) == j0)
- cpu_relax();
- while (time_before(jiffies,
- j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
- (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
- perf++;
- }
- preempt_enable();
-
- if (perf > bestgenperf) {
- bestgenperf = perf;
- best = *algo;
- }
- pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
- (perf * HZ * (disks-2)) >>
- (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+ if (perf > bestgenperf) {
+ bestgenperf = perf;
+ best = algo;
}
+ pr_info("raid6: %-8s gen() %5ld MB/s\n", algo->name,
+ (perf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
}
if (!best) {
pr_err("raid6: Yikes! No algorithm found!\n");
- goto out;
+ return -EINVAL;
}
raid6_call = *best;
- if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
- pr_info("raid6: skipped pq benchmark and selected %s\n",
- best->name);
- goto out;
- }
-
pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
best->name,
(bestgenperf * HZ * (disks - 2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
if (best->xor_syndrome) {
- perf = 0;
+ unsigned long perf = 0, j0, j1;
preempt_disable();
j0 = jiffies;
@@ -344,8 +220,7 @@ static inline const struct raid6_calls *raid6_choose_gen(
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
}
-out:
- return best;
+ return 0;
}
@@ -355,12 +230,17 @@ static inline const struct raid6_calls *raid6_choose_gen(
static int __init raid6_select_algo(void)
{
const int disks = RAID6_TEST_DISKS;
-
- const struct raid6_calls *gen_best;
- const struct raid6_recov_calls *rec_best;
char *disk_ptr, *p;
void *dptrs[RAID6_TEST_DISKS];
int i, cycle;
+ int error;
+
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK) || raid6_nr_algos == 1) {
+ pr_info("raid6: skipped pq benchmark and selected %s\n",
+ raid6_algos[0]->name);
+ raid6_call = *raid6_algos[0];
+ return 0;
+ }
/* prepare the buffer and fill it circularly with gfmul table */
disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
@@ -383,22 +263,115 @@ static int __init raid6_select_algo(void)
memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
/* select raid gen_syndrome function */
- gen_best = raid6_choose_gen(&dptrs, disks);
-
- /* select raid recover functions */
- rec_best = raid6_choose_recov();
+ error = raid6_choose_gen(&dptrs, disks);
free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);
- return gen_best && rec_best ? 0 : -EINVAL;
+ return error;
+}
+
+void __init raid6_algo_add(const struct raid6_calls *algo)
+{
+ if (WARN_ON_ONCE(raid6_nr_algos == RAID6_MAX_ALGOS))
+ return;
+ raid6_algos[raid6_nr_algos++] = algo;
+}
+
+void __init raid6_algo_add_default(void)
+{
+ raid6_algo_add(&raid6_intx1);
+ raid6_algo_add(&raid6_intx2);
+ raid6_algo_add(&raid6_intx4);
+ raid6_algo_add(&raid6_intx8);
+}
+
+void __init raid6_recov_algo_add(const struct raid6_recov_calls *algo)
+{
+ if (WARN_ON_ONCE(raid6_recov_algo))
+ return;
+ raid6_recov_algo = algo;
+}
+
+#ifdef CONFIG_RAID6_PQ_ARCH
+#include "pq_arch.h"
+#else
+static inline void arch_raid6_init(void)
+{
+ raid6_algo_add_default();
+}
+#endif /* CONFIG_RAID6_PQ_ARCH */
+
+static int __init raid6_init(void)
+{
+ /*
+ * Architectures providing arch_raid6_init must add all PQ generation
+ * algorithms they want to consider in arch_raid6_init(), including
+ * the generic ones using raid6_algo_add_default() if wanted.
+ */
+ arch_raid6_init();
+
+ /*
+ * Architectures don't have to set a recovery algorithm, we'll just pick
+ * the generic integer one if none was set.
+ */
+ if (!raid6_recov_algo)
+ raid6_recov_algo = &raid6_recov_intx1;
+ pr_info("raid6: using %s recovery algorithm\n", raid6_recov_algo->name);
+
+#ifdef MODULE
+ return raid6_select_algo();
+#else
+ return 0;
+#endif
}
-static void raid6_exit(void)
+static void __exit raid6_exit(void)
{
- do { } while (0);
}
-subsys_initcall(raid6_select_algo);
+/*
+ * When built-in we must register the default template before md, but we don't
+ * want calibration to run that early as that would delay the boot process.
+ */
+#ifndef MODULE
+__initcall(raid6_select_algo);
+#endif
+core_initcall(raid6_init);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
+
+#if IS_ENABLED(CONFIG_RAID6_PQ_KUNIT_TEST)
+const struct raid6_calls *raid6_algo_find(unsigned int idx)
+{
+ if (idx >= raid6_nr_algos) {
+ /*
+ * Always include the simplest generic integer implementation in
+ * the unit tests as a baseline.
+ */
+ if (idx == raid6_nr_algos &&
+ raid6_algos[0] != &raid6_intx1)
+ return &raid6_intx1;
+ return NULL;
+ }
+ return raid6_algos[idx];
+}
+EXPORT_SYMBOL_IF_KUNIT(raid6_algo_find);
+
+const struct raid6_recov_calls *raid6_recov_algo_find(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ /* always test the generic integer implementation */
+ return &raid6_recov_intx1;
+ case 1:
+ /* test the optimized implementation if there is one */
+ if (raid6_recov_algo != &raid6_recov_intx1)
+ return raid6_recov_algo;
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(raid6_recov_algo_find);
+#endif /* CONFIG_RAID6_PQ_KUNIT_TEST */
diff --git a/lib/raid/raid6/algos.h b/lib/raid/raid6/algos.h
index e5f1098d2179..43f636be183f 100644
--- a/lib/raid/raid6/algos.h
+++ b/lib/raid/raid6/algos.h
@@ -5,6 +5,7 @@
#ifndef _PQ_IMPL_H
#define _PQ_IMPL_H
+#include <linux/init.h>
#include <linux/raid/pq_tables.h>
/* Routine choices */
@@ -13,70 +14,28 @@ struct raid6_calls {
void (*gen_syndrome)(int disks, size_t bytes, void **ptrs);
void (*xor_syndrome)(int disks, int start, int stop, size_t bytes,
void **ptrs);
- int (*valid)(void); /* Returns 1 if this routine set is usable */
- int priority; /* Relative priority ranking if non-zero */
};
-/* Various routine sets */
-extern const struct raid6_calls raid6_intx1;
-extern const struct raid6_calls raid6_intx2;
-extern const struct raid6_calls raid6_intx4;
-extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_mmxx1;
-extern const struct raid6_calls raid6_mmxx2;
-extern const struct raid6_calls raid6_sse1x1;
-extern const struct raid6_calls raid6_sse1x2;
-extern const struct raid6_calls raid6_sse2x1;
-extern const struct raid6_calls raid6_sse2x2;
-extern const struct raid6_calls raid6_sse2x4;
-extern const struct raid6_calls raid6_altivec1;
-extern const struct raid6_calls raid6_altivec2;
-extern const struct raid6_calls raid6_altivec4;
-extern const struct raid6_calls raid6_altivec8;
-extern const struct raid6_calls raid6_avx2x1;
-extern const struct raid6_calls raid6_avx2x2;
-extern const struct raid6_calls raid6_avx2x4;
-extern const struct raid6_calls raid6_avx512x1;
-extern const struct raid6_calls raid6_avx512x2;
-extern const struct raid6_calls raid6_avx512x4;
-extern const struct raid6_calls raid6_s390vx8;
-extern const struct raid6_calls raid6_vpermxor1;
-extern const struct raid6_calls raid6_vpermxor2;
-extern const struct raid6_calls raid6_vpermxor4;
-extern const struct raid6_calls raid6_vpermxor8;
-extern const struct raid6_calls raid6_lsx;
-extern const struct raid6_calls raid6_lasx;
-extern const struct raid6_calls raid6_rvvx1;
-extern const struct raid6_calls raid6_rvvx2;
-extern const struct raid6_calls raid6_rvvx4;
-extern const struct raid6_calls raid6_rvvx8;
-
struct raid6_recov_calls {
const char *name;
void (*data2)(int disks, size_t bytes, int faila, int failb,
void **ptrs);
void (*datap)(int disks, size_t bytes, int faila, void **ptrs);
- int (*valid)(void);
- int priority;
};
-extern const struct raid6_recov_calls raid6_recov_intx1;
-extern const struct raid6_recov_calls raid6_recov_ssse3;
-extern const struct raid6_recov_calls raid6_recov_avx2;
-extern const struct raid6_recov_calls raid6_recov_avx512;
-extern const struct raid6_recov_calls raid6_recov_s390xc;
-extern const struct raid6_recov_calls raid6_recov_neon;
-extern const struct raid6_recov_calls raid6_recov_lsx;
-extern const struct raid6_recov_calls raid6_recov_lasx;
-extern const struct raid6_recov_calls raid6_recov_rvv;
+void __init raid6_algo_add(const struct raid6_calls *algo);
+void __init raid6_algo_add_default(void);
+void __init raid6_recov_algo_add(const struct raid6_recov_calls *algo);
-extern const struct raid6_calls raid6_neonx1;
-extern const struct raid6_calls raid6_neonx2;
-extern const struct raid6_calls raid6_neonx4;
-extern const struct raid6_calls raid6_neonx8;
+/* for the kunit test */
+const struct raid6_calls *raid6_algo_find(unsigned int idx);
+const struct raid6_recov_calls *raid6_recov_algo_find(unsigned int idx);
-/* Algorithm list */
-extern const struct raid6_calls * const raid6_algos[];
-extern const struct raid6_recov_calls *const raid6_recov_algos[];
+/* generic implementations */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_recov_calls raid6_recov_intx1;
#endif /* _PQ_IMPL_H */
diff --git a/lib/raid/raid6/arm/neon.c b/lib/raid/raid6/arm/neon.c
index bd4ec4c86ee8..341c61af675e 100644
--- a/lib/raid/raid6/arm/neon.c
+++ b/lib/raid/raid6/arm/neon.c
@@ -42,15 +42,9 @@
struct raid6_calls const raid6_neonx ## _n = { \
.gen_syndrome = raid6_neon ## _n ## _gen_syndrome, \
.xor_syndrome = raid6_neon ## _n ## _xor_syndrome, \
- .valid = raid6_have_neon, \
.name = "neonx" #_n, \
}
-static int raid6_have_neon(void)
-{
- return cpu_has_neon();
-}
-
RAID6_NEON_WRAPPER(1);
RAID6_NEON_WRAPPER(2);
RAID6_NEON_WRAPPER(4);
diff --git a/lib/raid/raid6/arm/pq_arch.h b/lib/raid/raid6/arm/pq_arch.h
new file mode 100644
index 000000000000..f2bbef00cd80
--- /dev/null
+++ b/lib/raid/raid6/arm/pq_arch.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+extern const struct raid6_calls raid6_neonx1;
+extern const struct raid6_calls raid6_neonx2;
+extern const struct raid6_calls raid6_neonx4;
+extern const struct raid6_calls raid6_neonx8;
+extern const struct raid6_recov_calls raid6_recov_neon;
+
+static __always_inline void __init arch_raid6_init(void)
+{
+ if (cpu_has_neon()) {
+ raid6_algo_add(&raid6_neonx1);
+ raid6_algo_add(&raid6_neonx2);
+ raid6_algo_add(&raid6_neonx4);
+ raid6_algo_add(&raid6_neonx8);
+ }
+
+ raid6_algo_add_default();
+
+ if (cpu_has_neon())
+ raid6_recov_algo_add(&raid6_recov_neon);
+}
diff --git a/lib/raid/raid6/arm/recov_neon.c b/lib/raid/raid6/arm/recov_neon.c
index 2a2afbd9ead5..f7f7e83ea7ea 100644
--- a/lib/raid/raid6/arm/recov_neon.c
+++ b/lib/raid/raid6/arm/recov_neon.c
@@ -10,11 +10,6 @@
#include "algos.h"
#include "neon.h"
-static int raid6_has_neon(void)
-{
- return cpu_has_neon();
-}
-
static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
@@ -87,7 +82,5 @@ static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_neon = {
.data2 = raid6_2data_recov_neon,
.datap = raid6_datap_recov_neon,
- .valid = raid6_has_neon,
.name = "neon",
- .priority = 10,
};
diff --git a/lib/raid/raid6/loongarch/loongarch_simd.c b/lib/raid/raid6/loongarch/loongarch_simd.c
index f77d11ce676e..c1eb53fafd27 100644
--- a/lib/raid/raid6/loongarch/loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/loongarch_simd.c
@@ -26,11 +26,6 @@
#ifdef CONFIG_CPU_HAS_LSX
#define NSIZE 16
-static int raid6_has_lsx(void)
-{
- return cpu_has_lsx;
-}
-
static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
@@ -246,7 +241,6 @@ static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_lsx = {
.gen_syndrome = raid6_lsx_gen_syndrome,
.xor_syndrome = raid6_lsx_xor_syndrome,
- .valid = raid6_has_lsx,
.name = "lsx",
};
@@ -256,11 +250,6 @@ const struct raid6_calls raid6_lsx = {
#ifdef CONFIG_CPU_HAS_LASX
#define NSIZE 32
-static int raid6_has_lasx(void)
-{
- return cpu_has_lasx;
-}
-
static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
@@ -414,7 +403,6 @@ static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_lasx = {
.gen_syndrome = raid6_lasx_gen_syndrome,
.xor_syndrome = raid6_lasx_xor_syndrome,
- .valid = raid6_has_lasx,
.name = "lasx",
};
#undef NSIZE
diff --git a/lib/raid/raid6/loongarch/pq_arch.h b/lib/raid/raid6/loongarch/pq_arch.h
new file mode 100644
index 000000000000..cd4ee2df3f37
--- /dev/null
+++ b/lib/raid/raid6/loongarch/pq_arch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/cpu-features.h>
+
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
+
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
+
+static __always_inline void __init arch_raid6_init(void)
+{
+ if (IS_ENABLED(CONFIG_CPU_HAS_LSX) && cpu_has_lsx)
+ raid6_algo_add(&raid6_lsx);
+ if (IS_ENABLED(CONFIG_CPU_HAS_LASX) && cpu_has_lasx)
+ raid6_algo_add(&raid6_lasx);
+ raid6_algo_add_default();
+
+ if (IS_ENABLED(CONFIG_CPU_HAS_LASX) && cpu_has_lasx)
+ raid6_recov_algo_add(&raid6_recov_lasx);
+ else if (IS_ENABLED(CONFIG_CPU_HAS_LSX) && cpu_has_lsx)
+ raid6_recov_algo_add(&raid6_recov_lsx);
+}
diff --git a/lib/raid/raid6/loongarch/recov_loongarch_simd.c b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
index 0bbdc8b5c2e7..87a2313bbb4f 100644
--- a/lib/raid/raid6/loongarch/recov_loongarch_simd.c
+++ b/lib/raid/raid6/loongarch/recov_loongarch_simd.c
@@ -24,11 +24,6 @@
*/
#ifdef CONFIG_CPU_HAS_LSX
-static int raid6_has_lsx(void)
-{
- return cpu_has_lsx;
-}
-
static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
@@ -291,18 +286,11 @@ static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_lsx = {
.data2 = raid6_2data_recov_lsx,
.datap = raid6_datap_recov_lsx,
- .valid = raid6_has_lsx,
.name = "lsx",
- .priority = 1,
};
#endif /* CONFIG_CPU_HAS_LSX */
#ifdef CONFIG_CPU_HAS_LASX
-static int raid6_has_lasx(void)
-{
- return cpu_has_lasx;
-}
-
static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
@@ -509,8 +497,6 @@ static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_lasx = {
.data2 = raid6_2data_recov_lasx,
.datap = raid6_datap_recov_lasx,
- .valid = raid6_has_lasx,
.name = "lasx",
- .priority = 2,
};
#endif /* CONFIG_CPU_HAS_LASX */
diff --git a/lib/raid/raid6/powerpc/altivec.uc b/lib/raid/raid6/powerpc/altivec.uc
index eb4a448cc88e..c5429fb71dd6 100644
--- a/lib/raid/raid6/powerpc/altivec.uc
+++ b/lib/raid/raid6/powerpc/altivec.uc
@@ -104,17 +104,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
preempt_enable();
}
-int raid6_have_altivec(void);
-#if $# == 1
-int raid6_have_altivec(void)
-{
- /* This assumes either all CPUs have Altivec or none does */
- return cpu_has_feature(CPU_FTR_ALTIVEC);
-}
-#endif
-
const struct raid6_calls raid6_altivec$# = {
.gen_syndrome = raid6_altivec$#_gen_syndrome,
- .valid = raid6_have_altivec,
.name = "altivecx$#",
};
diff --git a/lib/raid/raid6/powerpc/pq_arch.h b/lib/raid/raid6/powerpc/pq_arch.h
new file mode 100644
index 000000000000..ecae7d3be131
--- /dev/null
+++ b/lib/raid/raid6/powerpc/pq_arch.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/cputable.h>
+
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+extern const struct raid6_calls raid6_vpermxor1;
+extern const struct raid6_calls raid6_vpermxor2;
+extern const struct raid6_calls raid6_vpermxor4;
+extern const struct raid6_calls raid6_vpermxor8;
+
+static __always_inline void __init arch_raid6_init(void)
+{
+ /* This assumes either all CPUs have Altivec or none does */
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ raid6_algo_add(&raid6_altivec1);
+ raid6_algo_add(&raid6_altivec2);
+ raid6_algo_add(&raid6_altivec4);
+ raid6_algo_add(&raid6_altivec8);
+ }
+ if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ raid6_algo_add(&raid6_vpermxor1);
+ raid6_algo_add(&raid6_vpermxor2);
+ raid6_algo_add(&raid6_vpermxor4);
+ raid6_algo_add(&raid6_vpermxor8);
+ }
+ raid6_algo_add_default();
+}
diff --git a/lib/raid/raid6/powerpc/vpermxor.uc b/lib/raid/raid6/powerpc/vpermxor.uc
index ec61f30bec11..e8964361aaef 100644
--- a/lib/raid/raid6/powerpc/vpermxor.uc
+++ b/lib/raid/raid6/powerpc/vpermxor.uc
@@ -76,18 +76,7 @@ static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
preempt_enable();
}
-int raid6_have_altivec_vpermxor(void);
-#if $# == 1
-int raid6_have_altivec_vpermxor(void)
-{
- /* Check if arch has both altivec and the vpermxor instructions */
- return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
- cpu_has_feature(CPU_FTR_ARCH_207S));
-}
-#endif
-
const struct raid6_calls raid6_vpermxor$# = {
.gen_syndrome = raid6_vpermxor$#_gen_syndrome,
- .valid = raid6_have_altivec_vpermxor,
.name = "vpermxor$#",
};
diff --git a/lib/raid/raid6/recov.c b/lib/raid/raid6/recov.c
index 735ab4013771..76eb2aef3667 100644
--- a/lib/raid/raid6/recov.c
+++ b/lib/raid/raid6/recov.c
@@ -97,7 +97,5 @@ static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_intx1 = {
.data2 = raid6_2data_recov_intx1,
.datap = raid6_datap_recov_intx1,
- .valid = NULL,
.name = "intx1",
- .priority = 0,
};
diff --git a/lib/raid/raid6/riscv/pq_arch.h b/lib/raid/raid6/riscv/pq_arch.h
new file mode 100644
index 000000000000..52dd01e9fc42
--- /dev/null
+++ b/lib/raid/raid6/riscv/pq_arch.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/vector.h>
+
+extern const struct raid6_calls raid6_rvvx1;
+extern const struct raid6_calls raid6_rvvx2;
+extern const struct raid6_calls raid6_rvvx4;
+extern const struct raid6_calls raid6_rvvx8;
+extern const struct raid6_recov_calls raid6_recov_rvv;
+
+static __always_inline void __init arch_raid6_init(void)
+{
+ if (has_vector()) {
+ raid6_algo_add(&raid6_rvvx1);
+ raid6_algo_add(&raid6_rvvx2);
+ raid6_algo_add(&raid6_rvvx4);
+ raid6_algo_add(&raid6_rvvx8);
+ raid6_recov_algo_add(&raid6_recov_rvv);
+ }
+ raid6_algo_add_default();
+}
diff --git a/lib/raid/raid6/riscv/recov_rvv.c b/lib/raid/raid6/riscv/recov_rvv.c
index 02120d245e22..2305940276dd 100644
--- a/lib/raid/raid6/riscv/recov_rvv.c
+++ b/lib/raid/raid6/riscv/recov_rvv.c
@@ -218,7 +218,5 @@ static void raid6_datap_recov_rvv(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_rvv = {
.data2 = raid6_2data_recov_rvv,
.datap = raid6_datap_recov_rvv,
- .valid = rvv_has_vector,
.name = "rvv",
- .priority = 1,
};
diff --git a/lib/raid/raid6/riscv/rvv.h b/lib/raid/raid6/riscv/rvv.h
index c293130d798b..3a7c2468b1ea 100644
--- a/lib/raid/raid6/riscv/rvv.h
+++ b/lib/raid/raid6/riscv/rvv.h
@@ -10,11 +10,6 @@
#include <asm/vector.h>
#include "algos.h"
-static int rvv_has_vector(void)
-{
- return has_vector();
-}
-
#define RAID6_RVV_WRAPPER(_n) \
static void raid6_rvv ## _n ## _gen_syndrome(int disks, \
size_t bytes, void **ptrs) \
@@ -41,6 +36,5 @@ static int rvv_has_vector(void)
struct raid6_calls const raid6_rvvx ## _n = { \
.gen_syndrome = raid6_rvv ## _n ## _gen_syndrome, \
.xor_syndrome = raid6_rvv ## _n ## _xor_syndrome, \
- .valid = rvv_has_vector, \
.name = "rvvx" #_n, \
}
diff --git a/lib/raid/raid6/s390/pq_arch.h b/lib/raid/raid6/s390/pq_arch.h
new file mode 100644
index 000000000000..95d14c342306
--- /dev/null
+++ b/lib/raid/raid6/s390/pq_arch.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <linux/cpufeature.h>
+
+extern const struct raid6_calls raid6_s390vx8;
+extern const struct raid6_recov_calls raid6_recov_s390xc;
+
+static __always_inline void __init arch_raid6_init(void)
+{
+ if (cpu_has_vx())
+ raid6_algo_add(&raid6_s390vx8);
+ else
+ raid6_algo_add_default();
+ raid6_recov_algo_add(&raid6_recov_s390xc);
+}
diff --git a/lib/raid/raid6/s390/recov_s390xc.c b/lib/raid/raid6/s390/recov_s390xc.c
index e7b3409f21e2..08d56896e5ea 100644
--- a/lib/raid/raid6/s390/recov_s390xc.c
+++ b/lib/raid/raid6/s390/recov_s390xc.c
@@ -112,7 +112,5 @@ static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_s390xc = {
.data2 = raid6_2data_recov_s390xc,
.datap = raid6_datap_recov_s390xc,
- .valid = NULL,
.name = "s390xc",
- .priority = 1,
};
diff --git a/lib/raid/raid6/s390/s390vx.uc b/lib/raid/raid6/s390/s390vx.uc
index aba3515eacac..e5cf9054be2a 100644
--- a/lib/raid/raid6/s390/s390vx.uc
+++ b/lib/raid/raid6/s390/s390vx.uc
@@ -121,15 +121,8 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
-static int raid6_s390vx$#_valid(void)
-{
- return cpu_has_vx();
-}
-
const struct raid6_calls raid6_s390vx$# = {
.gen_syndrome = raid6_s390vx$#_gen_syndrome,
.xor_syndrome = raid6_s390vx$#_xor_syndrome,
- .valid = raid6_s390vx$#_valid,
.name = "vx128x$#",
- .priority = 1,
};
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index 1a6168096456..abc854cf4a98 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -91,19 +91,20 @@ static void test_disks(struct kunit *test, const struct raid6_calls *calls,
static void raid6_test(struct kunit *test)
{
- const struct raid6_calls *const *algo;
- const struct raid6_recov_calls *const *ra;
int i, j, p1, p2;
+ unsigned int r, g;
- for (ra = raid6_recov_algos; *ra; ra++) {
- if ((*ra)->valid && !(*ra)->valid())
- continue;
+ for (r = 0; ; r++) {
+ const struct raid6_recov_calls *ra = raid6_recov_algo_find(r);
- for (algo = raid6_algos; *algo; algo++) {
- const struct raid6_calls *calls = *algo;
+ if (!ra)
+ break;
- if (calls->valid && !calls->valid())
- continue;
+ for (g = 0; ; g++) {
+ const struct raid6_calls *calls = raid6_algo_find(g);
+
+ if (!calls)
+ break;
/* Nuke syndromes */
memset(data[NDISKS - 2], 0xee, PAGE_SIZE);
@@ -115,7 +116,7 @@ static void raid6_test(struct kunit *test)
for (i = 0; i < NDISKS-1; i++)
for (j = i+1; j < NDISKS; j++)
- test_disks(test, calls, *ra, i, j);
+ test_disks(test, calls, ra, i, j);
if (!calls->xor_syndrome)
continue;
@@ -133,7 +134,7 @@ static void raid6_test(struct kunit *test)
for (i = 0; i < NDISKS-1; i++)
for (j = i+1; j < NDISKS; j++)
test_disks(test, calls,
- *ra, i, j);
+ ra, i, j);
}
}
diff --git a/lib/raid/raid6/x86/avx2.c b/lib/raid/raid6/x86/avx2.c
index 0bf831799082..7efd94e6a87a 100644
--- a/lib/raid/raid6/x86/avx2.c
+++ b/lib/raid/raid6/x86/avx2.c
@@ -24,11 +24,6 @@ static const struct raid6_avx2_constants {
0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
-static int raid6_have_avx2(void)
-{
- return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
-}
-
/*
* Plain AVX2 implementation
*/
@@ -131,10 +126,7 @@ static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_avx2x1 = {
.gen_syndrome = raid6_avx21_gen_syndrome,
.xor_syndrome = raid6_avx21_xor_syndrome,
- .valid = raid6_have_avx2,
.name = "avx2x1",
- /* Prefer AVX2 over priority 1 (SSE2 and others) */
- .priority = 2,
};
/*
@@ -262,10 +254,7 @@ static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_avx2x2 = {
.gen_syndrome = raid6_avx22_gen_syndrome,
.xor_syndrome = raid6_avx22_xor_syndrome,
- .valid = raid6_have_avx2,
.name = "avx2x2",
- /* Prefer AVX2 over priority 1 (SSE2 and others) */
- .priority = 2,
};
#ifdef CONFIG_X86_64
@@ -466,9 +455,6 @@ static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_avx2x4 = {
.gen_syndrome = raid6_avx24_gen_syndrome,
.xor_syndrome = raid6_avx24_xor_syndrome,
- .valid = raid6_have_avx2,
.name = "avx2x4",
- /* Prefer AVX2 over priority 1 (SSE2 and others) */
- .priority = 2,
};
#endif /* CONFIG_X86_64 */
diff --git a/lib/raid/raid6/x86/avx512.c b/lib/raid/raid6/x86/avx512.c
index 98ed42fb0a46..0772e798b742 100644
--- a/lib/raid/raid6/x86/avx512.c
+++ b/lib/raid/raid6/x86/avx512.c
@@ -30,16 +30,6 @@ static const struct raid6_avx512_constants {
0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
-static int raid6_have_avx512(void)
-{
- return boot_cpu_has(X86_FEATURE_AVX2) &&
- boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX512F) &&
- boot_cpu_has(X86_FEATURE_AVX512BW) &&
- boot_cpu_has(X86_FEATURE_AVX512VL) &&
- boot_cpu_has(X86_FEATURE_AVX512DQ);
-}
-
static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
@@ -159,10 +149,7 @@ static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_avx512x1 = {
.gen_syndrome = raid6_avx5121_gen_syndrome,
.xor_syndrome = raid6_avx5121_xor_syndrome,
- .valid = raid6_have_avx512,
.name = "avx512x1",
- /* Prefer AVX512 over priority 1 (SSE2 and others) */
- .priority = 2,
};
/*
@@ -317,10 +304,7 @@ static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_avx512x2 = {
.gen_syndrome = raid6_avx5122_gen_syndrome,
.xor_syndrome = raid6_avx5122_xor_syndrome,
- .valid = raid6_have_avx512,
.name = "avx512x2",
- /* Prefer AVX512 over priority 1 (SSE2 and others) */
- .priority = 2,
};
#ifdef CONFIG_X86_64
@@ -556,9 +540,6 @@ static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_avx512x4 = {
.gen_syndrome = raid6_avx5124_gen_syndrome,
.xor_syndrome = raid6_avx5124_xor_syndrome,
- .valid = raid6_have_avx512,
.name = "avx512x4",
- /* Prefer AVX512 over priority 1 (SSE2 and others) */
- .priority = 2,
};
#endif
diff --git a/lib/raid/raid6/x86/mmx.c b/lib/raid/raid6/x86/mmx.c
index 052d9f010bfe..3228c335965a 100644
--- a/lib/raid/raid6/x86/mmx.c
+++ b/lib/raid/raid6/x86/mmx.c
@@ -22,12 +22,6 @@ const struct raid6_mmx_constants {
0x1d1d1d1d1d1d1d1dULL,
};
-static int raid6_have_mmx(void)
-{
- /* Not really "boot_cpu" but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX);
-}
-
/*
* Plain MMX implementation
*/
@@ -70,7 +64,6 @@ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_mmxx1 = {
.gen_syndrome = raid6_mmx1_gen_syndrome,
- .valid = raid6_have_mmx,
.name = "mmxx1",
};
@@ -127,6 +120,5 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_mmxx2 = {
.gen_syndrome = raid6_mmx2_gen_syndrome,
- .valid = raid6_have_mmx,
.name = "mmxx2",
};
diff --git a/lib/raid/raid6/x86/pq_arch.h b/lib/raid/raid6/x86/pq_arch.h
new file mode 100644
index 000000000000..09838cad630b
--- /dev/null
+++ b/lib/raid/raid6/x86/pq_arch.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/cpufeature.h>
+
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_avx2x1;
+extern const struct raid6_calls raid6_avx2x2;
+extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_avx512x1;
+extern const struct raid6_calls raid6_avx512x2;
+extern const struct raid6_calls raid6_avx512x4;
+
+extern const struct raid6_recov_calls raid6_recov_ssse3;
+extern const struct raid6_recov_calls raid6_recov_avx2;
+extern const struct raid6_recov_calls raid6_recov_avx512;
+
+static inline int raid6_has_avx512(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512DQ);
+}
+
+static inline bool raid6_has_avx2(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
+}
+
+static inline bool raid6_has_ssse3(void)
+{
+ return boot_cpu_has(X86_FEATURE_XMM) &&
+ boot_cpu_has(X86_FEATURE_XMM2) &&
+ boot_cpu_has(X86_FEATURE_SSSE3);
+}
+
+static inline bool raid6_has_sse2(void)
+{
+ return boot_cpu_has(X86_FEATURE_MMX) &&
+ boot_cpu_has(X86_FEATURE_FXSR) &&
+ boot_cpu_has(X86_FEATURE_XMM) &&
+ boot_cpu_has(X86_FEATURE_XMM2);
+}
+
+static inline bool raid6_has_sse1_or_mmxext(void)
+{
+ return boot_cpu_has(X86_FEATURE_MMX) &&
+ (boot_cpu_has(X86_FEATURE_XMM) ||
+ boot_cpu_has(X86_FEATURE_MMXEXT));
+}
+
+static __always_inline void __init arch_raid6_init(void)
+{
+ if (raid6_has_avx2()) {
+ if (raid6_has_avx512()) {
+ raid6_algo_add(&raid6_avx512x1);
+ raid6_algo_add(&raid6_avx512x2);
+ if (IS_ENABLED(CONFIG_X86_64))
+ raid6_algo_add(&raid6_avx512x4);
+ }
+ raid6_algo_add(&raid6_avx2x1);
+ raid6_algo_add(&raid6_avx2x2);
+ if (IS_ENABLED(CONFIG_X86_64))
+ raid6_algo_add(&raid6_avx2x4);
+ } else if (IS_ENABLED(CONFIG_X86_64) || raid6_has_sse2()) {
+ raid6_algo_add(&raid6_sse2x1);
+ raid6_algo_add(&raid6_sse2x2);
+ if (IS_ENABLED(CONFIG_X86_64))
+ raid6_algo_add(&raid6_sse2x4);
+ } else if (raid6_has_sse1_or_mmxext()) {
+ raid6_algo_add(&raid6_sse1x1);
+ raid6_algo_add(&raid6_sse1x2);
+ raid6_algo_add_default();
+ } else {
+ if (boot_cpu_has(X86_FEATURE_MMX)) {
+ raid6_algo_add(&raid6_mmxx1);
+ raid6_algo_add(&raid6_mmxx2);
+ }
+ raid6_algo_add_default();
+ }
+
+ if (raid6_has_avx512())
+ raid6_recov_algo_add(&raid6_recov_avx512);
+ else if (raid6_has_avx2())
+ raid6_recov_algo_add(&raid6_recov_avx2);
+ else if (raid6_has_ssse3())
+ raid6_recov_algo_add(&raid6_recov_ssse3);
+}
diff --git a/lib/raid/raid6/x86/recov_avx2.c b/lib/raid/raid6/x86/recov_avx2.c
index 06c6e05763bc..a714a780a2d8 100644
--- a/lib/raid/raid6/x86/recov_avx2.c
+++ b/lib/raid/raid6/x86/recov_avx2.c
@@ -9,12 +9,6 @@
#include <asm/fpu/api.h>
#include "algos.h"
-static int raid6_has_avx2(void)
-{
- return boot_cpu_has(X86_FEATURE_AVX2) &&
- boot_cpu_has(X86_FEATURE_AVX);
-}
-
static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
@@ -305,11 +299,9 @@ static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_avx2 = {
.data2 = raid6_2data_recov_avx2,
.datap = raid6_datap_recov_avx2,
- .valid = raid6_has_avx2,
#ifdef CONFIG_X86_64
.name = "avx2x2",
#else
.name = "avx2x1",
#endif
- .priority = 2,
};
diff --git a/lib/raid/raid6/x86/recov_avx512.c b/lib/raid/raid6/x86/recov_avx512.c
index 850bb962b514..ec72d5a30c01 100644
--- a/lib/raid/raid6/x86/recov_avx512.c
+++ b/lib/raid/raid6/x86/recov_avx512.c
@@ -11,16 +11,6 @@
#include <asm/fpu/api.h>
#include "algos.h"
-static int raid6_has_avx512(void)
-{
- return boot_cpu_has(X86_FEATURE_AVX2) &&
- boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX512F) &&
- boot_cpu_has(X86_FEATURE_AVX512BW) &&
- boot_cpu_has(X86_FEATURE_AVX512VL) &&
- boot_cpu_has(X86_FEATURE_AVX512DQ);
-}
-
static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
@@ -369,11 +359,9 @@ static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_avx512 = {
.data2 = raid6_2data_recov_avx512,
.datap = raid6_datap_recov_avx512,
- .valid = raid6_has_avx512,
#ifdef CONFIG_X86_64
.name = "avx512x2",
#else
.name = "avx512x1",
#endif
- .priority = 3,
};
diff --git a/lib/raid/raid6/x86/recov_ssse3.c b/lib/raid/raid6/x86/recov_ssse3.c
index 95589c33003a..700bd2c865ec 100644
--- a/lib/raid/raid6/x86/recov_ssse3.c
+++ b/lib/raid/raid6/x86/recov_ssse3.c
@@ -8,13 +8,6 @@
#include <asm/fpu/api.h>
#include "algos.h"
-static int raid6_has_ssse3(void)
-{
- return boot_cpu_has(X86_FEATURE_XMM) &&
- boot_cpu_has(X86_FEATURE_XMM2) &&
- boot_cpu_has(X86_FEATURE_SSSE3);
-}
-
static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
@@ -320,11 +313,9 @@ static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
const struct raid6_recov_calls raid6_recov_ssse3 = {
.data2 = raid6_2data_recov_ssse3,
.datap = raid6_datap_recov_ssse3,
- .valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
.name = "ssse3x2",
#else
.name = "ssse3x1",
#endif
- .priority = 1,
};
diff --git a/lib/raid/raid6/x86/sse1.c b/lib/raid/raid6/x86/sse1.c
index 7004255a0bb1..6ebdcf824e00 100644
--- a/lib/raid/raid6/x86/sse1.c
+++ b/lib/raid/raid6/x86/sse1.c
@@ -25,14 +25,6 @@ extern const struct raid6_mmx_constants {
u64 x1d;
} raid6_mmx_constants;
-static int raid6_have_sse1_or_mmxext(void)
-{
- /* Not really boot_cpu but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX) &&
- (boot_cpu_has(X86_FEATURE_XMM) ||
- boot_cpu_has(X86_FEATURE_MMXEXT));
-}
-
/*
* Plain SSE1 implementation
*/
@@ -86,9 +78,7 @@ static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_sse1x1 = {
.gen_syndrome = raid6_sse11_gen_syndrome,
- .valid = raid6_have_sse1_or_mmxext,
.name = "sse1x1",
- .priority = 1, /* Has cache hints */
};
/*
@@ -148,7 +138,5 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_sse1x2 = {
.gen_syndrome = raid6_sse12_gen_syndrome,
- .valid = raid6_have_sse1_or_mmxext,
.name = "sse1x2",
- .priority = 1, /* Has cache hints */
};
diff --git a/lib/raid/raid6/x86/sse2.c b/lib/raid/raid6/x86/sse2.c
index f30be4ee14d0..7049c8512f35 100644
--- a/lib/raid/raid6/x86/sse2.c
+++ b/lib/raid/raid6/x86/sse2.c
@@ -22,15 +22,6 @@ static const struct raid6_sse_constants {
{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};
-static int raid6_have_sse2(void)
-{
- /* Not really boot_cpu but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX) &&
- boot_cpu_has(X86_FEATURE_FXSR) &&
- boot_cpu_has(X86_FEATURE_XMM) &&
- boot_cpu_has(X86_FEATURE_XMM2);
-}
-
/*
* Plain SSE2 implementation
*/
@@ -136,9 +127,7 @@ static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_sse2x1 = {
.gen_syndrome = raid6_sse21_gen_syndrome,
.xor_syndrome = raid6_sse21_xor_syndrome,
- .valid = raid6_have_sse2,
.name = "sse2x1",
- .priority = 1, /* Has cache hints */
};
/*
@@ -266,9 +255,7 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_sse2x2 = {
.gen_syndrome = raid6_sse22_gen_syndrome,
.xor_syndrome = raid6_sse22_xor_syndrome,
- .valid = raid6_have_sse2,
.name = "sse2x2",
- .priority = 1, /* Has cache hints */
};
#ifdef CONFIG_X86_64
@@ -473,9 +460,7 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
const struct raid6_calls raid6_sse2x4 = {
.gen_syndrome = raid6_sse24_gen_syndrome,
.xor_syndrome = raid6_sse24_xor_syndrome,
- .valid = raid6_have_sse2,
.name = "sse2x4",
- .priority = 1, /* Has cache hints */
};
#endif /* CONFIG_X86_64 */
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 10/17] raid6: use static_call for gen_syndrome and xor_syndrome
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (8 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 09/17] raid6: rework the init helpers Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 11/17] raid6: use static_call for raid6_recov_2data and raid6_recov_datap Christoph Hellwig
` (6 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Avoid indirect calls for P/Q parity generation.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/algos.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index f9e8a8752e2d..b81c7594f6c4 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/raid/pq.h>
+#include <linux/static_call.h>
#include <kunit/visibility.h>
#include "algos.h"
@@ -23,7 +24,8 @@ static unsigned int raid6_nr_algos;
static const struct raid6_recov_calls *raid6_recov_algo;
/* Selected algorithm */
-static struct raid6_calls raid6_call;
+DEFINE_STATIC_CALL_NULL(raid6_gen_syndrome_impl, *raid6_intx1.gen_syndrome);
+DEFINE_STATIC_CALL_NULL(raid6_xor_syndrome_impl, *raid6_intx1.xor_syndrome);
/**
* raid6_gen_syndrome - generate RAID6 P/Q parity
@@ -47,7 +49,7 @@ void raid6_gen_syndrome(int disks, size_t bytes, void **ptrs)
lockdep_assert_preemption_enabled();
WARN_ON_ONCE(bytes & 511);
- raid6_call.gen_syndrome(disks, bytes, ptrs);
+ static_call(raid6_gen_syndrome_impl)(disks, bytes, ptrs);
}
EXPORT_SYMBOL_GPL(raid6_gen_syndrome);
@@ -83,7 +85,7 @@ void raid6_xor_syndrome(int disks, int start, int stop, size_t bytes,
WARN_ON_ONCE(bytes & 511);
WARN_ON_ONCE(stop < start);
- raid6_call.xor_syndrome(disks, start, stop, bytes, ptrs);
+ static_call(raid6_xor_syndrome_impl)(disks, start, stop, bytes, ptrs);
}
EXPORT_SYMBOL_GPL(raid6_xor_syndrome);
@@ -94,7 +96,7 @@ EXPORT_SYMBOL_GPL(raid6_xor_syndrome);
*/
bool raid6_can_xor_syndrome(void)
{
- return !!raid6_call.xor_syndrome;
+ return !!static_call_query(raid6_xor_syndrome_impl);
}
EXPORT_SYMBOL_GPL(raid6_can_xor_syndrome);
@@ -193,7 +195,8 @@ static int raid6_choose_gen(void *(*const dptrs)[RAID6_TEST_DISKS],
return -EINVAL;
}
- raid6_call = *best;
+ static_call_update(raid6_gen_syndrome_impl, best->gen_syndrome);
+ static_call_update(raid6_xor_syndrome_impl, best->xor_syndrome);
pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
best->name,
@@ -238,7 +241,10 @@ static int __init raid6_select_algo(void)
if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK) || raid6_nr_algos == 1) {
pr_info("raid6: skipped pq benchmark and selected %s\n",
raid6_algos[0]->name);
- raid6_call = *raid6_algos[0];
+ static_call_update(raid6_gen_syndrome_impl,
+ raid6_algos[0]->gen_syndrome);
+ static_call_update(raid6_xor_syndrome_impl,
+ raid6_algos[0]->xor_syndrome);
return 0;
}
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 11/17] raid6: use static_call for raid6_recov_2data and raid6_recov_datap
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (9 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 10/17] raid6: use static_call for gen_syndrome and xor_syndrome Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 12/17] raid6: update top of file comments Christoph Hellwig
` (5 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Avoid expensive indirect calls for the recovery routines as well.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/algos.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index b81c7594f6c4..201443f6cac2 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -26,6 +26,8 @@ static const struct raid6_recov_calls *raid6_recov_algo;
/* Selected algorithm */
DEFINE_STATIC_CALL_NULL(raid6_gen_syndrome_impl, *raid6_intx1.gen_syndrome);
DEFINE_STATIC_CALL_NULL(raid6_xor_syndrome_impl, *raid6_intx1.xor_syndrome);
+DEFINE_STATIC_CALL_NULL(raid6_recov_2data_impl, *raid6_recov_intx1.data2);
+DEFINE_STATIC_CALL_NULL(raid6_recov_datap_impl, *raid6_recov_intx1.datap);
/**
* raid6_gen_syndrome - generate RAID6 P/Q parity
@@ -124,7 +126,7 @@ void raid6_recov_2data(int disks, size_t bytes, int faila, int failb,
WARN_ON_ONCE(bytes > PAGE_SIZE);
WARN_ON_ONCE(failb <= faila);
- raid6_recov_algo->data2(disks, bytes, faila, failb, ptrs);
+ static_call(raid6_recov_2data_impl)(disks, bytes, faila, failb, ptrs);
}
EXPORT_SYMBOL_GPL(raid6_recov_2data);
@@ -149,7 +151,7 @@ void raid6_recov_datap(int disks, size_t bytes, int faila, void **ptrs)
WARN_ON_ONCE(bytes & 511);
WARN_ON_ONCE(bytes > PAGE_SIZE);
- raid6_recov_algo->datap(disks, bytes, faila, ptrs);
+ static_call(raid6_recov_datap_impl)(disks, bytes, faila, ptrs);
}
EXPORT_SYMBOL_GPL(raid6_recov_datap);
@@ -322,6 +324,8 @@ static int __init raid6_init(void)
*/
if (!raid6_recov_algo)
raid6_recov_algo = &raid6_recov_intx1;
+ static_call_update(raid6_recov_2data_impl, raid6_recov_algo->data2);
+ static_call_update(raid6_recov_datap_impl, raid6_recov_algo->datap);
pr_info("raid6: using %s recovery algorithm\n", raid6_recov_algo->name);
#ifdef MODULE
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 12/17] raid6: update top of file comments
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (10 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 11/17] raid6: use static_call for raid6_recov_2data and raid6_recov_datap Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 13/17] raid6_kunit: use KUNIT_CASE_PARAM Christoph Hellwig
` (4 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Drop the pointless mention of the file name, and use standard formatting
for the top of file comments.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/algos.c | 8 +-------
lib/raid/raid6/arm/neon.c | 2 +-
lib/raid/raid6/mktables.c | 12 +++---------
lib/raid/raid6/recov.c | 14 ++++----------
lib/raid/raid6/riscv/rvv.h | 2 --
lib/raid/raid6/x86/avx2.c | 13 ++++---------
lib/raid/raid6/x86/avx512.c | 18 ++++++------------
lib/raid/raid6/x86/mmx.c | 10 ++--------
lib/raid/raid6/x86/sse1.c | 18 ++++++------------
lib/raid/raid6/x86/sse2.c | 9 +--------
10 files changed, 28 insertions(+), 78 deletions(-)
diff --git a/lib/raid/raid6/algos.c b/lib/raid/raid6/algos.c
index 201443f6cac2..5fdf6d362f58 100644
--- a/lib/raid/raid6/algos.c
+++ b/lib/raid/raid6/algos.c
@@ -1,12 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * raid6/algos.c
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* Algorithm list and algorithm selection for RAID-6
*/
diff --git a/lib/raid/raid6/arm/neon.c b/lib/raid/raid6/arm/neon.c
index 341c61af675e..af90869aaffc 100644
--- a/lib/raid/raid6/arm/neon.c
+++ b/lib/raid/raid6/arm/neon.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
+ * RAID6 syndrome calculation using ARM NEON intrinsics
*
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
diff --git a/lib/raid/raid6/mktables.c b/lib/raid/raid6/mktables.c
index 97a17493bbd8..b6327b562fdb 100644
--- a/lib/raid/raid6/mktables.c
+++ b/lib/raid/raid6/mktables.c
@@ -1,15 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * mktables.c
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
- * Make RAID-6 tables. This is a host user space program to be run at
- * compile time.
+ * Make RAID-6 tables. This is a host user space program to be run at compile
+ * time.
*/
#include <stdio.h>
diff --git a/lib/raid/raid6/recov.c b/lib/raid/raid6/recov.c
index 76eb2aef3667..3fa53bc3fde4 100644
--- a/lib/raid/raid6/recov.c
+++ b/lib/raid/raid6/recov.c
@@ -1,16 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * raid6/recov.c
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
*
- * RAID-6 data recovery in dual failure mode. In single failure mode,
- * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
- * the syndrome.)
+ * RAID-6 data recovery in dual failure mode. In single failure mode, use the
+ * RAID-5 algorithm (or, in the case of Q failure, just reconstruct the
+ * syndrome.)
*/
#include <linux/mm.h>
diff --git a/lib/raid/raid6/riscv/rvv.h b/lib/raid/raid6/riscv/rvv.h
index 3a7c2468b1ea..df0e3637cae8 100644
--- a/lib/raid/raid6/riscv/rvv.h
+++ b/lib/raid/raid6/riscv/rvv.h
@@ -2,8 +2,6 @@
/*
* Copyright 2024 Institute of Software, CAS.
*
- * raid6/rvv.h
- *
* Definitions for RISC-V RAID-6 code
*/
diff --git a/lib/raid/raid6/x86/avx2.c b/lib/raid/raid6/x86/avx2.c
index 7efd94e6a87a..7d829c669ea7 100644
--- a/lib/raid/raid6/x86/avx2.c
+++ b/lib/raid/raid6/x86/avx2.c
@@ -1,16 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright (C) 2012 Intel Corporation
- * Author: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Yuanhan Liu <yuanhan.liu@linux.intel.com>
*
- * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
*
- * ----------------------------------------------------------------------- */
-
-/*
* AVX2 implementation of RAID-6 syndrome functions
- *
*/
#include <asm/cpufeature.h>
diff --git a/lib/raid/raid6/x86/avx512.c b/lib/raid/raid6/x86/avx512.c
index 0772e798b742..e671eb5bde63 100644
--- a/lib/raid/raid6/x86/avx512.c
+++ b/lib/raid/raid6/x86/avx512.c
@@ -1,20 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- --------------------------------------------------------
- *
- * Copyright (C) 2016 Intel Corporation
+/*
+ * Copyright (C) 2016 Intel Corporation
*
- * Author: Gayatri Kammela <gayatri.kammela@intel.com>
- * Author: Megha Dey <megha.dey@linux.intel.com>
+ * Author: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Author: Megha Dey <megha.dey@linux.intel.com>
*
- * Based on avx2.c: Copyright 2012 Yuanhan Liu All Rights Reserved
- * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Based on avx2.c: Copyright 2012 Yuanhan Liu All Rights Reserved
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
*
- * -----------------------------------------------------------------------
- */
-
-/*
* AVX512 implementation of RAID-6 syndrome functions
- *
*/
#include <asm/cpufeature.h>
diff --git a/lib/raid/raid6/x86/mmx.c b/lib/raid/raid6/x86/mmx.c
index 3228c335965a..afa82536142d 100644
--- a/lib/raid/raid6/x86/mmx.c
+++ b/lib/raid/raid6/x86/mmx.c
@@ -1,14 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * raid6/mmx.c
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
*
- * MMX implementation of RAID-6 syndrome functions
+ * MMX implementation of RAID-6 syndrome functions.
*/
#include <asm/cpufeature.h>
diff --git a/lib/raid/raid6/x86/sse1.c b/lib/raid/raid6/x86/sse1.c
index 6ebdcf824e00..f4b260df522a 100644
--- a/lib/raid/raid6/x86/sse1.c
+++ b/lib/raid/raid6/x86/sse1.c
@@ -1,19 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * raid6/sse1.c
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
*
- * SSE-1/MMXEXT implementation of RAID-6 syndrome functions
+ * SSE-1/MMXEXT implementation of RAID-6 syndrome functions.
*
- * This is really an MMX implementation, but it requires SSE-1 or
- * AMD MMXEXT for prefetch support and a few other features. The
- * support for nontemporal memory accesses is enough to make this
- * worthwhile as a separate implementation.
+ * This is really an MMX implementation, but it requires SSE-1 or AMD MMXEXT for
+ * prefetch support and a few other features. The support for nontemporal
+ * memory accesses is enough to make this worthwhile as a separate
+ * implementation.
*/
#include <asm/cpufeature.h>
diff --git a/lib/raid/raid6/x86/sse2.c b/lib/raid/raid6/x86/sse2.c
index 7049c8512f35..43b09ce58270 100644
--- a/lib/raid/raid6/x86/sse2.c
+++ b/lib/raid/raid6/x86/sse2.c
@@ -1,15 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * ----------------------------------------------------------------------- */
-
/*
- * raid6/sse2.c
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* SSE-2 implementation of RAID-6 syndrome functions
- *
*/
#include <asm/cpufeature.h>
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 13/17] raid6_kunit: use KUNIT_CASE_PARAM
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (11 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 12/17] raid6: update top of file comments Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 14/17] raid6_kunit: dynamically allocate data buffers using vmalloc Christoph Hellwig
` (3 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
The raid6 test combines various generation and recovery algorithms. Use
KUNIT_CASE_PARAM and provide a generator that iterates over the possible
combinations instead of looping inside a single test instance.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/tests/raid6_kunit.c | 120 ++++++++++++++++-------------
1 file changed, 68 insertions(+), 52 deletions(-)
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index abc854cf4a98..7095952ad8e3 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -21,6 +21,15 @@ static char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+struct test_args {
+ unsigned int recov_idx;
+ const struct raid6_recov_calls *recov;
+ unsigned int gen_idx;
+ const struct raid6_calls *gen;
+};
+
+static struct test_args args;
+
static void makedata(int start, int stop)
{
int i;
@@ -43,9 +52,10 @@ static char member_type(int d)
}
}
-static void test_disks(struct kunit *test, const struct raid6_calls *calls,
- const struct raid6_recov_calls *ra, int faila, int failb)
+static void test_recover(struct kunit *test, int faila, int failb)
{
+ const struct test_args *ta = test->param_value;
+
memset(recovi, 0xf0, PAGE_SIZE);
memset(recovj, 0xba, PAGE_SIZE);
@@ -64,25 +74,23 @@ static void test_disks(struct kunit *test, const struct raid6_calls *calls,
goto skip;
/* P+Q failure. Just rebuild the syndrome. */
- calls->gen_syndrome(NDISKS, PAGE_SIZE, dataptrs);
+ ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, dataptrs);
} else if (failb == NDISKS - 2) {
/* data+P failure. */
- ra->datap(NDISKS, PAGE_SIZE, faila, dataptrs);
+ ta->recov->datap(NDISKS, PAGE_SIZE, faila, dataptrs);
} else {
/* data+data failure. */
- ra->data2(NDISKS, PAGE_SIZE, faila, failb, dataptrs);
+ ta->recov->data2(NDISKS, PAGE_SIZE, faila, failb, dataptrs);
}
KUNIT_EXPECT_MEMEQ_MSG(test, data[faila], recovi, PAGE_SIZE,
- "algo=%-8s/%-8s faila miscompared: %3d[%c] (failb=%3d[%c])\n",
- calls->name, ra->name,
- faila, member_type(faila),
- failb, member_type(failb));
+ "faila miscompared: %3d[%c] (failb=%3d[%c])\n",
+ faila, member_type(faila),
+ failb, member_type(failb));
KUNIT_EXPECT_MEMEQ_MSG(test, data[failb], recovj, PAGE_SIZE,
- "algo=%-8s/%-8s failb miscompared: %3d[%c] (faila=%3d[%c])\n",
- calls->name, ra->name,
- failb, member_type(failb),
- faila, member_type(faila));
+ "failb miscompared: %3d[%c] (faila=%3d[%c])\n",
+ failb, member_type(failb),
+ faila, member_type(faila));
skip:
dataptrs[faila] = data[faila];
@@ -91,58 +99,65 @@ static void test_disks(struct kunit *test, const struct raid6_calls *calls,
static void raid6_test(struct kunit *test)
{
+ const struct test_args *ta = test->param_value;
int i, j, p1, p2;
- unsigned int r, g;
-
- for (r = 0; ; r++) {
- const struct raid6_recov_calls *ra = raid6_recov_algo_find(r);
-
- if (!ra)
- break;
-
- for (g = 0; ; g++) {
- const struct raid6_calls *calls = raid6_algo_find(g);
-
- if (!calls)
- break;
- /* Nuke syndromes */
- memset(data[NDISKS - 2], 0xee, PAGE_SIZE);
- memset(data[NDISKS - 1], 0xee, PAGE_SIZE);
+ /* Nuke syndromes */
+ memset(data[NDISKS - 2], 0xee, PAGE_SIZE);
+ memset(data[NDISKS - 1], 0xee, PAGE_SIZE);
- /* Generate assumed good syndrome */
- calls->gen_syndrome(NDISKS, PAGE_SIZE,
- (void **)&dataptrs);
+ /* Generate assumed good syndrome */
+ ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, (void **)&dataptrs);
- for (i = 0; i < NDISKS-1; i++)
- for (j = i+1; j < NDISKS; j++)
- test_disks(test, calls, ra, i, j);
+ for (i = 0; i < NDISKS - 1; i++)
+ for (j = i + 1; j < NDISKS; j++)
+ test_recover(test, i, j);
- if (!calls->xor_syndrome)
- continue;
+ if (!ta->gen->xor_syndrome)
+ return;
- for (p1 = 0; p1 < NDISKS-2; p1++)
- for (p2 = p1; p2 < NDISKS-2; p2++) {
+ for (p1 = 0; p1 < NDISKS - 2; p1++) {
+ for (p2 = p1; p2 < NDISKS - 2; p2++) {
+ /* Simulate rmw run */
+ ta->gen->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ (void **)&dataptrs);
+ makedata(p1, p2);
+ ta->gen->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ (void **)&dataptrs);
- /* Simulate rmw run */
- calls->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
- (void **)&dataptrs);
- makedata(p1, p2);
- calls->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
- (void **)&dataptrs);
+ for (i = 0; i < NDISKS - 1; i++)
+ for (j = i + 1; j < NDISKS; j++)
+ test_recover(test, i, j);
+ }
+ }
+}
- for (i = 0; i < NDISKS-1; i++)
- for (j = i+1; j < NDISKS; j++)
- test_disks(test, calls,
- ra, i, j);
- }
+static const void *raid6_gen_params(struct kunit *test, const void *prev,
+ char *desc)
+{
+ if (!args.gen) {
+next_algo:
+ args.recov_idx = 0;
+ args.gen = raid6_algo_find(args.gen_idx);
+ if (!args.gen)
+ return NULL;
+ }
- }
+ if (args.recov)
+ args.recov_idx++;
+ args.recov = raid6_recov_algo_find(args.recov_idx);
+ if (!args.recov) {
+ args.gen_idx++;
+ goto next_algo;
}
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "gen=%s recov=%s",
+ args.gen->name, args.recov->name);
+ return &args;
}
static struct kunit_case raid6_test_cases[] = {
- KUNIT_CASE(raid6_test),
+ KUNIT_CASE_PARAM(raid6_test, raid6_gen_params),
{},
};
@@ -150,6 +165,7 @@ static int raid6_suite_init(struct kunit_suite *suite)
{
prandom_seed_state(&rng, RAID6_KUNIT_SEED);
makedata(0, NDISKS - 1);
+ memset(&args, 0, sizeof(args));
return 0;
}
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 14/17] raid6_kunit: dynamically allocate data buffers using vmalloc
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (12 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 13/17] raid6_kunit: use KUNIT_CASE_PARAM Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 15/17] raid6_kunit: cleanup dataptr handling Christoph Hellwig
` (2 subsequent siblings)
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Use vmalloc for the data buffers instead of using static .data allocations.
This avoids wasting kernel memory after the test has run. vmalloc is used
instead of kmalloc to provide better out-of-bounds access checking, as in
other kunit tests.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/tests/raid6_kunit.c | 75 +++++++++++++++++++++++-------
1 file changed, 59 insertions(+), 16 deletions(-)
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index 7095952ad8e3..1793b952a595 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -7,19 +7,20 @@
#include <kunit/test.h>
#include <linux/prandom.h>
+#include <linux/vmalloc.h>
#include "../algos.h"
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
#define RAID6_KUNIT_SEED 42
+#define RAID6_KUNIT_MAX_FAILURES 2
#define NDISKS 16 /* Including P and Q */
static struct rnd_state rng;
static void *dataptrs[NDISKS];
-static char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-static char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-static char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+static void *test_buffers[NDISKS];
+static void *test_recov_buffers[RAID6_KUNIT_MAX_FAILURES];
struct test_args {
unsigned int recov_idx;
@@ -35,8 +36,8 @@ static void makedata(int start, int stop)
int i;
for (i = start; i <= stop; i++) {
- prandom_bytes_state(&rng, data[i], PAGE_SIZE);
- dataptrs[i] = data[i];
+ prandom_bytes_state(&rng, test_buffers[i], PAGE_SIZE);
+ dataptrs[i] = test_buffers[i];
}
}
@@ -55,12 +56,13 @@ static char member_type(int d)
static void test_recover(struct kunit *test, int faila, int failb)
{
const struct test_args *ta = test->param_value;
+ int i;
- memset(recovi, 0xf0, PAGE_SIZE);
- memset(recovj, 0xba, PAGE_SIZE);
+ for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
+ memset(test_recov_buffers[i], 0xf0, PAGE_SIZE);
- dataptrs[faila] = recovi;
- dataptrs[failb] = recovj;
+ dataptrs[faila] = test_recov_buffers[0];
+ dataptrs[failb] = test_recov_buffers[1];
if (faila > failb)
swap(faila, failb);
@@ -83,18 +85,20 @@ static void test_recover(struct kunit *test, int faila, int failb)
ta->recov->data2(NDISKS, PAGE_SIZE, faila, failb, dataptrs);
}
- KUNIT_EXPECT_MEMEQ_MSG(test, data[faila], recovi, PAGE_SIZE,
+ KUNIT_EXPECT_MEMEQ_MSG(test, test_buffers[faila], test_recov_buffers[0],
+ PAGE_SIZE,
"faila miscompared: %3d[%c] (failb=%3d[%c])\n",
faila, member_type(faila),
failb, member_type(failb));
- KUNIT_EXPECT_MEMEQ_MSG(test, data[failb], recovj, PAGE_SIZE,
+ KUNIT_EXPECT_MEMEQ_MSG(test, test_buffers[failb], test_recov_buffers[1],
+ PAGE_SIZE,
"failb miscompared: %3d[%c] (faila=%3d[%c])\n",
failb, member_type(failb),
faila, member_type(faila));
skip:
- dataptrs[faila] = data[faila];
- dataptrs[failb] = data[failb];
+ dataptrs[faila] = test_buffers[faila];
+ dataptrs[failb] = test_buffers[failb];
}
static void raid6_test(struct kunit *test)
@@ -103,8 +107,8 @@ static void raid6_test(struct kunit *test)
int i, j, p1, p2;
/* Nuke syndromes */
- memset(data[NDISKS - 2], 0xee, PAGE_SIZE);
- memset(data[NDISKS - 1], 0xee, PAGE_SIZE);
+ memset(test_buffers[NDISKS - 2], 0xee, PAGE_SIZE);
+ memset(test_buffers[NDISKS - 1], 0xee, PAGE_SIZE);
/* Generate assumed good syndrome */
ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, (void **)&dataptrs);
@@ -163,16 +167,55 @@ static struct kunit_case raid6_test_cases[] = {
static int raid6_suite_init(struct kunit_suite *suite)
{
+ int i;
+
prandom_seed_state(&rng, RAID6_KUNIT_SEED);
- makedata(0, NDISKS - 1);
memset(&args, 0, sizeof(args));
+
+ /*
+ * Allocate the test buffer using vmalloc() with a page-aligned length
+ * so that it is immediately followed by a guard page. This allows
+ * buffer overreads to be detected, even in assembly code.
+ */
+ for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++) {
+ test_recov_buffers[i] = vmalloc(PAGE_SIZE);
+ if (!test_recov_buffers[i])
+ goto out_free_recov_buffers;
+ }
+ for (i = 0; i < NDISKS; i++) {
+ test_buffers[i] = vmalloc(PAGE_SIZE);
+ if (!test_buffers[i])
+ goto out_free_buffers;
+ }
+
+ makedata(0, NDISKS - 1);
+
return 0;
+
+out_free_buffers:
+ for (i = 0; i < NDISKS; i++)
+ vfree(test_buffers[i]);
+out_free_recov_buffers:
+ for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
+ vfree(test_recov_buffers[i]);
+ return -ENOMEM;
+}
+
+static void raid6_suite_exit(struct kunit_suite *suite)
+{
+ int i;
+
+ for (i = 0; i < NDISKS; i++)
+ vfree(test_buffers[i]);
+ for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
+ vfree(test_recov_buffers[i]);
}
static struct kunit_suite raid6_test_suite = {
.name = "raid6",
.test_cases = raid6_test_cases,
.suite_init = raid6_suite_init,
+ .suite_exit = raid6_suite_exit,
};
kunit_test_suite(raid6_test_suite);
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 15/17] raid6_kunit: cleanup dataptr handling
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (13 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 14/17] raid6_kunit: dynamically allocate data buffers using vmalloc Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 16/17] raid6_kunit: randomize parameters and increase limits Christoph Hellwig
2026-03-24 6:40 ` [PATCH 17/17] raid6_kunit: randomize buffer alignment Christoph Hellwig
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Move the global dataptr array into test_recover() as all sites that fill
data or parity can use test_buffers directly, and this localizes the
override for the failed slots to the recovery testing routine.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/tests/raid6_kunit.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index 1793b952a595..ba6cfabc67a4 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -18,7 +18,6 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
#define NDISKS 16 /* Including P and Q */
static struct rnd_state rng;
-static void *dataptrs[NDISKS];
static void *test_buffers[NDISKS];
static void *test_recov_buffers[RAID6_KUNIT_MAX_FAILURES];
@@ -35,10 +34,8 @@ static void makedata(int start, int stop)
{
int i;
- for (i = start; i <= stop; i++) {
+ for (i = start; i <= stop; i++)
prandom_bytes_state(&rng, test_buffers[i], PAGE_SIZE);
- dataptrs[i] = test_buffers[i];
- }
}
static char member_type(int d)
@@ -56,11 +53,13 @@ static char member_type(int d)
static void test_recover(struct kunit *test, int faila, int failb)
{
const struct test_args *ta = test->param_value;
+ void *dataptrs[NDISKS];
int i;
for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
memset(test_recov_buffers[i], 0xf0, PAGE_SIZE);
+ memcpy(dataptrs, test_buffers, sizeof(dataptrs));
dataptrs[faila] = test_recov_buffers[0];
dataptrs[failb] = test_recov_buffers[1];
@@ -73,7 +72,7 @@ static void test_recover(struct kunit *test, int faila, int failb)
* is equivalent to a RAID-5 failure (XOR, then recompute Q).
*/
if (faila != NDISKS - 2)
- goto skip;
+ return;
/* P+Q failure. Just rebuild the syndrome. */
ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, dataptrs);
@@ -95,10 +94,6 @@ static void test_recover(struct kunit *test, int faila, int failb)
"failb miscompared: %3d[%c] (faila=%3d[%c])\n",
failb, member_type(failb),
faila, member_type(faila));
-
-skip:
- dataptrs[faila] = test_buffers[faila];
- dataptrs[failb] = test_buffers[failb];
}
static void raid6_test(struct kunit *test)
@@ -111,7 +106,7 @@ static void raid6_test(struct kunit *test)
memset(test_buffers[NDISKS - 1], 0xee, PAGE_SIZE);
/* Generate assumed good syndrome */
- ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, (void **)&dataptrs);
+ ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, test_buffers);
for (i = 0; i < NDISKS - 1; i++)
for (j = i + 1; j < NDISKS; j++)
@@ -124,10 +119,10 @@ static void raid6_test(struct kunit *test)
for (p2 = p1; p2 < NDISKS - 2; p2++) {
/* Simulate rmw run */
ta->gen->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
- (void **)&dataptrs);
+ test_buffers);
makedata(p1, p2);
ta->gen->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
- (void **)&dataptrs);
+ test_buffers);
for (i = 0; i < NDISKS - 1; i++)
for (j = i + 1; j < NDISKS; j++)
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 16/17] raid6_kunit: randomize parameters and increase limits
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (14 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 15/17] raid6_kunit: cleanup dataptr handling Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
2026-03-24 6:40 ` [PATCH 17/17] raid6_kunit: randomize buffer alignment Christoph Hellwig
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
The current test has double-quadratic behavior in the selection for
the updated ("XORed") disks, and in the selection of updated pointers,
which makes scaling it to more tests difficult. At the same time it
only ever tests with the maximum number of disks, which leaves a
coverage hole for smaller ones.
Fix this by randomizing the total number of disks, the failed disks and the
regions to update, and by increasing the upper limit on the number of test
disks.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/tests/raid6_kunit.c | 188 +++++++++++++++++++----------
1 file changed, 124 insertions(+), 64 deletions(-)
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index ba6cfabc67a4..a0a473643e91 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -13,13 +13,15 @@
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
#define RAID6_KUNIT_SEED 42
+#define RAID6_KUNIT_NUM_TEST_ITERS 10
+#define RAID6_KUNIT_MAX_BUFFERS 64 /* Including P and Q */
#define RAID6_KUNIT_MAX_FAILURES 2
-
-#define NDISKS 16 /* Including P and Q */
+#define RAID6_KUNIT_MAX_BYTES PAGE_SIZE
static struct rnd_state rng;
-static void *test_buffers[NDISKS];
+static void *test_buffers[RAID6_KUNIT_MAX_BUFFERS];
static void *test_recov_buffers[RAID6_KUNIT_MAX_FAILURES];
+static size_t test_buflen;
struct test_args {
unsigned int recov_idx;
@@ -30,105 +32,162 @@ struct test_args {
static struct test_args args;
+static u32 rand32(void)
+{
+ return prandom_u32_state(&rng);
+}
+
+/* Generate a random length that is a multiple of 512. */
+static unsigned int random_length(unsigned int max_length)
+{
+ return (rand32() % (max_length + 1)) & ~511;
+}
+
static void makedata(int start, int stop)
{
int i;
for (i = start; i <= stop; i++)
- prandom_bytes_state(&rng, test_buffers[i], PAGE_SIZE);
+ prandom_bytes_state(&rng, test_buffers[i], test_buflen);
}
-static char member_type(int d)
+static char member_type(unsigned int nr_buffers, int d)
{
- switch (d) {
- case NDISKS-2:
+ if (d == nr_buffers - 2)
return 'P';
- case NDISKS-1:
+ if (d == nr_buffers - 1)
return 'Q';
- default:
- return 'D';
- }
+ return 'D';
}
-static void test_recover(struct kunit *test, int faila, int failb)
+static void test_recover_one(struct kunit *test, unsigned int nr_buffers,
+ unsigned int len, int faila, int failb)
{
const struct test_args *ta = test->param_value;
- void *dataptrs[NDISKS];
+ void *dataptrs[RAID6_KUNIT_MAX_BUFFERS];
int i;
+ if (faila > failb)
+ swap(faila, failb);
+
for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
- memset(test_recov_buffers[i], 0xf0, PAGE_SIZE);
+ memset(test_recov_buffers[i], 0xf0, test_buflen);
memcpy(dataptrs, test_buffers, sizeof(dataptrs));
dataptrs[faila] = test_recov_buffers[0];
dataptrs[failb] = test_recov_buffers[1];
- if (faila > failb)
- swap(faila, failb);
-
- if (failb == NDISKS - 1) {
+ if (failb == nr_buffers - 1) {
/*
* We don't implement the data+Q failure scenario, since it
* is equivalent to a RAID-5 failure (XOR, then recompute Q).
*/
- if (faila != NDISKS - 2)
+ if (WARN_ON_ONCE(faila != nr_buffers - 2))
return;
/* P+Q failure. Just rebuild the syndrome. */
- ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, dataptrs);
- } else if (failb == NDISKS - 2) {
+ ta->gen->gen_syndrome(nr_buffers, len, dataptrs);
+ } else if (failb == nr_buffers - 2) {
/* data+P failure. */
- ta->recov->datap(NDISKS, PAGE_SIZE, faila, dataptrs);
+ ta->recov->datap(nr_buffers, len, faila, dataptrs);
} else {
/* data+data failure. */
- ta->recov->data2(NDISKS, PAGE_SIZE, faila, failb, dataptrs);
+ ta->recov->data2(nr_buffers, len, faila, failb, dataptrs);
}
KUNIT_EXPECT_MEMEQ_MSG(test, test_buffers[faila], test_recov_buffers[0],
- PAGE_SIZE,
- "faila miscompared: %3d[%c] (failb=%3d[%c])\n",
- faila, member_type(faila),
- failb, member_type(failb));
+ len,
+ "faila miscompared: %3d[%c] buffers %u len %u (failb=%3d[%c])\n",
+ faila, member_type(nr_buffers, faila),
+ nr_buffers, len,
+ failb, member_type(nr_buffers, failb));
KUNIT_EXPECT_MEMEQ_MSG(test, test_buffers[failb], test_recov_buffers[1],
- PAGE_SIZE,
- "failb miscompared: %3d[%c] (faila=%3d[%c])\n",
- failb, member_type(failb),
- faila, member_type(faila));
+ len,
+ "failb miscompared: %3d[%c] buffers %u len %u (faila=%3d[%c])\n",
+ failb, member_type(nr_buffers, failb),
+ nr_buffers, len,
+ faila, member_type(nr_buffers, faila));
}
-static void raid6_test(struct kunit *test)
+static void test_recover(struct kunit *test, unsigned int nr_buffers,
+ unsigned int len)
+{
+ int iterations, i;
+
+ /* Test P+Q recovery */
+ test_recover_one(test, nr_buffers, len, nr_buffers - 2, nr_buffers - 1);
+
+ /* Test data+P recovery */
+ for (i = 0; i < nr_buffers - 2; i++)
+ test_recover_one(test, nr_buffers, len, i, nr_buffers - 2);
+
+ /* Test data+data recovery using random sampling */
+ iterations = nr_buffers * 2; /* should provide good enough coverage */
+ for (i = 0; i < iterations; i++) {
+ int faila = rand32() % (nr_buffers - 2), failb;
+
+ do {
+ failb = rand32() % (nr_buffers - 2);
+ } while (failb == faila);
+
+ test_recover_one(test, nr_buffers, len, faila, failb);
+ }
+}
+
+/* Simulate rmw run */
+static void test_rmw_one(struct kunit *test, unsigned int nr_buffers,
+ unsigned int len, int p1, int p2)
+{
+ const struct test_args *ta = test->param_value;
+
+ ta->gen->xor_syndrome(nr_buffers, p1, p2, len, test_buffers);
+ makedata(p1, p2);
+ ta->gen->xor_syndrome(nr_buffers, p1, p2, len, test_buffers);
+ test_recover(test, nr_buffers, len);
+}
+
+static void test_rmw(struct kunit *test, unsigned int nr_buffers,
+ unsigned int len)
+{
+ int iterations = nr_buffers / 2, i;
+
+ for (i = 0; i < iterations; i++) {
+ int p1 = rand32() % (nr_buffers - 2);
+ int p2 = rand32() % (nr_buffers - 2);
+
+ if (p2 < p1)
+ swap(p1, p2);
+ test_rmw_one(test, nr_buffers, len, p1, p2);
+ }
+}
+
+static void raid6_test_one(struct kunit *test)
{
const struct test_args *ta = test->param_value;
- int i, j, p1, p2;
+ /* including P/Q we need at least three buffers */
+ unsigned int nr_buffers =
+ (rand32() % (RAID6_KUNIT_MAX_BUFFERS - 2)) + 3;
+ unsigned int len = random_length(RAID6_KUNIT_MAX_BYTES);
/* Nuke syndromes */
- memset(test_buffers[NDISKS - 2], 0xee, PAGE_SIZE);
- memset(test_buffers[NDISKS - 1], 0xee, PAGE_SIZE);
+ memset(test_buffers[nr_buffers - 2], 0xee, test_buflen);
+ memset(test_buffers[nr_buffers - 1], 0xee, test_buflen);
/* Generate assumed good syndrome */
- ta->gen->gen_syndrome(NDISKS, PAGE_SIZE, test_buffers);
-
- for (i = 0; i < NDISKS - 1; i++)
- for (j = i + 1; j < NDISKS; j++)
- test_recover(test, i, j);
-
- if (!ta->gen->xor_syndrome)
- return;
-
- for (p1 = 0; p1 < NDISKS - 2; p1++) {
- for (p2 = p1; p2 < NDISKS - 2; p2++) {
- /* Simulate rmw run */
- ta->gen->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
- test_buffers);
- makedata(p1, p2);
- ta->gen->xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
- test_buffers);
-
- for (i = 0; i < NDISKS - 1; i++)
- for (j = i + 1; j < NDISKS; j++)
- test_recover(test, i, j);
- }
- }
+ ta->gen->gen_syndrome(nr_buffers, len, test_buffers);
+
+ test_recover(test, nr_buffers, len);
+
+ if (ta->gen->xor_syndrome)
+ test_rmw(test, nr_buffers, len);
+}
+
+static void raid6_test(struct kunit *test)
+{
+ int i;
+
+ for (i = 0; i < RAID6_KUNIT_NUM_TEST_ITERS; i++)
+ raid6_test_one(test);
}
static const void *raid6_gen_params(struct kunit *test, const void *prev,
@@ -172,23 +231,24 @@ static int raid6_suite_init(struct kunit_suite *suite)
* so that it is immediately followed by a guard page. This allows
* buffer overreads to be detected, even in assembly code.
*/
+ test_buflen = round_up(RAID6_KUNIT_MAX_BYTES, PAGE_SIZE);
for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++) {
- test_recov_buffers[i] = vmalloc(PAGE_SIZE);
+ test_recov_buffers[i] = vmalloc(test_buflen);
if (!test_recov_buffers[i])
goto out_free_recov_buffers;
}
- for (i = 0; i < NDISKS; i++) {
- test_buffers[i] = vmalloc(PAGE_SIZE);
+ for (i = 0; i < RAID6_KUNIT_MAX_BUFFERS; i++) {
+ test_buffers[i] = vmalloc(test_buflen);
if (!test_buffers[i])
goto out_free_buffers;
}
- makedata(0, NDISKS - 1);
+ makedata(0, RAID6_KUNIT_MAX_BUFFERS - 1);
return 0;
out_free_buffers:
- for (i = 0; i < NDISKS; i++)
+ for (i = 0; i < RAID6_KUNIT_MAX_BUFFERS; i++)
vfree(test_buffers[i]);
out_free_recov_buffers:
for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
@@ -200,7 +260,7 @@ static void raid6_suite_exit(struct kunit_suite *suite)
{
int i;
- for (i = 0; i < NDISKS; i++)
+ for (i = 0; i < RAID6_KUNIT_MAX_BUFFERS; i++)
vfree(test_buffers[i]);
for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
vfree(test_recov_buffers[i]);
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH 17/17] raid6_kunit: randomize buffer alignment
2026-03-24 6:40 cleanup the RAID6 P/Q library Christoph Hellwig
` (15 preceding siblings ...)
2026-03-24 6:40 ` [PATCH 16/17] raid6_kunit: randomize parameters and increase limits Christoph Hellwig
@ 2026-03-24 6:40 ` Christoph Hellwig
16 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2026-03-24 6:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Huacai Chen,
WANG Xuerui, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Paul Walmsley,
Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Heiko Carstens,
Vasily Gorbik, Alexander Gordeev, Christian Borntraeger,
Sven Schnelle, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, x86, H. Peter Anvin, Herbert Xu, Dan Williams,
Chris Mason, David Sterba, Arnd Bergmann, Song Liu, Yu Kuai,
Li Nan, linux-kernel, linux-arm-kernel, loongarch, linuxppc-dev,
linux-riscv, linux-s390, linux-crypto, linux-btrfs, linux-arch,
linux-raid
Add code to apply random alignment to the buffers to test the case where
they are not page aligned, and to move the buffers to the end of the
allocation so that they are next to the vmalloc guard page.
This does not include the recovery buffers as the recovery requires
page alignment.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
lib/raid/raid6/tests/raid6_kunit.c | 41 +++++++++++++++++++++++++-----
1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/lib/raid/raid6/tests/raid6_kunit.c b/lib/raid/raid6/tests/raid6_kunit.c
index a0a473643e91..d2fd4a9b74d4 100644
--- a/lib/raid/raid6/tests/raid6_kunit.c
+++ b/lib/raid/raid6/tests/raid6_kunit.c
@@ -20,6 +20,7 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static struct rnd_state rng;
static void *test_buffers[RAID6_KUNIT_MAX_BUFFERS];
+static void *aligned_buffers[RAID6_KUNIT_MAX_BUFFERS];
static void *test_recov_buffers[RAID6_KUNIT_MAX_FAILURES];
static size_t test_buflen;
@@ -43,6 +44,14 @@ static unsigned int random_length(unsigned int max_length)
return (rand32() % (max_length + 1)) & ~511;
}
+/* Generate a random alignment that is a multiple of 64. */
+static unsigned int random_alignment(unsigned int max_alignment)
+{
+ if (max_alignment == 0)
+ return 0;
+ return (rand32() % (max_alignment + 1)) & ~63;
+}
+
static void makedata(int start, int stop)
{
int i;
@@ -73,7 +82,7 @@ static void test_recover_one(struct kunit *test, unsigned int nr_buffers,
for (i = 0; i < RAID6_KUNIT_MAX_FAILURES; i++)
memset(test_recov_buffers[i], 0xf0, test_buflen);
- memcpy(dataptrs, test_buffers, sizeof(dataptrs));
+ memcpy(dataptrs, aligned_buffers, sizeof(dataptrs));
dataptrs[faila] = test_recov_buffers[0];
dataptrs[failb] = test_recov_buffers[1];
@@ -95,13 +104,13 @@ static void test_recover_one(struct kunit *test, unsigned int nr_buffers,
ta->recov->data2(nr_buffers, len, faila, failb, dataptrs);
}
- KUNIT_EXPECT_MEMEQ_MSG(test, test_buffers[faila], test_recov_buffers[0],
+ KUNIT_EXPECT_MEMEQ_MSG(test, aligned_buffers[faila], dataptrs[faila],
len,
"faila miscompared: %3d[%c] buffers %u len %u (failb=%3d[%c])\n",
faila, member_type(nr_buffers, faila),
nr_buffers, len,
failb, member_type(nr_buffers, failb));
- KUNIT_EXPECT_MEMEQ_MSG(test, test_buffers[failb], test_recov_buffers[1],
+ KUNIT_EXPECT_MEMEQ_MSG(test, aligned_buffers[failb], dataptrs[failb],
len,
"failb miscompared: %3d[%c] buffers %u len %u (faila=%3d[%c])\n",
failb, member_type(nr_buffers, failb),
@@ -140,9 +149,9 @@ static void test_rmw_one(struct kunit *test, unsigned int nr_buffers,
{
const struct test_args *ta = test->param_value;
- ta->gen->xor_syndrome(nr_buffers, p1, p2, len, test_buffers);
+ ta->gen->xor_syndrome(nr_buffers, p1, p2, len, aligned_buffers);
makedata(p1, p2);
- ta->gen->xor_syndrome(nr_buffers, p1, p2, len, test_buffers);
+ ta->gen->xor_syndrome(nr_buffers, p1, p2, len, aligned_buffers);
test_recover(test, nr_buffers, len);
}
@@ -168,13 +177,33 @@ static void raid6_test_one(struct kunit *test)
unsigned int nr_buffers =
(rand32() % (RAID6_KUNIT_MAX_BUFFERS - 2)) + 3;
unsigned int len = random_length(RAID6_KUNIT_MAX_BYTES);
+ unsigned int max_alignment;
+ int i;
/* Nuke syndromes */
memset(test_buffers[nr_buffers - 2], 0xee, test_buflen);
memset(test_buffers[nr_buffers - 1], 0xee, test_buflen);
+ /*
+ * If we're not using the entire buffer size, inject randomized alignment
+ * into the buffer.
+ */
+ max_alignment = RAID6_KUNIT_MAX_BYTES - len;
+ if (rand32() % 2 == 0) {
+ /* Use random alignments mod 64 */
+ for (i = 0; i < nr_buffers; i++)
+ aligned_buffers[i] = test_buffers[i] +
+ random_alignment(max_alignment);
+ } else {
+ /* Go up to the guard page, to catch buffer overreads */
+ unsigned int align = test_buflen - len;
+
+ for (i = 0; i < nr_buffers; i++)
+ aligned_buffers[i] = test_buffers[i] + align;
+ }
+
/* Generate assumed good syndrome */
- ta->gen->gen_syndrome(nr_buffers, len, test_buffers);
+ ta->gen->gen_syndrome(nr_buffers, len, aligned_buffers);
test_recover(test, nr_buffers, len);
--
2.47.3
^ permalink raw reply related [flat|nested] 22+ messages in thread