* [PATCH v1 00/13] Clean up code style
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
Clean up the comment style.
No functional changes.
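
For reference, a representative before/after of the spacing and comment
changes (illustrative excerpt; the rules are the operator-spacing and
block-comment rules from Documentation/process/coding-style.rst):

	/* Before */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */
	for ( d = 0 ; d < bytes ; d += 16 )

	/* After */
	p = dptr[z0 + 1];	/* XOR parity */
	q = dptr[z0 + 2];	/* RS syndrome */
	for (d = 0; d < bytes; d += 16)

	/* Before: a multi-line comment
	   continued in the old style */

	/*
	 * After: a multi-line comment
	 * in the kernel style.
	 */
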
Xichao Zhao (13):
lib/raid6: Clean up code style in sse2.c
lib/raid6: Clean up code style in sse1.c
lib/raid6: Clean up code style in rvv.c
lib/raid6: Clean up code style in recov_ssse3.c
lib/raid6: Clean up code style in recov_s390xc.c
lib/raid6: Clean up code style in recov_avx512.c
lib/raid6: Clean up code style in recov_avx2.c
lib/raid6: Clean up code style in recov.c
lib/raid6: Clean up code style in mmx.c
lib/raid6: Clean up code style in loongarch_simd.c
lib/raid6: Clean up code style in avx512.c
lib/raid6: Clean up code style in algos.c
lib/raid6: Clean up code style in avx2.c
lib/raid6/algos.c | 2 +-
lib/raid6/avx2.c | 122 +++++++++++++++++------------------
lib/raid6/avx512.c | 94 +++++++++++++--------------
lib/raid6/loongarch_simd.c | 116 +++++++++++++++++-----------------
lib/raid6/mmx.c | 24 +++----
lib/raid6/recov.c | 44 +++++++------
lib/raid6/recov_avx2.c | 28 +++++----
lib/raid6/recov_avx512.c | 20 +++---
lib/raid6/recov_s390xc.c | 36 ++++++-----
lib/raid6/recov_ssse3.c | 36 ++++++-----
lib/raid6/rvv.c | 16 ++---
lib/raid6/sse1.c | 30 ++++-----
lib/raid6/sse2.c | 126 ++++++++++++++++++-------------------
13 files changed, 356 insertions(+), 338 deletions(-)
--
2.34.1
* [PATCH v1 01/13] lib/raid6: Clean up code style in sse2.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/sse2.c | 126 +++++++++++++++++++++++------------------------
1 file changed, 63 insertions(+), 63 deletions(-)
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 2930220249c9..662e0c50fc57 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -40,21 +40,21 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
- for ( d = 0 ; d < bytes ; d += 16 ) {
+ for (d = 0; d < bytes; d += 16) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0 - 1][d]));
asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
- asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
- for ( z = z0-2 ; z >= 0 ; z-- ) {
+ asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0 - 1][d]));
+ for (z = z0 - 2; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
@@ -92,19 +92,19 @@ static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
- for ( d = 0 ; d < bytes ; d += 16 ) {
+ for (d = 0; d < bytes; d += 16) {
asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
asm volatile("pxor %xmm4,%xmm2");
/* P/Q data pages */
- for ( z = z0-1 ; z >= start ; z-- ) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
@@ -115,7 +115,7 @@ static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
asm volatile("pxor %xmm5,%xmm4");
}
/* P/Q left side optimization */
- for ( z = start-1 ; z >= 0 ; z-- ) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
@@ -150,8 +150,8 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -160,13 +160,13 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
/* We uniformly assume a single prefetch covers at least 32 bytes */
- for ( d = 0 ; d < bytes ; d += 32 ) {
+ for (d = 0; d < bytes; d += 32) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
+ asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d + 16])); /* P[1] */
asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
+ for (z = z0 - 1; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
@@ -177,7 +177,7 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
+ asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d + 16]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm5,%xmm4");
@@ -203,22 +203,22 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
- for ( d = 0 ; d < bytes ; d += 32 ) {
+ for (d = 0; d < bytes; d += 32) {
asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+ asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d + 16]));
asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
- asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+ asm volatile("movdqa %0,%%xmm3" : : "m" (p[d + 16]));
asm volatile("pxor %xmm4,%xmm2");
asm volatile("pxor %xmm6,%xmm3");
/* P/Q data pages */
- for ( z = z0-1 ; z >= start ; z-- ) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pcmpgtb %xmm4,%xmm5");
@@ -230,14 +230,14 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d + 16]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
}
/* P/Q left side optimization */
- for ( z = start-1 ; z >= 0 ; z-- ) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pcmpgtb %xmm4,%xmm5");
@@ -250,12 +250,12 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
asm volatile("pxor %xmm7,%xmm6");
}
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
- asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+ asm volatile("pxor %0,%%xmm6" : : "m" (q[d + 16]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
- asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
+ asm volatile("movdqa %%xmm6,%0" : "=m" (q[d + 16]));
asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
- asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
+ asm volatile("movdqa %%xmm3,%0" : "=m" (p[d + 16]));
}
asm volatile("sfence" : : : "memory");
@@ -282,8 +282,8 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -301,11 +301,11 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */
asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */
- for ( d = 0 ; d < bytes ; d += 64 ) {
- for ( z = z0 ; z >= 0 ; z-- ) {
+ for (d = 0; d < bytes; d += 64) {
+ for (z = z0; z >= 0; z--) {
/* The second prefetch seems to improve performance... */
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d + 32]));
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("pcmpgtb %xmm12,%xmm13");
@@ -323,9 +323,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
- asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
- asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d + 16]));
+ asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d + 32]));
+ asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d + 48]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm13,%xmm10");
@@ -341,11 +341,11 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
asm volatile("pxor %xmm2,%xmm2");
- asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+ asm volatile("movntdq %%xmm3,%0" : "=m" (p[d + 16]));
asm volatile("pxor %xmm3,%xmm3");
- asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
+ asm volatile("movntdq %%xmm10,%0" : "=m" (p[d + 32]));
asm volatile("pxor %xmm10,%xmm10");
- asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+ asm volatile("movntdq %%xmm11,%0" : "=m" (p[d + 48]));
asm volatile("pxor %xmm11,%xmm11");
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
asm volatile("pxor %xmm4,%xmm4");
@@ -369,8 +369,8 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
@@ -378,21 +378,21 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
for ( d = 0 ; d < bytes ; d += 64 ) {
asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
- asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
- asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
+ asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d + 16]));
+ asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d + 32]));
+ asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d + 48]));
asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
- asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
- asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
- asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
+ asm volatile("movdqa %0,%%xmm3" : : "m" (p[d + 16]));
+ asm volatile("movdqa %0,%%xmm10" : : "m" (p[d + 32]));
+ asm volatile("movdqa %0,%%xmm11" : : "m" (p[d + 48]));
asm volatile("pxor %xmm4,%xmm2");
asm volatile("pxor %xmm6,%xmm3");
asm volatile("pxor %xmm12,%xmm10");
asm volatile("pxor %xmm14,%xmm11");
/* P/Q data pages */
- for ( z = z0-1 ; z >= start ; z-- ) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d + 32]));
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pxor %xmm13,%xmm13");
@@ -414,9 +414,9 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
- asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
- asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d + 16]));
+ asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d + 32]));
+ asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d + 48]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm13,%xmm10");
@@ -427,7 +427,7 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
asm volatile("pxor %xmm15,%xmm14");
}
asm volatile("prefetchnta %0" :: "m" (q[d]));
- asm volatile("prefetchnta %0" :: "m" (q[d+32]));
+ asm volatile("prefetchnta %0" :: "m" (q[d + 32]));
/* P/Q left side optimization */
for ( z = start-1 ; z >= 0 ; z-- ) {
asm volatile("pxor %xmm5,%xmm5");
@@ -452,17 +452,17 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
asm volatile("pxor %xmm15,%xmm14");
}
asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
- asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
- asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+ asm volatile("movntdq %%xmm3,%0" : "=m" (p[d + 16]));
+ asm volatile("movntdq %%xmm10,%0" : "=m" (p[d + 32]));
+ asm volatile("movntdq %%xmm11,%0" : "=m" (p[d + 48]));
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
- asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
- asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
- asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
+ asm volatile("pxor %0,%%xmm6" : : "m" (q[d + 16]));
+ asm volatile("pxor %0,%%xmm12" : : "m" (q[d + 32]));
+ asm volatile("pxor %0,%%xmm14" : : "m" (q[d + 48]));
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
- asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
- asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
+ asm volatile("movntdq %%xmm6,%0" : "=m" (q[d + 16]));
+ asm volatile("movntdq %%xmm12,%0" : "=m" (q[d + 32]));
+ asm volatile("movntdq %%xmm14,%0" : "=m" (q[d + 48]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
--
2.34.1
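
An aside for readers following the asm these hunks reformat: the
pcmpgtb/paddb/pand/pxor sequence is a byte-wise GF(2^8) multiply-by-two
(reduction polynomial 0x11d, hence the x1d constant), applied in a
Horner loop over the data disks. A scalar sketch of the same
computation, with made-up names rather than the kernel's:

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * GF(2^8) multiply-by-2; 0x1d is the low byte of the reduction
	 * polynomial x^8 + x^4 + x^3 + x^2 + 1, the constant the asm
	 * loads from raid6_sse_constants.x1d.
	 */
	static inline uint8_t gf_mul2(uint8_t v)
	{
		return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0x00));
	}

	/*
	 * P/Q generation by Horner's rule, one byte at a time:
	 * P = D0 ^ ... ^ Dz0, Q = sum over z of 2^z * Dz in GF(2^8).
	 */
	static void gen_syndrome_sketch(int disks, size_t bytes, uint8_t **dptr)
	{
		int z0 = disks - 3;		/* highest data disk */
		uint8_t *p = dptr[z0 + 1];	/* XOR parity */
		uint8_t *q = dptr[z0 + 2];	/* RS syndrome */

		for (size_t d = 0; d < bytes; d++) {
			uint8_t wp = dptr[z0][d];	/* running P */
			uint8_t wq = wp;		/* running Q */

			for (int z = z0 - 1; z >= 0; z--) {
				wp ^= dptr[z][d];
				wq = gf_mul2(wq) ^ dptr[z][d];	/* Horner step */
			}
			p[d] = wp;
			q[d] = wq;
		}
	}

The _xor_syndrome variants do the same restricted to disks
[start, stop]: the "P/Q data pages" loop folds the data in, while the
"P/Q left side optimization" loop only keeps doubling the running Q,
since a page below start contributes nothing to the XOR but the Q
weights still have to line up.
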
* [PATCH v1 02/13] lib/raid6: Clean up code style in sse1.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/sse1.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c
index 692fa3a93bf0..42fc33b0f364 100644
--- a/lib/raid6/sse1.c
+++ b/lib/raid6/sse1.c
@@ -44,21 +44,21 @@ static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
- for ( d = 0 ; d < bytes ; d += 8 ) {
+ for (d = 0; d < bytes; d += 8) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0 - 1][d]));
asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
- for ( z = z0-2 ; z >= 0 ; z-- ) {
+ asm volatile("movq %0,%%mm6" : : "m" (dptr[z0 - 1][d]));
+ for (z = z0 - 2; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("paddb %mm4,%mm4");
@@ -103,8 +103,8 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -113,13 +113,13 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %mm7,%mm7"); /* Zero temp */
/* We uniformly assume a single prefetch covers at least 16 bytes */
- for ( d = 0 ; d < bytes ; d += 16 ) {
+ for (d = 0; d < bytes; d += 16) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
+ asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d + 8])); /* P[1] */
asm volatile("movq %mm2,%mm4"); /* Q[0] */
asm volatile("movq %mm3,%mm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
+ for (z = z0 - 1; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("pcmpgtb %mm6,%mm7");
@@ -130,7 +130,7 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm7,%mm6");
asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
- asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
+ asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d + 8]));
asm volatile("pxor %mm5,%mm2");
asm volatile("pxor %mm7,%mm3");
asm volatile("pxor %mm5,%mm4");
@@ -139,9 +139,9 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %mm7,%mm7");
}
asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
+ asm volatile("movntq %%mm3,%0" : "=m" (p[d + 8]));
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
- asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
+ asm volatile("movntq %%mm6,%0" : "=m" (q[d + 8]));
}
asm volatile("sfence" : :: "memory");
--
2.34.1
* [PATCH v1 03/13] lib/raid6: Clean up code style in rvv.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/rvv.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/lib/raid6/rvv.c b/lib/raid6/rvv.c
index 7d82efa5b14f..0446872e9390 100644
--- a/lib/raid6/rvv.c
+++ b/lib/raid6/rvv.c
@@ -31,8 +31,8 @@ static void raid6_rvv1_gen_syndrome_real(int disks, unsigned long bytes, void **
int z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0 + 1]; /* XOR parity */
- q = dptr[z0 + 2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
asm volatile (".option push\n"
".option arch,+v\n"
@@ -53,7 +53,7 @@ static void raid6_rvv1_gen_syndrome_real(int disks, unsigned long bytes, void **
[wp0]"r"(&dptr[z0][d + 0 * NSIZE])
);
- for (z = z0 - 1 ; z >= 0 ; z--) {
+ for (z = z0 - 1; z >= 0; z--) {
/*
* w2$$ = MASK(wq$$);
* w1$$ = SHLBYTE(wq$$);
@@ -115,7 +115,7 @@ static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,
);
/* v0:wp0, v1:wq0, v2:wd0/w20, v3:w10 */
- for (d = 0 ; d < bytes ; d += NSIZE * 1) {
+ for (d = 0; d < bytes; d += NSIZE * 1) {
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
asm volatile (".option push\n"
".option arch,+v\n"
@@ -202,8 +202,8 @@ static void raid6_rvv2_gen_syndrome_real(int disks, unsigned long bytes, void **
int z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0 + 1]; /* XOR parity */
- q = dptr[z0 + 2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
asm volatile (".option push\n"
".option arch,+v\n"
@@ -421,7 +421,7 @@ static void raid6_rvv4_gen_syndrome_real(int disks, unsigned long bytes, void **
unsigned long vl, d;
int z, z0;
- z0 = disks - 3; /* Highest data disk */
+ z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
q = dptr[z0 + 2]; /* RS syndrome */
@@ -731,7 +731,7 @@ static void raid6_rvv8_gen_syndrome_real(int disks, unsigned long bytes, void **
unsigned long vl, d;
int z, z0;
- z0 = disks - 3; /* Highest data disk */
+ z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
q = dptr[z0 + 2]; /* RS syndrome */
--
2.34.1
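
A note on the MASK()/SHLBYTE() identities quoted in the comments above:
they come from the generic integer implementation (lib/raid6/int.uc).
Per byte they reduce to the following (scalar sketch; the helper names
here are illustrative, not the kernel's):

	#include <stdint.h>

	/*
	 * MASK(x):    0xff where the byte's top bit is set, else 0x00.
	 * SHLBYTE(x): shift left by one, dropping the carry out of the
	 *             byte lane.
	 * Together they form the GF(2^8) multiply-by-2 step:
	 *     mul2(x) = SHLBYTE(x) ^ (MASK(x) & 0x1d)
	 */
	static inline uint8_t mask_sketch(uint8_t x)
	{
		return (x & 0x80) ? 0xff : 0x00;
	}

	static inline uint8_t shlbyte_sketch(uint8_t x)
	{
		return (uint8_t)((x << 1) & 0xfe);
	}
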
* [PATCH v1 04/13] lib/raid6: Clean up code style in recov_ssse3.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
Clean up the comment style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/recov_ssse3.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 2e849185c32b..2e9f372f8b43 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -23,26 +23,28 @@ static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data pages
- Use the dead data pages as temporary storage for
- delta p and delta q */
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
dp = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-2] = dp;
+ ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
- ptrs[disks-2] = p;
- ptrs[disks-1] = q;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
@@ -197,20 +199,22 @@ static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
- /* Compute syndrome with zero for the missing data page
- Use the dead data page as temporary storage for delta q */
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
dq = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
- ptrs[disks-1] = q;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
--
2.34.1
* [PATCH v1 05/13] lib/raid6: Clean up code style in recov_s390xc.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
Clean up the comment style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/recov_s390xc.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/lib/raid6/recov_s390xc.c b/lib/raid6/recov_s390xc.c
index 487018f81192..f1b329ab0229 100644
--- a/lib/raid6/recov_s390xc.c
+++ b/lib/raid6/recov_s390xc.c
@@ -27,26 +27,28 @@ static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
const u8 *qmul; /* Q multiplier table (for both) */
int i;
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data pages
- Use the dead data pages as temporary storage for
- delta p and delta q */
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
dp = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-2] = dp;
+ ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
- ptrs[disks-2] = p;
- ptrs[disks-1] = q;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
@@ -75,20 +77,22 @@ static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
const u8 *qmul; /* Q multiplier table */
int i;
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
- /* Compute syndrome with zero for the missing data page
- Use the dead data page as temporary storage for delta q */
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
dq = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
- ptrs[disks-1] = q;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
--
2.34.1
* [PATCH v1 06/13] lib/raid6: Clean up code style in recov_avx512.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/recov_avx512.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/lib/raid6/recov_avx512.c b/lib/raid6/recov_avx512.c
index 7986120ca444..00ad10a930c3 100644
--- a/lib/raid6/recov_avx512.c
+++ b/lib/raid6/recov_avx512.c
@@ -27,8 +27,8 @@ static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
const u8 *qmul; /* Q multiplier table (for both) */
const u8 x0f = 0x0f;
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data pages
@@ -38,18 +38,18 @@ static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
dp = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-2] = dp;
+ ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
- ptrs[disks-2] = p;
- ptrs[disks-1] = q;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
@@ -229,8 +229,8 @@ static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
const u8 *qmul; /* Q multiplier table */
const u8 x0f = 0x0f;
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data page
@@ -239,13 +239,13 @@ static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
dq = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
- ptrs[disks-1] = q;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
--
2.34.1
* [PATCH v1 07/13] lib/raid6: Clean up code style in recov_avx2.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
Clean up the comment style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/recov_avx2.c | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
index 97d598d2535c..9cfd0aff11e3 100644
--- a/lib/raid6/recov_avx2.c
+++ b/lib/raid6/recov_avx2.c
@@ -21,12 +21,14 @@ static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
const u8 *qmul; /* Q multiplier table (for both) */
const u8 x0f = 0x0f;
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data pages
- Use the dead data pages as temporary storage for
- delta p and delta q */
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
dp = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
ptrs[disks-2] = dp;
@@ -190,20 +192,22 @@ static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
const u8 *qmul; /* Q multiplier table */
const u8 x0f = 0x0f;
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
- /* Compute syndrome with zero for the missing data page
- Use the dead data page as temporary storage for delta q */
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
dq = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
- ptrs[disks-1] = q;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
--
2.34.1
* [PATCH v1 08/13] lib/raid6: Clean up code style in recov.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
Clean up the comment style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/recov.c | 44 +++++++++++++++++++++++++-------------------
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index b5e47c008b41..bccf459c3914 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -24,26 +24,28 @@ static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data pages
- Use the dead data pages as temporary storage for
- delta p and delta q */
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
dp = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-2] = dp;
+ ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
- ptrs[disks-2] = p;
- ptrs[disks-1] = q;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
@@ -66,20 +68,22 @@ static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
- /* Compute syndrome with zero for the missing data page
- Use the dead data page as temporary storage for delta q */
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
dq = (u8 *)ptrs[faila];
ptrs[faila] = raid6_get_zero_page();
- ptrs[disks-1] = dq;
+ ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
- ptrs[disks-1] = q;
+ ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
@@ -117,9 +121,11 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs
/* P+Q failure. Just rebuild the syndrome. */
raid6_call.gen_syndrome(disks, bytes, ptrs);
} else {
- /* data+Q failure. Reconstruct data from P,
- then rebuild syndrome. */
- /* NOT IMPLEMENTED - equivalent to RAID-5 */
+ /*
+ * data+Q failure. Reconstruct data from P,
+ * then rebuild syndrome.
+ * NOT IMPLEMENTED - equivalent to RAID-5
+ */
}
} else {
if ( failb == disks-2 ) {
--
2.34.1
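
For context on the pointer-table shuffle these recovery patches keep
touching: running gen_syndrome() with zero pages substituted for the
lost blocks, and the dead pages wired into the P/Q slots, leaves the
zero-substituted P'/Q' in dp/dq. A scalar sketch of the math the
two-data path then performs (hypothetical function; pbmul/qmul are the
multiplier tables set up as in the code above):

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * On entry dp/dq hold P'/Q' computed with zeros at faila/failb:
	 *     p ^ dp = Da ^ Db            (delta P)
	 *     q ^ dq = g^a*Da ^ g^b*Db    (delta Q), g = 2 in GF(2^8)
	 * Solving the two equations gives:
	 *     Db = pbmul[p ^ dp] ^ qmul[q ^ dq]
	 *     Da = Db ^ (p ^ dp)
	 */
	static void recov_2data_sketch(size_t bytes, const uint8_t *p,
				       const uint8_t *q, uint8_t *dp,
				       uint8_t *dq, const uint8_t *pbmul,
				       const uint8_t *qmul)
	{
		while (bytes--) {
			uint8_t px = *p++ ^ *dp;	/* delta P */
			uint8_t qx = qmul[*q++ ^ *dq];	/* scaled delta Q */
			uint8_t db = pbmul[px] ^ qx;

			*dq++ = db;		/* recovered block failb */
			*dp++ = db ^ px;	/* recovered block faila */
		}
	}

The datap variants are the degenerate case: Da = qmul[q ^ dq] recovers
the data block, and P is repaired by XORing Da back into the
zero-substituted parity.
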
* [PATCH v1 09/13] lib/raid6: Clean up code style in mmx.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/mmx.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
index 3a5bf53a297b..91e5ae78759e 100644
--- a/lib/raid6/mmx.c
+++ b/lib/raid6/mmx.c
@@ -39,18 +39,18 @@ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
- for ( d = 0 ; d < bytes ; d += 8 ) {
+ for (d = 0; d < bytes; d += 8) {
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("movq %mm2,%mm4"); /* Q[0] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
+ for (z = z0 - 1; z >= 0; z--) {
asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("paddb %mm4,%mm4");
@@ -87,8 +87,8 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -96,12 +96,12 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("pxor %mm7,%mm7"); /* Zero temp */
- for ( d = 0 ; d < bytes ; d += 16 ) {
+ for (d = 0; d < bytes; d += 16) {
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8]));
+ asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d + 8]));
asm volatile("movq %mm2,%mm4"); /* Q[0] */
asm volatile("movq %mm3,%mm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
+ for (z = z0 - 1; z >= 0; z--) {
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("pcmpgtb %mm6,%mm7");
asm volatile("paddb %mm4,%mm4");
@@ -111,7 +111,7 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm7,%mm6");
asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
- asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
+ asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d + 8]));
asm volatile("pxor %mm5,%mm2");
asm volatile("pxor %mm7,%mm3");
asm volatile("pxor %mm5,%mm4");
@@ -120,9 +120,9 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("pxor %mm7,%mm7");
}
asm volatile("movq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
+ asm volatile("movq %%mm3,%0" : "=m" (p[d + 8]));
asm volatile("movq %%mm4,%0" : "=m" (q[d]));
- asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
+ asm volatile("movq %%mm6,%0" : "=m" (q[d + 8]));
}
kernel_fpu_end();
--
2.34.1
* [PATCH v1 10/13] lib/raid6: Clean up code style in loongarch_simd.c
From: Xichao Zhao @ 2025-08-15 1:53 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/loongarch_simd.c | 116 ++++++++++++++++++-------------------
1 file changed, 58 insertions(+), 58 deletions(-)
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid6/loongarch_simd.c
index aa5d9f924ca3..03aab64ffc30 100644
--- a/lib/raid6/loongarch_simd.c
+++ b/lib/raid6/loongarch_simd.c
@@ -37,8 +37,8 @@ static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -49,22 +49,22 @@ static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
* $vr12, $vr13, $vr14, $vr15: w2
* $vr16, $vr17, $vr18, $vr19: w1
*/
- for (d = 0; d < bytes; d += NSIZE*4) {
+ for (d = 0; d < bytes; d += NSIZE * 4) {
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
- asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
- asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
- asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
- asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d + 0 * NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d + 1 * NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d + 2 * NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d + 3 * NSIZE]));
asm volatile("vori.b $vr4, $vr0, 0");
asm volatile("vori.b $vr5, $vr1, 0");
asm volatile("vori.b $vr6, $vr2, 0");
asm volatile("vori.b $vr7, $vr3, 0");
- for (z = z0-1; z >= 0; z--) {
+ for (z = z0 - 1; z >= 0; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
- asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
- asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
- asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
- asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d + 0 * NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d + 1 * NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d + 2 * NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d + 3 * NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("vxor.v $vr0, $vr0, $vr8");
asm volatile("vxor.v $vr1, $vr1, $vr9");
@@ -97,15 +97,15 @@ static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("vxor.v $vr7, $vr19, $vr11");
}
/* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
- asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
- asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
- asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
- asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
+ asm volatile("vst $vr0, %0" : "=m"(p[d + NSIZE * 0]));
+ asm volatile("vst $vr1, %0" : "=m"(p[d + NSIZE * 1]));
+ asm volatile("vst $vr2, %0" : "=m"(p[d + NSIZE * 2]));
+ asm volatile("vst $vr3, %0" : "=m"(p[d + NSIZE * 3]));
/* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
- asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
- asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
- asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
- asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
+ asm volatile("vst $vr4, %0" : "=m"(q[d + NSIZE * 0]));
+ asm volatile("vst $vr5, %0" : "=m"(q[d + NSIZE * 1]));
+ asm volatile("vst $vr6, %0" : "=m"(q[d + NSIZE * 2]));
+ asm volatile("vst $vr7, %0" : "=m"(q[d + NSIZE * 3]));
}
kernel_fpu_end();
@@ -119,8 +119,8 @@ static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
@@ -131,23 +131,23 @@ static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
* $vr12, $vr13, $vr14, $vr15: w2
* $vr16, $vr17, $vr18, $vr19: w1
*/
- for (d = 0; d < bytes; d += NSIZE*4) {
+ for (d = 0; d < bytes; d += NSIZE * 4) {
/* P/Q data pages */
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
- asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
- asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
- asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
- asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d + 0 * NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d + 1 * NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d + 2 * NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d + 3 * NSIZE]));
asm volatile("vori.b $vr4, $vr0, 0");
asm volatile("vori.b $vr5, $vr1, 0");
asm volatile("vori.b $vr6, $vr2, 0");
asm volatile("vori.b $vr7, $vr3, 0");
- for (z = z0-1; z >= start; z--) {
+ for (z = z0 - 1; z >= start; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
- asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
- asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
- asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
- asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d + 0 * NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d + 1 * NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d + 2 * NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d + 3 * NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("vxor.v $vr0, $vr0, $vr8");
asm volatile("vxor.v $vr1, $vr1, $vr9");
@@ -181,7 +181,7 @@ static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
}
/* P/Q left side optimization */
- for (z = start-1; z >= 0; z--) {
+ for (z = start - 1; z >= 0; z--) {
/* w2$$ = MASK(wq$$); */
asm volatile("vslti.b $vr12, $vr4, 0");
asm volatile("vslti.b $vr13, $vr5, 0");
@@ -232,10 +232,10 @@ static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
"vst $vr25, %5\n\t"
"vst $vr26, %6\n\t"
"vst $vr27, %7\n\t"
- : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
- "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
- "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
- "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
+ : "+m"(p[d + NSIZE * 0]), "+m"(p[d + NSIZE * 1]),
+ "+m"(p[d + NSIZE * 2]), "+m"(p[d + NSIZE * 3]),
+ "+m"(q[d + NSIZE * 0]), "+m"(q[d + NSIZE * 1]),
+ "+m"(q[d + NSIZE * 2]), "+m"(q[d + NSIZE * 3])
);
}
@@ -268,8 +268,8 @@ static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -282,14 +282,14 @@ static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
*/
for (d = 0; d < bytes; d += NSIZE*2) {
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
- asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
- asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d + 0 * NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d + 1 * NSIZE]));
asm volatile("xvori.b $xr2, $xr0, 0");
asm volatile("xvori.b $xr3, $xr1, 0");
- for (z = z0-1; z >= 0; z--) {
+ for (z = z0 - 1; z >= 0; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
- asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
- asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d + 0 * NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d + 1 * NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("xvxor.v $xr0, $xr0, $xr4");
asm volatile("xvxor.v $xr1, $xr1, $xr5");
@@ -310,11 +310,11 @@ static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("xvxor.v $xr3, $xr9, $xr5");
}
/* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
- asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
- asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
+ asm volatile("xvst $xr0, %0" : "=m"(p[d + NSIZE * 0]));
+ asm volatile("xvst $xr1, %0" : "=m"(p[d + NSIZE * 1]));
/* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
- asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
- asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
+ asm volatile("xvst $xr2, %0" : "=m"(q[d + NSIZE * 0]));
+ asm volatile("xvst $xr3, %0" : "=m"(q[d + NSIZE * 1]));
}
kernel_fpu_end();
@@ -328,8 +328,8 @@ static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
@@ -340,17 +340,17 @@ static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
* $xr6, $xr7: w2
* $xr8, $xr9: w1
*/
- for (d = 0; d < bytes; d += NSIZE*2) {
+ for (d = 0; d < bytes; d += NSIZE * 2) {
/* P/Q data pages */
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
- asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
- asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d + 0 * NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d + 1 * NSIZE]));
asm volatile("xvori.b $xr2, $xr0, 0");
asm volatile("xvori.b $xr3, $xr1, 0");
for (z = z0-1; z >= start; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
- asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
- asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d + 0 * NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d + 1 * NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("xvxor.v $xr0, $xr0, $xr4");
asm volatile("xvxor.v $xr1, $xr1, $xr5");
@@ -372,7 +372,7 @@ static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
}
/* P/Q left side optimization */
- for (z = start-1; z >= 0; z--) {
+ for (z = start - 1; z >= 0; z--) {
/* w2$$ = MASK(wq$$); */
asm volatile("xvslti.b $xr6, $xr2, 0");
asm volatile("xvslti.b $xr7, $xr3, 0");
@@ -403,8 +403,8 @@ static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
"xvst $xr11, %1\n\t"
"xvst $xr12, %2\n\t"
"xvst $xr13, %3\n\t"
- : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
- "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
+ : "+m"(p[d + NSIZE * 0]), "+m"(p[d + NSIZE * 1]),
+ "+m"(q[d + NSIZE * 0]), "+m"(q[d + NSIZE * 1])
);
}
--
2.34.1
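
A last notational aside: the $$ placeholders in the quoted comments
("wq$$ = wp$$ = ...") are unroll indices from the generic template (the
.uc sources are expanded by lib/raid6/unroll.awk). With the 4-way
unroll used here, the commented load step expands to plain C along the
lines of:

	/* one statement per lane; unative_t is the generic
	 * implementation's native word type */
	wq0 = wp0 = *(unative_t *)&dptr[z0][d + 0 * NSIZE];
	wq1 = wp1 = *(unative_t *)&dptr[z0][d + 1 * NSIZE];
	wq2 = wp2 = *(unative_t *)&dptr[z0][d + 2 * NSIZE];
	wq3 = wp3 = *(unative_t *)&dptr[z0][d + 3 * NSIZE];
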
* [PATCH v1 11/13] lib/raid6: Clean up code style in avx512.c
From: Xichao Zhao @ 2025-08-15 1:54 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add or remove spaces to clean up the code style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/avx512.c | 94 +++++++++++++++++++++++-----------------------
1 file changed, 47 insertions(+), 47 deletions(-)
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
index 009bd0adeebf..18707cbb2bf1 100644
--- a/lib/raid6/avx512.c
+++ b/lib/raid6/avx512.c
@@ -46,8 +46,8 @@ static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -64,7 +64,7 @@ static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
"vmovdqa64 %1,%%zmm6"
:
: "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));
- for (z = z0-2; z >= 0; z--) {
+ for (z = z0 - 2; z >= 0; z--) {
asm volatile("prefetchnta %0\n\t"
"vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
@@ -104,22 +104,22 @@ static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0"
: : "m" (raid6_avx512_constants.x1d[0]));
- for (d = 0 ; d < bytes ; d += 64) {
+ for (d = 0; d < bytes; d += 64) {
asm volatile("vmovdqa64 %0,%%zmm4\n\t"
"vmovdqa64 %1,%%zmm2\n\t"
"vpxorq %%zmm4,%%zmm2,%%zmm2"
:
: "m" (dptr[z0][d]), "m" (p[d]));
/* P/Q data pages */
- for (z = z0-1 ; z >= start ; z--) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
@@ -133,7 +133,7 @@ static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
: "m" (dptr[z][d]));
}
/* P/Q left side optimization */
- for (z = start-1 ; z >= 0 ; z--) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
@@ -173,8 +173,8 @@ static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -192,8 +192,8 @@ static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
"vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
"vmovdqa64 %%zmm3,%%zmm6" /* Q[1] */
:
- : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]));
- for (z = z0-1; z >= 0; z--) {
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d + 64]));
+ for (z = z0 - 1; z >= 0; z--) {
asm volatile("prefetchnta %0\n\t"
"prefetchnta %1\n\t"
"vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
@@ -213,7 +213,7 @@ static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6"
:
- : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
+ : "m" (dptr[z][d]), "m" (dptr[z][d + 64]));
}
asm volatile("vmovntdq %%zmm2,%0\n\t"
"vmovntdq %%zmm3,%1\n\t"
@@ -221,7 +221,7 @@ static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
"vmovntdq %%zmm6,%3"
:
: "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
- "m" (q[d+64]));
+ "m" (q[d + 64]));
}
asm volatile("sfence" : : : "memory");
@@ -236,15 +236,15 @@ static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0"
: : "m" (raid6_avx512_constants.x1d[0]));
- for (d = 0 ; d < bytes ; d += 128) {
+ for (d = 0; d < bytes; d += 128) {
asm volatile("vmovdqa64 %0,%%zmm4\n\t"
"vmovdqa64 %1,%%zmm6\n\t"
"vmovdqa64 %2,%%zmm2\n\t"
@@ -252,10 +252,10 @@ static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
"vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm6,%%zmm3,%%zmm3"
:
- : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
- "m" (p[d]), "m" (p[d+64]));
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d + 64]),
+ "m" (p[d]), "m" (p[d + 64]));
/* P/Q data pages */
- for (z = z0-1 ; z >= start ; z--) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
@@ -275,10 +275,10 @@ static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6"
:
- : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
+ : "m" (dptr[z][d]), "m" (dptr[z][d + 64]));
}
/* P/Q left side optimization */
- for (z = start-1 ; z >= 0 ; z--) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
@@ -304,8 +304,8 @@ static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
"vmovdqa64 %%zmm2,%2\n\t"
"vmovdqa64 %%zmm3,%3"
:
- : "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
- "m" (p[d+64]));
+ : "m" (q[d]), "m" (q[d + 64]), "m" (p[d]),
+ "m" (p[d + 64]));
}
asm volatile("sfence" : : : "memory");
@@ -332,8 +332,8 @@ static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -389,8 +389,8 @@ static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14"
:
- : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
- "m" (dptr[z][d+128]), "m" (dptr[z][d+192]));
+ : "m" (dptr[z][d]), "m" (dptr[z][d + 64]),
+ "m" (dptr[z][d + 128]), "m" (dptr[z][d + 192]));
}
asm volatile("vmovntdq %%zmm2,%0\n\t"
"vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
@@ -409,9 +409,9 @@ static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
"vmovntdq %%zmm14,%7\n\t"
"vpxorq %%zmm14,%%zmm14,%%zmm14"
:
- : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
- "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
- "m" (q[d+128]), "m" (q[d+192]));
+ : "m" (p[d]), "m" (p[d + 64]), "m" (p[d + 128]),
+ "m" (p[d + 192]), "m" (q[d]), "m" (q[d + 64]),
+ "m" (q[d + 128]), "m" (q[d + 192]));
}
asm volatile("sfence" : : : "memory");
@@ -426,15 +426,15 @@ static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0"
:: "m" (raid6_avx512_constants.x1d[0]));
- for (d = 0 ; d < bytes ; d += 256) {
+ for (d = 0; d < bytes; d += 256) {
asm volatile("vmovdqa64 %0,%%zmm4\n\t"
"vmovdqa64 %1,%%zmm6\n\t"
"vmovdqa64 %2,%%zmm12\n\t"
@@ -448,12 +448,12 @@ static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
"vpxorq %%zmm12,%%zmm10,%%zmm10\n\t"
"vpxorq %%zmm14,%%zmm11,%%zmm11"
:
- : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
- "m" (dptr[z0][d+128]), "m" (dptr[z0][d+192]),
- "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
- "m" (p[d+192]));
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d + 64]),
+ "m" (dptr[z0][d + 128]), "m" (dptr[z0][d + 192]),
+ "m" (p[d]), "m" (p[d + 64]), "m" (p[d + 128]),
+ "m" (p[d + 192]));
/* P/Q data pages */
- for (z = z0-1 ; z >= start ; z--) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
@@ -493,16 +493,16 @@ static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14"
:
- : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
- "m" (dptr[z][d+128]),
- "m" (dptr[z][d+192]));
+ : "m" (dptr[z][d]), "m" (dptr[z][d + 64]),
+ "m" (dptr[z][d + 128]),
+ "m" (dptr[z][d + 192]));
}
asm volatile("prefetchnta %0\n\t"
"prefetchnta %1\n\t"
:
- : "m" (q[d]), "m" (q[d+128]));
+ : "m" (q[d]), "m" (q[d + 128]));
/* P/Q left side optimization */
- for (z = start-1 ; z >= 0 ; z--) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
@@ -543,9 +543,9 @@ static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
"vmovntdq %%zmm12,%6\n\t"
"vmovntdq %%zmm14,%7"
:
- : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
- "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
- "m" (q[d+128]), "m" (q[d+192]));
+ : "m" (p[d]), "m" (p[d + 64]), "m" (p[d + 128]),
+ "m" (p[d + 192]), "m" (q[d]), "m" (q[d + 64]),
+ "m" (q[d + 128]), "m" (q[d + 192]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
--
2.34.1
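
For readers decoding the asm above: the vpcmpgtb/vpaddb/vpandq/vpxorq sequence is a byte-wise multiply-by-2 in GF(2^8), with raid6_avx512_constants.x1d supplying the 0x1d reduction constant, and the z-loop accumulates the Q syndrome in Horner form while P is a plain XOR. Below is a minimal scalar sketch of the same recurrence, modeled on the kernel's generic integer version in lib/raid6/int.uc (gf2_mul2 and gen_syndrome_ref are illustrative names, not kernel symbols):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-wise multiply by 2 in GF(2^8): the compare/shift/and/xor that the
 * vpcmpgtb + vpaddb + vpandq + vpxorq sequence performs on 64 lanes. */
static uint8_t gf2_mul2(uint8_t v)
{
	uint8_t mask = (v & 0x80) ? 0x1d : 0;	/* x1d reduction constant */

	return (uint8_t)(v << 1) ^ mask;
}

/* Reference syndrome: P is plain XOR, Q is the Horner-form RS syndrome. */
static void gen_syndrome_ref(int disks, size_t bytes, uint8_t **dptr)
{
	int z0 = disks - 3;		/* Highest data disk */
	uint8_t *p = dptr[z0 + 1];	/* XOR parity */
	uint8_t *q = dptr[z0 + 2];	/* RS syndrome */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = dptr[z0][d];
		uint8_t wq = wp;

		for (int z = z0 - 1; z >= 0; z--) {
			wp ^= dptr[z][d];
			wq = gf2_mul2(wq) ^ dptr[z][d];
		}
		p[d] = wp;
		q[d] = wq;
	}
}

int main(void)
{
	uint8_t d0[4] = {1, 2, 3, 4}, d1[4] = {5, 6, 7, 8};
	uint8_t d2[4] = {9, 10, 11, 12}, p[4], q[4];
	uint8_t *ptrs[] = {d0, d1, d2, p, q};	/* disks = 5 */

	gen_syndrome_ref(5, 4, ptrs);
	for (int i = 0; i < 4; i++)
		printf("p[%d]=%02x q[%d]=%02x\n", i, p[i], i, q[i]);
	return 0;
}
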
* [PATCH v1 12/13] lib/raid6: Clean up code style in algos.c
2025-08-15 1:53 [PATCH v1 00/13] Clean up code style Xichao Zhao
` (10 preceding siblings ...)
2025-08-15 1:54 ` [PATCH v1 11/13] lib/raid6: Clean up code style in avx512.c Xichao Zhao
@ 2025-08-15 1:54 ` Xichao Zhao
2025-08-15 1:54 ` [PATCH v1 13/13] lib/raid6: Clean up code style in avx2.c Xichao Zhao
12 siblings, 0 replies; 14+ messages in thread
From: Xichao Zhao @ 2025-08-15 1:54 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add missing spaces and remove stray ones to follow the kernel coding style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/algos.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 799e0e5eac26..92f908c9e44b 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -188,7 +188,7 @@ static inline const struct raid6_calls *raid6_choose_gen(
best = *algo;
}
pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
- (perf * HZ * (disks-2)) >>
+ (perf * HZ * (disks - 2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
}
}
--
2.34.1
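
The expression touched by this hunk converts the benchmark's iteration count into MB/s: perf passes over (disks - 2) data pages of 2^PAGE_SHIFT bytes each run inside a window of (1 << RAID6_TIME_JIFFIES_LG2) jiffies, and the single shift folds the page-to-byte, byte-to-megabyte and jiffies-to-second conversions together. A self-contained sketch under assumed configuration values follows (PAGE_SHIFT, HZ and the benchmark window are build-dependent; mbs_ref is an illustrative name, not a kernel symbol):

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed: 4 KiB pages */
#define HZ			250	/* assumed: CONFIG_HZ=250 */
#define RAID6_TIME_JIFFIES_LG2	4	/* assumed: 16-jiffy window */

static unsigned long mbs_ref(unsigned long perf, int disks)
{
	/*
	 * perf iterations, each over (disks - 2) data pages, in a
	 * (1 << RAID6_TIME_JIFFIES_LG2) / HZ second window:
	 *
	 *   bytes/s = perf * (disks - 2) * 2^PAGE_SHIFT * HZ / 2^LG2
	 *   MB/s    = bytes/s / 2^20
	 *           = (perf * HZ * (disks - 2)) >> (20 - PAGE_SHIFT + LG2)
	 */
	return (perf * HZ * (disks - 2)) >>
		(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2);
}

int main(void)
{
	/* e.g. 12000 iterations over 8 disks -> 4394 MB/s */
	printf("%lu MB/s\n", mbs_ref(12000, 8));
	return 0;
}
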
* [PATCH v1 13/13] lib/raid6: Clean up code style in avx2.c
2025-08-15 1:53 [PATCH v1 00/13] Clean up code style Xichao Zhao
` (11 preceding siblings ...)
2025-08-15 1:54 ` [PATCH v1 12/13] lib/raid6: Clean up code style in algos.c Xichao Zhao
@ 2025-08-15 1:54 ` Xichao Zhao
12 siblings, 0 replies; 14+ messages in thread
From: Xichao Zhao @ 2025-08-15 1:54 UTC (permalink / raw)
To: Song Liu, Yu Kuai,
open list:SOFTWARE RAID (Multiple Disks) SUPPORT, open list
Cc: Xichao Zhao
Add missing spaces and remove stray ones to follow the kernel coding style.
No functional changes.
Signed-off-by: Xichao Zhao <zhao.xichao@vivo.com>
---
lib/raid6/avx2.c | 122 +++++++++++++++++++++++------------------------
1 file changed, 61 insertions(+), 61 deletions(-)
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 059024234dce..949f6a71d810 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -87,19 +87,19 @@ static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
- for (d = 0 ; d < bytes ; d += 32) {
+ for (d = 0; d < bytes; d += 32) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
/* P/Q data pages */
- for (z = z0-1 ; z >= start ; z--) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
@@ -145,8 +145,8 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -156,14 +156,14 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
/* We uniformly assume a single prefetch covers at least 32 bytes */
for (d = 0; d < bytes; d += 64) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
- asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
- asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d + 32]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d + 32]));/* P[1] */
asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */
asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */
- for (z = z0-1; z >= 0; z--) {
+ for (z = z0 - 1; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d + 32]));
asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
@@ -173,7 +173,7 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
- asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+ asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d + 32]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
@@ -197,22 +197,22 @@ static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
- for (d = 0 ; d < bytes ; d += 64) {
+ for (d = 0; d < bytes; d += 64) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
- asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d + 32]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
- asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d + 32]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm3,%ymm3");
/* P/Q data pages */
- for (z = z0-1 ; z >= start ; z--) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
@@ -225,14 +225,14 @@ static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7"
- :: "m" (dptr[z][d+32]));
+ :: "m" (dptr[z][d + 32]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
/* P/Q left side optimization */
- for (z = start-1 ; z >= 0 ; z--) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
@@ -245,12 +245,12 @@ static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
- asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+ asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d + 32]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
- asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d + 32]));
asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
- asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d + 32]));
}
asm volatile("sfence" : : : "memory");
@@ -277,8 +277,8 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
kernel_fpu_begin();
@@ -296,9 +296,9 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
for (d = 0; d < bytes; d += 128) {
for (z = z0; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d + 32]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d + 64]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d + 96]));
asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
@@ -316,9 +316,9 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
- asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
- asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
- asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
+ asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d + 32]));
+ asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d + 64]));
+ asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d + 96]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm13,%ymm10,%ymm10");
@@ -330,19 +330,19 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
asm volatile("vpxor %ymm2,%ymm2,%ymm2");
- asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d + 32]));
asm volatile("vpxor %ymm3,%ymm3,%ymm3");
- asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+ asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d + 64]));
asm volatile("vpxor %ymm10,%ymm10,%ymm10");
- asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+ asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d + 96]));
asm volatile("vpxor %ymm11,%ymm11,%ymm11");
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
asm volatile("vpxor %ymm4,%ymm4,%ymm4");
- asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d + 32]));
asm volatile("vpxor %ymm6,%ymm6,%ymm6");
- asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+ asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d + 64]));
asm volatile("vpxor %ymm12,%ymm12,%ymm12");
- asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+ asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d + 96]));
asm volatile("vpxor %ymm14,%ymm14,%ymm14");
}
@@ -358,30 +358,30 @@ static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
- p = dptr[disks-2]; /* XOR parity */
- q = dptr[disks-1]; /* RS syndrome */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
- for (d = 0 ; d < bytes ; d += 128) {
+ for (d = 0; d < bytes; d += 128) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
- asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
- asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
- asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
+ asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d + 32]));
+ asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d + 64]));
+ asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d + 96]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
- asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
- asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
- asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d + 32]));
+ asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d + 64]));
+ asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d + 96]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm3,%ymm3");
asm volatile("vpxor %ymm12,%ymm10,%ymm10");
asm volatile("vpxor %ymm14,%ymm11,%ymm11");
/* P/Q data pages */
- for (z = z0-1 ; z >= start ; z--) {
+ for (z = z0 - 1; z >= start; z--) {
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d + 64]));
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpxor %ymm13,%ymm13,%ymm13");
@@ -404,11 +404,11 @@ static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7"
- :: "m" (dptr[z][d+32]));
+ :: "m" (dptr[z][d + 32]));
asm volatile("vmovdqa %0,%%ymm13"
- :: "m" (dptr[z][d+64]));
+ :: "m" (dptr[z][d + 64]));
asm volatile("vmovdqa %0,%%ymm15"
- :: "m" (dptr[z][d+96]));
+ :: "m" (dptr[z][d + 96]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm13,%ymm10,%ymm10");
@@ -421,7 +421,7 @@ static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
asm volatile("prefetchnta %0" :: "m" (q[d]));
asm volatile("prefetchnta %0" :: "m" (q[d+64]));
/* P/Q left side optimization */
- for (z = start-1 ; z >= 0 ; z--) {
+ for (z = start - 1; z >= 0; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpxor %ymm13,%ymm13,%ymm13");
@@ -444,17 +444,17 @@ static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
}
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
- asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
- asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
- asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d + 32]));
+ asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d + 64]));
+ asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d + 96]));
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
- asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
- asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
- asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
+ asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d + 32]));
+ asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d + 64]));
+ asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d + 96]));
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
- asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
- asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
- asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d + 32]));
+ asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d + 64]));
+ asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d + 96]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
--
2.34.1
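
For readers following the two inner loops in the hunks above: the xor_syndrome variants update P/Q incrementally when only disks start..stop were rewritten. The first loop folds each rewritten disk into both parities; the "P/Q left side optimization" loop then only keeps multiplying Q by 2 in GF(2^8), because disks below start are unchanged and contribute nothing to the P delta, while Q's delta must still advance by one generator power per remaining disk. A scalar sketch of that flow (xor_syndrome_ref is an illustrative name, not a kernel symbol; gf2_mul2 as in the sketch after patch 11/13):

#include <stddef.h>
#include <stdint.h>

/* Multiply by 2 in GF(2^8), reduced by the 0x1d constant. */
static uint8_t gf2_mul2(uint8_t v)
{
	return (uint8_t)(v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

void xor_syndrome_ref(int disks, int start, int stop,
		      size_t bytes, uint8_t **dptr)
{
	int z0 = stop;			/* P/Q right side optimization */
	uint8_t *p = dptr[disks - 2];	/* XOR parity */
	uint8_t *q = dptr[disks - 1];	/* RS syndrome */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wq = dptr[z0][d];
		uint8_t wp = p[d] ^ wq;

		/* P/Q data pages: fold every rewritten disk into both */
		for (int z = z0 - 1; z >= start; z--) {
			wp ^= dptr[z][d];
			wq = gf2_mul2(wq) ^ dptr[z][d];
		}
		/* P/Q left side optimization: disks below 'start' are
		 * unchanged, so only Q's coefficient keeps advancing */
		for (int z = start - 1; z >= 0; z--)
			wq = gf2_mul2(wq);

		p[d] = wp;
		q[d] ^= wq;
	}
}
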