* [PATCH V4 01/14] LoongArch: Add atomic operations for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 02/14] LoongArch: Add adaptive CSR accessors " Huacai Chen
` (12 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
LoongArch64 has both AMO and LL/SC instructions, while LoongArch32 only
has LL/SC instructions. So we add a Kconfig option CPU_HAS_AMO here and
implement atomic operations (including local operations and percpu
operations) for both 32BIT and 64BIT platforms.
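To illustrate the effect (not part of this patch): generic code keeps using
the common atomic API, and the header selected by CPU_HAS_AMO decides what
it compiles to. A minimal usage sketch in C, with a made-up counter and
function name:

#include <linux/atomic.h>

static atomic_t nr_events = ATOMIC_INIT(0);

int record_event(void)
{
        /*
         * With CPU_HAS_AMO (LoongArch64) this becomes a single
         * amadd_db.w; without it (LoongArch32) it becomes an
         * ll.w/sc.w retry loop from atomic-llsc.h.
         */
        return atomic_add_return(1, &nr_events);
}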
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/Kconfig | 4 +
arch/loongarch/include/asm/atomic-amo.h | 206 +++++++++++++++++++++++
arch/loongarch/include/asm/atomic-llsc.h | 100 +++++++++++
arch/loongarch/include/asm/atomic.h | 197 ++--------------------
arch/loongarch/include/asm/cmpxchg.h | 48 ++++--
arch/loongarch/include/asm/local.h | 37 ++++
arch/loongarch/include/asm/percpu.h | 40 +++--
7 files changed, 413 insertions(+), 219 deletions(-)
create mode 100644 arch/loongarch/include/asm/atomic-amo.h
create mode 100644 arch/loongarch/include/asm/atomic-llsc.h
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index a672f689cb03..730f34214519 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -568,6 +568,10 @@ config ARCH_STRICT_ALIGN
to run kernel only on systems with h/w unaligned access support in
order to optimise for performance.
+config CPU_HAS_AMO
+ bool
+ default 64BIT
+
config CPU_HAS_FPU
bool
default y
diff --git a/arch/loongarch/include/asm/atomic-amo.h b/arch/loongarch/include/asm/atomic-amo.h
new file mode 100644
index 000000000000..d5efa5252d56
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic-amo.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations (AMO).
+ *
+ * Copyright (C) 2020-2025 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_ATOMIC_AMO_H
+#define _ASM_ATOMIC_AMO_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_OP(op, I, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op".w" " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+ ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC_OPS(add, i, add, +)
+ATOMIC_OPS(sub, -i, add, +)
+
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_OP(op, I, asm_op) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op".d " " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
+{ \
+ long result; \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
+{ \
+ long result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op#mb".d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC64_OPS(op, I, asm_op, c_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC64_OPS(add, i, add, +)
+ATOMIC64_OPS(sub, -i, add, +)
+
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+
+#define ATOMIC64_OPS(op, I, asm_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
+
+ATOMIC64_OPS(and, i, and)
+ATOMIC64_OPS(or, i, or)
+ATOMIC64_OPS(xor, i, xor)
+
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+#endif
+
+#endif /* _ASM_ATOMIC_AMO_H */
diff --git a/arch/loongarch/include/asm/atomic-llsc.h b/arch/loongarch/include/asm/atomic-llsc.h
new file mode 100644
index 000000000000..b4f5670b85cf
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic-llsc.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations (LLSC).
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_ATOMIC_LLSC_H
+#define _ASM_ATOMIC_LLSC_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_OP(op, I, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ "1: ll.w %0, %1 #atomic_" #op " \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " sc.w %0, %1 \n" \
+ " beq %0, $r0, 1b \n" \
+ :"=&r" (temp) , "+ZB"(v->counter) \
+ :"r" (I) \
+ ); \
+}
+
+#define ATOMIC_OP_RETURN(op, I, asm_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ int result, temp; \
+ \
+ __asm__ __volatile__( \
+ "1: ll.w %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc.w %0, %2 \n" \
+ " beq %0, $r0 ,1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ : "=&r" (result), "=&r" (temp), "+ZB"(v->counter) \
+ : "r" (I)); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ int result, temp; \
+ \
+ __asm__ __volatile__( \
+ "1: ll.w %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc.w %0, %2 \n" \
+ " beq %0, $r0 ,1b \n" \
+ " add.w %0, %1 ,$r0 \n" \
+ : "=&r" (result), "=&r" (temp), "+ZB" (v->counter) \
+ : "r" (I)); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_OP_RETURN(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(add, i, add.w, +=)
+ATOMIC_OPS(sub, -i, add.w, +=)
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#ifdef CONFIG_64BIT
+#error "64-bit LLSC atomic operations are not supported"
+#endif
+
+#endif /* _ASM_ATOMIC_LLSC_H */
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index c86f0ab922ec..444b9ddcd004 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -11,6 +11,16 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
+#ifdef CONFIG_CPU_HAS_AMO
+#include <asm/atomic-amo.h>
+#else
+#include <asm/atomic-llsc.h>
+#endif
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
#if __SIZEOF_LONG__ == 4
#define __LL "ll.w "
#define __SC "sc.w "
@@ -34,100 +44,6 @@
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
-#define ATOMIC_OP(op, I, asm_op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
-{ \
- __asm__ __volatile__( \
- "am"#asm_op".w" " $zero, %1, %0 \n" \
- : "+ZB" (v->counter) \
- : "r" (I) \
- : "memory"); \
-}
-
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
-static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
-{ \
- int result; \
- \
- __asm__ __volatile__( \
- "am"#asm_op#mb".w" " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result c_op I; \
-}
-
-#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
-static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
-{ \
- int result; \
- \
- __asm__ __volatile__( \
- "am"#asm_op#mb".w" " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result; \
-}
-
-#define ATOMIC_OPS(op, I, asm_op, c_op) \
- ATOMIC_OP(op, I, asm_op) \
- ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
- ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
- ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC_OPS(add, i, add, +)
-ATOMIC_OPS(sub, -i, add, +)
-
-#define arch_atomic_add_return arch_atomic_add_return
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
-#define arch_atomic_sub_return arch_atomic_sub_return
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-
-#undef ATOMIC_OPS
-
-#define ATOMIC_OPS(op, I, asm_op) \
- ATOMIC_OP(op, I, asm_op) \
- ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC_OPS(and, i, and)
-ATOMIC_OPS(or, i, or)
-ATOMIC_OPS(xor, i, xor)
-
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -194,99 +110,6 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-#define ATOMIC64_OP(op, I, asm_op) \
-static inline void arch_atomic64_##op(long i, atomic64_t *v) \
-{ \
- __asm__ __volatile__( \
- "am"#asm_op".d " " $zero, %1, %0 \n" \
- : "+ZB" (v->counter) \
- : "r" (I) \
- : "memory"); \
-}
-
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
-static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
-{ \
- long result; \
- __asm__ __volatile__( \
- "am"#asm_op#mb".d " " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result c_op I; \
-}
-
-#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
-static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
-{ \
- long result; \
- \
- __asm__ __volatile__( \
- "am"#asm_op#mb".d " " %1, %2, %0 \n" \
- : "+ZB" (v->counter), "=&r" (result) \
- : "r" (I) \
- : "memory"); \
- \
- return result; \
-}
-
-#define ATOMIC64_OPS(op, I, asm_op, c_op) \
- ATOMIC64_OP(op, I, asm_op) \
- ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
- ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
- ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC64_OPS(add, i, add, +)
-ATOMIC64_OPS(sub, -i, add, +)
-
-#define arch_atomic64_add_return arch_atomic64_add_return
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-
-#define ATOMIC64_OPS(op, I, asm_op) \
- ATOMIC64_OP(op, I, asm_op) \
- ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
- ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
-
-ATOMIC64_OPS(and, i, and)
-ATOMIC64_OPS(or, i, or)
-ATOMIC64_OPS(xor, i, xor)
-
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 979fde61bba8..0494c2ab553e 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -9,17 +9,33 @@
#include <linux/build_bug.h>
#include <asm/barrier.h>
-#define __xchg_asm(amswap_db, m, val) \
+#define __xchg_amo_asm(amswap_db, m, val) \
({ \
- __typeof(val) __ret; \
+ __typeof(val) __ret; \
\
- __asm__ __volatile__ ( \
- " "amswap_db" %1, %z2, %0 \n" \
- : "+ZB" (*m), "=&r" (__ret) \
- : "Jr" (val) \
- : "memory"); \
+ __asm__ __volatile__ ( \
+ " "amswap_db" %1, %z2, %0 \n" \
+ : "+ZB" (*m), "=&r" (__ret) \
+ : "Jr" (val) \
+ : "memory"); \
\
- __ret; \
+ __ret; \
+})
+
+#define __xchg_llsc_asm(ld, st, m, val) \
+({ \
+ __typeof(val) __ret, __tmp; \
+ \
+ asm volatile ( \
+ "1: ll.w %0, %3 \n" \
+ " move %1, %z4 \n" \
+ " sc.w %1, %2 \n" \
+ " beqz %1, 1b \n" \
+ : "=&r" (__ret), "=&r" (__tmp), "=ZC" (*m) \
+ : "ZC" (*m), "Jr" (val) \
+ : "memory"); \
+ \
+ __ret; \
})
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
@@ -67,13 +83,23 @@ __arch_xchg(volatile void *ptr, unsigned long x, int size)
switch (size) {
case 1:
case 2:
- return __xchg_small(ptr, x, size);
+ return __xchg_small((volatile void *)ptr, x, size);
case 4:
- return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+#ifdef CONFIG_CPU_HAS_AMO
+ return __xchg_amo_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+#else
+ return __xchg_llsc_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)x);
+#endif /* CONFIG_CPU_HAS_AMO */
+#ifdef CONFIG_64BIT
case 8:
- return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+#ifdef CONFIG_CPU_HAS_AMO
+ return __xchg_amo_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+#else
+ return __xchg_llsc_asm("ll.d", "sc.d", (volatile u64 *)ptr, (u64)x);
+#endif /* CONFIG_CPU_HAS_AMO */
+#endif /* CONFIG_64BIT */
default:
BUILD_BUG();
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
index f53ea653af76..65ace8e4350a 100644
--- a/arch/loongarch/include/asm/local.h
+++ b/arch/loongarch/include/asm/local.h
@@ -8,6 +8,7 @@
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
+#include <asm/asm.h>
#include <asm/cmpxchg.h>
typedef struct {
@@ -27,6 +28,7 @@ typedef struct {
/*
* Same as above, but return the result value
*/
+#ifdef CONFIG_CPU_HAS_AMO
static inline long local_add_return(long i, local_t *l)
{
unsigned long result;
@@ -55,6 +57,41 @@ static inline long local_sub_return(long i, local_t *l)
return result;
}
+#else
+static inline long local_add_return(long i, local_t *l)
+{
+ unsigned long result, temp;
+
+ __asm__ __volatile__(
+ "1:" __LL "%1, %2 # local_add_return \n"
+ __stringify(LONG_ADD) " %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beq %0, $r0, 1b \n"
+ __stringify(LONG_ADD) " %0, %1, %3 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "r" (i), "m" (l->a.counter)
+ : "memory");
+
+ return result;
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+ unsigned long result, temp;
+
+ __asm__ __volatile__(
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ __stringify(LONG_SUB) " %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beq %0, $r0, 1b \n"
+ __stringify(LONG_SUB) " %0, %1, %3 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "r" (i), "m" (l->a.counter)
+ : "memory");
+
+ return result;
+}
+#endif
static inline long local_cmpxchg(local_t *l, long old, long new)
{
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 87be9b14e9da..1619c1d15e6b 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -36,6 +36,8 @@ static inline void set_my_cpu_offset(unsigned long off)
__my_cpu_offset; \
})
+#ifdef CONFIG_CPU_HAS_AMO
+
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
@@ -68,25 +70,9 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- case 2:
- return __xchg_small((volatile void *)ptr, val, size);
-
- case 4:
- return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
-
- case 8:
- return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+#endif
- default:
- BUILD_BUG();
- }
-
- return 0;
-}
+#ifdef CONFIG_64BIT
#define __pcpu_op_1(op) op ".b "
#define __pcpu_op_2(op) op ".h "
@@ -115,6 +101,10 @@ do { \
: "memory"); \
} while (0)
+#endif
+
+#define __percpu_xchg __arch_xchg
+
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -135,6 +125,8 @@ do { \
__retval; \
})
+#ifdef CONFIG_CPU_HAS_AMO
+
#define _percpu_add(pcp, val) \
_pcp_protect(__percpu_add, pcp, val)
@@ -146,9 +138,6 @@ do { \
#define _percpu_or(pcp, val) \
_pcp_protect(__percpu_or, pcp, val)
-#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
- _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
-
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
@@ -161,6 +150,10 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+#endif
+
+#ifdef CONFIG_64BIT
+
#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
@@ -171,6 +164,11 @@ do { \
#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
+#endif
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
--
2.47.3
* [PATCH V4 02/14] LoongArch: Add adaptive CSR accessors for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
2025-11-27 15:48 ` [PATCH V4 01/14] LoongArch: Add atomic operations for 32BIT/64BIT Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 03/14] LoongArch: Adjust common macro definitions " Huacai Chen
` (11 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
32BIT platforms only have 32-bit CSR/IOCSR registers, while 64BIT
platforms have both 32-bit and 64-bit CSR/IOCSR registers. There are
already accessors for both widths:
csr_read32()/csr_write32()/csr_xchg32();
csr_read64()/csr_write64()/csr_xchg64();
Some CSR registers (address and timer registers) are 32 bits wide on
32BIT platforms and 64 bits wide on 64BIT platforms. To avoid #ifdefs
here and there, they need adaptive accessors, so we define and use:
csr_read()/csr_write()/csr_xchg();
IOCSR registers don't have a "natural width": a 64-bit register can be
treated as two 32-bit registers, so we simply use two 32-bit accessors
to emulate a 64-bit accessor.
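As a usage sketch (not part of this patch, mirroring the time.c hunk below),
callers of such natural-width CSRs no longer need per-width #ifdefs. The
function name is made up; the constants come from <asm/loongarch.h>:

static void constant_timer_disable(void)
{
        unsigned long cfg;

        /* 32-bit access on 32BIT, 64-bit access on 64BIT */
        cfg = csr_read(LOONGARCH_CSR_TCFG);
        csr_write(cfg & ~CSR_TCFG_EN, LOONGARCH_CSR_TCFG);
}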
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/loongarch.h | 46 +++++++++++++++---------
arch/loongarch/include/asm/percpu.h | 2 +-
arch/loongarch/kernel/cpu-probe.c | 7 ++++
arch/loongarch/kernel/time.c | 16 ++++-----
arch/loongarch/kernel/traps.c | 15 ++++----
arch/loongarch/lib/dump_tlb.c | 6 ++--
arch/loongarch/mm/tlb.c | 10 +++---
arch/loongarch/power/hibernate.c | 6 ++--
arch/loongarch/power/suspend.c | 24 ++++++-------
drivers/firmware/efi/libstub/loongarch.c | 8 ++---
10 files changed, 81 insertions(+), 59 deletions(-)
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 3de03cb864b2..9f71a79271da 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -182,6 +182,16 @@
#define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg)
#define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg)
+#ifdef CONFIG_32BIT
+#define csr_read(reg) csr_read32(reg)
+#define csr_write(val, reg) csr_write32(val, reg)
+#define csr_xchg(val, mask, reg) csr_xchg32(val, mask, reg)
+#else
+#define csr_read(reg) csr_read64(reg)
+#define csr_write(val, reg) csr_write64(val, reg)
+#define csr_xchg(val, mask, reg) csr_xchg64(val, mask, reg)
+#endif
+
/* IOCSR */
#define iocsr_read32(reg) __iocsrrd_w(reg)
#define iocsr_read64(reg) __iocsrrd_d(reg)
@@ -1223,6 +1233,7 @@ static inline unsigned int get_csr_cpuid(void)
return csr_read32(LOONGARCH_CSR_CPUID);
}
+#ifdef CONFIG_64BIT
static inline void csr_any_send(unsigned int addr, unsigned int data,
unsigned int data_mask, unsigned int cpu)
{
@@ -1234,6 +1245,7 @@ static inline void csr_any_send(unsigned int addr, unsigned int data,
val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
}
+#endif
static inline unsigned int read_csr_excode(void)
{
@@ -1257,22 +1269,22 @@ static inline void write_csr_pagesize(unsigned int size)
static inline unsigned int read_csr_tlbrefill_pagesize(void)
{
- return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
+ return (csr_read(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
}
static inline void write_csr_tlbrefill_pagesize(unsigned int size)
{
- csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
+ csr_xchg(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
}
#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID)
#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID)
-#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI)
-#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI)
-#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0)
-#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0)
-#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1)
-#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1)
+#define read_csr_entryhi() csr_read(LOONGARCH_CSR_TLBEHI)
+#define write_csr_entryhi(val) csr_write(val, LOONGARCH_CSR_TLBEHI)
+#define read_csr_entrylo0() csr_read(LOONGARCH_CSR_TLBELO0)
+#define write_csr_entrylo0(val) csr_write(val, LOONGARCH_CSR_TLBELO0)
+#define read_csr_entrylo1() csr_read(LOONGARCH_CSR_TLBELO1)
+#define write_csr_entrylo1(val) csr_write(val, LOONGARCH_CSR_TLBELO1)
#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG)
#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG)
#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT)
@@ -1282,20 +1294,20 @@ static inline void write_csr_tlbrefill_pagesize(unsigned int size)
#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN)
#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN)
#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID)
-#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1)
-#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1)
-#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2)
-#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2)
-#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3)
-#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3)
+#define read_csr_prcfg1() csr_read(LOONGARCH_CSR_PRCFG1)
+#define write_csr_prcfg1(val) csr_write(val, LOONGARCH_CSR_PRCFG1)
+#define read_csr_prcfg2() csr_read(LOONGARCH_CSR_PRCFG2)
+#define write_csr_prcfg2(val) csr_write(val, LOONGARCH_CSR_PRCFG2)
+#define read_csr_prcfg3() csr_read(LOONGARCH_CSR_PRCFG3)
+#define write_csr_prcfg3(val) csr_write(val, LOONGARCH_CSR_PRCFG3)
#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE)
#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG)
#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG)
#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR)
-#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1)
-#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1)
-#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2)
+#define read_csr_impctl1() csr_read(LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl1(val) csr_write(val, LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl2(val) csr_write(val, LOONGARCH_CSR_IMPCTL2)
#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0)
#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0)
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 1619c1d15e6b..44a8aea2b0e5 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -27,7 +27,7 @@ register unsigned long __my_cpu_offset __asm__("$r21");
static inline void set_my_cpu_offset(unsigned long off)
{
__my_cpu_offset = off;
- csr_write64(off, PERCPU_BASE_KS);
+ csr_write(off, PERCPU_BASE_KS);
}
#define __my_cpu_offset \
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index a2060a24b39f..3726cd0885b6 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -298,8 +298,15 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
return;
}
+#ifdef CONFIG_64BIT
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+#else
+ *vendor = iocsr_read32(LOONGARCH_IOCSR_VENDOR) |
+ (u64)iocsr_read32(LOONGARCH_IOCSR_VENDOR + 4) << 32;
+ *cpuname = iocsr_read32(LOONGARCH_IOCSR_CPUNAME) |
+ (u64)iocsr_read32(LOONGARCH_IOCSR_CPUNAME + 4) << 32;
+#endif
if (!__cpu_full_name[cpu]) {
if (((char *)vendor)[0] == 0)
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index 6fb92cc1a4c9..1c31bf3a16ed 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -50,10 +50,10 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
raw_spin_lock(&state_lock);
- timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config = csr_read(LOONGARCH_CSR_TCFG);
timer_config |= CSR_TCFG_EN;
timer_config &= ~CSR_TCFG_PERIOD;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@@ -70,7 +70,7 @@ static int constant_set_state_periodic(struct clock_event_device *evt)
period = const_clock_freq / HZ;
timer_config = period & CSR_TCFG_VAL;
timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@@ -83,9 +83,9 @@ static int constant_set_state_shutdown(struct clock_event_device *evt)
raw_spin_lock(&state_lock);
- timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config = csr_read(LOONGARCH_CSR_TCFG);
timer_config &= ~CSR_TCFG_EN;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@@ -98,7 +98,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
delta &= CSR_TCFG_VAL;
timer_config = delta | CSR_TCFG_EN;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ csr_write(timer_config, LOONGARCH_CSR_TCFG);
return 0;
}
@@ -137,7 +137,7 @@ void save_counter(void)
void sync_counter(void)
{
/* Ensure counter begin at 0 */
- csr_write64(init_offset, LOONGARCH_CSR_CNTC);
+ csr_write(init_offset, LOONGARCH_CSR_CNTC);
}
int constant_clockevent_init(void)
@@ -235,7 +235,7 @@ void __init time_init(void)
else
const_clock_freq = calc_const_freq();
- init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));
+ init_offset = -(drdtime() - csr_read(LOONGARCH_CSR_CNTC));
constant_clockevent_init();
constant_clocksource_init();
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index da5926fead4a..004b8ebf0051 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -625,7 +625,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
- u64 badv = 0, lower = 0, upper = ULONG_MAX;
+ unsigned long badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
@@ -1070,10 +1070,13 @@ asmlinkage void noinstr do_reserved(struct pt_regs *regs)
asmlinkage void cache_parity_error(void)
{
+ u32 merrctl = csr_read32(LOONGARCH_CSR_MERRCTL);
+ unsigned long merrera = csr_read(LOONGARCH_CSR_MERRERA);
+
/* For the moment, report the problem and hang. */
pr_err("Cache error exception:\n");
- pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
- pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+ pr_err("csr_merrctl == %08x\n", merrctl);
+ pr_err("csr_merrera == %016lx\n", merrera);
panic("Can't handle the cache error!");
}
@@ -1130,9 +1133,9 @@ static void configure_exception_vector(void)
eentry = (unsigned long)exception_handlers;
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
- csr_write64(eentry, LOONGARCH_CSR_EENTRY);
- csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
- csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
+ csr_write(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
+ csr_write(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
}
void per_cpu_trap_init(int cpu)
diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c
index 0b886a6e260f..116f21ea4e2c 100644
--- a/arch/loongarch/lib/dump_tlb.c
+++ b/arch/loongarch/lib/dump_tlb.c
@@ -20,9 +20,9 @@ void dump_tlb_regs(void)
pr_info("Index : 0x%0x\n", read_csr_tlbidx());
pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
- pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi());
- pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0());
- pr_info("EntryLo1 : 0x%0*lx\n", field, read_csr_entrylo1());
+ pr_info("EntryHi : 0x%0*lx\n", field, (unsigned long)read_csr_entryhi());
+ pr_info("EntryLo0 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo0());
+ pr_info("EntryLo1 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo1());
}
static void dump_tlb(int first, int last)
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 3b427b319db2..6e474469e210 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -229,11 +229,11 @@ static void setup_ptwalker(void)
if (cpu_has_ptw)
pwctl1 |= CSR_PWCTL1_PTW;
- csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
- csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
- csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
- csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
- csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
+ csr_write(pwctl0, LOONGARCH_CSR_PWCTL0);
+ csr_write(pwctl1, LOONGARCH_CSR_PWCTL1);
+ csr_write((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
+ csr_write((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ csr_write((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}
static void output_pgtable_bits_defines(void)
diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
index e7b7346592cb..817270410ef9 100644
--- a/arch/loongarch/power/hibernate.c
+++ b/arch/loongarch/power/hibernate.c
@@ -10,7 +10,7 @@ static u32 saved_crmd;
static u32 saved_prmd;
static u32 saved_euen;
static u32 saved_ecfg;
-static u64 saved_pcpu_base;
+static unsigned long saved_pcpu_base;
struct pt_regs saved_regs;
void save_processor_state(void)
@@ -20,7 +20,7 @@ void save_processor_state(void)
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
- saved_pcpu_base = csr_read64(PERCPU_BASE_KS);
+ saved_pcpu_base = csr_read(PERCPU_BASE_KS);
if (is_fpu_owner())
save_fp(current);
@@ -33,7 +33,7 @@ void restore_processor_state(void)
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG);
- csr_write64(saved_pcpu_base, PERCPU_BASE_KS);
+ csr_write(saved_pcpu_base, PERCPU_BASE_KS);
if (is_fpu_owner())
restore_fp(current);
diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c
index c9e594925c47..7e3d79f8bbd4 100644
--- a/arch/loongarch/power/suspend.c
+++ b/arch/loongarch/power/suspend.c
@@ -20,24 +20,24 @@ u64 loongarch_suspend_addr;
struct saved_registers {
u32 ecfg;
u32 euen;
- u64 pgd;
- u64 kpgd;
u32 pwctl0;
u32 pwctl1;
- u64 pcpu_base;
+ unsigned long pgd;
+ unsigned long kpgd;
+ unsigned long pcpu_base;
};
static struct saved_registers saved_regs;
void loongarch_common_suspend(void)
{
save_counter();
- saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL);
- saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH);
+ saved_regs.pgd = csr_read(LOONGARCH_CSR_PGDL);
+ saved_regs.kpgd = csr_read(LOONGARCH_CSR_PGDH);
saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0);
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
- saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS);
+ saved_regs.pcpu_base = csr_read(PERCPU_BASE_KS);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
@@ -46,17 +46,17 @@ void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
- csr_write64(eentry, LOONGARCH_CSR_EENTRY);
- csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
- csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+ csr_write(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write(eentry, LOONGARCH_CSR_MERRENTRY);
+ csr_write(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
- csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL);
- csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
+ csr_write(saved_regs.pgd, LOONGARCH_CSR_PGDL);
+ csr_write(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
- csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS);
+ csr_write(saved_regs.pcpu_base, PERCPU_BASE_KS);
}
int loongarch_acpi_suspend(void)
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index 3782d0a187d1..9825f5218137 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -72,10 +72,10 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
desc_ver, priv.runtime_map);
/* Config Direct Mapping */
- csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
- csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
- csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2);
- csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3);
+ csr_write(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
+ csr_write(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
+ csr_write(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2);
+ csr_write(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3);
real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image);
--
2.47.3
* [PATCH V4 03/14] LoongArch: Adjust common macro definitions for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
2025-11-27 15:48 ` [PATCH V4 01/14] LoongArch: Add atomic operations for 32BIT/64BIT Huacai Chen
2025-11-27 15:48 ` [PATCH V4 02/14] LoongArch: Add adaptive CSR accessors " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 04/14] LoongArch: Adjust boot & setup " Huacai Chen
` (10 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Most common macros are defined in asm.h, asmmacro.h and stackframe.h.
Adjust these macros for both 32BIT and 64BIT.
Add SETUP_TWINS (Setup Trampoline Windows) and SETUP_MODES (Setup CRMD/
PRMD/EUEN), which will be used later.
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/asm.h | 77 ++++++++++++----
arch/loongarch/include/asm/asmmacro.h | 118 ++++++++++++++++++------
arch/loongarch/include/asm/stackframe.h | 34 +++++--
3 files changed, 174 insertions(+), 55 deletions(-)
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
index f018d26fc995..719cab1a0ad8 100644
--- a/arch/loongarch/include/asm/asm.h
+++ b/arch/loongarch/include/asm/asm.h
@@ -72,11 +72,11 @@
#define INT_SUB sub.w
#define INT_L ld.w
#define INT_S st.w
-#define INT_SLL slli.w
+#define INT_SLLI slli.w
#define INT_SLLV sll.w
-#define INT_SRL srli.w
+#define INT_SRLI srli.w
#define INT_SRLV srl.w
-#define INT_SRA srai.w
+#define INT_SRAI srai.w
#define INT_SRAV sra.w
#endif
@@ -86,11 +86,11 @@
#define INT_SUB sub.d
#define INT_L ld.d
#define INT_S st.d
-#define INT_SLL slli.d
+#define INT_SLLI slli.d
#define INT_SLLV sll.d
-#define INT_SRL srli.d
+#define INT_SRLI srli.d
#define INT_SRLV srl.d
-#define INT_SRA srai.d
+#define INT_SRAI srai.d
#define INT_SRAV sra.d
#endif
@@ -100,15 +100,23 @@
#if (__SIZEOF_LONG__ == 4)
#define LONG_ADD add.w
#define LONG_ADDI addi.w
+#define LONG_ALSL alsl.w
+#define LONG_BSTRINS bstrins.w
+#define LONG_BSTRPICK bstrpick.w
#define LONG_SUB sub.w
#define LONG_L ld.w
+#define LONG_LI li.w
+#define LONG_LPTR ld.w
#define LONG_S st.w
-#define LONG_SLL slli.w
+#define LONG_SPTR st.w
+#define LONG_SLLI slli.w
#define LONG_SLLV sll.w
-#define LONG_SRL srli.w
+#define LONG_SRLI srli.w
#define LONG_SRLV srl.w
-#define LONG_SRA srai.w
+#define LONG_SRAI srai.w
#define LONG_SRAV sra.w
+#define LONG_ROTR rotr.w
+#define LONG_ROTRI rotri.w
#ifdef __ASSEMBLER__
#define LONG .word
@@ -121,15 +129,23 @@
#if (__SIZEOF_LONG__ == 8)
#define LONG_ADD add.d
#define LONG_ADDI addi.d
+#define LONG_ALSL alsl.d
+#define LONG_BSTRINS bstrins.d
+#define LONG_BSTRPICK bstrpick.d
#define LONG_SUB sub.d
#define LONG_L ld.d
+#define LONG_LI li.d
+#define LONG_LPTR ldptr.d
#define LONG_S st.d
-#define LONG_SLL slli.d
+#define LONG_SPTR stptr.d
+#define LONG_SLLI slli.d
#define LONG_SLLV sll.d
-#define LONG_SRL srli.d
+#define LONG_SRLI srli.d
#define LONG_SRLV srl.d
-#define LONG_SRA srai.d
+#define LONG_SRAI srai.d
#define LONG_SRAV sra.d
+#define LONG_ROTR rotr.d
+#define LONG_ROTRI rotri.d
#ifdef __ASSEMBLER__
#define LONG .dword
@@ -145,16 +161,23 @@
#if (__SIZEOF_POINTER__ == 4)
#define PTR_ADD add.w
#define PTR_ADDI addi.w
+#define PTR_ALSL alsl.w
+#define PTR_BSTRINS bstrins.w
+#define PTR_BSTRPICK bstrpick.w
#define PTR_SUB sub.w
#define PTR_L ld.w
-#define PTR_S st.w
#define PTR_LI li.w
-#define PTR_SLL slli.w
+#define PTR_LPTR ld.w
+#define PTR_S st.w
+#define PTR_SPTR st.w
+#define PTR_SLLI slli.w
#define PTR_SLLV sll.w
-#define PTR_SRL srli.w
+#define PTR_SRLI srli.w
#define PTR_SRLV srl.w
-#define PTR_SRA srai.w
+#define PTR_SRAI srai.w
#define PTR_SRAV sra.w
+#define PTR_ROTR rotr.w
+#define PTR_ROTRI rotri.w
#define PTR_SCALESHIFT 2
@@ -168,16 +191,23 @@
#if (__SIZEOF_POINTER__ == 8)
#define PTR_ADD add.d
#define PTR_ADDI addi.d
+#define PTR_ALSL alsl.d
+#define PTR_BSTRINS bstrins.d
+#define PTR_BSTRPICK bstrpick.d
#define PTR_SUB sub.d
#define PTR_L ld.d
-#define PTR_S st.d
#define PTR_LI li.d
-#define PTR_SLL slli.d
+#define PTR_LPTR ldptr.d
+#define PTR_S st.d
+#define PTR_SPTR stptr.d
+#define PTR_SLLI slli.d
#define PTR_SLLV sll.d
-#define PTR_SRL srli.d
+#define PTR_SRLI srli.d
#define PTR_SRLV srl.d
-#define PTR_SRA srai.d
+#define PTR_SRAI srai.d
#define PTR_SRAV sra.d
+#define PTR_ROTR rotr.d
+#define PTR_ROTRI rotri.d
#define PTR_SCALESHIFT 3
@@ -190,10 +220,17 @@
/* Annotate a function as being unsuitable for kprobes. */
#ifdef CONFIG_KPROBES
+#ifdef CONFIG_32BIT
+#define _ASM_NOKPROBE(name) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ .long name; \
+ .popsection
+#else
#define _ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
.quad name; \
.popsection
+#endif
#else
#define _ASM_NOKPROBE(name)
#endif
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 8d7f501b0a12..a648be5f723f 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -5,43 +5,55 @@
#ifndef _ASM_ASMMACRO_H
#define _ASM_ASMMACRO_H
+#include <linux/sizes.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
+#ifdef CONFIG_64BIT
+#define TASK_STRUCT_OFFSET 0
+#else
+#define TASK_STRUCT_OFFSET 2000
+#endif
+
.macro cpu_save_nonscratch thread
- stptr.d s0, \thread, THREAD_REG23
- stptr.d s1, \thread, THREAD_REG24
- stptr.d s2, \thread, THREAD_REG25
- stptr.d s3, \thread, THREAD_REG26
- stptr.d s4, \thread, THREAD_REG27
- stptr.d s5, \thread, THREAD_REG28
- stptr.d s6, \thread, THREAD_REG29
- stptr.d s7, \thread, THREAD_REG30
- stptr.d s8, \thread, THREAD_REG31
- stptr.d sp, \thread, THREAD_REG03
- stptr.d fp, \thread, THREAD_REG22
+ LONG_SPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET)
+ LONG_SPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET)
+ LONG_SPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET)
+ LONG_SPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET)
+ LONG_SPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET)
.endm
.macro cpu_restore_nonscratch thread
- ldptr.d s0, \thread, THREAD_REG23
- ldptr.d s1, \thread, THREAD_REG24
- ldptr.d s2, \thread, THREAD_REG25
- ldptr.d s3, \thread, THREAD_REG26
- ldptr.d s4, \thread, THREAD_REG27
- ldptr.d s5, \thread, THREAD_REG28
- ldptr.d s6, \thread, THREAD_REG29
- ldptr.d s7, \thread, THREAD_REG30
- ldptr.d s8, \thread, THREAD_REG31
- ldptr.d ra, \thread, THREAD_REG01
- ldptr.d sp, \thread, THREAD_REG03
- ldptr.d fp, \thread, THREAD_REG22
+ LONG_LPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET)
+ LONG_LPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET)
+ LONG_LPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET)
+ LONG_LPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET)
+ LONG_LPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET)
.endm
.macro fpu_save_csr thread tmp
movfcsr2gr \tmp, fcsr0
+#ifdef CONFIG_32BIT
+ st.w \tmp, \thread, THREAD_FCSR
+#else
stptr.w \tmp, \thread, THREAD_FCSR
+#endif
#ifdef CONFIG_CPU_HAS_LBT
/* TM bit is always 0 if LBT not supported */
andi \tmp, \tmp, FPU_CSR_TM
@@ -56,7 +68,11 @@
.endm
.macro fpu_restore_csr thread tmp0 tmp1
+#ifdef CONFIG_32BIT
+ ld.w \tmp0, \thread, THREAD_FCSR
+#else
ldptr.w \tmp0, \thread, THREAD_FCSR
+#endif
movgr2fcsr fcsr0, \tmp0
#ifdef CONFIG_CPU_HAS_LBT
/* TM bit is always 0 if LBT not supported */
@@ -88,9 +104,52 @@
#endif
.endm
+#ifdef CONFIG_32BIT
.macro fpu_save_cc thread tmp0 tmp1
movcf2gr \tmp0, $fcc0
- move \tmp1, \tmp0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.w \tmp1, \tmp0, 31, 24
+ st.w \tmp1, \thread, THREAD_FCC
+ movcf2gr \tmp0, $fcc4
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc5
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc6
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc7
+ bstrins.w \tmp1, \tmp0, 31, 24
+ st.w \tmp1, \thread, (THREAD_FCC + 4)
+ .endm
+
+ .macro fpu_restore_cc thread tmp0 tmp1
+ ld.w \tmp0, \thread, THREAD_FCC
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ ld.w \tmp0, \thread, (THREAD_FCC + 4)
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc4, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc5, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc6, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc7, \tmp1
+ .endm
+#else
+ .macro fpu_save_cc thread tmp0 tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.d \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
@@ -109,7 +168,7 @@
.endm
.macro fpu_restore_cc thread tmp0 tmp1
- ldptr.d \tmp0, \thread, THREAD_FCC
+ ldptr.d \tmp0, \thread, THREAD_FCC
bstrpick.d \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.d \tmp1, \tmp0, 15, 8
@@ -127,6 +186,7 @@
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
+#endif
.macro fpu_save_double thread tmp
li.w \tmp, THREAD_FPR0
@@ -606,12 +666,14 @@
766:
lu12i.w \reg, 0
ori \reg, \reg, 0
+#ifdef CONFIG_64BIT
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
+#endif
.pushsection ".la_abs", "aw", %progbits
- .p2align 3
- .dword 766b
- .dword \sym
+ .p2align PTRLOG
+ PTR 766b
+ PTR \sym
.popsection
#endif
.endm
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 5cb568a60cf8..b44930fbb675 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -38,22 +38,42 @@
cfi_restore \reg \offset \docfi
.endm
+ .macro SETUP_TWINS temp
+ pcaddi t0, 0
+ PTR_LI t1, ~TO_PHYS_MASK
+ and t0, t0, t1
+ ori t0, t0, (1 << 4 | 1)
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ PTR_LI t0, CSR_DMW1_INIT
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+ .endm
+
+ .macro SETUP_MODES temp
+ /* Enable PG */
+ li.w \temp, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr \temp, LOONGARCH_CSR_CRMD
+ li.w \temp, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr \temp, LOONGARCH_CSR_PRMD
+ li.w \temp, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr \temp, LOONGARCH_CSR_EUEN
+ .endm
+
.macro SETUP_DMWINS temp
- li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx
+ PTR_LI \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN0
- li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
+ PTR_LI \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN1
- li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
+ PTR_LI \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN2
- li.d \temp, CSR_DMW3_INIT # 0x0, unused
+ PTR_LI \temp, CSR_DMW3_INIT # 0x0, unused
csrwr \temp, LOONGARCH_CSR_DMWIN3
.endm
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
- li.d \temp1, CACHE_BASE
+ PTR_LI \temp1, CACHE_BASE
pcaddi \temp2, 0
- bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
+ PTR_BSTRINS \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
@@ -171,7 +191,7 @@
andi t0, t0, 0x3 /* extract pplv bit */
beqz t0, 9f
- li.d tp, ~_THREAD_MASK
+ LONG_LI tp, ~_THREAD_MASK
and tp, tp, sp
cfi_st u0, PT_R21, \docfi
csrrd u0, PERCPU_BASE_KS
--
2.47.3
* [PATCH V4 04/14] LoongArch: Adjust boot & setup for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (2 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 03/14] LoongArch: Adjust common macro definitions " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 05/14] LoongArch: Adjust memory management " Huacai Chen
` (9 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust boot & setup for both 32BIT and 64BIT, including: efi header
definition, MAX_IO_PICS definition, kernel entry and environment setup
routines, etc.
Add a fallback path in fdt_cpu_clk_init() to avoid 0MHz in /proc/cpuinfo
if there is no valid clock freq from firmware.
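A simplified sketch of that fallback pattern (error handling and
of_node_put() elided, see the env.c hunk below for the real code): set a
conservative default first, then override it only if the devicetree
provides a usable clock.

cpu_clock_freq = 200 * 1000 * 1000;     /* conservative default: 200 MHz */

clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
        cpu_clock_freq = clk_get_rate(clk);
        clk_put(clk);
} else {
        pr_warn("No valid CPU clock freq, assume 200MHz.\n");
}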
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/addrspace.h | 2 +-
arch/loongarch/include/asm/irq.h | 5 ++++
arch/loongarch/kernel/efi-header.S | 4 +++
arch/loongarch/kernel/efi.c | 4 ++-
arch/loongarch/kernel/env.c | 5 +++-
arch/loongarch/kernel/head.S | 39 +++++++++++---------------
arch/loongarch/kernel/relocate.c | 9 +++++-
7 files changed, 42 insertions(+), 26 deletions(-)
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index e739dbc6329d..9766a100504a 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -42,7 +42,7 @@ extern unsigned long vm_map_base;
#endif
#define DMW_PABITS 48
-#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
+#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1)
/*
* Memory above this physical address will be considered highmem.
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 12bd15578c33..cf6c82a9117b 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -53,7 +53,12 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
+#ifdef CONFIG_32BIT
+#define MAX_IO_PICS 1
+#else
#define MAX_IO_PICS 8
+#endif
+
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
index ba0bdbf86aa8..6df56241cb95 100644
--- a/arch/loongarch/kernel/efi-header.S
+++ b/arch/loongarch/kernel/efi-header.S
@@ -9,7 +9,11 @@
.macro __EFI_PE_HEADER
.long IMAGE_NT_SIGNATURE
.Lcoff_header:
+#ifdef CONFIG_32BIT
+ .short IMAGE_FILE_MACHINE_LOONGARCH32 /* Machine */
+#else
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
+#endif
.short .Lsection_count /* NumberOfSections */
.long 0 /* TimeDateStamp */
.long 0 /* PointerToSymbolTable */
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index 860a3bc030e0..52c21c895318 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -115,7 +115,9 @@ void __init efi_init(void)
efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
- set_bit(EFI_64BIT, &efi.flags);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_bit(EFI_64BIT, &efi.flags);
+
efi_nr_tables = efi_systab->nr_tables;
efi_config_table = (unsigned long)efi_systab->tables;
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 23bd5ae2212c..841206fde3ab 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -72,9 +72,12 @@ static int __init fdt_cpu_clk_init(void)
clk = of_clk_get(np, 0);
of_node_put(np);
+ cpu_clock_freq = 200 * 1000 * 1000;
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ pr_warn("No valid CPU clock freq, assume 200MHz.\n");
return -ENODEV;
+ }
cpu_clock_freq = clk_get_rate(clk);
clk_put(clk);
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index e3865e92a917..aba548db2446 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -43,36 +43,29 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
SYM_CODE_START(kernel_entry) # kernel entry point
- /* Config direct window and set PG */
- SETUP_DMWINS t0
+ SETUP_TWINS
+ SETUP_MODES t0
JUMP_VIRT_ADDR t0, t1
-
- /* Enable PG */
- li.w t0, 0xb0 # PLV=0, IE=0, PG=1
- csrwr t0, LOONGARCH_CSR_CRMD
- li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
- csrwr t0, LOONGARCH_CSR_PRMD
- li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
- csrwr t0, LOONGARCH_CSR_EUEN
+ SETUP_DMWINS t0
la.pcrel t0, __bss_start # clear .bss
- st.d zero, t0, 0
+ LONG_S zero, t0, 0
la.pcrel t1, __bss_stop - LONGSIZE
1:
- addi.d t0, t0, LONGSIZE
- st.d zero, t0, 0
+ PTR_ADDI t0, t0, LONGSIZE
+ LONG_S zero, t0, 0
bne t0, t1, 1b
la.pcrel t0, fw_arg0
- st.d a0, t0, 0 # firmware arguments
+ PTR_S a0, t0, 0 # firmware arguments
la.pcrel t0, fw_arg1
- st.d a1, t0, 0
+ PTR_S a1, t0, 0
la.pcrel t0, fw_arg2
- st.d a2, t0, 0
+ PTR_S a2, t0, 0
#ifdef CONFIG_PAGE_SIZE_4KB
- li.d t0, 0
- li.d t1, CSR_STFILL
+ LONG_LI t0, 0
+ LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* KSave3 used for percpu base, initialized as 0 */
@@ -98,7 +91,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point
/* Jump to the new kernel: new_pc = current_pc + random_offset */
pcaddi t0, 0
- add.d t0, t0, a0
+ PTR_ADD t0, t0, a0
jirl zero, t0, 0xc
#endif /* CONFIG_RANDOMIZE_BASE */
@@ -121,12 +114,14 @@ SYM_CODE_END(kernel_entry)
*/
SYM_CODE_START(smpboot_entry)
- SETUP_DMWINS t0
+ SETUP_TWINS
+ SETUP_MODES t0
JUMP_VIRT_ADDR t0, t1
+ SETUP_DMWINS t0
#ifdef CONFIG_PAGE_SIZE_4KB
- li.d t0, 0
- li.d t1, CSR_STFILL
+ LONG_LI t0, 0
+ LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* Enable PG */
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 76abbb8d2931..82aa3f035927 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -68,18 +68,25 @@ static inline void __init relocate_absolute(long random_offset)
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
- uint32_t lu12iw, ori, lu32id, lu52id;
+ uint32_t lu12iw, ori;
+#ifdef CONFIG_64BIT
+ uint32_t lu32id, lu52id;
+#endif
union loongarch_instruction *insn = (void *)p->pc;
lu12iw = (v >> 12) & 0xfffff;
ori = v & 0xfff;
+#ifdef CONFIG_64BIT
lu32id = (v >> 32) & 0xfffff;
lu52id = v >> 52;
+#endif
insn[0].reg1i20_format.immediate = lu12iw;
insn[1].reg2i12_format.immediate = ori;
+#ifdef CONFIG_64BIT
insn[2].reg1i20_format.immediate = lu32id;
insn[3].reg2i12_format.immediate = lu52id;
+#endif
}
}
--
2.47.3
* [PATCH V4 05/14] LoongArch: Adjust memory management for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (3 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 04/14] LoongArch: Adjust boot & setup " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 06/14] LoongArch: Adjust process " Huacai Chen
` (8 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann, Yawei Li
Adjust memory management for both 32BIT and 64BIT, including: the address
space definition, the DMW CSR definitions, the page table bits definitions,
boot-time detection of VA/PA bits, page table init, TLB exception handling,
the copy_page/clear_page/dump_tlb libraries, etc.
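One detail worth spelling out is the reworked swap PTE encoding (see the
pgtable.h hunk below); a rough sketch of how the 32BIT constants pack a
swap entry, using a made-up helper and example values for illustration:

    /*
     * 32BIT swap PTE, per the constants below:
     *   __SWP_TYPE_BITS = 5, __SWP_TYPE_SHIFT = 8, __SWP_OFFSET_SHIFT = 14
     * i.e. offset in bits 31..14, E (exclusive) in bit 13, type in bits 12..8.
     */
    static unsigned long mk_swap_val(unsigned long type, unsigned long offset)
    {
            return ((type & 0x1f) << 8) | (offset << 14);
    }
    /* mk_swap_val(2, 0x123) == 0x0048c200 */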
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Yawei Li <liyawei@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/addrspace.h | 13 +
arch/loongarch/include/asm/cpu-features.h | 3 -
arch/loongarch/include/asm/loongarch.h | 24 ++
arch/loongarch/include/asm/page.h | 2 +-
arch/loongarch/include/asm/pgtable-bits.h | 36 ++-
arch/loongarch/include/asm/pgtable.h | 74 +++--
arch/loongarch/kernel/cpu-probe.c | 6 +-
arch/loongarch/lib/dump_tlb.c | 8 +
arch/loongarch/mm/init.c | 4 +-
arch/loongarch/mm/page.S | 118 ++++----
arch/loongarch/mm/tlb.c | 2 +
arch/loongarch/mm/tlbex.S | 322 +++++++++++++++-------
12 files changed, 418 insertions(+), 194 deletions(-)
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index 9766a100504a..d6472cafb32c 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -38,11 +38,20 @@ extern unsigned long vm_map_base;
#endif
#ifndef WRITECOMBINE_BASE
+#ifdef CONFIG_32BIT
+#define WRITECOMBINE_BASE CSR_DMW0_BASE
+#else
#define WRITECOMBINE_BASE CSR_DMW2_BASE
#endif
+#endif
+#ifdef CONFIG_32BIT
+#define DMW_PABITS 29
+#define TO_PHYS_MASK ((_UL(1) << _UL(DMW_PABITS)) - 1)
+#else
#define DMW_PABITS 48
#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1)
+#endif
/*
* Memory above this physical address will be considered highmem.
@@ -112,7 +121,11 @@ extern unsigned long vm_map_base;
/*
* Returns the physical address of a KPRANGEx / XKPRANGE address
*/
+#ifdef CONFIG_32BIT
+#define PHYSADDR(a) ((_ACAST32_(a)) & TO_PHYS_MASK)
+#else
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
+#endif
/*
* On LoongArch, I/O ports mappring is following:
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index bd5f0457ad21..3745d991a99a 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -20,16 +20,13 @@
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
#ifdef CONFIG_32BIT
-# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
# define cpu_vabits 31
# define cpu_pabits 31
#endif
#ifdef CONFIG_64BIT
-# define cpu_has_64bits 1
# define cpu_vabits cpu_data[0].vabits
# define cpu_pabits cpu_data[0].pabits
-# define __NEED_ADDRBITS_PROBE
#endif
/*
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 9f71a79271da..804341bd8d2e 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -912,6 +912,26 @@
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
/* Direct Map window 0/1/2/3 */
+
+#ifdef CONFIG_32BIT
+
+#define CSR_DMW0_PLV0 (1 << 0)
+#define CSR_DMW0_VSEG (0x4)
+#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
+#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0)
+
+#define CSR_DMW1_PLV0 (1 << 0)
+#define CSR_DMW1_MAT (1 << 4)
+#define CSR_DMW1_VSEG (0x5)
+#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
+#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+
+#define CSR_DMW2_INIT 0x0
+
+#define CSR_DMW3_INIT 0x0
+
+#else
+
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
#define CSR_DMW0_VSEG _CONST64_(0x8000)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@@ -931,6 +951,8 @@
#define CSR_DMW3_INIT 0x0
+#endif
+
/* Performance Counter registers */
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@@ -1388,8 +1410,10 @@ __BUILD_CSR_OP(tlbidx)
#define ENTRYLO_C_SHIFT 4
#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT)
#define ENTRYLO_G (_ULCAST_(1) << 6)
+#ifdef CONFIG_64BIT
#define ENTRYLO_NR (_ULCAST_(1) << 61)
#define ENTRYLO_NX (_ULCAST_(1) << 62)
+#endif
/* Values for PageSize register */
#define PS_4K 0x0000000c
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index a3aaf34fba16..256d1ff7a1e3 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -10,7 +10,7 @@
#include <vdso/page.h>
-#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - PTRLOG)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 2fc3789220ac..b565573cd82e 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -6,6 +6,26 @@
#define _ASM_PGTABLE_BITS_H
/* Page table bits */
+
+#ifdef CONFIG_32BIT
+#define _PAGE_VALID_SHIFT 0
+#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
+#define _PAGE_DIRTY_SHIFT 1
+#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
+#define _CACHE_SHIFT 4 /* 4~5, two bits */
+#define _PAGE_GLOBAL_SHIFT 6
+#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */
+#define _PAGE_PRESENT_SHIFT 7
+#define _PAGE_PFN_SHIFT 8
+#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
+#define _PAGE_SWP_EXCLUSIVE_SHIFT 13
+#define _PAGE_PFN_END_SHIFT 28
+#define _PAGE_WRITE_SHIFT 29
+#define _PAGE_MODIFIED_SHIFT 30
+#define _PAGE_PRESENT_INVALID_SHIFT 31
+#endif
+
+#ifdef CONFIG_64BIT
#define _PAGE_VALID_SHIFT 0
#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
#define _PAGE_DIRTY_SHIFT 1
@@ -18,14 +38,15 @@
#define _PAGE_MODIFIED_SHIFT 9
#define _PAGE_PROTNONE_SHIFT 10
#define _PAGE_SPECIAL_SHIFT 11
-#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_PFN_SHIFT 12
+#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
#define _PAGE_RPLV_SHIFT 63
+#endif
/* Used by software */
#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
@@ -33,10 +54,15 @@
#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
+#ifdef CONFIG_32BIT
+#define _PAGE_PROTNONE 0
+#define _PAGE_SPECIAL 0
+#else
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+#endif
-/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
+/* We borrow bit 13/23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
/* Used by TLB hardware (placed in EntryLo*) */
@@ -46,9 +72,15 @@
#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
+#ifdef CONFIG_32BIT
+#define _PAGE_NO_READ 0
+#define _PAGE_NO_EXEC 0
+#define _PAGE_RPLV 0
+#else
#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
+#endif
#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 03fb60432fde..9ed2ea23c580 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <asm/addrspace.h>
+#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
@@ -23,37 +24,45 @@
#endif
#if CONFIG_PGTABLE_LEVELS == 2
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
+#ifdef CONFIG_32BIT
+#define VA_BITS 32
+#else
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
+#endif
-#define PTRS_PER_PGD (PAGE_SIZE >> 3)
+#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
-#define PTRS_PER_PUD (PAGE_SIZE >> 3)
+#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-#define PTRS_PER_PMD (PAGE_SIZE >> 3)
+#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG)
#endif
-#define PTRS_PER_PTE (PAGE_SIZE >> 3)
+#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG)
+#ifdef CONFIG_32BIT
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#else
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+#endif
#ifndef __ASSEMBLER__
@@ -74,11 +83,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-/*
- * TLB refill handlers may also map the vmalloc area into xkvrange.
- * Avoid the first couple of pages so NULL pointer dereferences will
- * still reliably trap.
- */
+#ifdef CONFIG_32BIT
+
+#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
+#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE))
+
+#endif
+
+#ifdef CONFIG_64BIT
+
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
@@ -106,6 +119,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+#endif
+
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
@@ -277,7 +292,16 @@ extern void kernel_pte_init(void *addr);
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
*
- * Format of swap PTEs:
+ * Format of 32bit swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------ offset -------------> E <- type -> <-- zeroes -->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * The zero'ed bits include _PAGE_PRESENT.
+ *
+ * Format of 64bit swap PTEs:
*
* 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
* 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
@@ -290,11 +314,21 @@ extern void kernel_pte_init(void *addr);
* E is the exclusive marker that is not stored in swap entries.
* The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
*/
+
+#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
+
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
-{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
+{
+ pte_t pte;
+ pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
+ return pte;
+}
-#define __swp_type(x) (((x).val >> 16) & 0x7f)
-#define __swp_offset(x) ((x).val >> 24)
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 3726cd0885b6..08a227034042 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base);
static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
{
-#ifdef __NEED_ADDRBITS_PROBE
+#ifdef CONFIG_32BIT
+ c->pabits = cpu_pabits;
+ c->vabits = cpu_vabits;
+ vm_map_base = KVRANGE;
+#else
c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
vm_map_base = 0UL - (1UL << c->vabits);
diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c
index 116f21ea4e2c..e1cdad7a676e 100644
--- a/arch/loongarch/lib/dump_tlb.c
+++ b/arch/loongarch/lib/dump_tlb.c
@@ -73,12 +73,16 @@ static void dump_tlb(int first, int last)
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
/* NR/NX are in awkward places, so mask them off separately */
+#ifdef CONFIG_64BIT
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
+#endif
pa = pa & PAGE_MASK;
pr_cont("\n\t[");
+#ifdef CONFIG_64BIT
pr_cont("nr=%d nx=%d ",
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
+#endif
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
@@ -86,11 +90,15 @@ static void dump_tlb(int first, int last)
(entrylo0 & ENTRYLO_G) ? 1 : 0,
(entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
/* NR/NX are in awkward places, so mask them off separately */
+#ifdef CONFIG_64BIT
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
+#endif
pa = pa & PAGE_MASK;
+#ifdef CONFIG_64BIT
pr_cont("nr=%d nx=%d ",
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
+#endif
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 6bfd4b8dad1b..0946662afdd6 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(invalid_pmd_table);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
-#ifdef CONFIG_EXECMEM
+#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
static struct execmem_info execmem_info __ro_after_init;
struct execmem_info __init *execmem_arch_setup(void)
@@ -242,4 +242,4 @@ struct execmem_info __init *execmem_arch_setup(void)
return &execmem_info;
}
-#endif /* CONFIG_EXECMEM */
+#endif /* CONFIG_EXECMEM && MODULES_VADDR */
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
index 7ad76551d313..7286b804756d 100644
--- a/arch/loongarch/mm/page.S
+++ b/arch/loongarch/mm/page.S
@@ -10,75 +10,75 @@
.align 5
SYM_FUNC_START(clear_page)
- lu12i.w t0, 1 << (PAGE_SHIFT - 12)
- add.d t0, t0, a0
+ lu12i.w t0, 1 << (PAGE_SHIFT - 12)
+ PTR_ADD t0, t0, a0
1:
- st.d zero, a0, 0
- st.d zero, a0, 8
- st.d zero, a0, 16
- st.d zero, a0, 24
- st.d zero, a0, 32
- st.d zero, a0, 40
- st.d zero, a0, 48
- st.d zero, a0, 56
- addi.d a0, a0, 128
- st.d zero, a0, -64
- st.d zero, a0, -56
- st.d zero, a0, -48
- st.d zero, a0, -40
- st.d zero, a0, -32
- st.d zero, a0, -24
- st.d zero, a0, -16
- st.d zero, a0, -8
- bne t0, a0, 1b
+ LONG_S zero, a0, (LONGSIZE * 0)
+ LONG_S zero, a0, (LONGSIZE * 1)
+ LONG_S zero, a0, (LONGSIZE * 2)
+ LONG_S zero, a0, (LONGSIZE * 3)
+ LONG_S zero, a0, (LONGSIZE * 4)
+ LONG_S zero, a0, (LONGSIZE * 5)
+ LONG_S zero, a0, (LONGSIZE * 6)
+ LONG_S zero, a0, (LONGSIZE * 7)
+ PTR_ADDI a0, a0, (LONGSIZE * 16)
+ LONG_S zero, a0, -(LONGSIZE * 8)
+ LONG_S zero, a0, -(LONGSIZE * 7)
+ LONG_S zero, a0, -(LONGSIZE * 6)
+ LONG_S zero, a0, -(LONGSIZE * 5)
+ LONG_S zero, a0, -(LONGSIZE * 4)
+ LONG_S zero, a0, -(LONGSIZE * 3)
+ LONG_S zero, a0, -(LONGSIZE * 2)
+ LONG_S zero, a0, -(LONGSIZE * 1)
+ bne t0, a0, 1b
- jr ra
+ jr ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
.align 5
SYM_FUNC_START(copy_page)
- lu12i.w t8, 1 << (PAGE_SHIFT - 12)
- add.d t8, t8, a0
+ lu12i.w t8, 1 << (PAGE_SHIFT - 12)
+ PTR_ADD t8, t8, a0
1:
- ld.d t0, a1, 0
- ld.d t1, a1, 8
- ld.d t2, a1, 16
- ld.d t3, a1, 24
- ld.d t4, a1, 32
- ld.d t5, a1, 40
- ld.d t6, a1, 48
- ld.d t7, a1, 56
+ LONG_L t0, a1, (LONGSIZE * 0)
+ LONG_L t1, a1, (LONGSIZE * 1)
+ LONG_L t2, a1, (LONGSIZE * 2)
+ LONG_L t3, a1, (LONGSIZE * 3)
+ LONG_L t4, a1, (LONGSIZE * 4)
+ LONG_L t5, a1, (LONGSIZE * 5)
+ LONG_L t6, a1, (LONGSIZE * 6)
+ LONG_L t7, a1, (LONGSIZE * 7)
- st.d t0, a0, 0
- st.d t1, a0, 8
- ld.d t0, a1, 64
- ld.d t1, a1, 72
- st.d t2, a0, 16
- st.d t3, a0, 24
- ld.d t2, a1, 80
- ld.d t3, a1, 88
- st.d t4, a0, 32
- st.d t5, a0, 40
- ld.d t4, a1, 96
- ld.d t5, a1, 104
- st.d t6, a0, 48
- st.d t7, a0, 56
- ld.d t6, a1, 112
- ld.d t7, a1, 120
- addi.d a0, a0, 128
- addi.d a1, a1, 128
+ LONG_S t0, a0, (LONGSIZE * 0)
+ LONG_S t1, a0, (LONGSIZE * 1)
+ LONG_L t0, a1, (LONGSIZE * 8)
+ LONG_L t1, a1, (LONGSIZE * 9)
+ LONG_S t2, a0, (LONGSIZE * 2)
+ LONG_S t3, a0, (LONGSIZE * 3)
+ LONG_L t2, a1, (LONGSIZE * 10)
+ LONG_L t3, a1, (LONGSIZE * 11)
+ LONG_S t4, a0, (LONGSIZE * 4)
+ LONG_S t5, a0, (LONGSIZE * 5)
+ LONG_L t4, a1, (LONGSIZE * 12)
+ LONG_L t5, a1, (LONGSIZE * 13)
+ LONG_S t6, a0, (LONGSIZE * 6)
+ LONG_S t7, a0, (LONGSIZE * 7)
+ LONG_L t6, a1, (LONGSIZE * 14)
+ LONG_L t7, a1, (LONGSIZE * 15)
+ PTR_ADDI a0, a0, (LONGSIZE * 16)
+ PTR_ADDI a1, a1, (LONGSIZE * 16)
- st.d t0, a0, -64
- st.d t1, a0, -56
- st.d t2, a0, -48
- st.d t3, a0, -40
- st.d t4, a0, -32
- st.d t5, a0, -24
- st.d t6, a0, -16
- st.d t7, a0, -8
+ LONG_S t0, a0, -(LONGSIZE * 8)
+ LONG_S t1, a0, -(LONGSIZE * 7)
+ LONG_S t2, a0, -(LONGSIZE * 6)
+ LONG_S t3, a0, -(LONGSIZE * 5)
+ LONG_S t4, a0, -(LONGSIZE * 4)
+ LONG_S t5, a0, -(LONGSIZE * 3)
+ LONG_S t6, a0, -(LONGSIZE * 2)
+ LONG_S t7, a0, -(LONGSIZE * 1)
- bne t8, a0, 1b
- jr ra
+ bne t8, a0, 1b
+ jr ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 6e474469e210..6a3c91b9cacd 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -251,8 +251,10 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+#ifdef CONFIG_64BIT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+#endif
pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index c08682a89c58..84a881a339a7 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -11,10 +11,18 @@
#define INVTLB_ADDR_GFALSE_AND_ASID 5
-#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
+#define PTRS_PER_PGD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PUD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PMD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PTE_BITS (PAGE_SHIFT - PTRLOG)
+
+#ifdef CONFIG_32BIT
+#define PTE_LL ll.w
+#define PTE_SC sc.w
+#else
+#define PTE_LL ll.d
+#define PTE_SC sc.d
+#endif
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
@@ -60,52 +68,61 @@ SYM_CODE_START(handle_tlb_load)
vmalloc_done_load:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
+
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
+
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_load
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_load:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
andi ra, t0, _PAGE_PRESENT
beqz ra, nopage_tlb_load
ori t0, t0, _PAGE_VALID
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_load
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
+
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@@ -115,30 +132,28 @@ smp_pgtable_change_load:
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_load:
la_abs t1, swapper_pg_dir
b vmalloc_done_load
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
ori t0, ra, _PAGE_VALID
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@@ -158,27 +173,27 @@ tlb_huge_update_load:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@@ -216,53 +231,71 @@ SYM_CODE_START(handle_tlb_store)
vmalloc_done_store:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_store
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_store:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
+
+#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
+#else
+ PTR_LI ra, _PAGE_PRESENT | _PAGE_WRITE
+ and ra, ra, t0
+ nor ra, ra, zero
+#endif
bnez ra, nopage_tlb_store
+#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_store
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@@ -272,31 +305,42 @@ smp_pgtable_change_store:
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_store:
la_abs t1, swapper_pg_dir
b vmalloc_done_store
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
+
+#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
+#else
+ PTR_LI t0, _PAGE_PRESENT | _PAGE_WRITE
+ and t0, t0, ra
+ nor t0, t0, zero
+#endif
+
bnez t0, nopage_tlb_store
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_store
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
+#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- st.d t0, t1, 0
+#else
+ PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@@ -316,28 +360,28 @@ tlb_huge_update_store:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@@ -375,52 +419,69 @@ SYM_CODE_START(handle_tlb_modify)
vmalloc_done_modify:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_modify
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
+#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_WRITE
+#else
+ PTR_LI ra, _PAGE_WRITE
+ and ra, t0, ra
+#endif
+
beqz ra, nopage_tlb_modify
+#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_modify
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@@ -430,30 +491,40 @@ smp_pgtable_change_modify:
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_modify:
la_abs t1, swapper_pg_dir
b vmalloc_done_modify
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
+
+#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_WRITE
+#else
+ PTR_LI t0, _PAGE_WRITE
+ and t0, ra, t0
+#endif
+
beqz t0, nopage_tlb_modify
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_modify
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
+#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- st.d t0, t1, 0
+#else
+ PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@@ -473,28 +544,28 @@ tlb_huge_update_modify:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@@ -517,6 +588,44 @@ SYM_CODE_START(handle_tlb_modify_ptw)
jr t0
SYM_CODE_END(handle_tlb_modify_ptw)
+#ifdef CONFIG_32BIT
+SYM_CODE_START(handle_tlb_refill)
+ UNWIND_HINT_UNDEFINED
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+ li.w ra, 0x1fffffff
+
+ csrrd t0, LOONGARCH_CSR_PGD
+ csrrd t1, LOONGARCH_CSR_TLBRBADV
+ srli.w t1, t1, PGDIR_SHIFT
+ slli.w t1, t1, 0x2
+ add.w t0, t0, t1
+ and t0, t0, ra
+
+ ld.w t0, t0, 0
+ csrrd t1, LOONGARCH_CSR_TLBRBADV
+ slli.w t1, t1, (32 - PGDIR_SHIFT)
+ srli.w t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1)
+ slli.w t1, t1, (0x2 + 1)
+ add.w t0, t0, t1
+ and t0, t0, ra
+
+ ld.w t1, t0, 0x0
+ csrwr t1, LOONGARCH_CSR_TLBRELO0
+
+ ld.w t1, t0, 0x4
+ csrwr t1, LOONGARCH_CSR_TLBRELO1
+
+ tlbfill
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+SYM_CODE_END(handle_tlb_refill)
+#endif
+
+#ifdef CONFIG_64BIT
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
@@ -534,3 +643,4 @@ SYM_CODE_START(handle_tlb_refill)
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
SYM_CODE_END(handle_tlb_refill)
+#endif
--
2.47.3
* [PATCH V4 06/14] LoongArch: Adjust process management for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (4 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 05/14] LoongArch: Adjust memory management " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 07/14] LoongArch: Adjust time routines " Huacai Chen
` (7 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust process management for both 32BIT and 64BIT, including: CPU
context switching, FPU loading/restoring, process dumping and process
tracing routines.
Q: Why modify switch.S?
A: LoongArch32 has no ldptr.d/stptr.d instructions, and the asm offsets
of thread_struct members are too large to fit in the signed 12-bit
immediate field of ld.w/st.w (range -2048..2047), so the base register
is pre-biased by TASK_STRUCT_OFFSET and the offsets are rebased against
it.
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/elf.h | 1 +
arch/loongarch/include/asm/inst.h | 12 ++-
arch/loongarch/include/uapi/asm/ptrace.h | 10 ++
arch/loongarch/kernel/fpu.S | 111 +++++++++++++++++++++++
arch/loongarch/kernel/process.c | 6 +-
arch/loongarch/kernel/ptrace.c | 5 +
arch/loongarch/kernel/switch.S | 28 ++++--
7 files changed, 157 insertions(+), 16 deletions(-)
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index f16bd42456e4..1b6489427e30 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -156,6 +156,7 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs);
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#ifdef CONFIG_32BIT
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 55e64a12a124..f9f207082d0e 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -438,8 +438,10 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
- /* st.d $ra, $sp, offset */
- return ip->reg2i12_format.opcode == std_op &&
+ const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? stw_op : std_op;
+
+ /* st.w / st.d $ra, $sp, offset */
+ return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
!is_imm12_negative(ip->reg2i12_format.immediate);
@@ -447,8 +449,10 @@ static inline bool is_ra_save_ins(union loongarch_instruction *ip)
static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
{
- /* addi.d $sp, $sp, -imm */
- return ip->reg2i12_format.opcode == addid_op &&
+ const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? addiw_op : addid_op;
+
+ /* addi.w / addi.d $sp, $sp, -imm */
+ return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
is_imm12_negative(ip->reg2i12_format.immediate);
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index 215e0f9e8aa3..b35c794323bc 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -61,8 +61,13 @@ struct user_lbt_state {
struct user_watch_state {
__u64 dbg_info;
struct {
+#if __BITS_PER_LONG == 32
+ __u32 addr;
+ __u32 mask;
+#else
__u64 addr;
__u64 mask;
+#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[8];
@@ -71,8 +76,13 @@ struct user_watch_state {
struct user_watch_state_v2 {
__u64 dbg_info;
struct {
+#if __BITS_PER_LONG == 32
+ __u32 addr;
+ __u32 mask;
+#else
__u64 addr;
__u64 mask;
+#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[14];
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 28caf416ae36..f225dcc5b530 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -96,6 +96,49 @@
EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
.endm
+#ifdef CONFIG_32BIT
+ .macro sc_save_fcc thread tmp0 tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.w \tmp1, \tmp0, 31, 24
+ EX st.w \tmp1, \thread, THREAD_FCC
+ movcf2gr \tmp0, $fcc4
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc5
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc6
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc7
+ bstrins.w \tmp1, \tmp0, 31, 24
+ EX st.w \tmp1, \thread, (THREAD_FCC + 4)
+ .endm
+
+ .macro sc_restore_fcc thread tmp0 tmp1
+ EX ld.w \tmp0, \thread, THREAD_FCC
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ EX ld.w \tmp0, \thread, (THREAD_FCC + 4)
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc4, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc5, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc6, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc7, \tmp1
+ .endm
+#else
.macro sc_save_fcc base, tmp0, tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
@@ -135,6 +178,7 @@
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
+#endif
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
@@ -410,6 +454,72 @@ SYM_FUNC_START(_init_fpu)
li.w t1, -1 # SNaN
+#ifdef CONFIG_32BIT
+ movgr2fr.w $f0, t1
+ movgr2frh.w $f0, t1
+ movgr2fr.w $f1, t1
+ movgr2frh.w $f1, t1
+ movgr2fr.w $f2, t1
+ movgr2frh.w $f2, t1
+ movgr2fr.w $f3, t1
+ movgr2frh.w $f3, t1
+ movgr2fr.w $f4, t1
+ movgr2frh.w $f4, t1
+ movgr2fr.w $f5, t1
+ movgr2frh.w $f5, t1
+ movgr2fr.w $f6, t1
+ movgr2frh.w $f6, t1
+ movgr2fr.w $f7, t1
+ movgr2frh.w $f7, t1
+ movgr2fr.w $f8, t1
+ movgr2frh.w $f8, t1
+ movgr2fr.w $f9, t1
+ movgr2frh.w $f9, t1
+ movgr2fr.w $f10, t1
+ movgr2frh.w $f10, t1
+ movgr2fr.w $f11, t1
+ movgr2frh.w $f11, t1
+ movgr2fr.w $f12, t1
+ movgr2frh.w $f12, t1
+ movgr2fr.w $f13, t1
+ movgr2frh.w $f13, t1
+ movgr2fr.w $f14, t1
+ movgr2frh.w $f14, t1
+ movgr2fr.w $f15, t1
+ movgr2frh.w $f15, t1
+ movgr2fr.w $f16, t1
+ movgr2frh.w $f16, t1
+ movgr2fr.w $f17, t1
+ movgr2frh.w $f17, t1
+ movgr2fr.w $f18, t1
+ movgr2frh.w $f18, t1
+ movgr2fr.w $f19, t1
+ movgr2frh.w $f19, t1
+ movgr2fr.w $f20, t1
+ movgr2frh.w $f20, t1
+ movgr2fr.w $f21, t1
+ movgr2frh.w $f21, t1
+ movgr2fr.w $f22, t1
+ movgr2frh.w $f22, t1
+ movgr2fr.w $f23, t1
+ movgr2frh.w $f23, t1
+ movgr2fr.w $f24, t1
+ movgr2frh.w $f24, t1
+ movgr2fr.w $f25, t1
+ movgr2frh.w $f25, t1
+ movgr2fr.w $f26, t1
+ movgr2frh.w $f26, t1
+ movgr2fr.w $f27, t1
+ movgr2frh.w $f27, t1
+ movgr2fr.w $f28, t1
+ movgr2frh.w $f28, t1
+ movgr2fr.w $f29, t1
+ movgr2frh.w $f29, t1
+ movgr2fr.w $f30, t1
+ movgr2frh.w $f30, t1
+ movgr2fr.w $f31, t1
+ movgr2frh.w $f31, t1
+#else
movgr2fr.d $f0, t1
movgr2fr.d $f1, t1
movgr2fr.d $f2, t1
@@ -442,6 +552,7 @@ SYM_FUNC_START(_init_fpu)
movgr2fr.d $f29, t1
movgr2fr.d $f30, t1
movgr2fr.d $f31, t1
+#endif
jr ra
SYM_FUNC_END(_init_fpu)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index d1e04f9e0f79..4ac1c3086152 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -382,8 +382,11 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_32BIT
+void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs)
+#else
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+#endif
{
unsigned int i;
@@ -400,4 +403,3 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
-#endif /* CONFIG_64BIT */
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index 8edd0954e55a..bea376e87d4e 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -650,8 +650,13 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event_attr attr;
/* Kernel-space address cannot be monitored by user-space */
+#ifdef CONFIG_32BIT
+ if ((unsigned long)addr >= KPRANGE1)
+ return -EINVAL;
+#else
if ((unsigned long)addr >= XKPRANGE)
return -EINVAL;
+#endif
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 3007e909e0d8..f377d8f5c51a 100644
--- a/arch/loongarch/kernel/switch.S
+++ b/arch/loongarch/kernel/switch.S
@@ -16,18 +16,23 @@
*/
.align 5
SYM_FUNC_START(__switch_to)
- csrrd t1, LOONGARCH_CSR_PRMD
- stptr.d t1, a0, THREAD_CSRPRMD
+#ifdef CONFIG_32BIT
+ PTR_ADDI a0, a0, TASK_STRUCT_OFFSET
+ PTR_ADDI a1, a1, TASK_STRUCT_OFFSET
+#endif
+ csrrd t1, LOONGARCH_CSR_PRMD
+ LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
cpu_save_nonscratch a0
- stptr.d ra, a0, THREAD_REG01
- stptr.d a3, a0, THREAD_SCHED_RA
- stptr.d a4, a0, THREAD_SCHED_CFA
+ LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET)
+ LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET)
+
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
- la t7, __stack_chk_guard
- ldptr.d t8, a1, TASK_STACK_CANARY
- stptr.d t8, t7, 0
+ la t7, __stack_chk_guard
+ LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET)
+ LONG_SPTR t8, t7, 0
#endif
+
move tp, a2
cpu_restore_nonscratch a1
@@ -35,8 +40,11 @@ SYM_FUNC_START(__switch_to)
PTR_ADD t0, t0, tp
set_saved_sp t0, t1, t2
- ldptr.d t1, a1, THREAD_CSRPRMD
- csrwr t1, LOONGARCH_CSR_PRMD
+ LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
+ csrwr t1, LOONGARCH_CSR_PRMD
+#ifdef CONFIG_32BIT
+ PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET
+#endif
jr ra
SYM_FUNC_END(__switch_to)
--
2.47.3
* [PATCH V4 07/14] LoongArch: Adjust time routines for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (5 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 06/14] LoongArch: Adjust process " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 08/14] LoongArch: Adjust module loader " Huacai Chen
` (6 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust time routines for both 32BIT and 64BIT, including: rdtime_h() /
rdtime_l() definitions for 32BIT and an rdtime_d() definition for 64BIT,
get_cycles() and get_cycles64() definitions for 32BIT/64BIT, showing
time and frequency info ("CPU MHz" and "BogoMIPS") in /proc/cpuinfo, etc.
Use do_div() for divisions, which works on both 32BIT and 64BIT platforms.
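do_div() is a macro that divides a u64 lvalue in place by a 32-bit
divisor and evaluates to the remainder, so it avoids the 64-by-64
division helpers that a 32BIT build would otherwise need. A minimal
usage sketch (the helper name and values are illustrative, not taken
from this series):

    #include <linux/types.h>
    #include <asm/div64.h>                  /* do_div() */

    /* Convert a frequency in Hz to whole MHz (sketch only). */
    static unsigned int hz_to_mhz(u64 hz)
    {
            u32 rem = do_div(hz, 1000000); /* hz /= 1000000, rem = old hz % 1000000 */

            (void)rem;                     /* remainder not needed here */
            return (unsigned int)hz;
    }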
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/loongarch.h | 32 ++++++++++++++++++++++++-
arch/loongarch/include/asm/timex.h | 33 +++++++++++++++++++++++++-
arch/loongarch/kernel/proc.c | 10 ++++----
arch/loongarch/kernel/syscall.c | 2 +-
arch/loongarch/kernel/time.c | 15 ++++++------
arch/loongarch/kvm/vcpu.c | 5 ++--
6 files changed, 80 insertions(+), 17 deletions(-)
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 804341bd8d2e..19e3f2c183fe 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -1238,7 +1238,35 @@
#ifndef __ASSEMBLER__
-static __always_inline u64 drdtime(void)
+#ifdef CONFIG_32BIT
+
+static __always_inline u32 rdtime_h(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__(
+ "rdtimeh.w %0, $zero\n\t"
+ : "=r"(val)
+ :
+ );
+ return val;
+}
+
+static __always_inline u32 rdtime_l(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__(
+ "rdtimel.w %0, $zero\n\t"
+ : "=r"(val)
+ :
+ );
+ return val;
+}
+
+#else
+
+static __always_inline u64 rdtime_d(void)
{
u64 val = 0;
@@ -1250,6 +1278,8 @@ static __always_inline u64 drdtime(void)
return val;
}
+#endif
+
static inline unsigned int get_csr_cpuid(void)
{
return csr_read32(LOONGARCH_CSR_CPUID);
diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h
index fb41e9e7a222..9ea52fad9690 100644
--- a/arch/loongarch/include/asm/timex.h
+++ b/arch/loongarch/include/asm/timex.h
@@ -18,7 +18,38 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
- return drdtime();
+#ifdef CONFIG_32BIT
+ return rdtime_l();
+#else
+ return rdtime_d();
+#endif
+}
+
+#ifdef CONFIG_32BIT
+
+#define get_cycles_hi get_cycles_hi
+
+static inline cycles_t get_cycles_hi(void)
+{
+ return rdtime_h();
+}
+
+#endif
+
+static inline u64 get_cycles64(void)
+{
+#ifdef CONFIG_32BIT
+ u32 hi, lo;
+
+ do {
+ hi = rdtime_h();
+ lo = rdtime_l();
+ } while (hi != rdtime_h());
+
+ return ((u64)hi << 32) | lo;
+#else
+ return rdtime_d();
+#endif
}
#endif /* __KERNEL__ */
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 63d2b7e7e844..a8800d20e11b 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -20,11 +20,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned int prid = cpu_data[n].processor_id;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
+ u64 freq = cpu_clock_freq, bogomips = lpj_fine * cpu_clock_freq;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
+ do_div(freq, 10000);
+ do_div(bogomips, const_clock_freq * (5000/HZ));
/*
* For the first processor also print the system type
@@ -41,11 +44,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
- seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
- cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
- seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
- (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
- ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
+ seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", (u32)freq / 100, (u32)freq % 100);
+ seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", (u32)bogomips / 100, (u32)bogomips % 100);
seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
cpu_pabits + 1, cpu_vabits + 1);
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 168bd97540f8..ab94eb5ce039 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -75,7 +75,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
*
* The resulting 6 bits of entropy is seen in SP[9:4].
*/
- choose_random_kstack_offset(drdtime());
+ choose_random_kstack_offset(get_cycles());
syscall_exit_to_user_mode(regs);
}
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index 1c31bf3a16ed..5892f6da07a5 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -18,6 +18,7 @@
#include <asm/loongarch.h>
#include <asm/paravirt.h>
#include <asm/time.h>
+#include <asm/timex.h>
u64 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
@@ -62,12 +63,12 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
static int constant_set_state_periodic(struct clock_event_device *evt)
{
- unsigned long period;
unsigned long timer_config;
+ u64 period = const_clock_freq;
raw_spin_lock(&state_lock);
- period = const_clock_freq / HZ;
+ do_div(period, HZ);
timer_config = period & CSR_TCFG_VAL;
timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
csr_write(timer_config, LOONGARCH_CSR_TCFG);
@@ -120,7 +121,7 @@ static int arch_timer_dying(unsigned int cpu)
static unsigned long get_loops_per_jiffy(void)
{
- unsigned long lpj = (unsigned long)const_clock_freq;
+ u64 lpj = const_clock_freq;
do_div(lpj, HZ);
@@ -131,7 +132,7 @@ static long init_offset;
void save_counter(void)
{
- init_offset = drdtime();
+ init_offset = get_cycles();
}
void sync_counter(void)
@@ -197,12 +198,12 @@ int constant_clockevent_init(void)
static u64 read_const_counter(struct clocksource *clk)
{
- return drdtime();
+ return get_cycles64();
}
static noinstr u64 sched_clock_read(void)
{
- return drdtime();
+ return get_cycles64();
}
static struct clocksource clocksource_const = {
@@ -235,7 +236,7 @@ void __init time_init(void)
else
const_clock_freq = calc_const_freq();
- init_offset = -(drdtime() - csr_read(LOONGARCH_CSR_CNTC));
+ init_offset = -(get_cycles() - csr_read(LOONGARCH_CSR_CNTC));
constant_clockevent_init();
constant_clocksource_init();
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 1245a6b35896..803224d297eb 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -9,6 +9,7 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
+#include <asm/timex.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -811,7 +812,7 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
case KVM_REG_LOONGARCH_COUNTER:
- *v = drdtime() + vcpu->kvm->arch.time_offset;
+ *v = get_cycles() + vcpu->kvm->arch.time_offset;
break;
case KVM_REG_LOONGARCH_DEBUG_INST:
*v = INSN_HVCL | KVM_HCALL_SWDBG;
@@ -906,7 +907,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
* only set for the first time for smp system
*/
if (vcpu->vcpu_id == 0)
- vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
+ vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
break;
case KVM_REG_LOONGARCH_VCPU_RESET:
vcpu->arch.st.guest_addr = 0;
--
2.47.3
* [PATCH V4 08/14] LoongArch: Adjust module loader for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (6 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 07/14] LoongArch: Adjust time routines " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 09/14] LoongArch: Adjust system call " Huacai Chen
` (5 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust the module loader for both 32BIT and 64BIT, including: change the
s64 type to long, change the u64 type to unsigned long, change the PLT
entry definition and handling, etc.
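The point of the s64 -> long switch is that the stack-machine relocations (R_LARCH_SOP_*) work on values of the native word size, so the same code does 32-bit arithmetic on LoongArch32 and 64-bit arithmetic on LoongArch64. A rough userspace sketch of such a push/pop evaluation, using made-up symbol/PC values and operations merely analogous to the real relocation types:

#include <stddef.h>
#include <stdio.h>

#define RELA_STACK_DEPTH 16

static long rela_stack[RELA_STACK_DEPTH];
static size_t rela_stack_top;

static int push(long v)
{
	if (rela_stack_top >= RELA_STACK_DEPTH)
		return -1;
	rela_stack[rela_stack_top++] = v;
	return 0;
}

static int pop(long *v)
{
	if (rela_stack_top == 0)
		return -1;
	*v = rela_stack[--rela_stack_top];
	return 0;
}

int main(void)
{
	long sym = 0x20004000, addend = 0x10, pc = 0x20000000;
	long opr1, opr2;

	push(sym + addend);		/* like R_LARCH_SOP_PUSH_ABSOLUTE */
	push(pc);
	pop(&opr2);
	pop(&opr1);
	push(opr1 - opr2);		/* like R_LARCH_SOP_SUB */
	pop(&opr1);
	printf("relocation value = 0x%lx\n", (unsigned long)opr1);	/* 0x4010 */
	return 0;
}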
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/module.h | 11 ++++
arch/loongarch/include/asm/percpu.h | 2 +-
arch/loongarch/kernel/module.c | 80 +++++++++++++++++------------
3 files changed, 59 insertions(+), 34 deletions(-)
diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h
index f33f3fd32ecc..d56a968273de 100644
--- a/arch/loongarch/include/asm/module.h
+++ b/arch/loongarch/include/asm/module.h
@@ -38,8 +38,10 @@ struct got_entry {
struct plt_entry {
u32 inst_lu12iw;
+#ifdef CONFIG_64BIT
u32 inst_lu32id;
u32 inst_lu52id;
+#endif
u32 inst_jirl;
};
@@ -57,6 +59,14 @@ static inline struct got_entry emit_got_entry(Elf_Addr val)
static inline struct plt_entry emit_plt_entry(unsigned long val)
{
+#ifdef CONFIG_32BIT
+ u32 lu12iw, jirl;
+
+ lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
+ jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI));
+
+ return (struct plt_entry) { lu12iw, jirl };
+#else
u32 lu12iw, lu32id, lu52id, jirl;
lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
@@ -65,6 +75,7 @@ static inline struct plt_entry emit_plt_entry(unsigned long val)
jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI));
return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
+#endif
}
static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 44a8aea2b0e5..583f2466262f 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -13,7 +13,7 @@
* the loading address of main kernel image, but far from where the modules are
* loaded. Tell the compiler this fact when using explicit relocs.
*/
-#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
+#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) && defined(CONFIG_64BIT)
# if __has_attribute(model)
# define PER_CPU_ATTRIBUTES __attribute__((model("extreme")))
# else
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index 36d6d9eeb7c7..27f6d6b2e3ff 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -22,72 +22,76 @@
#include <asm/inst.h>
#include <asm/unwind.h>
-static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
+static int rela_stack_push(long stack_value, long *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top >= RELA_STACK_DEPTH)
return -ENOEXEC;
rela_stack[(*rela_stack_top)++] = stack_value;
- pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
+ pr_debug("%s stack_value = 0x%lx\n", __func__, stack_value);
return 0;
}
-static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
+static int rela_stack_pop(long *stack_value, long *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top == 0)
return -ENOEXEC;
*stack_value = rela_stack[--(*rela_stack_top)];
- pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
+ pr_debug("%s stack_value = 0x%lx\n", __func__, *stack_value);
return 0;
}
static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return 0;
}
static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type);
return -EINVAL;
}
static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*location = v;
return 0;
}
+#ifdef CONFIG_32BIT
+#define apply_r_larch_64 apply_r_larch_error
+#else
static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*(Elf_Addr *)location = v;
return 0;
}
+#endif
static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
- return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
+ return rela_stack_push(v - (unsigned long)location, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return rela_stack_push(v, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
- s64 opr1;
+ long opr1;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
if (err)
@@ -104,7 +108,7 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add
static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
@@ -118,10 +122,10 @@ static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
}
static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
- s64 opr1, opr2, opr3;
+ long opr1, opr2, opr3;
if (type == R_LARCH_SOP_IF_ELSE) {
err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
@@ -164,10 +168,10 @@ static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
}
static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
- s64 opr1;
+ long opr1;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
@@ -244,31 +248,33 @@ static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Ad
}
overflow:
- pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
+ pr_err("module %s: opr1 = 0x%lx overflow! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
unaligned:
- pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
+ pr_err("module %s: opr1 = 0x%lx unaligned! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
}
static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
switch (type) {
case R_LARCH_ADD32:
*(s32 *)location += v;
return 0;
- case R_LARCH_ADD64:
- *(s64 *)location += v;
- return 0;
case R_LARCH_SUB32:
*(s32 *)location -= v;
return 0;
+#ifdef CONFIG_64BIT
+ case R_LARCH_ADD64:
+ *(s64 *)location += v;
+ return 0;
case R_LARCH_SUB64:
*(s64 *)location -= v;
+#endif
return 0;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
@@ -278,7 +284,7 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
static int apply_r_larch_b26(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
@@ -311,14 +317,16 @@ static int apply_r_larch_b26(struct module *mod,
}
static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
union loongarch_instruction *insn = (union loongarch_instruction *)location;
/* Use s32 for a sign-extension deliberately. */
s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
(void *)((Elf_Addr)location & ~0xfff);
+#ifdef CONFIG_64BIT
Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
ptrdiff_t offset_rem = (void *)v - (void *)anchor;
+#endif
switch (type) {
case R_LARCH_PCALA_LO12:
@@ -328,6 +336,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
v = offset_hi20 >> 12;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
+#ifdef CONFIG_64BIT
case R_LARCH_PCALA64_LO20:
v = offset_rem >> 32;
insn->reg1i20_format.immediate = v & 0xfffff;
@@ -336,6 +345,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
v = offset_rem >> 52;
insn->reg2i12_format.immediate = v & 0xfff;
break;
+#endif
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
@@ -346,7 +356,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
static int apply_r_larch_got_pc(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
Elf_Addr got = module_emit_got_entry(mod, sechdrs, v);
@@ -369,7 +379,7 @@ static int apply_r_larch_got_pc(struct module *mod,
}
static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
@@ -377,14 +387,18 @@ static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
return 0;
}
+#ifdef CONFIG_32BIT
+#define apply_r_larch_64_pcrel apply_r_larch_error
+#else
static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+ long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
*(u64 *)location = offset;
return 0;
}
+#endif
/*
* reloc_handlers_rela() - Apply a particular relocation to a module
@@ -397,7 +411,7 @@ static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
* Return: 0 upon success, else -ERRNO
*/
typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
- s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
+ long *rela_stack, size_t *rela_stack_top, unsigned int type);
/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
@@ -425,7 +439,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
{
int i, err;
unsigned int type;
- s64 rela_stack[RELA_STACK_DEPTH];
+ long rela_stack[RELA_STACK_DEPTH];
size_t rela_stack_top = 0;
reloc_rela_handler handler;
void *location;
@@ -462,9 +476,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return -EINVAL;
}
- pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
+ pr_debug("type %d st_value %lx r_addend %lx loc %lx\n",
(int)ELF_R_TYPE(rel[i].r_info),
- sym->st_value, rel[i].r_addend, (u64)location);
+ (unsigned long)sym->st_value, (unsigned long)rel[i].r_addend, (unsigned long)location);
v = sym->st_value + rel[i].r_addend;
switch (type) {
--
2.47.3
* [PATCH V4 09/14] LoongArch: Adjust system call for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (7 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 08/14] LoongArch: Adjust module loader " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 10/14] LoongArch: Adjust user accessors " Huacai Chen
` (4 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust system call handling for both 32BIT and 64BIT, including: add the
uapi unistd_{32,64}.h and syscall_table_{32,64}.h inclusions, add the
sys_mmap2() definition, change the system call entry routines, etc.
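For reference, mmap2() takes its file offset in units of 4096 bytes regardless of the kernel page size, which is why sys_mmap2() below rejects offsets that do not describe a whole page and shifts by (PAGE_SHIFT - 12). A small userspace sketch of that arithmetic, assuming the 16KB page size (PAGE_SHIFT = 14) selected by loongson32_defconfig:

#include <stdio.h>

#define PAGE_SHIFT	14
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 8;	/* 8 * 4096 bytes = 32KB = two 16KB pages */

	if (offset & (~PAGE_MASK >> 12)) {	/* low bits must be zero */
		printf("-EINVAL: not a whole page\n");
		return 1;
	}

	printf("pgoff = %lu\n", offset >> (PAGE_SHIFT - 12));	/* prints 2 */
	return 0;
}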
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/Kbuild | 1 +
arch/loongarch/include/uapi/asm/Kbuild | 1 +
arch/loongarch/include/uapi/asm/unistd.h | 6 ++++++
arch/loongarch/kernel/Makefile.syscalls | 1 +
arch/loongarch/kernel/entry.S | 22 +++++++++++-----------
arch/loongarch/kernel/syscall.c | 13 +++++++++++++
6 files changed, 33 insertions(+), 11 deletions(-)
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index b04d2cef935f..9034b583a88a 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
syscall-y += syscall_table_64.h
generated-y += orc_hash.h
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index 517761419999..89ac01faa5ae 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
syscall-y += unistd_64.h
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
index 1f01980f9c94..e19c7f2f9f87 100644
--- a/arch/loongarch/include/uapi/asm/unistd.h
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -1,3 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 32
+#include <asm/unistd_32.h>
+#else
#include <asm/unistd_64.h>
+#endif
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
index ab7d9baa2915..cd46c2b69c7f 100644
--- a/arch/loongarch/kernel/Makefile.syscalls
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# No special ABIs on loongarch so far
+syscall_abis_32 +=
syscall_abis_64 +=
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 47e1db9a1ce4..b53d333a7c42 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -23,24 +23,24 @@ SYM_CODE_START(handle_syscall)
UNWIND_HINT_UNDEFINED
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
- add.d t1, t1, t0
+ PTR_ADD t1, t1, t0
move t2, sp
- ld.d sp, t1, 0
+ PTR_L sp, t1, 0
- addi.d sp, sp, -PT_SIZE
+ PTR_ADDI sp, sp, -PT_SIZE
cfi_st t2, PT_R3
cfi_rel_offset sp, PT_R3
- st.d zero, sp, PT_R0
+ LONG_S zero, sp, PT_R0
csrrd t2, LOONGARCH_CSR_PRMD
- st.d t2, sp, PT_PRMD
+ LONG_S t2, sp, PT_PRMD
csrrd t2, LOONGARCH_CSR_CRMD
- st.d t2, sp, PT_CRMD
+ LONG_S t2, sp, PT_CRMD
csrrd t2, LOONGARCH_CSR_EUEN
- st.d t2, sp, PT_EUEN
+ LONG_S t2, sp, PT_EUEN
csrrd t2, LOONGARCH_CSR_ECFG
- st.d t2, sp, PT_ECFG
+ LONG_S t2, sp, PT_ECFG
csrrd t2, LOONGARCH_CSR_ESTAT
- st.d t2, sp, PT_ESTAT
+ LONG_S t2, sp, PT_ESTAT
cfi_st ra, PT_R1
cfi_st a0, PT_R4
cfi_st a1, PT_R5
@@ -51,7 +51,7 @@ SYM_CODE_START(handle_syscall)
cfi_st a6, PT_R10
cfi_st a7, PT_R11
csrrd ra, LOONGARCH_CSR_ERA
- st.d ra, sp, PT_ERA
+ LONG_S ra, sp, PT_ERA
cfi_rel_offset ra, PT_ERA
cfi_st tp, PT_R2
@@ -67,7 +67,7 @@ SYM_CODE_START(handle_syscall)
#endif
move u0, t0
- li.d tp, ~_THREAD_MASK
+ LONG_LI tp, ~_THREAD_MASK
and tp, tp, sp
move a0, sp
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index ab94eb5ce039..1249d82c1cd0 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -34,9 +34,22 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long,
+ prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
+{
+ if (offset & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - 12));
+}
+
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#ifdef CONFIG_32BIT
+#include <asm/syscall_table_32.h>
+#else
#include <asm/syscall_table_64.h>
+#endif
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
--
2.47.3
* [PATCH V4 10/14] LoongArch: Adjust user accessors for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (8 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 09/14] LoongArch: Adjust system call " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 11/14] LoongArch: Adjust misc routines " Huacai Chen
` (3 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust user accessors for both 32BIT and 64BIT, including: get_user(),
put_user(), copy_user(), clear_user(), etc.
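The interesting case is the 8-byte access on a 32-bit kernel: __get_data_asm_8()/__put_data_asm_8() below move a u64 as two 32-bit words at __LSW/__MSW and recombine them. A plain C sketch of that split (LoongArch is little-endian, so the low word sits at offset 0):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	uint32_t w[2];

	w[0] = (uint32_t)val;		/* __LSW: low half, like the first st.w  */
	w[1] = (uint32_t)(val >> 32);	/* __MSW: high half, like the second st.w */

	uint64_t back = ((uint64_t)w[1] << 32) | w[0];	/* the get_user() side */

	printf("%s\n", back == val ? "round-trip ok" : "mismatch");
	return 0;
}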
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/uaccess.h | 63 ++++++++++++++++++++++++++--
arch/loongarch/lib/clear_user.S | 22 ++++++----
arch/loongarch/lib/copy_user.S | 28 ++++++++-----
3 files changed, 91 insertions(+), 22 deletions(-)
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 0d22991ae430..4e259d490e45 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -19,10 +19,16 @@
#include <asm/asm-extable.h>
#include <asm-generic/access_ok.h>
+#define __LSW 0
+#define __MSW 1
+
extern u64 __ua_limit;
-#define __UA_ADDR ".dword"
+#ifdef CONFIG_64BIT
#define __UA_LIMIT __ua_limit
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
/*
* get_user: - Get a simple variable from user space.
@@ -126,6 +132,7 @@ extern u64 __ua_limit;
*
* Returns zero on success, or -EFAULT on error.
*/
+
#define __put_user(x, ptr) \
({ \
int __pu_err = 0; \
@@ -146,7 +153,7 @@ do { \
case 1: __get_data_asm(val, "ld.b", ptr); break; \
case 2: __get_data_asm(val, "ld.h", ptr); break; \
case 4: __get_data_asm(val, "ld.w", ptr); break; \
- case 8: __get_data_asm(val, "ld.d", ptr); break; \
+ case 8: __get_data_asm_8(val, ptr); break; \
default: BUILD_BUG(); break; \
} \
} while (0)
@@ -167,13 +174,39 @@ do { \
(val) = (__typeof__(*(ptr))) __gu_tmp; \
}
+#ifdef CONFIG_64BIT
+#define __get_data_asm_8(val, ptr) \
+ __get_data_asm(val, "ld.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __get_data_asm_8(val, ptr) \
+{ \
+ u32 __lo, __hi; \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " ld.w %1, %3 \n" \
+ "2:\n" \
+ " ld.w %2, %4 \n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
+ : "+r" (__gu_err), "=&r" (__lo), "=r" (__hi) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
+ if (__gu_err) \
+ __hi = 0; \
+ (val) = (__typeof__(val))((__typeof__((val)-(val))) \
+ ((((u64)__hi << 32) | __lo))); \
+}
+#endif /* CONFIG_64BIT */
+
#define __put_user_common(ptr, size) \
do { \
switch (size) { \
case 1: __put_data_asm("st.b", ptr); break; \
case 2: __put_data_asm("st.h", ptr); break; \
case 4: __put_data_asm("st.w", ptr); break; \
- case 8: __put_data_asm("st.d", ptr); break; \
+ case 8: __put_data_asm_8(ptr); break; \
default: BUILD_BUG(); break; \
} \
} while (0)
@@ -190,6 +223,30 @@ do { \
: "Jr" (__pu_val)); \
}
+#ifdef CONFIG_64BIT
+#define __put_data_asm_8(ptr) \
+ __put_data_asm("st.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __put_data_asm_8(ptr) \
+{ \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \
+ \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " st.w %z3, %1 \n" \
+ "2:\n" \
+ " st.w %z4, %2 \n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
+ : "+r" (__pu_err), \
+ "=m" (__ptr[__LSW]), \
+ "=m" (__ptr[__MSW]) \
+ : "rJ" (__x), "rJ" (__x >> 32)); \
+}
+#endif /* CONFIG_64BIT */
+
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __gu_err = 0; \
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index 7a0db643b286..58c667dde882 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -13,11 +13,15 @@
#include <asm/unwind_hints.h>
SYM_FUNC_START(__clear_user)
+#ifdef CONFIG_32BIT
+ b __clear_user_generic
+#else
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __clear_user_generic", \
"b __clear_user_fast", CPU_FEATURE_UAL
+#endif
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
@@ -29,19 +33,20 @@ EXPORT_SYMBOL(__clear_user)
* a1: size
*/
SYM_FUNC_START(__clear_user_generic)
- beqz a1, 2f
+ beqz a1, 2f
-1: st.b zero, a0, 0
- addi.d a0, a0, 1
- addi.d a1, a1, -1
- bgtz a1, 1b
+1: st.b zero, a0, 0
+ PTR_ADDI a0, a0, 1
+ PTR_ADDI a1, a1, -1
+ bgtz a1, 1b
-2: move a0, a1
- jr ra
+2: move a0, a1
+ jr ra
- _asm_extable 1b, 2b
+ _asm_extable 1b, 2b
SYM_FUNC_END(__clear_user_generic)
+#ifdef CONFIG_64BIT
/*
* unsigned long __clear_user_fast(void *addr, unsigned long size)
*
@@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast)
SYM_FUNC_END(__clear_user_fast)
STACK_FRAME_NON_STANDARD __clear_user_fast
+#endif
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index 095ce9181c6c..c7264b779f6e 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -13,11 +13,15 @@
#include <asm/unwind_hints.h>
SYM_FUNC_START(__copy_user)
+#ifdef CONFIG_32BIT
+ b __copy_user_generic
+#else
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __copy_user_generic", \
"b __copy_user_fast", CPU_FEATURE_UAL
+#endif
SYM_FUNC_END(__copy_user)
EXPORT_SYMBOL(__copy_user)
@@ -30,22 +34,23 @@ EXPORT_SYMBOL(__copy_user)
* a2: n
*/
SYM_FUNC_START(__copy_user_generic)
- beqz a2, 3f
+ beqz a2, 3f
-1: ld.b t0, a1, 0
-2: st.b t0, a0, 0
- addi.d a0, a0, 1
- addi.d a1, a1, 1
- addi.d a2, a2, -1
- bgtz a2, 1b
+1: ld.b t0, a1, 0
+2: st.b t0, a0, 0
+ PTR_ADDI a0, a0, 1
+ PTR_ADDI a1, a1, 1
+ PTR_ADDI a2, a2, -1
+ bgtz a2, 1b
-3: move a0, a2
- jr ra
+3: move a0, a2
+ jr ra
- _asm_extable 1b, 3b
- _asm_extable 2b, 3b
+ _asm_extable 1b, 3b
+ _asm_extable 2b, 3b
SYM_FUNC_END(__copy_user_generic)
+#ifdef CONFIG_64BIT
/*
* unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
*
@@ -281,3 +286,4 @@ SYM_FUNC_START(__copy_user_fast)
SYM_FUNC_END(__copy_user_fast)
STACK_FRAME_NON_STANDARD __copy_user_fast
+#endif
--
2.47.3
* [PATCH V4 11/14] LoongArch: Adjust misc routines for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (9 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 10/14] LoongArch: Adjust user accessors " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 12/14] LoongArch: Adjust VDSO/VSYSCALL " Huacai Chen
` (2 subsequent siblings)
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust misc routines for both 32BIT and 64BIT, including: checksum,
jump label, unaligned access emulator, sleep/wakeup routines, etc.
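On the jump label side, each __jump_table record now follows the ABI: two 32-bit PC-relative offsets plus a key offset that is emitted as .long on LA32 and .quad on LA64, aligned via PTRLOG. A layout sketch of what such an entry corresponds to in C, mirroring the generic struct jump_entry (s32 code, s32 target, long key) used with relative references:

#include <stdio.h>

struct jump_entry_sketch {
	int  code;	/* ".long 1b - ."    : offset of the patched nop/branch */
	int  target;	/* ".long label - ." : offset of the branch target      */
	long key;	/* JUMP_LABEL_TYPE   : .long on LA32, .quad on LA64      */
};

int main(void)
{
	/* 12 bytes when long is 32-bit, 16 bytes when long is 64-bit */
	printf("jump table entry: %zu bytes\n", sizeof(struct jump_entry_sketch));
	return 0;
}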
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/checksum.h | 4 ++
arch/loongarch/include/asm/jump_label.h | 12 ++++-
arch/loongarch/include/asm/string.h | 2 +
arch/loongarch/kernel/unaligned.c | 30 ++++++++---
arch/loongarch/lib/unaligned.S | 72 ++++++++++++-------------
arch/loongarch/power/suspend_asm.S | 72 ++++++++++++-------------
6 files changed, 112 insertions(+), 80 deletions(-)
diff --git a/arch/loongarch/include/asm/checksum.h b/arch/loongarch/include/asm/checksum.h
index cabbf6af44c4..cc2754e0aa25 100644
--- a/arch/loongarch/include/asm/checksum.h
+++ b/arch/loongarch/include/asm/checksum.h
@@ -9,6 +9,8 @@
#include <linux/bitops.h>
#include <linux/in6.h>
+#ifdef CONFIG_64BIT
+
#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
@@ -61,6 +63,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum
+#endif
+
#include <asm-generic/checksum.h>
#endif /* __ASM_CHECKSUM_H */
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 4000c7603d8e..dcaecf69ea5a 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -10,15 +10,23 @@
#ifndef __ASSEMBLER__
#include <linux/types.h>
+#include <linux/stringify.h>
+#include <asm/asm.h>
#define JUMP_LABEL_NOP_SIZE 4
+#ifdef CONFIG_32BIT
+#define JUMP_LABEL_TYPE ".long "
+#else
+#define JUMP_LABEL_TYPE ".quad "
+#endif
+
/* This macro is also expanded on the Rust side. */
#define JUMP_TABLE_ENTRY(key, label) \
".pushsection __jump_table, \"aw\" \n\t" \
- ".align 3 \n\t" \
+ ".align " __stringify(PTRLOG) " \n\t" \
".long 1b - ., " label " - . \n\t" \
- ".quad " key " - . \n\t" \
+ JUMP_LABEL_TYPE key " - . \n\t" \
".popsection \n\t"
#define ARCH_STATIC_BRANCH_ASM(key, label) \
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index 5bb5a90d2681..bfa3fd879c7f 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -5,6 +5,7 @@
#ifndef _ASM_STRING_H
#define _ASM_STRING_H
+#ifdef CONFIG_64BIT
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
extern void *__memset(void *__s, int __c, size_t __count);
@@ -16,6 +17,7 @@ extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+#endif
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c
index 487be604b96a..cc929c9fe7e9 100644
--- a/arch/loongarch/kernel/unaligned.c
+++ b/arch/loongarch/kernel/unaligned.c
@@ -27,12 +27,21 @@ static u32 unaligned_instructions_user;
static u32 unaligned_instructions_kernel;
#endif
-static inline unsigned long read_fpr(unsigned int idx)
+static inline u64 read_fpr(unsigned int idx)
{
+#ifdef CONFIG_64BIT
#define READ_FPR(idx, __value) \
__asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value));
-
- unsigned long __value;
+#else
+#define READ_FPR(idx, __value) \
+{ \
+ u32 __value_lo, __value_hi; \
+ __asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=r"(__value_lo)); \
+ __asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=r"(__value_hi)); \
+ __value = (__value_lo | ((u64)__value_hi << 32)); \
+}
+#endif
+ u64 __value;
switch (idx) {
case 0:
@@ -138,11 +147,20 @@ static inline unsigned long read_fpr(unsigned int idx)
return __value;
}
-static inline void write_fpr(unsigned int idx, unsigned long value)
+static inline void write_fpr(unsigned int idx, u64 value)
{
+#ifdef CONFIG_64BIT
#define WRITE_FPR(idx, value) \
__asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value));
-
+#else
+#define WRITE_FPR(idx, value) \
+{ \
+ u32 value_lo = value; \
+ u32 value_hi = value >> 32; \
+ __asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \
+ __asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \
+}
+#endif
switch (idx) {
case 0:
WRITE_FPR(0, value);
@@ -252,7 +270,7 @@ void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned i
bool sign, write;
bool user = user_mode(regs);
unsigned int res, size = 0;
- unsigned long value = 0;
+ u64 value = 0;
union loongarch_instruction insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S
index 185f82d85810..470c0bfa3463 100644
--- a/arch/loongarch/lib/unaligned.S
+++ b/arch/loongarch/lib/unaligned.S
@@ -24,35 +24,35 @@
* a3: sign
*/
SYM_FUNC_START(unaligned_read)
- beqz a2, 5f
+ beqz a2, 5f
- li.w t2, 0
- addi.d t0, a2, -1
- slli.d t1, t0, 3
- add.d a0, a0, t0
+ li.w t2, 0
+ LONG_ADDI t0, a2, -1
+ PTR_SLLI t1, t0, LONGLOG
+ PTR_ADD a0, a0, t0
- beqz a3, 2f
-1: ld.b t3, a0, 0
- b 3f
+ beqz a3, 2f
+1: ld.b t3, a0, 0
+ b 3f
-2: ld.bu t3, a0, 0
-3: sll.d t3, t3, t1
- or t2, t2, t3
- addi.d t1, t1, -8
- addi.d a0, a0, -1
- addi.d a2, a2, -1
- bgtz a2, 2b
-4: st.d t2, a1, 0
+2: ld.bu t3, a0, 0
+3: LONG_SLLV t3, t3, t1
+ or t2, t2, t3
+ LONG_ADDI t1, t1, -8
+ PTR_ADDI a0, a0, -1
+ PTR_ADDI a2, a2, -1
+ bgtz a2, 2b
+4: LONG_S t2, a1, 0
- move a0, a2
- jr ra
+ move a0, a2
+ jr ra
-5: li.w a0, -EFAULT
- jr ra
+5: li.w a0, -EFAULT
+ jr ra
- _asm_extable 1b, .L_fixup_handle_unaligned
- _asm_extable 2b, .L_fixup_handle_unaligned
- _asm_extable 4b, .L_fixup_handle_unaligned
+ _asm_extable 1b, .L_fixup_handle_unaligned
+ _asm_extable 2b, .L_fixup_handle_unaligned
+ _asm_extable 4b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_read)
/*
@@ -63,21 +63,21 @@ SYM_FUNC_END(unaligned_read)
* a2: n
*/
SYM_FUNC_START(unaligned_write)
- beqz a2, 3f
+ beqz a2, 3f
- li.w t0, 0
-1: srl.d t1, a1, t0
-2: st.b t1, a0, 0
- addi.d t0, t0, 8
- addi.d a2, a2, -1
- addi.d a0, a0, 1
- bgtz a2, 1b
+ li.w t0, 0
+1: LONG_SRLV t1, a1, t0
+2: st.b t1, a0, 0
+ LONG_ADDI t0, t0, 8
+ PTR_ADDI a2, a2, -1
+ PTR_ADDI a0, a0, 1
+ bgtz a2, 1b
- move a0, a2
- jr ra
+ move a0, a2
+ jr ra
-3: li.w a0, -EFAULT
- jr ra
+3: li.w a0, -EFAULT
+ jr ra
- _asm_extable 2b, .L_fixup_handle_unaligned
+ _asm_extable 2b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_write)
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
index df0865df26fa..c8119ad5fb2c 100644
--- a/arch/loongarch/power/suspend_asm.S
+++ b/arch/loongarch/power/suspend_asm.S
@@ -14,41 +14,41 @@
/* preparatory stuff */
.macro SETUP_SLEEP
- addi.d sp, sp, -PT_SIZE
- st.d $r1, sp, PT_R1
- st.d $r2, sp, PT_R2
- st.d $r3, sp, PT_R3
- st.d $r4, sp, PT_R4
- st.d $r21, sp, PT_R21
- st.d $r22, sp, PT_R22
- st.d $r23, sp, PT_R23
- st.d $r24, sp, PT_R24
- st.d $r25, sp, PT_R25
- st.d $r26, sp, PT_R26
- st.d $r27, sp, PT_R27
- st.d $r28, sp, PT_R28
- st.d $r29, sp, PT_R29
- st.d $r30, sp, PT_R30
- st.d $r31, sp, PT_R31
+ PTR_ADDI sp, sp, -PT_SIZE
+ REG_S $r1, sp, PT_R1
+ REG_S $r2, sp, PT_R2
+ REG_S $r3, sp, PT_R3
+ REG_S $r4, sp, PT_R4
+ REG_S $r21, sp, PT_R21
+ REG_S $r22, sp, PT_R22
+ REG_S $r23, sp, PT_R23
+ REG_S $r24, sp, PT_R24
+ REG_S $r25, sp, PT_R25
+ REG_S $r26, sp, PT_R26
+ REG_S $r27, sp, PT_R27
+ REG_S $r28, sp, PT_R28
+ REG_S $r29, sp, PT_R29
+ REG_S $r30, sp, PT_R30
+ REG_S $r31, sp, PT_R31
.endm
.macro SETUP_WAKEUP
- ld.d $r1, sp, PT_R1
- ld.d $r2, sp, PT_R2
- ld.d $r3, sp, PT_R3
- ld.d $r4, sp, PT_R4
- ld.d $r21, sp, PT_R21
- ld.d $r22, sp, PT_R22
- ld.d $r23, sp, PT_R23
- ld.d $r24, sp, PT_R24
- ld.d $r25, sp, PT_R25
- ld.d $r26, sp, PT_R26
- ld.d $r27, sp, PT_R27
- ld.d $r28, sp, PT_R28
- ld.d $r29, sp, PT_R29
- ld.d $r30, sp, PT_R30
- ld.d $r31, sp, PT_R31
- addi.d sp, sp, PT_SIZE
+ REG_L $r1, sp, PT_R1
+ REG_L $r2, sp, PT_R2
+ REG_L $r3, sp, PT_R3
+ REG_L $r4, sp, PT_R4
+ REG_L $r21, sp, PT_R21
+ REG_L $r22, sp, PT_R22
+ REG_L $r23, sp, PT_R23
+ REG_L $r24, sp, PT_R24
+ REG_L $r25, sp, PT_R25
+ REG_L $r26, sp, PT_R26
+ REG_L $r27, sp, PT_R27
+ REG_L $r28, sp, PT_R28
+ REG_L $r29, sp, PT_R29
+ REG_L $r30, sp, PT_R30
+ REG_L $r31, sp, PT_R31
+ PTR_ADDI sp, sp, PT_SIZE
.endm
.text
@@ -59,15 +59,15 @@ SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
la.pcrel t0, acpi_saved_sp
- st.d sp, t0, 0
+ REG_S sp, t0, 0
bl __flush_cache_all
/* Pass RA and SP to BIOS */
- addi.d a1, sp, 0
+ PTR_ADDI a1, sp, 0
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
- ld.d t0, t0, 0
+ REG_L t0, t0, 0
jirl ra, t0, 0 /* Call BIOS's STR sleep routine */
/*
@@ -83,7 +83,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
csrwr t0, LOONGARCH_CSR_CRMD
la.pcrel t0, acpi_saved_sp
- ld.d sp, t0, 0
+ REG_L sp, t0, 0
SETUP_WAKEUP
jr ra
--
2.47.3
* [PATCH V4 12/14] LoongArch: Adjust VDSO/VSYSCALL for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (10 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 11/14] LoongArch: Adjust misc routines " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 13/14] LoongArch: Adjust default config files " Huacai Chen
2025-11-27 15:48 ` [PATCH V4 14/14] LoongArch: Adjust build infrastructure " Huacai Chen
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust VDSO/VSYSCALL because read_cpu_id() differs between 32BIT and
64BIT, and LoongArch32 doesn't support GENERIC_GETTIMEOFDAY yet (it will
be supported in the future).
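The __vdso_getcpu path stays exported on both ABIs; only the time functions become conditional on GENERIC_GETTIMEOFDAY. A quick userspace check, assuming a glibc whose sched_getcpu() is backed by the vDSO getcpu implementation where one is available:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int cpu = sched_getcpu();	/* typically serviced via the vDSO, no syscall */

	if (cpu < 0) {
		perror("sched_getcpu");
		return 1;
	}
	printf("running on CPU %d\n", cpu);
	return 0;
}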
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/vdso/gettimeofday.h | 4 ++++
arch/loongarch/kernel/time.c | 2 ++
arch/loongarch/vdso/Makefile | 7 ++++++-
arch/loongarch/vdso/vdso.lds.S | 4 ++--
arch/loongarch/vdso/vgetcpu.c | 8 ++++++++
5 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
index dcafabca9bb6..bae76767c693 100644
--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -12,6 +12,8 @@
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+
#define VDSO_HAS_CLOCK_GETRES 1
static __always_inline long gettimeofday_fallback(
@@ -89,6 +91,8 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
+
#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index 5892f6da07a5..dbaaabcaf6f0 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -212,7 +212,9 @@ static struct clocksource clocksource_const = {
.read = read_const_counter,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
.vdso_clock_mode = VDSO_CLOCKMODE_CPU,
+#endif
};
int __init constant_clocksource_init(void)
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index d8316f993482..a8ac0e811e39 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -4,8 +4,9 @@
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile.include
-obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
+obj-vdso-y := elf.o vgetcpu.o vgetrandom.o \
vgetrandom-chacha.o sigreturn.o
+obj-vdso-$(CONFIG_GENERIC_GETTIMEOFDAY) += vgettimeofday.o
# Common compiler flags between ABIs.
ccflags-vdso := \
@@ -16,6 +17,10 @@ ccflags-vdso := \
$(CLANG_FLAGS) \
-D__VDSO__
+ifdef CONFIG_32BIT
+ccflags-vdso += -DBUILD_VDSO32
+endif
+
cflags-vdso := $(ccflags-vdso) \
-isystem $(shell $(CC) -print-file-name=include) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 8ff986499947..ac537e02beb1 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -7,8 +7,6 @@
#include <generated/asm-offsets.h>
#include <vdso/datapage.h>
-OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
-
OUTPUT_ARCH(loongarch)
SECTIONS
@@ -63,9 +61,11 @@ VERSION
LINUX_5.10 {
global:
__vdso_getcpu;
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
+#endif
__vdso_getrandom;
__vdso_rt_sigreturn;
local: *;
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
index 5301cd9d0f83..73af49242ecd 100644
--- a/arch/loongarch/vdso/vgetcpu.c
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -10,11 +10,19 @@ static __always_inline int read_cpu_id(void)
{
int cpu_id;
+#ifdef CONFIG_64BIT
__asm__ __volatile__(
" rdtime.d $zero, %0\n"
: "=r" (cpu_id)
:
: "memory");
+#else
+ __asm__ __volatile__(
+ " rdtimel.w $zero, %0\n"
+ : "=r" (cpu_id)
+ :
+ : "memory");
+#endif
return cpu_id;
}
--
2.47.3
* [PATCH V4 13/14] LoongArch: Adjust default config files for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (11 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 12/14] LoongArch: Adjust VDSO/VSYSCALL " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-27 15:48 ` [PATCH V4 14/14] LoongArch: Adjust build infrastructure " Huacai Chen
13 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Add loongson32_defconfig (for 32BIT) and rename loongson3_defconfig to
loongson64_defconfig (for 64BIT).
Also adjust graphics drivers; for example, FB_EFI is replaced with EFIDRM.
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/Makefile | 7 +-
arch/loongarch/configs/loongson32_defconfig | 1104 +++++++++++++++++
...ongson3_defconfig => loongson64_defconfig} | 6 +-
3 files changed, 1113 insertions(+), 4 deletions(-)
create mode 100644 arch/loongarch/configs/loongson32_defconfig
rename arch/loongarch/configs/{loongson3_defconfig => loongson64_defconfig} (99%)
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 96ca1a688984..8d45b860fe56 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -5,7 +5,12 @@
boot := arch/loongarch/boot
-KBUILD_DEFCONFIG := loongson3_defconfig
+ifeq ($(shell uname -m),loongarch32)
+KBUILD_DEFCONFIG := loongson32_defconfig
+else
+KBUILD_DEFCONFIG := loongson64_defconfig
+endif
+
KBUILD_DTBS := dtbs
image-name-y := vmlinux
diff --git a/arch/loongarch/configs/loongson32_defconfig b/arch/loongarch/configs/loongson32_defconfig
new file mode 100644
index 000000000000..af6ae7e5ccc4
--- /dev/null
+++ b/arch/loongarch/configs/loongson32_defconfig
@@ -0,0 +1,1104 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_ZSTD=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_DYNAMIC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_KEXEC=y
+CONFIG_LOONGARCH=y
+CONFIG_32BIT=y
+CONFIG_32BIT_STANDARD=y
+CONFIG_MACH_LOONGSON32=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_250=y
+CONFIG_DMI=y
+CONFIG_EFI=y
+CONFIG_SUSPEND=y
+CONFIG_HIBERNATION=y
+CONFIG_ACPI=y
+CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_TAD=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_BGRT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_COMPRESS=y
+CONFIG_MODULE_COMPRESS_ZSTD=y
+CONFIG_MODULE_DECOMPRESS=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_FC_APPID=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_ZSWAP=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_ZSMALLOC=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set
+CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_CMA=y
+CONFIG_CMA_SYSFS=y
+CONFIG_USERFAULTFD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_ESPINTCP=y
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_ESPINTCP=y
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_MPTCP=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_CT=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_SRH=m
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_IP_SCTP=m
+CONFIG_RDS=y
+CONFIG_L2TP=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_LLC2=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HS=y
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
+CONFIG_BT_HCIBTUSB_MTK=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_AG6XX=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_ATH3K=m
+CONFIG_BT_VIRTIO=m
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_CEPH_LIB=m
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_SHPC=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+CONFIG_RAPIDIO=y
+CONFIG_RAPIDIO_TSI721=y
+CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+CONFIG_RAPIDIO_ENUM_BASIC=m
+CONFIG_RAPIDIO_CHMAN=m
+CONFIG_RAPIDIO_MPORT_CDEV=m
+CONFIG_UEVENT_HELPER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_EFI_ZBOOT=y
+CONFIG_EFI_BOOTLOADER_CONTROL=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_NAND_LOONGSON=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_SERIAL=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_ZRAM=m
+CONFIG_ZRAM_BACKEND_LZ4=y
+CONFIG_ZRAM_BACKEND_LZ4HC=y
+CONFIG_ZRAM_BACKEND_ZSTD=y
+CONFIG_ZRAM_BACKEND_DEFLATE=y
+CONFIG_ZRAM_BACKEND_842=y
+CONFIG_ZRAM_BACKEND_LZO=y
+CONFIG_ZRAM_DEF_COMP_ZSTD=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_MULTI_COMP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_DRBD=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+CONFIG_NVME_TARGET=m
+CONFIG_NVME_TARGET_PASSTHRU=y
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_TCP=m
+CONFIG_EEPROM_AT24=m
+CONFIG_PVPANIC=y
+CONFIG_PVPANIC_MMIO=m
+CONFIG_PVPANIC_PCI=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_MVSAS=y
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_MVUMI=y
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_DWC=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_PATA_PCMCIA=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LLBITMAP=y
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BCACHE=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
+CONFIG_DM_INIT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_INTEGRITY=m
+CONFIG_DM_ZONED=m
+CONFIG_DM_VDO=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=m
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_VXLAN=y
+CONFIG_RIONET=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_BNX2=y
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGBE=y
+CONFIG_I40E=y
+CONFIG_ICE=y
+CONFIG_FM10K=y
+CONFIG_IGC=y
+CONFIG_IDPF=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NGBE=y
+CONFIG_TXGBE=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_MOTORCOMM_PHY=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_RNDIS_HOST=m
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_ATH9K=m
+CONFIG_ATH9K_HTC=m
+CONFIG_IWLWIFI=m
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_MT7601U=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTL8192DU=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8XXXU=m
+CONFIG_RTW88=m
+CONFIG_RTW88_8822BE=m
+CONFIG_RTW88_8822BU=m
+CONFIG_RTW88_8822CE=m
+CONFIG_RTW88_8822CU=m
+CONFIG_RTW88_8723DE=m
+CONFIG_RTW88_8723DU=m
+CONFIG_RTW88_8821CE=m
+CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
+CONFIG_RTW89=m
+CONFIG_RTW89_8851BE=m
+CONFIG_RTW89_8852AE=m
+CONFIG_RTW89_8852BE=m
+CONFIG_RTW89_8852BTE=m
+CONFIG_RTW89_8852CE=m
+CONFIG_RTW89_8922AE=m
+CONFIG_ZD1211RW=m
+CONFIG_USB4_NET=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+CONFIG_KEYBOARD_MATRIX=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_PRINTER=m
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_I2C_DESIGNWARE_CORE=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_DESIGNWARE_PCI=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_LOONGSON_PCI=m
+CONFIG_SPI_LOONGSON_PLATFORM=m
+CONFIG_PINCTRL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_LOONGSON1=y
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCA9570=m
+CONFIG_GPIO_PCF857X=m
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_RESTART=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83795=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_WATCHDOG=y
+CONFIG_LOONGSON1_WDT=m
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_EFIDRM=y
+CONFIG_DRM_SIMPLEDRM=y
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_LOONGSON=y
+CONFIG_FB=y
+CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_REALTEK_LIB=y
+CONFIG_SND_HDA_CODEC_ALC260=y
+CONFIG_SND_HDA_CODEC_ALC262=y
+CONFIG_SND_HDA_CODEC_ALC268=y
+CONFIG_SND_HDA_CODEC_ALC269=y
+CONFIG_SND_HDA_CODEC_ALC662=y
+CONFIG_SND_HDA_CODEC_ALC680=y
+CONFIG_SND_HDA_CODEC_ALC861=y
+CONFIG_SND_HDA_CODEC_ALC861VD=y
+CONFIG_SND_HDA_CODEC_ALC880=y
+CONFIG_SND_HDA_CODEC_ALC882=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y
+CONFIG_SND_HDA_CODEC_HDMI_INTEL=y
+CONFIG_SND_HDA_CODEC_HDMI_ATI=y
+CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_AUDIO_MIDI_V2=y
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_LOONGSON_CARD=m
+CONFIG_SND_LOONGSON1_AC97=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_ES8311=m
+CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8323=m
+CONFIG_SND_SOC_ES8326=m
+CONFIG_SND_SOC_ES8328_I2C=m
+CONFIG_SND_SOC_ES8328_SPI=m
+CONFIG_SND_SOC_UDA1334=m
+CONFIG_SND_SOC_UDA1342=m
+CONFIG_SND_VIRTIO=m
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_ELAN=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_WACOM=m
+CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID_OF_ELAN=m
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_USB_UAS=m
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_GADGET=y
+CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_UCSI=m
+CONFIG_UCSI_ACPI=m
+CONFIG_MMC=y
+CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_LOONGSON=y
+CONFIG_DMADEVICES=y
+CONFIG_LOONGSON1_APB_DMA=y
+CONFIG_UDMABUF=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DMABUF_HEAPS_CMA=y
+CONFIG_UIO=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_COMEDI=m
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+CONFIG_STAGING=y
+CONFIG_CLKSRC_LOONGSON1_PWM=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_NTB=m
+CONFIG_NTB_MSI=y
+CONFIG_NTB_IDT=m
+CONFIG_NTB_EPF=m
+CONFIG_NTB_SWITCHTEC=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
+CONFIG_PWM=y
+CONFIG_GENERIC_PHY=y
+CONFIG_USB4=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_F2FS_FS=m
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_CHECK_FS=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
+CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+CONFIG_FSCACHE=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_EXFAT_FS=m
+CONFIG_NTFS3_FS=m
+CONFIG_NTFS3_LZX_XPRESS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_ORANGEFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=m
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_PSTORE=m
+CONFIG_PSTORE_COMPRESS=y
+CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ZIP_ZSTD=y
+CONFIG_EROFS_FS_ONDEMAND=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FSCACHE=y
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CEPH_FS_SECURITY_LABEL=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_9P_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_DLM=m
+CONFIG_KEY_DH_OPERATIONS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_SELFTESTS=y
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4_GENERIC=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_SM3_GENERIC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_DEV_LOONGSON_RNG=m
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson64_defconfig
similarity index 99%
rename from arch/loongarch/configs/loongson3_defconfig
rename to arch/loongarch/configs/loongson64_defconfig
index 3e838c229cd5..e9e14a744434 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson64_defconfig
@@ -435,7 +435,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
-CONFIG_SYSFB_SIMPLEFB=y
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_BOOTLOADER_CONTROL=m
CONFIG_EFI_CAPSULE_LOADER=m
@@ -530,6 +529,7 @@ CONFIG_PATA_ATIIXP=y
CONFIG_PATA_PCMCIA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LLBITMAP=y
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
@@ -801,6 +801,8 @@ CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_EFIDRM=y
+CONFIG_DRM_SIMPLEDRM=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -811,9 +813,7 @@ CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_DRM_LOONGSON=y
-CONFIG_DRM_SIMPLEDRM=y
CONFIG_FB=y
-CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
--
2.47.3
* [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2025-11-27 15:48 [PATCH V4 00/14] LoongArch: Add basic LoongArch32 support Huacai Chen
` (12 preceding siblings ...)
2025-11-27 15:48 ` [PATCH V4 13/14] LoongArch: Adjust default config files " Huacai Chen
@ 2025-11-27 15:48 ` Huacai Chen
2025-11-28 13:57 ` Arnd Bergmann
2026-04-23 22:06 ` Nathan Chancellor
13 siblings, 2 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-27 15:48 UTC (permalink / raw)
To: Huacai Chen
Cc: loongarch, Xuefeng Li, Guo Ren, Xuerui Wang, Jiaxun Yang,
linux-kernel, Huacai Chen, Arnd Bergmann
Adjust build infrastructure (Kconfig, Makefile and ld scripts) to let
us enable both 32BIT/64BIT kernel build.
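For example, with the defconfig files added earlier in this series, the two
kernel types can be selected along these lines (just a sketch; the exact
toolchain invocation is up to the user):

  $ make ARCH=loongarch LLVM=1 loongarch32_defconfig all   # 32-bit kernel
  $ make ARCH=loongarch LLVM=1 loongson64_defconfig all    # 64-bit kernel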
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/Kconfig | 113 ++++++++++++++++++--------
arch/loongarch/Makefile | 23 +++++-
arch/loongarch/boot/Makefile | 6 ++
arch/loongarch/kernel/vmlinux.lds.S | 7 +-
arch/loongarch/kvm/Kconfig | 2 +-
arch/loongarch/lib/Makefile | 5 +-
drivers/firmware/efi/libstub/Makefile | 1 +
drivers/pci/controller/Kconfig | 2 +-
lib/crc/Kconfig | 2 +-
9 files changed, 120 insertions(+), 41 deletions(-)
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 730f34214519..4bacde9f46d1 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -21,11 +21,11 @@ config LOONGARCH
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
- select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU
+ select ARCH_HAS_KERNEL_FPU_SUPPORT if 64BIT && CPU_HAS_FPU
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PREEMPT_LAZY
- select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_PTE_SPECIAL if 64BIT
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -60,16 +60,15 @@ config LOONGARCH
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
- select ARCH_SPARSEMEM_ENABLE
select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_HUGETLBFS
+ select ARCH_SUPPORTS_HUGETLBFS if 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
- select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_NUMA_BALANCING if NUMA
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_SUPPORTS_RT
select ARCH_SUPPORTS_SCHED_SMT if SMP
@@ -79,10 +78,10 @@ config LOONGARCH
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
- select ARCH_WANT_DEFAULT_BPF_JIT
+ select ARCH_WANT_DEFAULT_BPF_JIT if HAVE_EBPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_LD_ORPHAN_WARN
- select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+ select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if 64BIT
select ARCH_WANTS_NO_INSTR
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BUILDTIME_TABLE_SORT
@@ -90,13 +89,14 @@ config LOONGARCH
select CPU_PM
select EDAC_SUPPORT
select EFI
+ select GENERIC_ATOMIC64 if 32BIT
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES
select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
- select GENERIC_GETTIMEOFDAY
+ select GENERIC_GETTIMEOFDAY if 64BIT
select GENERIC_IOREMAP if !ARCH_IOREMAP
select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_IRQ_MULTI_HANDLER
@@ -111,15 +111,15 @@ config LOONGARCH
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_TIME_VSYSCALL
+ select GENERIC_TIME_VSYSCALL if GENERIC_GETTIMEOFDAY
select GPIOLIB
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_BITREVERSE if 64BIT
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
- select HAVE_ARCH_KASAN
- select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KASAN if 64BIT
+ select HAVE_ARCH_KFENCE if 64BIT
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -127,8 +127,8 @@ config LOONGARCH
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
+ select HAVE_ARCH_USERFAULTFD_MINOR if 64BIT && USERFAULTFD
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING_USER
select HAVE_C_RECORDMCOUNT
@@ -140,7 +140,7 @@ config LOONGARCH
select HAVE_FTRACE_REGS_HAVING_PT_REGS
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
- select HAVE_EBPF_JIT
+ select HAVE_EBPF_JIT if 64BIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
select HAVE_EXIT_THREAD
select HAVE_GENERIC_TIF_BITS
@@ -163,9 +163,9 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && 64BIT
select HAVE_PCI
- select HAVE_PERF_EVENTS
+ select HAVE_PERF_EVENTS if 64BIT
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
@@ -205,18 +205,50 @@ config LOONGARCH
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_ARCH_UNALIGN_NO_WARN
select SYSCTL_EXCEPTION_TRACE
- select SWIOTLB
+ select SWIOTLB if 64BIT
select TRACE_IRQFLAGS_SUPPORT
select USE_PERCPU_NUMA_NODE_ID
select USER_STACKTRACE_SUPPORT
select VDSO_GETRANDOM
- select ZONE_DMA32
+ select ZONE_DMA32 if 64BIT
+
+menu "Kernel type and options"
+
+choice
+ prompt "Kernel type"
config 32BIT
- bool
+ bool "32-bit kernel"
+ help
+ Select this option if you want to build a 32-bit kernel.
config 64BIT
- def_bool y
+ bool "64-bit kernel"
+ help
+ Select this option if you want to build a 64-bit kernel.
+
+endchoice
+
+if 32BIT
+
+choice
+ prompt "32-bit kernel sub-type"
+
+config 32BIT_REDUCED
+ bool "32-bit kernel for LA32R"
+ help
+ Select this option if you want to build a 32-bit kernel for
+ LoongArch32 Reduced (LA32R).
+
+config 32BIT_STANDARD
+ bool "32-bit kernel for LA32S"
+ help
+ Select this option if you want to build a 32-bit kernel for
+ LoongArch32 Standard (LA32S).
+
+endchoice
+
+endif
config GENERIC_BUG
def_bool y
@@ -306,8 +338,6 @@ config RUSTC_HAS_ANNOTATE_TABLEJUMP
depends on RUST
def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump)
-menu "Kernel type and options"
-
source "kernel/Kconfig.hz"
choice
@@ -319,8 +349,17 @@ choice
of page size and page table levels. The size of virtual memory
address space are determined by the page table layout.
+config 4KB_2LEVEL
+ bool "4KB with 2 levels"
+ select HAVE_PAGE_SIZE_4KB
+ select PGTABLE_2LEVEL
+ help
+ This option selects 16KB page size with 2 level page tables, which
+ support a maximum of 32 bits of application virtual memory.
+
config 4KB_3LEVEL
bool "4KB with 3 levels"
+ depends on 64BIT
select HAVE_PAGE_SIZE_4KB
select PGTABLE_3LEVEL
help
@@ -329,6 +368,7 @@ config 4KB_3LEVEL
config 4KB_4LEVEL
bool "4KB with 4 levels"
+ depends on 64BIT
select HAVE_PAGE_SIZE_4KB
select PGTABLE_4LEVEL
help
@@ -345,6 +385,7 @@ config 16KB_2LEVEL
config 16KB_3LEVEL
bool "16KB with 3 levels"
+ depends on 64BIT
select HAVE_PAGE_SIZE_16KB
select PGTABLE_3LEVEL
help
@@ -361,6 +402,7 @@ config 64KB_2LEVEL
config 64KB_3LEVEL
bool "64KB with 3 levels"
+ depends on 64BIT
select HAVE_PAGE_SIZE_64KB
select PGTABLE_3LEVEL
help
@@ -458,6 +500,7 @@ config EFI_STUB
config SMP
bool "Multi-Processing support"
+ depends on 64BIT
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
@@ -496,6 +539,7 @@ config NR_CPUS
config NUMA
bool "NUMA Support"
select SMP
+ depends on 64BIT
help
Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
support. This option improves performance on systems with more
@@ -578,7 +622,7 @@ config CPU_HAS_FPU
config CPU_HAS_LSX
bool "Support for the Loongson SIMD Extension"
- depends on AS_HAS_LSX_EXTENSION
+ depends on AS_HAS_LSX_EXTENSION && 64BIT
help
Loongson SIMD Extension (LSX) introduces 128 bit wide vector registers
and a set of SIMD instructions to operate on them. When this option
@@ -593,7 +637,7 @@ config CPU_HAS_LSX
config CPU_HAS_LASX
bool "Support for the Loongson Advanced SIMD Extension"
depends on CPU_HAS_LSX
- depends on AS_HAS_LASX_EXTENSION
+ depends on AS_HAS_LASX_EXTENSION && 64BIT
help
Loongson Advanced SIMD Extension (LASX) introduces 256 bit wide vector
registers and a set of SIMD instructions to operate on them. When this
@@ -607,7 +651,7 @@ config CPU_HAS_LASX
config CPU_HAS_LBT
bool "Support for the Loongson Binary Translation Extension"
- depends on AS_HAS_LBT_EXTENSION
+ depends on AS_HAS_LBT_EXTENSION && 64BIT
help
Loongson Binary Translation (LBT) introduces 4 scratch registers (SCR0
to SCR3), x86/ARM eflags (eflags) and x87 fpu stack pointer (ftop).
@@ -635,13 +679,13 @@ config ARCH_SELECTS_KEXEC_FILE
select HAVE_IMA_KEXEC if IMA
config ARCH_SUPPORTS_CRASH_DUMP
- def_bool y
+ def_bool 64BIT
config ARCH_DEFAULT_CRASH_DUMP
- def_bool y
+ def_bool 64BIT
config ARCH_SELECTS_CRASH_DUMP
- def_bool y
+ def_bool 64BIT
depends on CRASH_DUMP
select RELOCATABLE
@@ -650,6 +694,7 @@ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
config RELOCATABLE
bool "Relocatable kernel"
+ depends on 64BIT
select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
@@ -686,7 +731,7 @@ source "kernel/livepatch/Kconfig"
config PARAVIRT
bool "Enable paravirtualization code"
- depends on AS_HAS_LVZ_EXTENSION
+ depends on AS_HAS_LVZ_EXTENSION && 64BIT
help
This changes the kernel so it can modify itself when it is run
under a hypervisor, potentially improving performance significantly
@@ -714,7 +759,7 @@ config ARCH_FLATMEM_ENABLE
depends on !NUMA
config ARCH_SPARSEMEM_ENABLE
- def_bool y
+ def_bool 64BIT
select SPARSEMEM_VMEMMAP_ENABLE
help
Say Y to support efficient handling of sparse physical memory,
@@ -731,10 +776,12 @@ config MMU
default y
config ARCH_MMAP_RND_BITS_MIN
- default 12
+ default 10 if 32BIT
+ default 12 if 64BIT
config ARCH_MMAP_RND_BITS_MAX
- default 18
+ default 15 if 32BIT
+ default 18 if 64BIT
config ARCH_SUPPORTS_UPROBES
def_bool y
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 8d45b860fe56..47516aeea9d2 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -25,6 +25,7 @@ endif
#
# Select the object file format to substitute into the linker script.
#
+32bit-tool-archpref = loongarch32
64bit-tool-archpref = loongarch64
32bit-bfd = elf32-loongarch
64bit-bfd = elf64-loongarch
@@ -51,7 +52,10 @@ KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
-ifdef CONFIG_64BIT
+ifdef CONFIG_32BIT
+tool-archpref = $(32bit-tool-archpref)
+UTS_MACHINE := loongarch32
+else
tool-archpref = $(64bit-tool-archpref)
UTS_MACHINE := loongarch64
endif
@@ -62,9 +66,19 @@ ifneq ($(SUBARCH),$(ARCH))
endif
endif
+ifdef CONFIG_32BIT
+ifdef CONFIG_32BIT_STANDARD
+ld-emul = $(32bit-emul)
+cflags-y += -march=la32v1.0 -mabi=ilp32s -mcmodel=normal
+else # CONFIG_32BIT_REDUCED
+ld-emul = $(32bit-emul)
+cflags-y += -march=la32rv1.0 -mabi=ilp32s -mcmodel=normal
+endif
+endif
+
ifdef CONFIG_64BIT
ld-emul = $(64bit-emul)
-cflags-y += -mabi=lp64s -mcmodel=normal
+cflags-y += -march=loongarch64 -mabi=lp64s -mcmodel=normal
endif
cflags-y += -pipe $(CC_FLAGS_NO_FPU)
@@ -140,7 +154,12 @@ ifndef CONFIG_KASAN
cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
endif
+ifdef CONFIG_32BIT
+load-y = 0xa0200000
+else
load-y = 0x9000000000200000
+endif
+
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
drivers-$(CONFIG_PCI) += arch/loongarch/pci/
diff --git a/arch/loongarch/boot/Makefile b/arch/loongarch/boot/Makefile
index 4e1c374c5782..8b6d9b42b5f0 100644
--- a/arch/loongarch/boot/Makefile
+++ b/arch/loongarch/boot/Makefile
@@ -20,7 +20,13 @@ $(obj)/vmlinux.efi: vmlinux FORCE
$(call if_changed,objcopy)
EFI_ZBOOT_PAYLOAD := vmlinux.efi
+
+ifdef CONFIG_32BIT
+EFI_ZBOOT_BFD_TARGET := elf32-loongarch
+EFI_ZBOOT_MACH_TYPE := LOONGARCH32
+else
EFI_ZBOOT_BFD_TARGET := elf64-loongarch
EFI_ZBOOT_MACH_TYPE := LOONGARCH64
+endif
include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 08ea921cdec1..b95c0acdab90 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -6,7 +6,12 @@
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
-#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
+
+#ifdef CONFIG_32BIT
+#define PHYSADDR_MASK 0x1fffffff /* 29-bit */
+#else
+#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
+#endif
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index ae64bbdf83a7..3dc5ebc546d5 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -19,7 +19,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on AS_HAS_LVZ_EXTENSION
+ depends on AS_HAS_LVZ_EXTENSION && 64BIT
select HAVE_KVM_DIRTY_RING_ACQ_REL
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_IRQCHIP
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index ccea3bbd4353..d8f1e8559487 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -3,8 +3,9 @@
# Makefile for LoongArch-specific library files.
#
-lib-y += delay.o memset.o memcpy.o memmove.o \
- clear_user.o copy_user.o csum.o dump_tlb.o unaligned.o
+lib-y += delay.o clear_user.o copy_user.o dump_tlb.o unaligned.o
+
+lib-$(CONFIG_64BIT) += memset.o memcpy.o memmove.o csum.o
obj-$(CONFIG_ARCH_SUPPORTS_INT128) += tishift.o
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 94b05e4451dd..2ba0f7c400a7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -97,6 +97,7 @@ zboot-obj-$(CONFIG_KERNEL_ZSTD) := zboot-decompress-zstd.o lib-xxhash.o
CFLAGS_zboot-decompress-zstd.o += -I$(srctree)/lib/zstd
zboot-obj-$(CONFIG_RISCV) += lib-clz_ctz.o lib-ashldi3.o
+zboot-obj-$(CONFIG_LOONGARCH) += lib-clz_ctz.o lib-ashldi3.o
lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 41748d083b93..35ef71b0695d 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -171,7 +171,7 @@ config VMD
config PCI_LOONGSON
bool "LOONGSON PCIe controller"
- depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST
depends on OF || ACPI
depends on PCI_QUIRKS
default MACH_LOONGSON64
diff --git a/lib/crc/Kconfig b/lib/crc/Kconfig
index 70e7a6016de3..5bf613405fdd 100644
--- a/lib/crc/Kconfig
+++ b/lib/crc/Kconfig
@@ -65,7 +65,7 @@ config CRC32_ARCH
depends on CRC32 && CRC_OPTIMIZATIONS
default y if ARM && KERNEL_MODE_NEON
default y if ARM64
- default y if LOONGARCH
+ default y if LOONGARCH && 64BIT
default y if MIPS && CPU_MIPSR6
default y if PPC64 && ALTIVEC
default y if RISCV && RISCV_ISA_ZBC
--
2.47.3
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2025-11-27 15:48 ` [PATCH V4 14/14] LoongArch: Adjust build infrastructure " Huacai Chen
@ 2025-11-28 13:57 ` Arnd Bergmann
2025-11-28 14:27 ` Huacai Chen
2026-04-23 22:06 ` Nathan Chancellor
1 sibling, 1 reply; 22+ messages in thread
From: Arnd Bergmann @ 2025-11-28 13:57 UTC (permalink / raw)
To: Huacai Chen, Huacai Chen
Cc: loongarch, Xuefeng Li, guoren, WANG Xuerui, Jiaxun Yang,
linux-kernel
On Thu, Nov 27, 2025, at 16:48, Huacai Chen wrote:
>
> +config 4KB_2LEVEL
> + bool "4KB with 2 levels"
> + select HAVE_PAGE_SIZE_4KB
> + select PGTABLE_2LEVEL
> + help
> + This option selects 16KB page size with 2 level page tables, which
> + support a maximum of 32 bits of application virtual memory.
> +
I just noticed this inconsistency: the text says 16KB pages, the
option says 4KB, so one of them is wrong. Usually 4KB pages have
a 32-bit address space (using 32-bit descriptors), so I assume
this one is correct.
arnd
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2025-11-28 13:57 ` Arnd Bergmann
@ 2025-11-28 14:27 ` Huacai Chen
0 siblings, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2025-11-28 14:27 UTC (permalink / raw)
To: Arnd Bergmann
Cc: Huacai Chen, loongarch, Xuefeng Li, guoren, WANG Xuerui,
Jiaxun Yang, linux-kernel
On Fri, Nov 28, 2025 at 9:57 PM Arnd Bergmann <arnd@arndb.de> wrote:
>
> On Thu, Nov 27, 2025, at 16:48, Huacai Chen wrote:
> >
> > +config 4KB_2LEVEL
> > + bool "4KB with 2 levels"
> > + select HAVE_PAGE_SIZE_4KB
> > + select PGTABLE_2LEVEL
> > + help
> > + This option selects 16KB page size with 2 level page tables, which
> > + support a maximum of 32 bits of application virtual memory.
> > +
>
> I just noticed this inconsistency: the text says 16KB pages, the
> option says 4KB, so one of them is wrong. Usually 4KB pages have
> a 32-bit address space (using 32-bit descriptors), so I assume
> this one is correct.
Yes, 4KB is correct, thank you for pointing it out.
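Presumably the follow-up fix only needs to swap the page size in the help
text, roughly like this (not necessarily the final wording):

config 4KB_2LEVEL
	bool "4KB with 2 levels"
	select HAVE_PAGE_SIZE_4KB
	select PGTABLE_2LEVEL
	help
	  This option selects 4KB page size with 2 level page tables, which
	  support a maximum of 32 bits of application virtual memory.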
Huacai
>
> arnd
>
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2025-11-27 15:48 ` [PATCH V4 14/14] LoongArch: Adjust build infrastructure " Huacai Chen
2025-11-28 13:57 ` Arnd Bergmann
@ 2026-04-23 22:06 ` Nathan Chancellor
2026-04-24 4:14 ` Huacai Chen
2026-04-24 11:06 ` WANG Rui
1 sibling, 2 replies; 22+ messages in thread
From: Nathan Chancellor @ 2026-04-23 22:06 UTC (permalink / raw)
To: Huacai Chen
Cc: Huacai Chen, loongarch, Xuefeng Li, Guo Ren, Xuerui Wang,
Jiaxun Yang, linux-kernel, Arnd Bergmann
Hi Huacai,
Now that this is in -next as commit 3d9aba6618d1 ("LoongArch: Adjust
build infrastructure for 32BIT/64BIT"), I am seeing two issues:
On Thu, Nov 27, 2025 at 11:48:32PM +0800, Huacai Chen wrote:
> Adjust build infrastructure (Kconfig, Makefile and ld scripts) to let
> us enable both 32BIT/64BIT kernel build.
...
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 730f34214519..4bacde9f46d1 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> +menu "Kernel type and options"
> +
> +choice
> + prompt "Kernel type"
>
> config 32BIT
> - bool
> + bool "32-bit kernel"
> + help
> + Select this option if you want to build a 32-bit kernel.
>
> config 64BIT
> - def_bool y
> + bool "64-bit kernel"
> + help
> + Select this option if you want to build a 64-bit kernel.
> +
> +endchoice
The way this is written results in a 32-bit kernel when building
ARCH=loongarch allmodconfig or having a previously 64-bit configuration
and running olddefconfig on it, which seems surprising to me. I think it
would be good to either adjust the ordering such that 64BIT is first or
include an explicit default statement like so:
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 3b042dbb2c41..3addd06b3f5a 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -220,6 +220,7 @@ menu "Kernel type and options"
choice
prompt "Kernel type"
+ default 64BIT
config 32BIT
bool "32-bit kernel"
--
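As a quick way to see which word size a generated configuration ends up
with (just a sketch, on a tree with this series applied):

  $ make ARCH=loongarch LLVM=1 allmodconfig
  $ grep -E 'CONFIG_(32|64)BIT=y' .config

Today that reports CONFIG_32BIT=y; with the default 64BIT hunk above it
goes back to CONFIG_64BIT=y.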
> diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
> index 8d45b860fe56..47516aeea9d2 100644
> --- a/arch/loongarch/Makefile
> +++ b/arch/loongarch/Makefile
...
> @@ -62,9 +66,19 @@ ifneq ($(SUBARCH),$(ARCH))
> endif
> endif
>
> +ifdef CONFIG_32BIT
> +ifdef CONFIG_32BIT_STANDARD
> +ld-emul = $(32bit-emul)
> +cflags-y += -march=la32v1.0 -mabi=ilp32s -mcmodel=normal
> +else # CONFIG_32BIT_REDUCED
> +ld-emul = $(32bit-emul)
> +cflags-y += -march=la32rv1.0 -mabi=ilp32s -mcmodel=normal
> +endif
> +endif
As a result of the above problem, I see
clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
when building configurations that I expected to be 64-bit but had been
turned into 32-bit ones with LLVM. This was also reported by the test
robot with allnoconfig.
https://lore.kernel.org/202604232041.ESJDwVG4-lkp@intel.com/
Maybe some change is needed on the LLVM side?
Cheers,
Nathan
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2026-04-23 22:06 ` Nathan Chancellor
@ 2026-04-24 4:14 ` Huacai Chen
2026-04-24 11:06 ` WANG Rui
1 sibling, 0 replies; 22+ messages in thread
From: Huacai Chen @ 2026-04-24 4:14 UTC (permalink / raw)
To: Nathan Chancellor
Cc: Huacai Chen, loongarch, Xuefeng Li, Guo Ren, Xuerui Wang,
Jiaxun Yang, linux-kernel, Arnd Bergmann
Hi, Nathan,
On Fri, Apr 24, 2026 at 6:06 AM Nathan Chancellor <nathan@kernel.org> wrote:
>
> Hi Huacai,
>
> Now that this is in -next as commit 3d9aba6618d1 ("LoongArch: Adjust
> build infrastructure for 32BIT/64BIT"), I am seeing two issues:
>
> On Thu, Nov 27, 2025 at 11:48:32PM +0800, Huacai Chen wrote:
> > Adjust build infrastructure (Kconfig, Makefile and ld scripts) to let
> > us enable both 32BIT/64BIT kernel build.
> ...
> > diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> > index 730f34214519..4bacde9f46d1 100644
> > --- a/arch/loongarch/Kconfig
> > +++ b/arch/loongarch/Kconfig
> > +menu "Kernel type and options"
> > +
> > +choice
> > + prompt "Kernel type"
> >
> > config 32BIT
> > - bool
> > + bool "32-bit kernel"
> > + help
> > + Select this option if you want to build a 32-bit kernel.
> >
> > config 64BIT
> > - def_bool y
> > + bool "64-bit kernel"
> > + help
> > + Select this option if you want to build a 64-bit kernel.
> > +
> > +endchoice
>
> The way this is written results in a 32-bit kernel when building
> ARCH=loongarch allmodconfig or having a previously 64-bit configuration
> and running olddefconfig on it, which seems surprising to me. I think it
> would be good to either adjust the ordering such that 64BIT is first or
> include an explicit default statement like so:
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 3b042dbb2c41..3addd06b3f5a 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -220,6 +220,7 @@ menu "Kernel type and options"
>
> choice
> prompt "Kernel type"
> + default 64BIT
>
> config 32BIT
> bool "32-bit kernel"
> --
Thank you for pointing this out. Since the code has been tagged, I will fix
it after 7.1-rc1 as soon as possible.
Huacai
>
> > diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
> > index 8d45b860fe56..47516aeea9d2 100644
> > --- a/arch/loongarch/Makefile
> > +++ b/arch/loongarch/Makefile
> ...
> > @@ -62,9 +66,19 @@ ifneq ($(SUBARCH),$(ARCH))
> > endif
> > endif
> >
> > +ifdef CONFIG_32BIT
> > +ifdef CONFIG_32BIT_STANDARD
> > +ld-emul = $(32bit-emul)
> > +cflags-y += -march=la32v1.0 -mabi=ilp32s -mcmodel=normal
> > +else # CONFIG_32BIT_REDUCED
> > +ld-emul = $(32bit-emul)
> > +cflags-y += -march=la32rv1.0 -mabi=ilp32s -mcmodel=normal
> > +endif
> > +endif
>
> As a result of the above problem, I see
>
> clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
>
> when building configurations that I expected to be 64-bit but had been
> turned into 32-bit ones with LLVM. This was also reported by the test
> robot with allnoconfig.
>
> https://lore.kernel.org/202604232041.ESJDwVG4-lkp@intel.com/
>
> Maybe some change is needed on the LLVM side?
>
> Cheers,
> Nathan
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2026-04-23 22:06 ` Nathan Chancellor
2026-04-24 4:14 ` Huacai Chen
@ 2026-04-24 11:06 ` WANG Rui
2026-04-24 18:19 ` Nathan Chancellor
1 sibling, 1 reply; 22+ messages in thread
From: WANG Rui @ 2026-04-24 11:06 UTC (permalink / raw)
To: nathan
Cc: arnd, chenhuacai, chenhuacai, guoren, jiaxun.yang, kernel,
linux-kernel, lixuefeng, loongarch, WANG Rui
Hi Nathan,
>> diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
>> index 8d45b860fe56..47516aeea9d2 100644
>> --- a/arch/loongarch/Makefile
>> +++ b/arch/loongarch/Makefile
>...
>> @@ -62,9 +66,19 @@ ifneq ($(SUBARCH),$(ARCH))
>> endif
>> endif
>>
>> +ifdef CONFIG_32BIT
>> +ifdef CONFIG_32BIT_STANDARD
>> +ld-emul = $(32bit-emul)
>> +cflags-y += -march=la32v1.0 -mabi=ilp32s -mcmodel=normal
>> +else # CONFIG_32BIT_REDUCED
>> +ld-emul = $(32bit-emul)
>> +cflags-y += -march=la32rv1.0 -mabi=ilp32s -mcmodel=normal
>> +endif
>> +endif
>
> As a result of the above problem, I see
>
> clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
>
> when building configurations that I expected to be 64-bit but had been
> turned into 32-bit ones with LLVM. This was also reported by the test
> robot with allnoconfig.
>
> https://lore.kernel.org/202604232041.ESJDwVG4-lkp@intel.com/
>
> Maybe some change is needed on the LLVM side?
The root cause is that scripts/Makefile.clang unconditionally sets
CLANG_TARGET_FLAGS_loongarch to loongarch64-linux-gnusf.
However, Clang with --target=loongarch64 does not support a 32-bit ABI.
LLVM clearly separates loongarch32 and loongarch64 targets.
Since CLANG_TARGET_FLAGS gets initialized before CONFIG is available, the
current setup effectively assumes that a 64-bit target must also handle
a 32-bit ABI. That assumption may not hold for LLVM, which models 32-bit
and 64-bit LoongArch as separate targets.
I have a workaround patch for now, though I’d appreciate suggestions if
there’s a cleaner fix.
diff --git a/Makefile b/Makefile
index 54e1ae602000..f03ff854014a 100644
--- a/Makefile
+++ b/Makefile
@@ -725,6 +725,10 @@ CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null
RUSTC_VERSION_TEXT = $(subst $(pound),,$(shell $(RUSTC) --version 2>/dev/null))
PAHOLE_VERSION = $(shell $(srctree)/scripts/pahole-version.sh $(PAHOLE))
+ifdef need-config
+-include $(objtree)/include/config/auto.conf
+endif
+
ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
include $(srctree)/scripts/Makefile.clang
endif
diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang
index b67636b28c35..1c4db2f6543e 100644
--- a/scripts/Makefile.clang
+++ b/scripts/Makefile.clang
@@ -4,7 +4,11 @@
CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
+ifdef CONFIG_32BIT
+CLANG_TARGET_FLAGS_loongarch := loongarch32-linux-gnusf
+else
CLANG_TARGET_FLAGS_loongarch := loongarch64-linux-gnusf
+endif
CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
--
Thanks,
Rui
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2026-04-24 11:06 ` WANG Rui
@ 2026-04-24 18:19 ` Nathan Chancellor
2026-04-25 2:29 ` hev
0 siblings, 1 reply; 22+ messages in thread
From: Nathan Chancellor @ 2026-04-24 18:19 UTC (permalink / raw)
To: WANG Rui
Cc: arnd, chenhuacai, chenhuacai, guoren, jiaxun.yang, kernel,
linux-kernel, lixuefeng, loongarch
Hi Rui,
On Fri, Apr 24, 2026 at 07:06:23PM +0800, WANG Rui wrote:
> > As a result of the above problem, I see
> >
> > clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> > clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> > clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> >
> > when building configurations that I expected to be 64-bit but had been
> > turned into 32-bit ones with LLVM. This was also reported by the test
> > robot with allnoconfig.
> >
> > https://lore.kernel.org/202604232041.ESJDwVG4-lkp@intel.com/
> >
> > Maybe some change is needed on the LLVM side?
>
> The root cause is that scripts/Makefile.clang unconditionally sets
> CLANG_TARGET_FLAGS_loongarch to loongarch64-linux-gnusf.
> However, Clang with --target=loongarch64 does not support a 32-bit ABI.
> LLVM clearly separates loongarch32 and loongarch64 targets.
>
> Since CLANG_TARGET_FLAGS gets initialized before CONFIG is available, the
> current setup effectively assumes that a 64-bit target must also handle
> a 32-bit ABI. That assumption may not hold for LLVM, which models 32-bit
> and 64-bit LoongArch as separate targets.
Hmmm, okay, that definitely explains it. As the comment at the top of
scripts/Makefile.clang states, we rely on targets supporting switching
their word size via '-m32' / '-m64' so that we can use a 64-bit triple
but still get a 32-bit one if necessary. For example:
--target=x86_64-linux-gnu -m32
effectively becomes
--target=i386-linux-gnu
It seems like this works for LoongArch in clang as well?
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 47516aeea9d2..2837004d0d75 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -67,6 +67,9 @@ ifneq ($(SUBARCH),$(ARCH))
endif
ifdef CONFIG_32BIT
+ifdef CONFIG_CC_IS_CLANG
+cflags-y += -m32
+endif
ifdef CONFIG_32BIT_STANDARD
ld-emul = $(32bit-emul)
cflags-y += -march=la32v1.0 -mabi=ilp32s -mcmodel=normal
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 42aa96249828..eeda13ecb711 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -14,6 +14,7 @@ ccflags-vdso := \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS)) \
$(filter -m%-float,$(KBUILD_CFLAGS)) \
+ $(filter -m32,$(KBUILD_CFLAGS)) \
$(CLANG_FLAGS) \
-D__VDSO__
--
appears to do the right thing?
$ make -skj"$(nproc)" ARCH=loongarch LLVM=1 mrproper loongarch32_defconfig all
$ file vmlinux arch/loongarch/boot/vmlinux.efi
vmlinux: ELF 32-bit LSB executable, LoongArch, version 1 (SYSV), statically linked, BuildID[sha1]=26b4af26bb14c923270429f4ccdc66a519c059b1, not stripped
arch/loongarch/boot/vmlinux.efi: PE32+ executable for EFI (application), LoongArch 32-bit (stripped to external PDB), 2 section
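If it is useful, the driver-side mapping can also be checked directly
(assuming a clang build that includes the LoongArch32 target), e.g.:

  $ clang --target=loongarch64-linux-gnusf -m32 -print-target-triple

which should print a loongarch32 triple once the -m32 handling is in place.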
Cheers,
Nathan
* Re: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT
2026-04-24 18:19 ` Nathan Chancellor
@ 2026-04-25 2:29 ` hev
0 siblings, 0 replies; 22+ messages in thread
From: hev @ 2026-04-25 2:29 UTC (permalink / raw)
To: Nathan Chancellor
Cc: arnd, chenhuacai, chenhuacai, guoren, jiaxun.yang, kernel,
linux-kernel, lixuefeng, loongarch
Hi Nathan,
On Sat, Apr 25, 2026 at 2:19 AM Nathan Chancellor <nathan@kernel.org> wrote:
>
> Hi Rui,
>
> On Fri, Apr 24, 2026 at 07:06:23PM +0800, WANG Rui wrote:
> > > As a result of the above problem, I see
> > >
> > > clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> > > clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> > > clang: error: ignoring '-mabi=ilp32s' as it conflicts with that implied by '-msoft-float' (lp64s) [-Werror,-Woption-ignored]
> > >
> > > when building configurations that I expected to be 64-bit but had been
> > > turned into 32-bit ones with LLVM. This was also reported by the test
> > > robot with allnoconfig.
> > >
> > > https://lore.kernel.org/202604232041.ESJDwVG4-lkp@intel.com/
> > >
> > > Maybe some change is needed on the LLVM side?
> >
> > The root cause is that scripts/Makefile.clang unconditionally sets
> > CLANG_TARGET_FLAGS_loongarch to loongarch64-linux-gnusf.
> > However, Clang with --target=loongarch64 does not support a 32-bit ABI.
> > LLVM clearly separates loongarch32 and loongarch64 targets.
> >
> > Since CLANG_TARGET_FLAGS gets initialized before CONFIG is available, the
> > current setup effectively assumes that a 64-bit target must also handle
> > a 32-bit ABI. That assumption may not hold for LLVM, which models 32-bit
> > and 64-bit LoongArch as separate targets.
>
> Hmmm, okay, that definitely explains it. As the comment at the top of
> scripts/Makefile.clang states, we rely on targets supporting switching
> their word size via '-m32' / '-m64' so that we can use a 64-bit triple
> but still get a 32-bit one if necessary. For example:
>
> --target=x86_64-linux-gnu -m32
>
> effectively becomes
>
> --target=i386-linux-gnu
>
> It seems like this works for LoongArch in clang as well?
>
> diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
> index 47516aeea9d2..2837004d0d75 100644
> --- a/arch/loongarch/Makefile
> +++ b/arch/loongarch/Makefile
> @@ -67,6 +67,9 @@ ifneq ($(SUBARCH),$(ARCH))
> endif
>
> ifdef CONFIG_32BIT
> +ifdef CONFIG_CC_IS_CLANG
> +cflags-y += -m32
> +endif
> ifdef CONFIG_32BIT_STANDARD
> ld-emul = $(32bit-emul)
> cflags-y += -march=la32v1.0 -mabi=ilp32s -mcmodel=normal
> diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
> index 42aa96249828..eeda13ecb711 100644
> --- a/arch/loongarch/vdso/Makefile
> +++ b/arch/loongarch/vdso/Makefile
> @@ -14,6 +14,7 @@ ccflags-vdso := \
> $(filter -E%,$(KBUILD_CFLAGS)) \
> $(filter -march=%,$(KBUILD_CFLAGS)) \
> $(filter -m%-float,$(KBUILD_CFLAGS)) \
> + $(filter -m32,$(KBUILD_CFLAGS)) \
> $(CLANG_FLAGS) \
> -D__VDSO__
>
> --
>
> appears to do the right thing?
>
> $ make -skj"$(nproc)" ARCH=loongarch LLVM=1 mrproper loongarch32_defconfig all
>
> $ file vmlinux arch/loongarch/boot/vmlinux.efi
> vmlinux: ELF 32-bit LSB executable, LoongArch, version 1 (SYSV), statically linked, BuildID[sha1]=26b4af26bb14c923270429f4ccdc66a519c059b1, not stripped
> arch/loongarch/boot/vmlinux.efi: PE32+ executable for EFI (application), LoongArch 32-bit (stripped to external PDB), 2 section
>
Ah, I see now that Clang already maps -m32/-m64 to the corresponding
triple variants in the common layer.
Thanks for the clarification!
Rui
> Cheers,
> Nathan