* [PATCH v14 1/8] arm64: cpufeature: add FEAT_LSUI
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 2/8] KVM: arm64: expose FEAT_LSUI to guest Yeoreum Yun
` (6 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
Since Armv9.6, FEAT_LSUI introduces load/store instructions that allow
privileged code to access user memory without clearing the PSTATE.PAN bit.
Add CPU feature detection for FEAT_LSUI and enable its use
when FEAT_PAN is present, which removes the need for SW_PAN handling
when using LSUI instructions.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
arch/arm64/kernel/cpufeature.c | 10 ++++++++++
arch/arm64/tools/cpucaps | 1 +
2 files changed, 11 insertions(+)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c31f8e17732a..5074ff32176f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -281,6 +281,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
static const struct arm64_ftr_bits ftr_id_aa64isar3[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FPRCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_LSUI_SHIFT, 4, ID_AA64ISAR3_EL1_LSUI_NI),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_LSFE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0),
ARM64_FTR_END,
@@ -3169,6 +3170,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_ls64_v,
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
},
+#ifdef CONFIG_ARM64_LSUI
+ {
+ .desc = "Unprivileged Load Store Instructions (LSUI)",
+ .capability = ARM64_HAS_LSUI,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR3_EL1, LSUI, IMP)
+ },
+#endif
{},
};
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 7261553b644b..b7286d977788 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -48,6 +48,7 @@ HAS_LPA2
HAS_LSE_ATOMICS
HAS_LS64
HAS_LS64_V
+HAS_LSUI
HAS_MOPS
HAS_NESTED_VIRT
HAS_BBML2_NOABORT
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* [PATCH v14 2/8] KVM: arm64: expose FEAT_LSUI to guest
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 1/8] arm64: cpufeature: add FEAT_LSUI Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 3/8] KVM: arm64: kselftest: set_id_regs: add test for FEAT_LSUI Yeoreum Yun
` (5 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
expose FEAT_LSUI to guest.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/kvm/sys_regs.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a7cd0badc20c..b43e2bec35db 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1805,7 +1805,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64ISAR3_EL1:
val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
- ID_AA64ISAR3_EL1_FAMINMAX;
+ ID_AA64ISAR3_EL1_FAMINMAX | ID_AA64ISAR3_EL1_LSUI;
break;
case SYS_ID_AA64MMFR2_EL1:
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
@@ -3249,6 +3249,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64ISAR2_EL1_GPA3)),
ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
ID_AA64ISAR3_EL1_LSFE |
+ ID_AA64ISAR3_EL1_LSUI |
ID_AA64ISAR3_EL1_FAMINMAX)),
ID_UNALLOCATED(6,4),
ID_UNALLOCATED(6,5),
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* [PATCH v14 3/8] KVM: arm64: kselftest: set_id_regs: add test for FEAT_LSUI
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 1/8] arm64: cpufeature: add FEAT_LSUI Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 2/8] KVM: arm64: expose FEAT_LSUI to guest Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 4/8] arm64: futex: refactor futex atomic operation Yeoreum Yun
` (4 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
Add test coverage for FEAT_LSUI.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
---
tools/testing/selftests/kvm/arm64/set_id_regs.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index 73de5be58bab..fa3478a6c914 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -124,6 +124,7 @@ static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
static const struct reg_ftr_bits ftr_id_aa64isar3_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FPRCVT, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSUI, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSFE, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FAMINMAX, 0),
REG_FTR_END,
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* [PATCH v14 4/8] arm64: futex: refactor futex atomic operation
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
` (2 preceding siblings ...)
2026-02-25 18:27 ` [PATCH v14 3/8] KVM: arm64: kselftest: set_id_regs: add test for FEAT_LSUI Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 5/8] arm64: futex: support futex with FEAT_LSUI Yeoreum Yun
` (3 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
Refactor futex atomic operations using ll/sc method with
clearing PSTATE.PAN to prepare to apply FEAT_LSUI on them.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/include/asm/futex.h | 137 +++++++++++++++++++++------------
1 file changed, 87 insertions(+), 50 deletions(-)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index bc06691d2062..9a0efed50743 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -7,21 +7,25 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
+#include <linux/stringify.h>
#include <asm/errno.h>
#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
-#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
-do { \
+#define LLSC_FUTEX_ATOMIC_OP(op, insn) \
+static __always_inline int \
+__llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
+{ \
unsigned int loops = FUTEX_MAX_LOOPS; \
+ int ret, oldval, newval; \
\
uaccess_enable_privileged(); \
- asm volatile( \
+ asm volatile("// __llsc_futex_atomic_" #op "\n" \
" prfm pstl1strm, %2\n" \
-"1: ldxr %w1, %2\n" \
+"1: ldxr %w[oldval], %2\n" \
insn "\n" \
-"2: stlxr %w0, %w3, %2\n" \
+"2: stlxr %w0, %w[newval], %2\n" \
" cbz %w0, 3f\n" \
" sub %w4, %w4, %w0\n" \
" cbnz %w4, 1b\n" \
@@ -30,50 +34,109 @@ do { \
" dmb ish\n" \
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \
- : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
+ : "=&r" (ret), [oldval] "=&r" (oldval), "+Q" (*uaddr), \
+ [newval] "=&r" (newval), \
"+r" (loops) \
- : "r" (oparg), "Ir" (-EAGAIN) \
+ : [oparg] "r" (oparg), "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable_privileged(); \
-} while (0)
+ \
+ if (!ret) \
+ *oval = oldval; \
+ \
+ return ret; \
+}
+
+LLSC_FUTEX_ATOMIC_OP(add, "add %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(or, "orr %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(and, "and %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(eor, "eor %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(set, "mov %w[newval], %w[oparg]")
+
+static __always_inline int
+__llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ int ret = 0;
+ unsigned int loops = FUTEX_MAX_LOOPS;
+ u32 val, tmp;
+
+ uaccess_enable_privileged();
+ asm volatile("//__llsc_futex_cmpxchg\n"
+" prfm pstl1strm, %2\n"
+"1: ldxr %w1, %2\n"
+" eor %w3, %w1, %w5\n"
+" cbnz %w3, 4f\n"
+"2: stlxr %w3, %w6, %2\n"
+" cbz %w3, 3f\n"
+" sub %w4, %w4, %w3\n"
+" cbnz %w4, 1b\n"
+" mov %w0, %w7\n"
+"3:\n"
+" dmb ish\n"
+"4:\n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
+ _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
+ : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+ : "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
+ : "memory");
+ uaccess_disable_privileged();
+
+ if (!ret)
+ *oval = val;
+
+ return ret;
+}
+
+#define FUTEX_ATOMIC_OP(op) \
+static __always_inline int \
+__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
+{ \
+ return __llsc_futex_atomic_##op(oparg, uaddr, oval); \
+}
+
+FUTEX_ATOMIC_OP(add)
+FUTEX_ATOMIC_OP(or)
+FUTEX_ATOMIC_OP(and)
+FUTEX_ATOMIC_OP(eor)
+FUTEX_ATOMIC_OP(set)
+
+static __always_inline int
+__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ return __llsc_futex_cmpxchg(uaddr, oldval, newval, oval);
+}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
- int oldval = 0, ret, tmp;
- u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+ int ret;
+ u32 __user *uaddr;
if (!access_ok(_uaddr, sizeof(u32)))
return -EFAULT;
+ uaddr = __uaccess_mask_ptr(_uaddr);
+
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %w3, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_set(oparg, uaddr, oval);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_add(oparg, uaddr, oval);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_or(oparg, uaddr, oval);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, ~oparg);
+ ret = __futex_atomic_and(~oparg, uaddr, oval);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_eor(oparg, uaddr, oval);
break;
default:
ret = -ENOSYS;
}
- if (!ret)
- *oval = oldval;
-
return ret;
}
@@ -81,40 +144,14 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
- int ret = 0;
- unsigned int loops = FUTEX_MAX_LOOPS;
- u32 val, tmp;
u32 __user *uaddr;
if (!access_ok(_uaddr, sizeof(u32)))
return -EFAULT;
uaddr = __uaccess_mask_ptr(_uaddr);
- uaccess_enable_privileged();
- asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-" prfm pstl1strm, %2\n"
-"1: ldxr %w1, %2\n"
-" sub %w3, %w1, %w5\n"
-" cbnz %w3, 4f\n"
-"2: stlxr %w3, %w6, %2\n"
-" cbz %w3, 3f\n"
-" sub %w4, %w4, %w3\n"
-" cbnz %w4, 1b\n"
-" mov %w0, %w7\n"
-"3:\n"
-" dmb ish\n"
-"4:\n"
- _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
- _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
- : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
- : "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
- : "memory");
- uaccess_disable_privileged();
-
- if (!ret)
- *uval = val;
- return ret;
+ return __futex_cmpxchg(uaddr, oldval, newval, uval);
}
#endif /* __ASM_FUTEX_H */
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* [PATCH v14 5/8] arm64: futex: support futex with FEAT_LSUI
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
` (3 preceding siblings ...)
2026-02-25 18:27 ` [PATCH v14 4/8] arm64: futex: refactor futex atomic operation Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 6/8] arm64: armv8_deprecated: disable swp emulation when FEAT_LSUI present Yeoreum Yun
` (2 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
Current futex atomic operations are implemented with ll/sc instructions
and clearing PSTATE.PAN.
Since Armv9.6, FEAT_LSUI supplies not only load/store instructions but
also atomic operations for user memory access from the kernel, so the
kernel no longer needs to clear the PSTATE.PAN bit.
With these instructions, some of the futex atomic operations no longer
need to be implemented with an ldxr/stlxr pair; instead they can be
implemented with a single atomic operation supplied by FEAT_LSUI,
without enabling MTO as ldtr*/sttr* instruction usage would require.
However, some futex atomic operations have no matching instruction,
e.g. eor, or cmpxchg with word size.
For those operations, use cas{al}t to implement them.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
arch/arm64/include/asm/futex.h | 166 ++++++++++++++++++++++++++++++++-
1 file changed, 164 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 9a0efed50743..b579e9d0964d 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -9,6 +9,8 @@
#include <linux/uaccess.h>
#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/alternative-macros.h>
#include <asm/errno.h>
#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
@@ -87,11 +89,171 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
return ret;
}
+#ifdef CONFIG_ARM64_LSUI
+
+#define __LSUI_PREAMBLE ".arch_extension lsui\n"
+
+#define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
+static __always_inline int \
+__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
+{ \
+ int ret = 0; \
+ int oldval; \
+ \
+ uaccess_ttbr0_enable(); \
+ \
+ asm volatile("// __lsui_futex_atomic_" #op "\n" \
+ __LSUI_PREAMBLE \
+"1: " #asm_op "al %w3, %w2, %1\n" \
+"2:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
+ : "+r" (ret), "+Q" (*uaddr), "=r" (oldval) \
+ : "r" (oparg) \
+ : "memory"); \
+ \
+ uaccess_ttbr0_disable(); \
+ \
+ if (!ret) \
+ *oval = oldval; \
+ return ret; \
+}
+
+LSUI_FUTEX_ATOMIC_OP(add, ldtadd)
+LSUI_FUTEX_ATOMIC_OP(or, ldtset)
+LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr)
+LSUI_FUTEX_ATOMIC_OP(set, swpt)
+
+static __always_inline int
+__lsui_cmpxchg64(u64 __user *uaddr, u64 *oldval, u64 newval)
+{
+ int ret = 0;
+
+ uaccess_ttbr0_enable();
+
+ asm volatile("// __lsui_cmpxchg64\n"
+ __LSUI_PREAMBLE
+"1: casalt %2, %3, %1\n"
+"2:\n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
+ : "+r" (ret), "+Q" (*uaddr), "+r" (*oldval)
+ : "r" (newval)
+ : "memory");
+
+ uaccess_ttbr0_disable();
+
+ return ret;
+}
+
+static __always_inline int
+__lsui_cmpxchg32(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ u64 __user *uaddr64;
+ bool futex_pos, other_pos;
+ int ret, i;
+ u32 other, orig_other;
+ union {
+ u32 futex[2];
+ u64 raw;
+ } oval64, orig64, nval64;
+
+ uaddr64 = (u64 __user *) PTR_ALIGN_DOWN(uaddr, sizeof(u64));
+ futex_pos = !IS_ALIGNED((unsigned long)uaddr, sizeof(u64));
+ other_pos = !futex_pos;
+
+ oval64.futex[futex_pos] = oldval;
+ ret = get_user(oval64.futex[other_pos], (u32 __user *)uaddr64 + other_pos);
+ if (ret)
+ return -EFAULT;
+
+ ret = -EAGAIN;
+ for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
+ orig64.raw = nval64.raw = oval64.raw;
+
+ nval64.futex[futex_pos] = newval;
+
+ if (__lsui_cmpxchg64(uaddr64, &oval64.raw, nval64.raw))
+ return -EFAULT;
+
+ oldval = oval64.futex[futex_pos];
+ other = oval64.futex[other_pos];
+ orig_other = orig64.futex[other_pos];
+
+ if (other == orig_other) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static __always_inline int
+__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
+{
+ /*
+ * Undo the bitwise negation applied to the oparg passed from
+ * arch_futex_atomic_op_inuser() with FUTEX_OP_ANDN.
+ */
+ return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
+}
+
+static __always_inline int
+__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
+{
+ u32 oldval, newval, val;
+ int ret, i;
+
+ if (get_user(oldval, uaddr))
+ return -EFAULT;
+
+ /*
+ * there are no ldteor/stteor instructions...
+ */
+ for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
+ newval = oldval ^ oparg;
+
+ ret = __lsui_cmpxchg32(uaddr, oldval, newval, &val);
+ if (ret)
+ return ret;
+
+ if (val == oldval) {
+ *oval = val;
+ return 0;
+ }
+
+ oldval = val;
+ }
+
+ return -EAGAIN;
+}
+
+static __always_inline int
+__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
+}
+
+#define __lsui_llsc_body(op, ...) \
+({ \
+ alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
+ __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
+})
+
+#else /* CONFIG_ARM64_LSUI */
+
+#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
+
+#endif /* CONFIG_ARM64_LSUI */
+
+
#define FUTEX_ATOMIC_OP(op) \
static __always_inline int \
__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
{ \
- return __llsc_futex_atomic_##op(oparg, uaddr, oval); \
+ return __lsui_llsc_body(futex_atomic_##op, oparg, uaddr, oval); \
}
FUTEX_ATOMIC_OP(add)
@@ -103,7 +265,7 @@ FUTEX_ATOMIC_OP(set)
static __always_inline int
__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
- return __llsc_futex_cmpxchg(uaddr, oldval, newval, oval);
+ return __lsui_llsc_body(futex_cmpxchg, uaddr, oldval, newval, oval);
}
static inline int
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* [PATCH v14 6/8] arm64: armv8_deprecated: disable swp emulation when FEAT_LSUI present
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
` (4 preceding siblings ...)
2026-02-25 18:27 ` [PATCH v14 5/8] arm64: futex: support futex with FEAT_LSUI Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor Yeoreum Yun
2026-02-25 18:27 ` [PATCH v14 8/8] arm64: Kconfig: add support for LSUI Yeoreum Yun
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
The purpose of supporting LSUI is to eliminate PAN toggling.
CPUs that support LSUI are unlikely to support a 32-bit runtime.
Since environments that support both LSUI and a 32-bit runtime
are expected to be extremely rare, do not emulate the SWP
instruction using LSUI instructions in order to remove PAN
toggling; instead, simply disable SWP emulation.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
arch/arm64/kernel/armv8_deprecated.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index e737c6295ec7..049754f7da36 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -610,6 +610,22 @@ static int __init armv8_deprecated_init(void)
}
#endif
+
+#ifdef CONFIG_SWP_EMULATION
+ /*
+ * The purpose of supporting LSUI is to eliminate PAN toggling.
+ * CPUs that support LSUI are unlikely to support a 32-bit runtime.
+ * Since environments that support both LSUI and a 32-bit runtime
+ * are expected to be extremely rare, we choose not to emulate
+ * the SWP instruction using LSUI instructions in order to remove PAN toggling,
+ * and instead simply disable SWP emulation.
+ */
+ if (cpus_have_final_cap(ARM64_HAS_LSUI)) {
+ insn_swp.status = INSN_UNAVAILABLE;
+ pr_info("swp/swpb instruction emulation is not supported on this system\n");
+ }
+#endif
+
for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
struct insn_emulation *ie = insn_emulations[i];
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
` (5 preceding siblings ...)
2026-02-25 18:27 ` [PATCH v14 6/8] arm64: armv8_deprecated: disable swp emulation when FEAT_LSUI present Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
2026-02-26 11:16 ` Marc Zyngier
` (2 more replies)
2026-02-25 18:27 ` [PATCH v14 8/8] arm64: Kconfig: add support for LSUI Yeoreum Yun
7 siblings, 3 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
is enabled, avoiding the need to clear the PAN bit.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
arch/arm64/include/asm/cpucaps.h | 2 ++
arch/arm64/include/asm/futex.h | 17 +----------------
arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
4 files changed, 59 insertions(+), 17 deletions(-)
create mode 100644 arch/arm64/include/asm/lsui.h
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 177c691914f8..6e3da333442e 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
return true;
case ARM64_HAS_PMUV3:
return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
+ case ARM64_HAS_LSUI:
+ return IS_ENABLED(CONFIG_ARM64_LSUI);
}
return true;
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index b579e9d0964d..6779c4ad927f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -7,11 +7,9 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
-#include <linux/stringify.h>
-#include <asm/alternative.h>
-#include <asm/alternative-macros.h>
#include <asm/errno.h>
+#include <asm/lsui.h>
#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
@@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
#ifdef CONFIG_ARM64_LSUI
-#define __LSUI_PREAMBLE ".arch_extension lsui\n"
-
#define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
static __always_inline int \
__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
@@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
}
-
-#define __lsui_llsc_body(op, ...) \
-({ \
- alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
- __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
-})
-
-#else /* CONFIG_ARM64_LSUI */
-
-#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
-
#endif /* CONFIG_ARM64_LSUI */
diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
new file mode 100644
index 000000000000..8f0d81953eb6
--- /dev/null
+++ b/arch/arm64/include/asm/lsui.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LSUI_H
+#define __ASM_LSUI_H
+
+#include <linux/compiler_types.h>
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/alternative-macros.h>
+#include <asm/cpucaps.h>
+
+#define __LSUI_PREAMBLE ".arch_extension lsui\n"
+
+#ifdef CONFIG_ARM64_LSUI
+
+#define __lsui_llsc_body(op, ...) \
+({ \
+ alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
+ __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
+})
+
+#else /* CONFIG_ARM64_LSUI */
+
+#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
+
+#endif /* CONFIG_ARM64_LSUI */
+
+#endif /* __ASM_LSUI_H */
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 885bd5bb2f41..fd3c5749e853 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -9,6 +9,7 @@
#include <asm/esr.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include <asm/lsui.h>
static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
{
@@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
}
}
+static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
+{
+ u64 tmp = old;
+ int ret = 0;
+
+ uaccess_ttbr0_enable();
+
+ asm volatile(__LSUI_PREAMBLE
+ "1: caslt %[old], %[new], %[addr]\n"
+ "2:\n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
+ : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
+ : [new] "r" (new)
+ : "memory");
+
+ uaccess_ttbr0_disable();
+
+ if (ret)
+ return ret;
+ if (tmp != old)
+ return -EAGAIN;
+
+ return ret;
+}
+
static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
{
u64 tmp = old;
@@ -1779,7 +1805,9 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
return -EPERM;
ptep = (u64 __user *)hva + offset;
- if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
+ if (cpucap_is_possible(ARM64_HAS_LSUI) && cpus_have_final_cap(ARM64_HAS_LSUI))
+ r = __lsui_swap_desc(ptep, old, new);
+ else if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
r = __lse_swap_desc(ptep, old, new);
else
r = __llsc_swap_desc(ptep, old, new);
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-25 18:27 ` [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor Yeoreum Yun
@ 2026-02-26 11:16 ` Marc Zyngier
2026-02-26 14:05 ` Yeoreum Yun
2026-02-26 11:28 ` Suzuki K Poulose
2026-02-26 11:38 ` Oliver Upton
2 siblings, 1 reply; 17+ messages in thread
From: Marc Zyngier @ 2026-02-26 11:16 UTC (permalink / raw)
To: Yeoreum Yun
Cc: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest,
catalin.marinas, will, oupton, miko.lenczewski, kevin.brodsky,
broonie, ardb, suzuki.poulose, lpieralisi, joey.gouly, yuzenghui
On Wed, 25 Feb 2026 18:27:07 +0000,
Yeoreum Yun <yeoreum.yun@arm.com> wrote:
>
> Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
> is enabled, avoiding the need to clear the PAN bit.
>
> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
> ---
> arch/arm64/include/asm/cpucaps.h | 2 ++
> arch/arm64/include/asm/futex.h | 17 +----------------
> arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
> arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
> 4 files changed, 59 insertions(+), 17 deletions(-)
> create mode 100644 arch/arm64/include/asm/lsui.h
>
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index 177c691914f8..6e3da333442e 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
> return true;
> case ARM64_HAS_PMUV3:
> return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
> + case ARM64_HAS_LSUI:
> + return IS_ENABLED(CONFIG_ARM64_LSUI);
> }
>
> return true;
It would make more sense to move this hunk to the first patch, where
you deal with features and capabilities, instead of having this in a
random KVM-specific patch.
> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> index b579e9d0964d..6779c4ad927f 100644
> --- a/arch/arm64/include/asm/futex.h
> +++ b/arch/arm64/include/asm/futex.h
> @@ -7,11 +7,9 @@
>
> #include <linux/futex.h>
> #include <linux/uaccess.h>
> -#include <linux/stringify.h>
>
> -#include <asm/alternative.h>
> -#include <asm/alternative-macros.h>
> #include <asm/errno.h>
> +#include <asm/lsui.h>
>
> #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
>
> @@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
>
> #ifdef CONFIG_ARM64_LSUI
>
> -#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> -
> #define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
> static __always_inline int \
> __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
> @@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> {
> return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
> }
> -
> -#define __lsui_llsc_body(op, ...) \
> -({ \
> - alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> -})
> -
> -#else /* CONFIG_ARM64_LSUI */
> -
> -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> -
> #endif /* CONFIG_ARM64_LSUI */
>
>
> diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
> new file mode 100644
> index 000000000000..8f0d81953eb6
> --- /dev/null
> +++ b/arch/arm64/include/asm/lsui.h
> @@ -0,0 +1,27 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_LSUI_H
> +#define __ASM_LSUI_H
> +
> +#include <linux/compiler_types.h>
> +#include <linux/stringify.h>
> +#include <asm/alternative.h>
> +#include <asm/alternative-macros.h>
> +#include <asm/cpucaps.h>
> +
> +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> +
> +#ifdef CONFIG_ARM64_LSUI
> +
> +#define __lsui_llsc_body(op, ...) \
> +({ \
> + alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> +})
> +
> +#else /* CONFIG_ARM64_LSUI */
> +
> +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> +
> +#endif /* CONFIG_ARM64_LSUI */
> +
> +#endif /* __ASM_LSUI_H */
Similarly, fold this into the patch that introduces FEAT_LSUI support
for futexes (#5) so that the code is in its final position from the
beginning. This will avoid churn that makes the patches pointlessly
hard to follow, since this change is unrelated to KVM.
> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> index 885bd5bb2f41..fd3c5749e853 100644
> --- a/arch/arm64/kvm/at.c
> +++ b/arch/arm64/kvm/at.c
> @@ -9,6 +9,7 @@
> #include <asm/esr.h>
> #include <asm/kvm_hyp.h>
> #include <asm/kvm_mmu.h>
> +#include <asm/lsui.h>
>
> static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
> {
> @@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
> }
> }
>
> +static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
> +{
> + u64 tmp = old;
> + int ret = 0;
> +
> + uaccess_ttbr0_enable();
Why do we need this? If FEAT_LSUI is present, than FEAT_PAN is also
present. And since PAN support not a compilation option anymore, we
should be able to rely on PAN being enabled.
Or am I missing something? If so, please document why we require it.
> +
> + asm volatile(__LSUI_PREAMBLE
> + "1: caslt %[old], %[new], %[addr]\n"
> + "2:\n"
> + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
> + : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
> + : [new] "r" (new)
> + : "memory");
> +
> + uaccess_ttbr0_disable();
> +
> + if (ret)
> + return ret;
> + if (tmp != old)
> + return -EAGAIN;
> +
> + return ret;
> +}
> +
> static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
> {
> u64 tmp = old;
> @@ -1779,7 +1805,9 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
> return -EPERM;
>
> ptep = (u64 __user *)hva + offset;
> - if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
> + if (cpucap_is_possible(ARM64_HAS_LSUI) && cpus_have_final_cap(ARM64_HAS_LSUI))
> + r = __lsui_swap_desc(ptep, old, new);
> + else if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
> r = __lse_swap_desc(ptep, old, new);
> else
> r = __llsc_swap_desc(ptep, old, new);
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 17+ messages in thread* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-26 11:16 ` Marc Zyngier
@ 2026-02-26 14:05 ` Yeoreum Yun
2026-02-26 14:52 ` Suzuki K Poulose
0 siblings, 1 reply; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-26 14:05 UTC (permalink / raw)
To: Marc Zyngier
Cc: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest,
catalin.marinas, will, oupton, miko.lenczewski, kevin.brodsky,
broonie, ardb, suzuki.poulose, lpieralisi, joey.gouly, yuzenghui
Hi Marc,
> On Wed, 25 Feb 2026 18:27:07 +0000,
> Yeoreum Yun <yeoreum.yun@arm.com> wrote:
> >
> > Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
> > is enabled, avoiding the need to clear the PAN bit.
> >
> > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
> > ---
> > arch/arm64/include/asm/cpucaps.h | 2 ++
> > arch/arm64/include/asm/futex.h | 17 +----------------
> > arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
> > arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
> > 4 files changed, 59 insertions(+), 17 deletions(-)
> > create mode 100644 arch/arm64/include/asm/lsui.h
> >
> > diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> > index 177c691914f8..6e3da333442e 100644
> > --- a/arch/arm64/include/asm/cpucaps.h
> > +++ b/arch/arm64/include/asm/cpucaps.h
> > @@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
> > return true;
> > case ARM64_HAS_PMUV3:
> > return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
> > + case ARM64_HAS_LSUI:
> > + return IS_ENABLED(CONFIG_ARM64_LSUI);
> > }
> >
> > return true;
>
> It would make more sense to move this hunk to the first patch, where
> you deal with features and capabilities, instead of having this in a
> random KVM-specific patch.
Okay. But as Suzuki mention, I think it seems to be redundant.
I'll remove it.
>
> > diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> > index b579e9d0964d..6779c4ad927f 100644
> > --- a/arch/arm64/include/asm/futex.h
> > +++ b/arch/arm64/include/asm/futex.h
> > @@ -7,11 +7,9 @@
> >
> > #include <linux/futex.h>
> > #include <linux/uaccess.h>
> > -#include <linux/stringify.h>
> >
> > -#include <asm/alternative.h>
> > -#include <asm/alternative-macros.h>
> > #include <asm/errno.h>
> > +#include <asm/lsui.h>
> >
> > #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
> >
> > @@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> >
> > #ifdef CONFIG_ARM64_LSUI
> >
> > -#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> > -
> > #define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
> > static __always_inline int \
> > __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
> > @@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> > {
> > return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
> > }
> > -
> > -#define __lsui_llsc_body(op, ...) \
> > -({ \
> > - alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> > - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> > -})
> > -
> > -#else /* CONFIG_ARM64_LSUI */
> > -
> > -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> > -
> > #endif /* CONFIG_ARM64_LSUI */
> >
> >
> > diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
> > new file mode 100644
> > index 000000000000..8f0d81953eb6
> > --- /dev/null
> > +++ b/arch/arm64/include/asm/lsui.h
> > @@ -0,0 +1,27 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef __ASM_LSUI_H
> > +#define __ASM_LSUI_H
> > +
> > +#include <linux/compiler_types.h>
> > +#include <linux/stringify.h>
> > +#include <asm/alternative.h>
> > +#include <asm/alternative-macros.h>
> > +#include <asm/cpucaps.h>
> > +
> > +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> > +
> > +#ifdef CONFIG_ARM64_LSUI
> > +
> > +#define __lsui_llsc_body(op, ...) \
> > +({ \
> > + alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> > + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> > +})
> > +
> > +#else /* CONFIG_ARM64_LSUI */
> > +
> > +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> > +
> > +#endif /* CONFIG_ARM64_LSUI */
> > +
> > +#endif /* __ASM_LSUI_H */
>
> Similarly, fold this into the patch that introduces FEAT_LSUI support
> for futexes (#5) so that the code is in its final position from the
> beginning. This will avoid churn that makes the patches pointlessly
> hard to follow, since this change is unrelated to KVM.
Okay. I'll fold it into #5.
>
> > diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> > index 885bd5bb2f41..fd3c5749e853 100644
> > --- a/arch/arm64/kvm/at.c
> > +++ b/arch/arm64/kvm/at.c
> > @@ -9,6 +9,7 @@
> > #include <asm/esr.h>
> > #include <asm/kvm_hyp.h>
> > #include <asm/kvm_mmu.h>
> > +#include <asm/lsui.h>
> >
> > static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
> > {
> > @@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
> > }
> > }
> >
> > +static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
> > +{
> > + u64 tmp = old;
> > + int ret = 0;
> > +
> > + uaccess_ttbr0_enable();
>
> Why do we need this? If FEAT_LSUI is present, than FEAT_PAN is also
> present. And since PAN support not a compilation option anymore, we
> should be able to rely on PAN being enabled.
>
> Or am I missing something? If so, please document why we require it.
That was my original thought, but there was relevant discussion about this:
- https://lore.kernel.org/all/aW5dzb0ldp8u8Rdm@willie-the-truck/
- https://lore.kernel.org/all/aYtZfpWjRJ1r23nw@arm.com/
In summary, I couldn't make the assumption that
PAN is always present when LSUI is present, because:
- CPU bugs happen all the time
- Virtualisation and idreg overrides mean illegal feature combinations
can show up
So, uaccess_ttbr0_enable() is there for when SW_PAN is enabled.
I'll add a comment explaining this.
[...]
Thanks!
--
Sincerely,
Yeoreum Yun
^ permalink raw reply [flat|nested] 17+ messages in thread* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-26 14:05 ` Yeoreum Yun
@ 2026-02-26 14:52 ` Suzuki K Poulose
2026-02-27 8:31 ` Yeoreum Yun
0 siblings, 1 reply; 17+ messages in thread
From: Suzuki K Poulose @ 2026-02-26 14:52 UTC (permalink / raw)
To: Yeoreum Yun, Marc Zyngier
Cc: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest,
catalin.marinas, will, oupton, miko.lenczewski, kevin.brodsky,
broonie, ardb, lpieralisi, joey.gouly, yuzenghui
On 26/02/2026 14:05, Yeoreum Yun wrote:
> Hi Marc,
>
>> On Wed, 25 Feb 2026 18:27:07 +0000,
>> Yeoreum Yun <yeoreum.yun@arm.com> wrote:
>>>
>>> Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
>>> is enabled, avoiding the need to clear the PAN bit.
>>>
>>> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
>>> ---
>>> arch/arm64/include/asm/cpucaps.h | 2 ++
>>> arch/arm64/include/asm/futex.h | 17 +----------------
>>> arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
>>> arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
>>> 4 files changed, 59 insertions(+), 17 deletions(-)
>>> create mode 100644 arch/arm64/include/asm/lsui.h
>>>
>>> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
>>> index 177c691914f8..6e3da333442e 100644
>>> --- a/arch/arm64/include/asm/cpucaps.h
>>> +++ b/arch/arm64/include/asm/cpucaps.h
>>> @@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
>>> return true;
>>> case ARM64_HAS_PMUV3:
>>> return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
>>> + case ARM64_HAS_LSUI:
>>> + return IS_ENABLED(CONFIG_ARM64_LSUI);
>>> }
>>>
>>> return true;
>>
>> It would make more sense to move this hunk to the first patch, where
>> you deal with features and capabilities, instead of having this in a
>> random KVM-specific patch.
>
> Okay. But as Suzuki mention, I think it seems to be redundant.
> I'll remove it.
>
No, this is required and Marc is right. This hunk should be part of the
original patch that adds the cap. What I am saying is that you don't
need to call cpucap_is_possible() explicitly, since it is
implicitly called by cpus_have_final_cap().
Kind regards
Suzuki
>>
>>> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
>>> index b579e9d0964d..6779c4ad927f 100644
>>> --- a/arch/arm64/include/asm/futex.h
>>> +++ b/arch/arm64/include/asm/futex.h
>>> @@ -7,11 +7,9 @@
>>>
>>> #include <linux/futex.h>
>>> #include <linux/uaccess.h>
>>> -#include <linux/stringify.h>
>>>
>>> -#include <asm/alternative.h>
>>> -#include <asm/alternative-macros.h>
>>> #include <asm/errno.h>
>>> +#include <asm/lsui.h>
>>>
>>> #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
>>>
>>> @@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
>>>
>>> #ifdef CONFIG_ARM64_LSUI
>>>
>>> -#define __LSUI_PREAMBLE ".arch_extension lsui\n"
>>> -
>>> #define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
>>> static __always_inline int \
>>> __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
>>> @@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
>>> {
>>> return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
>>> }
>>> -
>>> -#define __lsui_llsc_body(op, ...) \
>>> -({ \
>>> - alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
>>> - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
>>> -})
>>> -
>>> -#else /* CONFIG_ARM64_LSUI */
>>> -
>>> -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
>>> -
>>> #endif /* CONFIG_ARM64_LSUI */
>>>
>>>
>>> diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
>>> new file mode 100644
>>> index 000000000000..8f0d81953eb6
>>> --- /dev/null
>>> +++ b/arch/arm64/include/asm/lsui.h
>>> @@ -0,0 +1,27 @@
>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>> +#ifndef __ASM_LSUI_H
>>> +#define __ASM_LSUI_H
>>> +
>>> +#include <linux/compiler_types.h>
>>> +#include <linux/stringify.h>
>>> +#include <asm/alternative.h>
>>> +#include <asm/alternative-macros.h>
>>> +#include <asm/cpucaps.h>
>>> +
>>> +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
>>> +
>>> +#ifdef CONFIG_ARM64_LSUI
>>> +
>>> +#define __lsui_llsc_body(op, ...) \
>>> +({ \
>>> + alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
>>> + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
>>> +})
>>> +
>>> +#else /* CONFIG_ARM64_LSUI */
>>> +
>>> +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
>>> +
>>> +#endif /* CONFIG_ARM64_LSUI */
>>> +
>>> +#endif /* __ASM_LSUI_H */
>>
>> Similarly, fold this into the patch that introduces FEAT_LSUI support
>> for futexes (#5) so that the code is in its final position from the
>> beginning. This will avoid churn that makes the patches pointlessly
>> hard to follow, since this change is unrelated to KVM.
>
> Okay. I'll fold it into #5.
>
>>
>>> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
>>> index 885bd5bb2f41..fd3c5749e853 100644
>>> --- a/arch/arm64/kvm/at.c
>>> +++ b/arch/arm64/kvm/at.c
>>> @@ -9,6 +9,7 @@
>>> #include <asm/esr.h>
>>> #include <asm/kvm_hyp.h>
>>> #include <asm/kvm_mmu.h>
>>> +#include <asm/lsui.h>
>>>
>>> static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
>>> {
>>> @@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
>>> }
>>> }
>>>
>>> +static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
>>> +{
>>> + u64 tmp = old;
>>> + int ret = 0;
>>> +
>>> + uaccess_ttbr0_enable();
>>
>> Why do we need this? If FEAT_LSUI is present, than FEAT_PAN is also
>> present. And since PAN support not a compilation option anymore, we
>> should be able to rely on PAN being enabled.
>>
>> Or am I missing something? If so, please document why we require it.
>
> That was my origin thought but there was relevant discussion about this:
> - https://lore.kernel.org/all/aW5dzb0ldp8u8Rdm@willie-the-truck/
> - https://lore.kernel.org/all/aYtZfpWjRJ1r23nw@arm.com/
>
> In summary, I couldn't make that assumption --
> PAN always presents when LSUI presents for :
>
> - CPU bugs happen all the time
> - Virtualisation and idreg overrides mean illegal feature combinations
> can show up
>
> So, uaccess_ttbr0_enable() is for when SW_PAN is enabled.
>
> I'll make a comment for this.
>
> [...]
>
> Thanks!
>
> --
> Sincerely,
> Yeoreum Yun
^ permalink raw reply [flat|nested] 17+ messages in thread* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-26 14:52 ` Suzuki K Poulose
@ 2026-02-27 8:31 ` Yeoreum Yun
0 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-27 8:31 UTC (permalink / raw)
To: Suzuki K Poulose
Cc: Marc Zyngier, linux-arm-kernel, linux-kernel, kvmarm, kvm,
linux-kselftest, catalin.marinas, will, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, lpieralisi, joey.gouly, yuzenghui
> On 26/02/2026 14:05, Yeoreum Yun wrote:
> > Hi Marc,
> >
> > > On Wed, 25 Feb 2026 18:27:07 +0000,
> > > Yeoreum Yun <yeoreum.yun@arm.com> wrote:
> > > >
> > > > Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
> > > > is enabled, avoiding the need to clear the PAN bit.
> > > >
> > > > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
> > > > ---
> > > > arch/arm64/include/asm/cpucaps.h | 2 ++
> > > > arch/arm64/include/asm/futex.h | 17 +----------------
> > > > arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
> > > > arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
> > > > 4 files changed, 59 insertions(+), 17 deletions(-)
> > > > create mode 100644 arch/arm64/include/asm/lsui.h
> > > >
> > > > diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> > > > index 177c691914f8..6e3da333442e 100644
> > > > --- a/arch/arm64/include/asm/cpucaps.h
> > > > +++ b/arch/arm64/include/asm/cpucaps.h
> > > > @@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
> > > > return true;
> > > > case ARM64_HAS_PMUV3:
> > > > return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
> > > > + case ARM64_HAS_LSUI:
> > > > + return IS_ENABLED(CONFIG_ARM64_LSUI);
> > > > }
> > > >
> > > > return true;
> > >
> > > It would make more sense to move this hunk to the first patch, where
> > > you deal with features and capabilities, instead of having this in a
> > > random KVM-specific patch.
> >
> > Okay. But as Suzuki mention, I think it seems to be redundant.
> > I'll remove it.
> >
>
> No, this is required and Marc is right. This hunk should be part of the
> original patch that adds the cap. What I am saying is that you don't
> need to explicitly call the cpucap_is_poissible() down, but it is
> implicitly called by cpus_have_final_cap().
Ah, my bad eyes — I missed that alternative_has_cap_unlikely() calls
cpucap_is_possible().
Thanks for pointing this out!
>
>
>
> > >
> > > > diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> > > > index b579e9d0964d..6779c4ad927f 100644
> > > > --- a/arch/arm64/include/asm/futex.h
> > > > +++ b/arch/arm64/include/asm/futex.h
> > > > @@ -7,11 +7,9 @@
> > > >
> > > > #include <linux/futex.h>
> > > > #include <linux/uaccess.h>
> > > > -#include <linux/stringify.h>
> > > >
> > > > -#include <asm/alternative.h>
> > > > -#include <asm/alternative-macros.h>
> > > > #include <asm/errno.h>
> > > > +#include <asm/lsui.h>
> > > >
> > > > #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
> > > >
> > > > @@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> > > >
> > > > #ifdef CONFIG_ARM64_LSUI
> > > >
> > > > -#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> > > > -
> > > > #define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
> > > > static __always_inline int \
> > > > __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
> > > > @@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> > > > {
> > > > return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
> > > > }
> > > > -
> > > > -#define __lsui_llsc_body(op, ...) \
> > > > -({ \
> > > > - alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> > > > - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> > > > -})
> > > > -
> > > > -#else /* CONFIG_ARM64_LSUI */
> > > > -
> > > > -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> > > > -
> > > > #endif /* CONFIG_ARM64_LSUI */
> > > >
> > > >
> > > > diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
> > > > new file mode 100644
> > > > index 000000000000..8f0d81953eb6
> > > > --- /dev/null
> > > > +++ b/arch/arm64/include/asm/lsui.h
> > > > @@ -0,0 +1,27 @@
> > > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > > +#ifndef __ASM_LSUI_H
> > > > +#define __ASM_LSUI_H
> > > > +
> > > > +#include <linux/compiler_types.h>
> > > > +#include <linux/stringify.h>
> > > > +#include <asm/alternative.h>
> > > > +#include <asm/alternative-macros.h>
> > > > +#include <asm/cpucaps.h>
> > > > +
> > > > +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> > > > +
> > > > +#ifdef CONFIG_ARM64_LSUI
> > > > +
> > > > +#define __lsui_llsc_body(op, ...) \
> > > > +({ \
> > > > + alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> > > > + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> > > > +})
> > > > +
> > > > +#else /* CONFIG_ARM64_LSUI */
> > > > +
> > > > +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> > > > +
> > > > +#endif /* CONFIG_ARM64_LSUI */
> > > > +
> > > > +#endif /* __ASM_LSUI_H */
> > >
> > > Similarly, fold this into the patch that introduces FEAT_LSUI support
> > > for futexes (#5) so that the code is in its final position from the
> > > beginning. This will avoid churn that makes the patches pointlessly
> > > hard to follow, since this change is unrelated to KVM.
> >
> > Okay. I'll fold it into #5.
> >
> > >
> > > > diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> > > > index 885bd5bb2f41..fd3c5749e853 100644
> > > > --- a/arch/arm64/kvm/at.c
> > > > +++ b/arch/arm64/kvm/at.c
> > > > @@ -9,6 +9,7 @@
> > > > #include <asm/esr.h>
> > > > #include <asm/kvm_hyp.h>
> > > > #include <asm/kvm_mmu.h>
> > > > +#include <asm/lsui.h>
> > > >
> > > > static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
> > > > {
> > > > @@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
> > > > }
> > > > }
> > > >
> > > > +static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
> > > > +{
> > > > + u64 tmp = old;
> > > > + int ret = 0;
> > > > +
> > > > + uaccess_ttbr0_enable();
> > >
> > > Why do we need this? If FEAT_LSUI is present, than FEAT_PAN is also
> > > present. And since PAN support not a compilation option anymore, we
> > > should be able to rely on PAN being enabled.
> > >
> > > Or am I missing something? If so, please document why we require it.
> >
> > That was my origin thought but there was relevant discussion about this:
> > - https://lore.kernel.org/all/aW5dzb0ldp8u8Rdm@willie-the-truck/
> > - https://lore.kernel.org/all/aYtZfpWjRJ1r23nw@arm.com/
> >
> > In summary, I couldn't make that assumption --
> > PAN always presents when LSUI presents for :
> >
> > - CPU bugs happen all the time
> > - Virtualisation and idreg overrides mean illegal feature combinations
> > can show up
> >
> > So, uaccess_ttbr0_enable() is for when SW_PAN is enabled.
> >
> > I'll make a comment for this.
> >
> > [...]
> >
> > Thanks!
> >
> > --
> > Sincerely,
> > Yeoreum Yun
>
--
Sincerely,
Yeoreum Yun
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-25 18:27 ` [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor Yeoreum Yun
2026-02-26 11:16 ` Marc Zyngier
@ 2026-02-26 11:28 ` Suzuki K Poulose
2026-02-26 13:53 ` Yeoreum Yun
2026-02-26 11:38 ` Oliver Upton
2 siblings, 1 reply; 17+ messages in thread
From: Suzuki K Poulose @ 2026-02-26 11:28 UTC (permalink / raw)
To: Yeoreum Yun, linux-arm-kernel, linux-kernel, kvmarm, kvm,
linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, lpieralisi, joey.gouly, yuzenghui
On 25/02/2026 18:27, Yeoreum Yun wrote:
> Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
> is enabled, avoiding the need to clear the PAN bit.
>
> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
> ---
> arch/arm64/include/asm/cpucaps.h | 2 ++
> arch/arm64/include/asm/futex.h | 17 +----------------
> arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
> arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
> 4 files changed, 59 insertions(+), 17 deletions(-)
> create mode 100644 arch/arm64/include/asm/lsui.h
>
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index 177c691914f8..6e3da333442e 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
> return true;
> case ARM64_HAS_PMUV3:
> return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
> + case ARM64_HAS_LSUI:
> + return IS_ENABLED(CONFIG_ARM64_LSUI);
> }
>
> return true;
> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> index b579e9d0964d..6779c4ad927f 100644
> --- a/arch/arm64/include/asm/futex.h
> +++ b/arch/arm64/include/asm/futex.h
> @@ -7,11 +7,9 @@
>
> #include <linux/futex.h>
> #include <linux/uaccess.h>
> -#include <linux/stringify.h>
>
> -#include <asm/alternative.h>
> -#include <asm/alternative-macros.h>
> #include <asm/errno.h>
> +#include <asm/lsui.h>
>
> #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
>
> @@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
>
> #ifdef CONFIG_ARM64_LSUI
>
> -#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> -
> #define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
> static __always_inline int \
> __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
> @@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> {
> return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
> }
> -
> -#define __lsui_llsc_body(op, ...) \
> -({ \
> - alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> -})
> -
> -#else /* CONFIG_ARM64_LSUI */
> -
> -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> -
> #endif /* CONFIG_ARM64_LSUI */
>
>
> diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
> new file mode 100644
> index 000000000000..8f0d81953eb6
> --- /dev/null
> +++ b/arch/arm64/include/asm/lsui.h
> @@ -0,0 +1,27 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_LSUI_H
> +#define __ASM_LSUI_H
> +
> +#include <linux/compiler_types.h>
> +#include <linux/stringify.h>
> +#include <asm/alternative.h>
> +#include <asm/alternative-macros.h>
> +#include <asm/cpucaps.h>
> +
> +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> +
> +#ifdef CONFIG_ARM64_LSUI
> +
> +#define __lsui_llsc_body(op, ...) \
> +({ \
> + alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> +})
> +
> +#else /* CONFIG_ARM64_LSUI */
> +
> +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> +
> +#endif /* CONFIG_ARM64_LSUI */
> +
> +#endif /* __ASM_LSUI_H */
> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> index 885bd5bb2f41..fd3c5749e853 100644
> --- a/arch/arm64/kvm/at.c
> +++ b/arch/arm64/kvm/at.c
> @@ -9,6 +9,7 @@
> #include <asm/esr.h>
> #include <asm/kvm_hyp.h>
> #include <asm/kvm_mmu.h>
> +#include <asm/lsui.h>
>
> static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
> {
> @@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
> }
> }
>
> +static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
> +{
> + u64 tmp = old;
> + int ret = 0;
> +
> + uaccess_ttbr0_enable();
> +
> + asm volatile(__LSUI_PREAMBLE
> + "1: caslt %[old], %[new], %[addr]\n"
> + "2:\n"
> + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
> + : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
> + : [new] "r" (new)
> + : "memory");
> +
> + uaccess_ttbr0_disable();
> +
> + if (ret)
> + return ret;
> + if (tmp != old)
> + return -EAGAIN;
> +
> + return ret;
> +}
> +
> static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
> {
> u64 tmp = old;
> @@ -1779,7 +1805,9 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
> return -EPERM;
>
> ptep = (u64 __user *)hva + offset;
> - if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
> + if (cpucap_is_possible(ARM64_HAS_LSUI) && cpus_have_final_cap(ARM64_HAS_LSUI))
minor nit:
You don't need the cpucap_is_possible() as it is already checked via
cpus_have_final_cap()->alternative_has_cap_unlikely()
Suzuki
> + r = __lsui_swap_desc(ptep, old, new);
> + else if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
> r = __lse_swap_desc(ptep, old, new);
> else
> r = __llsc_swap_desc(ptep, old, new);
^ permalink raw reply [flat|nested] 17+ messages in thread* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-26 11:28 ` Suzuki K Poulose
@ 2026-02-26 13:53 ` Yeoreum Yun
0 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-26 13:53 UTC (permalink / raw)
To: Suzuki K Poulose
Cc: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest,
catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, lpieralisi, joey.gouly, yuzenghui
Hi Suzuki,
> On 25/02/2026 18:27, Yeoreum Yun wrote:
> > Use the CASLT instruction to swap the guest descriptor when FEAT_LSUI
> > is enabled, avoiding the need to clear the PAN bit.
> >
> > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
> > ---
> > arch/arm64/include/asm/cpucaps.h | 2 ++
> > arch/arm64/include/asm/futex.h | 17 +----------------
> > arch/arm64/include/asm/lsui.h | 27 +++++++++++++++++++++++++++
> > arch/arm64/kvm/at.c | 30 +++++++++++++++++++++++++++++-
> > 4 files changed, 59 insertions(+), 17 deletions(-)
> > create mode 100644 arch/arm64/include/asm/lsui.h
> >
> > diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> > index 177c691914f8..6e3da333442e 100644
> > --- a/arch/arm64/include/asm/cpucaps.h
> > +++ b/arch/arm64/include/asm/cpucaps.h
> > @@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
> > return true;
> > case ARM64_HAS_PMUV3:
> > return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
> > + case ARM64_HAS_LSUI:
> > + return IS_ENABLED(CONFIG_ARM64_LSUI);
> > }
> > return true;
> > diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> > index b579e9d0964d..6779c4ad927f 100644
> > --- a/arch/arm64/include/asm/futex.h
> > +++ b/arch/arm64/include/asm/futex.h
> > @@ -7,11 +7,9 @@
> > #include <linux/futex.h>
> > #include <linux/uaccess.h>
> > -#include <linux/stringify.h>
> > -#include <asm/alternative.h>
> > -#include <asm/alternative-macros.h>
> > #include <asm/errno.h>
> > +#include <asm/lsui.h>
> > #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
> > @@ -91,8 +89,6 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> > #ifdef CONFIG_ARM64_LSUI
> > -#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> > -
> > #define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
> > static __always_inline int \
> > __lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
> > @@ -235,17 +231,6 @@ __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> > {
> > return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
> > }
> > -
> > -#define __lsui_llsc_body(op, ...) \
> > -({ \
> > - alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> > - __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> > -})
> > -
> > -#else /* CONFIG_ARM64_LSUI */
> > -
> > -#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> > -
> > #endif /* CONFIG_ARM64_LSUI */
> > diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
> > new file mode 100644
> > index 000000000000..8f0d81953eb6
> > --- /dev/null
> > +++ b/arch/arm64/include/asm/lsui.h
> > @@ -0,0 +1,27 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef __ASM_LSUI_H
> > +#define __ASM_LSUI_H
> > +
> > +#include <linux/compiler_types.h>
> > +#include <linux/stringify.h>
> > +#include <asm/alternative.h>
> > +#include <asm/alternative-macros.h>
> > +#include <asm/cpucaps.h>
> > +
> > +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> > +
> > +#ifdef CONFIG_ARM64_LSUI
> > +
> > +#define __lsui_llsc_body(op, ...) \
> > +({ \
> > + alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
> > + __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
> > +})
> > +
> > +#else /* CONFIG_ARM64_LSUI */
> > +
> > +#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
> > +
> > +#endif /* CONFIG_ARM64_LSUI */
> > +
> > +#endif /* __ASM_LSUI_H */
> > diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> > index 885bd5bb2f41..fd3c5749e853 100644
> > --- a/arch/arm64/kvm/at.c
> > +++ b/arch/arm64/kvm/at.c
> > @@ -9,6 +9,7 @@
> > #include <asm/esr.h>
> > #include <asm/kvm_hyp.h>
> > #include <asm/kvm_mmu.h>
> > +#include <asm/lsui.h>
> > static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
> > {
> > @@ -1704,6 +1705,31 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
> > }
> > }
> > +static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
> > +{
> > + u64 tmp = old;
> > + int ret = 0;
> > +
> > + uaccess_ttbr0_enable();
> > +
> > + asm volatile(__LSUI_PREAMBLE
> > + "1: caslt %[old], %[new], %[addr]\n"
> > + "2:\n"
> > + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
> > + : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
> > + : [new] "r" (new)
> > + : "memory");
> > +
> > + uaccess_ttbr0_disable();
> > +
> > + if (ret)
> > + return ret;
> > + if (tmp != old)
> > + return -EAGAIN;
> > +
> > + return ret;
> > +}
> > +
> > static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
> > {
> > u64 tmp = old;
> > @@ -1779,7 +1805,9 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
> > return -EPERM;
> > ptep = (u64 __user *)hva + offset;
> > - if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
> > + if (cpucap_is_possible(ARM64_HAS_LSUI) && cpus_have_final_cap(ARM64_HAS_LSUI))
>
> minor nit:
>
> You don't need the cpucap_is_possible() as it is already checked via
> cpus_have_final_cap()->alternative_has_cap_unlikely()
Right. It seems a little bit redundant.
I'll remove it.
Thanks!
--
Sincerely,
Yeoreum Yun
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-25 18:27 ` [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor Yeoreum Yun
2026-02-26 11:16 ` Marc Zyngier
2026-02-26 11:28 ` Suzuki K Poulose
@ 2026-02-26 11:38 ` Oliver Upton
2026-02-26 13:52 ` Yeoreum Yun
2 siblings, 1 reply; 17+ messages in thread
From: Oliver Upton @ 2026-02-26 11:38 UTC (permalink / raw)
To: Yeoreum Yun
Cc: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest,
catalin.marinas, will, maz, miko.lenczewski, kevin.brodsky,
broonie, ardb, suzuki.poulose, lpieralisi, joey.gouly, yuzenghui
On Wed, Feb 25, 2026 at 06:27:07PM +0000, Yeoreum Yun wrote:
> + asm volatile(__LSUI_PREAMBLE
> + "1: caslt %[old], %[new], %[addr]\n"
The other two flavors of this use relaxed ordering, why can't we do the
same with LSUI?
Thanks,
Oliver
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor
2026-02-26 11:38 ` Oliver Upton
@ 2026-02-26 13:52 ` Yeoreum Yun
0 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-26 13:52 UTC (permalink / raw)
To: Oliver Upton
Cc: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest,
catalin.marinas, will, maz, miko.lenczewski, kevin.brodsky,
broonie, ardb, suzuki.poulose, lpieralisi, joey.gouly, yuzenghui
Hi Oliver,
> On Wed, Feb 25, 2026 at 06:27:07PM +0000, Yeoreum Yun wrote:
> > + asm volatile(__LSUI_PREAMBLE
> > + "1: caslt %[old], %[new], %[addr]\n"
>
> The other two flavors of this use relaxed ordering, why can't we do the
> same with LSUI?
Right. I had misunderstood that caslt has relaxed semantics.
I should change it to CAST.
Thanks!
--
Sincerely,
Yeoreum Yun
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v14 8/8] arm64: Kconfig: add support for LSUI
2026-02-25 18:27 [PATCH v14 0/8] support FEAT_LSUI Yeoreum Yun
` (6 preceding siblings ...)
2026-02-25 18:27 ` [PATCH v14 7/8] KVM: arm64: use CASLT instruction for swapping guest descriptor Yeoreum Yun
@ 2026-02-25 18:27 ` Yeoreum Yun
7 siblings, 0 replies; 17+ messages in thread
From: Yeoreum Yun @ 2026-02-25 18:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, kvmarm, kvm, linux-kselftest
Cc: catalin.marinas, will, maz, oupton, miko.lenczewski,
kevin.brodsky, broonie, ardb, suzuki.poulose, lpieralisi,
joey.gouly, yuzenghui, yeoreum.yun
Since Armv9.6, FEAT_LSUI supplies load/store instructions that allow
privileged code to access user memory without clearing the
PSTATE.PAN bit.
Add Kconfig option entry for FEAT_LSUI.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/Kconfig | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 38dba5f7e4d2..890a1bedbf4a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2215,6 +2215,26 @@ config ARM64_GCS
endmenu # "ARMv9.4 architectural features"
+config AS_HAS_LSUI
+ def_bool $(as-instr,.arch_extension lsui)
+ help
+ Supported by LLVM 20+ and binutils 2.45+.
+
+menu "ARMv9.6 architectural features"
+
+config ARM64_LSUI
+ bool "Support Unprivileged Load Store Instructions (LSUI)"
+ default y
+ depends on AS_HAS_LSUI && !CPU_BIG_ENDIAN
+ help
+ The Unprivileged Load Store Instructions (LSUI) feature provides
+ variants of load/store instructions that access user-space memory
+ from the kernel without clearing the PSTATE.PAN bit.
+
+ This feature is supported by LLVM 20+ and binutils 2.45+.
+
+endmenu # "ARMv9.6 architectural features"
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
^ permalink raw reply related [flat|nested] 17+ messages in thread