* [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines
@ 2025-02-28 17:00 Kristina Martšenko
2025-02-28 17:00 ` [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions Kristina Martšenko
` (3 more replies)
0 siblings, 4 replies; 14+ messages in thread
From: Kristina Martšenko @ 2025-02-28 17:00 UTC (permalink / raw)
To: linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
Robin Murphy, James Morse
Hi,
This is a follow-up to the series using CPY/SET instructions in the kernel's
memory copy routines [1]. This series extends that to the usercopy routines.
This required some additions to exception fixups since a CPY instruction
accesses both user and kernel memory.
Changes in v2:
- Improved readability of cpy_faulted_on_uaccess()
- v1: https://lore.kernel.org/all/20250218171430.28227-1-kristina.martsenko@arm.com/
Note that this conflicts with Tong's series [2].
Thanks,
Kristina
[1] https://lore.kernel.org/all/20240930161051.3777828-1-kristina.martsenko@arm.com/
[2] https://lore.kernel.org/all/20241209024257.3618492-1-tongtiangen@huawei.com/
Kristina Martšenko (3):
arm64: extable: Add fixup handling for uaccess CPY* instructions
arm64: mm: Handle PAN faults on uaccess CPY* instructions
arm64: lib: Use MOPS for usercopy routines
arch/arm64/include/asm/asm-extable.h | 10 +++++++-
arch/arm64/include/asm/asm-uaccess.h | 4 ++++
arch/arm64/include/asm/extable.h | 5 +++-
arch/arm64/lib/clear_user.S | 25 +++++++++++++++----
arch/arm64/lib/copy_from_user.S | 10 ++++++++
arch/arm64/lib/copy_template.S | 10 ++++++++
arch/arm64/lib/copy_to_user.S | 10 ++++++++
arch/arm64/mm/extable.c | 36 +++++++++++++++++++++++++++-
arch/arm64/mm/fault.c | 6 +++--
9 files changed, 107 insertions(+), 9 deletions(-)
base-commit: d082ecbc71e9e0bf49883ee4afd435a77a5101b6
--
2.34.1
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions
2025-02-28 17:00 [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines Kristina Martšenko
@ 2025-02-28 17:00 ` Kristina Martšenko
2025-03-06 15:23 ` Robin Murphy
2025-02-28 17:00 ` [PATCH v2 2/3] arm64: mm: Handle PAN faults on " Kristina Martšenko
` (2 subsequent siblings)
3 siblings, 1 reply; 14+ messages in thread
From: Kristina Martšenko @ 2025-02-28 17:00 UTC (permalink / raw)
To: linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
Robin Murphy, James Morse
A subsequent patch will use CPY* instructions to copy between user and
kernel memory. Add a new exception fixup type to avoid fixing up faults
on kernel memory accesses, in order to make it easier to debug kernel
bugs and to keep the same behavior as with regular loads/stores.
Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
---
arch/arm64/include/asm/asm-extable.h | 10 +++++++++-
arch/arm64/include/asm/extable.h | 2 +-
arch/arm64/mm/extable.c | 25 ++++++++++++++++++++++++-
arch/arm64/mm/fault.c | 2 +-
4 files changed, 35 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index b8a5861dc7b7..292f2687a12e 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -9,7 +9,8 @@
#define EX_TYPE_BPF 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_KACCESS_ERR_ZERO 3
-#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+#define EX_TYPE_UACCESS_CPY 4
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 5
/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
#define EX_DATA_REG_ERR_SHIFT 0
@@ -23,6 +24,9 @@
#define EX_DATA_REG_ADDR_SHIFT 5
#define EX_DATA_REG_ADDR GENMASK(9, 5)
+/* Data fields for EX_TYPE_UACCESS_CPY */
+#define EX_DATA_UACCESS_WRITE BIT(0)
+
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
@@ -69,6 +73,10 @@
.endif
.endm
+ .macro _asm_extable_uaccess_cpy, insn, fixup, uaccess_is_write
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_CPY, \uaccess_is_write)
+ .endm
+
#else /* __ASSEMBLY__ */
#include <linux/stringify.h>
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 72b0e71cc3de..5892b8977710 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -45,5 +45,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
}
#endif /* !CONFIG_BPF_JIT */
-bool fixup_exception(struct pt_regs *regs);
+bool fixup_exception(struct pt_regs *regs, unsigned long esr);
#endif
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 228d681a8715..afb5241e4d91 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -8,8 +8,18 @@
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
+#include <asm/esr.h>
#include <asm/ptrace.h>
+static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex,
+ unsigned long esr)
+{
+ bool uaccess_is_write = FIELD_GET(EX_DATA_UACCESS_WRITE, ex->data);
+ bool fault_on_write = esr & ESR_ELx_WNR;
+
+ return uaccess_is_write == fault_on_write;
+}
+
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
@@ -29,6 +39,17 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
return true;
}
+static bool ex_handler_uaccess_cpy(const struct exception_table_entry *ex,
+ struct pt_regs *regs, unsigned long esr)
+{
+ /* Do not fix up faults on kernel memory accesses */
+ if (!cpy_faulted_on_uaccess(ex, esr))
+ return false;
+
+ regs->pc = get_ex_fixup(ex);
+ return true;
+}
+
static bool
ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -56,7 +77,7 @@ ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
return true;
}
-bool fixup_exception(struct pt_regs *regs)
+bool fixup_exception(struct pt_regs *regs, unsigned long esr)
{
const struct exception_table_entry *ex;
@@ -70,6 +91,8 @@ bool fixup_exception(struct pt_regs *regs)
case EX_TYPE_UACCESS_ERR_ZERO:
case EX_TYPE_KACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
+ case EX_TYPE_UACCESS_CPY:
+ return ex_handler_uaccess_cpy(ex, regs, esr);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ef63651099a9..da4854fc6150 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -375,7 +375,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
* Are we prepared to handle this kernel fault?
* We are almost certainly not prepared to handle instruction faults.
*/
- if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
+ if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr))
return;
if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
--
2.34.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-02-28 17:00 [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines Kristina Martšenko
2025-02-28 17:00 ` [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions Kristina Martšenko
@ 2025-02-28 17:00 ` Kristina Martšenko
2025-03-06 15:28 ` Robin Murphy
2025-03-07 18:45 ` Catalin Marinas
2025-02-28 17:00 ` [PATCH v2 3/3] arm64: lib: Use MOPS for usercopy routines Kristina Martšenko
2025-03-07 19:23 ` [PATCH v2 0/3] arm64: Use memory copy instructions in " Catalin Marinas
3 siblings, 2 replies; 14+ messages in thread
From: Kristina Martšenko @ 2025-02-28 17:00 UTC (permalink / raw)
To: linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
Robin Murphy, James Morse
A subsequent patch will use CPY* instructions to copy between user and
kernel memory. Add handling for PAN faults caused by an intended kernel
memory access erroneously accessing user memory, in order to make it
easier to debug kernel bugs and to keep the same behavior as with
regular loads/stores.
Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
---
arch/arm64/include/asm/extable.h | 3 +++
arch/arm64/mm/extable.c | 11 +++++++++++
arch/arm64/mm/fault.c | 4 +++-
3 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 5892b8977710..0a8cb2015c97 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -33,6 +33,9 @@ do { \
(b)->data = (tmp).data; \
} while (0)
+bool extable_insn_may_access_user(const struct exception_table_entry *ex,
+ unsigned long esr);
+
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs);
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index afb5241e4d91..f137596dda88 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -20,6 +20,17 @@ static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex,
return uaccess_is_write == fault_on_write;
}
+bool extable_insn_may_access_user(const struct exception_table_entry *ex,
+ unsigned long esr)
+{
+ switch (ex->type) {
+ case EX_TYPE_UACCESS_CPY:
+ return cpy_faulted_on_uaccess(ex, esr);
+ default:
+ return true;
+ }
+}
+
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index da4854fc6150..c2f14f9c2d92 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -553,6 +553,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
struct vm_area_struct *vma;
+ const struct exception_table_entry *ex;
int si_code;
int pkey = -1;
@@ -606,7 +607,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
die_kernel_fault("execution of user memory",
addr, esr, regs);
- if (!search_exception_tables(regs->pc))
+ ex = search_exception_tables(regs->pc);
+ if (!ex || !extable_insn_may_access_user(ex, esr))
die_kernel_fault("access to user memory outside uaccess routines",
addr, esr, regs);
}
--
2.34.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH v2 3/3] arm64: lib: Use MOPS for usercopy routines
2025-02-28 17:00 [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines Kristina Martšenko
2025-02-28 17:00 ` [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions Kristina Martšenko
2025-02-28 17:00 ` [PATCH v2 2/3] arm64: mm: Handle PAN faults on " Kristina Martšenko
@ 2025-02-28 17:00 ` Kristina Martšenko
2025-03-06 15:47 ` Robin Murphy
2025-03-07 19:23 ` [PATCH v2 0/3] arm64: Use memory copy instructions in " Catalin Marinas
3 siblings, 1 reply; 14+ messages in thread
From: Kristina Martšenko @ 2025-02-28 17:00 UTC (permalink / raw)
To: linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
Robin Murphy, James Morse
Similarly to what was done with the memcpy() routines, make
copy_to_user(), copy_from_user() and clear_user() also use the Armv8.8
FEAT_MOPS instructions.
Both MOPS implementation options (A and B) are supported, including
asymmetric systems. The exception fixup code fixes up the registers
according to the option used.
In case of a fault the routines return precisely how much was not copied
(as required by the comment in include/linux/uaccess.h), as unprivileged
versions of CPY/SET are guaranteed not to have written past the
addresses reported in the GPRs.
The MOPS instructions could possibly be inlined into callers (and
patched to branch to the generic implementation if not detected;
similarly to what x86 does), but as a first step this patch just uses
them in the out-of-line routines.
Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
---
arch/arm64/include/asm/asm-uaccess.h | 4 ++++
arch/arm64/lib/clear_user.S | 25 +++++++++++++++++++++----
arch/arm64/lib/copy_from_user.S | 10 ++++++++++
arch/arm64/lib/copy_template.S | 10 ++++++++++
arch/arm64/lib/copy_to_user.S | 10 ++++++++++
5 files changed, 55 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 5b6efe8abeeb..9148f5a31968 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -61,6 +61,10 @@ alternative_else_nop_endif
9999: x; \
_asm_extable_uaccess 9999b, l
+#define USER_CPY(l, uaccess_is_write, x...) \
+9999: x; \
+ _asm_extable_uaccess_cpy 9999b, l, uaccess_is_write
+
/*
* Generate the assembly for LDTR/STTR with exception table entries.
* This is complicated as there is no post-increment or pair versions of the
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a5a5f5b97b17..de9a303b6ad0 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,14 +17,27 @@
* Alignment fixed up by hardware.
*/
- .p2align 4
- // Alignment is for the loop, but since the prologue (including BTI)
- // is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
add x2, x0, x1
+
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+
+USER(9f, setpt [x0]!, x1!, xzr)
+USER(6f, setmt [x0]!, x1!, xzr)
+USER(6f, setet [x0]!, x1!, xzr)
+ mov x0, #0
+ ret
+.Lno_mops:
+#endif
+
subs x1, x1, #8
b.mi 2f
-1:
+
+1: .p2align 4
USER(9f, sttr xzr, [x0])
add x0, x0, #8
subs x1, x1, #8
@@ -47,6 +60,10 @@ USER(7f, sttrb wzr, [x2, #-1])
ret
// Exception fixups
+6: b.cs 9f
+ // Registers are in Option A format
+ add x0, x0, x1
+ b 9f
7: sub x0, x2, #5 // Adjust for faulting on the final byte...
8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
9: sub x0, x2, x0
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..400057d607ec 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -52,6 +52,13 @@
stp \reg1, \reg2, [\ptr], \val
.endm
+ .macro cpy1 dst, src, count
+ .arch_extension mops
+ USER_CPY(9997f, 0, cpyfprt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 0, cpyfmrt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 0, cpyfert [\dst]!, [\src]!, \count!)
+ .endm
+
end .req x5
srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
@@ -62,6 +69,9 @@ SYM_FUNC_START(__arch_copy_from_user)
ret
// Exception fixups
+9996: b.cs 9997f
+ // Registers are in Option A format
+ add dst, dst, count
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
index 488df234c49a..7f2f5a0e2fb9 100644
--- a/arch/arm64/lib/copy_template.S
+++ b/arch/arm64/lib/copy_template.S
@@ -40,6 +40,16 @@ D_l .req x13
D_h .req x14
mov dst, dstin
+
+#ifdef CONFIG_AS_HAS_MOPS
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+ cpy1 dst, src, count
+ b .Lexitfunc
+.Lno_mops:
+#endif
+
cmp count, #16
/*When memory length is less than 16, the accessed are not aligned.*/
b.lo .Ltiny15
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 802231772608..819f2e3fc7a9 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -51,6 +51,13 @@
user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
+ .macro cpy1 dst, src, count
+ .arch_extension mops
+ USER_CPY(9997f, 1, cpyfpwt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 1, cpyfmwt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 1, cpyfewt [\dst]!, [\src]!, \count!)
+ .endm
+
end .req x5
srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
@@ -61,6 +68,9 @@ SYM_FUNC_START(__arch_copy_to_user)
ret
// Exception fixups
+9996: b.cs 9997f
+ // Registers are in Option A format
+ add dst, dst, count
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
--
2.34.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions
2025-02-28 17:00 ` [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions Kristina Martšenko
@ 2025-03-06 15:23 ` Robin Murphy
0 siblings, 0 replies; 14+ messages in thread
From: Robin Murphy @ 2025-03-06 15:23 UTC (permalink / raw)
To: Kristina Martšenko, linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
James Morse
On 28/02/2025 5:00 pm, Kristina Martšenko wrote:
> A subsequent patch will use CPY* instructions to copy between user and
> kernel memory. Add a new exception fixup type to avoid fixing up faults
> on kernel memory accesses, in order to make it easier to debug kernel
> bugs and to keep the same behavior as with regular loads/stores.
Trusting my "the rest looks OK to me" from last time since I've already
paged the details back out again... :)
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
> ---
> arch/arm64/include/asm/asm-extable.h | 10 +++++++++-
> arch/arm64/include/asm/extable.h | 2 +-
> arch/arm64/mm/extable.c | 25 ++++++++++++++++++++++++-
> arch/arm64/mm/fault.c | 2 +-
> 4 files changed, 35 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
> index b8a5861dc7b7..292f2687a12e 100644
> --- a/arch/arm64/include/asm/asm-extable.h
> +++ b/arch/arm64/include/asm/asm-extable.h
> @@ -9,7 +9,8 @@
> #define EX_TYPE_BPF 1
> #define EX_TYPE_UACCESS_ERR_ZERO 2
> #define EX_TYPE_KACCESS_ERR_ZERO 3
> -#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
> +#define EX_TYPE_UACCESS_CPY 4
> +#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 5
>
> /* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
> #define EX_DATA_REG_ERR_SHIFT 0
> @@ -23,6 +24,9 @@
> #define EX_DATA_REG_ADDR_SHIFT 5
> #define EX_DATA_REG_ADDR GENMASK(9, 5)
>
> +/* Data fields for EX_TYPE_UACCESS_CPY */
> +#define EX_DATA_UACCESS_WRITE BIT(0)
> +
> #ifdef __ASSEMBLY__
>
> #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
> @@ -69,6 +73,10 @@
> .endif
> .endm
>
> + .macro _asm_extable_uaccess_cpy, insn, fixup, uaccess_is_write
> + __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_CPY, \uaccess_is_write)
> + .endm
> +
> #else /* __ASSEMBLY__ */
>
> #include <linux/stringify.h>
> diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
> index 72b0e71cc3de..5892b8977710 100644
> --- a/arch/arm64/include/asm/extable.h
> +++ b/arch/arm64/include/asm/extable.h
> @@ -45,5 +45,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
> }
> #endif /* !CONFIG_BPF_JIT */
>
> -bool fixup_exception(struct pt_regs *regs);
> +bool fixup_exception(struct pt_regs *regs, unsigned long esr);
> #endif
> diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
> index 228d681a8715..afb5241e4d91 100644
> --- a/arch/arm64/mm/extable.c
> +++ b/arch/arm64/mm/extable.c
> @@ -8,8 +8,18 @@
> #include <linux/uaccess.h>
>
> #include <asm/asm-extable.h>
> +#include <asm/esr.h>
> #include <asm/ptrace.h>
>
> +static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex,
> + unsigned long esr)
> +{
> + bool uaccess_is_write = FIELD_GET(EX_DATA_UACCESS_WRITE, ex->data);
> + bool fault_on_write = esr & ESR_ELx_WNR;
> +
> + return uaccess_is_write == fault_on_write;
> +}
> +
> static inline unsigned long
> get_ex_fixup(const struct exception_table_entry *ex)
> {
> @@ -29,6 +39,17 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
> return true;
> }
>
> +static bool ex_handler_uaccess_cpy(const struct exception_table_entry *ex,
> + struct pt_regs *regs, unsigned long esr)
> +{
> + /* Do not fix up faults on kernel memory accesses */
> + if (!cpy_faulted_on_uaccess(ex, esr))
> + return false;
> +
> + regs->pc = get_ex_fixup(ex);
> + return true;
> +}
> +
> static bool
> ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
> struct pt_regs *regs)
> @@ -56,7 +77,7 @@ ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
> return true;
> }
>
> -bool fixup_exception(struct pt_regs *regs)
> +bool fixup_exception(struct pt_regs *regs, unsigned long esr)
> {
> const struct exception_table_entry *ex;
>
> @@ -70,6 +91,8 @@ bool fixup_exception(struct pt_regs *regs)
> case EX_TYPE_UACCESS_ERR_ZERO:
> case EX_TYPE_KACCESS_ERR_ZERO:
> return ex_handler_uaccess_err_zero(ex, regs);
> + case EX_TYPE_UACCESS_CPY:
> + return ex_handler_uaccess_cpy(ex, regs, esr);
> case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
> return ex_handler_load_unaligned_zeropad(ex, regs);
> }
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index ef63651099a9..da4854fc6150 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -375,7 +375,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
> * Are we prepared to handle this kernel fault?
> * We are almost certainly not prepared to handle instruction faults.
> */
> - if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
> + if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr))
> return;
>
> if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-02-28 17:00 ` [PATCH v2 2/3] arm64: mm: Handle PAN faults on " Kristina Martšenko
@ 2025-03-06 15:28 ` Robin Murphy
2025-03-07 16:57 ` Catalin Marinas
2025-03-07 18:45 ` Catalin Marinas
1 sibling, 1 reply; 14+ messages in thread
From: Robin Murphy @ 2025-03-06 15:28 UTC (permalink / raw)
To: Kristina Martšenko, linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
James Morse
On 28/02/2025 5:00 pm, Kristina Martšenko wrote:
> A subsequent patch will use CPY* instructions to copy between user and
> kernel memory. Add handling for PAN faults caused by an intended kernel
> memory access erroneously accessing user memory, in order to make it
> easier to debug kernel bugs and to keep the same behavior as with
> regular loads/stores.
I'd be tempted to fold the search_exception_tables() call into
insn_may_access_user() itself, but either way,
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
> ---
> arch/arm64/include/asm/extable.h | 3 +++
> arch/arm64/mm/extable.c | 11 +++++++++++
> arch/arm64/mm/fault.c | 4 +++-
> 3 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
> index 5892b8977710..0a8cb2015c97 100644
> --- a/arch/arm64/include/asm/extable.h
> +++ b/arch/arm64/include/asm/extable.h
> @@ -33,6 +33,9 @@ do { \
> (b)->data = (tmp).data; \
> } while (0)
>
> +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
> + unsigned long esr);
> +
> #ifdef CONFIG_BPF_JIT
> bool ex_handler_bpf(const struct exception_table_entry *ex,
> struct pt_regs *regs);
> diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
> index afb5241e4d91..f137596dda88 100644
> --- a/arch/arm64/mm/extable.c
> +++ b/arch/arm64/mm/extable.c
> @@ -20,6 +20,17 @@ static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex,
> return uaccess_is_write == fault_on_write;
> }
>
> +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
> + unsigned long esr)
> +{
> + switch (ex->type) {
> + case EX_TYPE_UACCESS_CPY:
> + return cpy_faulted_on_uaccess(ex, esr);
> + default:
> + return true;
> + }
> +}
> +
> static inline unsigned long
> get_ex_fixup(const struct exception_table_entry *ex)
> {
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index da4854fc6150..c2f14f9c2d92 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -553,6 +553,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
> unsigned int mm_flags = FAULT_FLAG_DEFAULT;
> unsigned long addr = untagged_addr(far);
> struct vm_area_struct *vma;
> + const struct exception_table_entry *ex;
> int si_code;
> int pkey = -1;
>
> @@ -606,7 +607,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
> die_kernel_fault("execution of user memory",
> addr, esr, regs);
>
> - if (!search_exception_tables(regs->pc))
> + ex = search_exception_tables(regs->pc);
> + if (!ex || !extable_insn_may_access_user(ex, esr))
> die_kernel_fault("access to user memory outside uaccess routines",
> addr, esr, regs);
> }
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 3/3] arm64: lib: Use MOPS for usercopy routines
2025-02-28 17:00 ` [PATCH v2 3/3] arm64: lib: Use MOPS for usercopy routines Kristina Martšenko
@ 2025-03-06 15:47 ` Robin Murphy
0 siblings, 0 replies; 14+ messages in thread
From: Robin Murphy @ 2025-03-06 15:47 UTC (permalink / raw)
To: Kristina Martšenko, linux-arm-kernel
Cc: Catalin Marinas, Will Deacon, Mark Rutland, Tong Tiangen,
James Morse
On 28/02/2025 5:00 pm, Kristina Martšenko wrote:
> Similarly to what was done with the memcpy() routines, make
> copy_to_user(), copy_from_user() and clear_user() also use the Armv8.8
> FEAT_MOPS instructions.
>
> Both MOPS implementation options (A and B) are supported, including
> asymmetric systems. The exception fixup code fixes up the registers
> according to the option used.
>
> In case of a fault the routines return precisely how much was not copied
> (as required by the comment in include/linux/uaccess.h), as unprivileged
> versions of CPY/SET are guaranteed not to have written past the
> addresses reported in the GPRs.
>
> The MOPS instructions could possibly be inlined into callers (and
> patched to branch to the generic implementation if not detected;
> similarly to what x86 does), but as a first step this patch just uses
> them in the out-of-line routines.
I still think it's cleaner and demonstrably simpler to keep the
clear_user() implementations independent (especially with a mind to
future inlining), but we're clearly well into personal preferences on
that, so FWIW,
Acked-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
> ---
> arch/arm64/include/asm/asm-uaccess.h | 4 ++++
> arch/arm64/lib/clear_user.S | 25 +++++++++++++++++++++----
> arch/arm64/lib/copy_from_user.S | 10 ++++++++++
> arch/arm64/lib/copy_template.S | 10 ++++++++++
> arch/arm64/lib/copy_to_user.S | 10 ++++++++++
> 5 files changed, 55 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> index 5b6efe8abeeb..9148f5a31968 100644
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ b/arch/arm64/include/asm/asm-uaccess.h
> @@ -61,6 +61,10 @@ alternative_else_nop_endif
> 9999: x; \
> _asm_extable_uaccess 9999b, l
>
> +#define USER_CPY(l, uaccess_is_write, x...) \
> +9999: x; \
> + _asm_extable_uaccess_cpy 9999b, l, uaccess_is_write
> +
> /*
> * Generate the assembly for LDTR/STTR with exception table entries.
> * This is complicated as there is no post-increment or pair versions of the
> diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
> index a5a5f5b97b17..de9a303b6ad0 100644
> --- a/arch/arm64/lib/clear_user.S
> +++ b/arch/arm64/lib/clear_user.S
> @@ -17,14 +17,27 @@
> * Alignment fixed up by hardware.
> */
>
> - .p2align 4
> - // Alignment is for the loop, but since the prologue (including BTI)
> - // is also 16 bytes we can keep any padding outside the function
> SYM_FUNC_START(__arch_clear_user)
> add x2, x0, x1
> +
> +#ifdef CONFIG_AS_HAS_MOPS
> + .arch_extension mops
> +alternative_if_not ARM64_HAS_MOPS
> + b .Lno_mops
> +alternative_else_nop_endif
> +
> +USER(9f, setpt [x0]!, x1!, xzr)
> +USER(6f, setmt [x0]!, x1!, xzr)
> +USER(6f, setet [x0]!, x1!, xzr)
> + mov x0, #0
> + ret
> +.Lno_mops:
> +#endif
> +
> subs x1, x1, #8
> b.mi 2f
> -1:
> +
> +1: .p2align 4
> USER(9f, sttr xzr, [x0])
> add x0, x0, #8
> subs x1, x1, #8
> @@ -47,6 +60,10 @@ USER(7f, sttrb wzr, [x2, #-1])
> ret
>
> // Exception fixups
> +6: b.cs 9f
> + // Registers are in Option A format
> + add x0, x0, x1
> + b 9f
> 7: sub x0, x2, #5 // Adjust for faulting on the final byte...
> 8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
> 9: sub x0, x2, x0
> diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
> index 34e317907524..400057d607ec 100644
> --- a/arch/arm64/lib/copy_from_user.S
> +++ b/arch/arm64/lib/copy_from_user.S
> @@ -52,6 +52,13 @@
> stp \reg1, \reg2, [\ptr], \val
> .endm
>
> + .macro cpy1 dst, src, count
> + .arch_extension mops
> + USER_CPY(9997f, 0, cpyfprt [\dst]!, [\src]!, \count!)
> + USER_CPY(9996f, 0, cpyfmrt [\dst]!, [\src]!, \count!)
> + USER_CPY(9996f, 0, cpyfert [\dst]!, [\src]!, \count!)
> + .endm
> +
> end .req x5
> srcin .req x15
> SYM_FUNC_START(__arch_copy_from_user)
> @@ -62,6 +69,9 @@ SYM_FUNC_START(__arch_copy_from_user)
> ret
>
> // Exception fixups
> +9996: b.cs 9997f
> + // Registers are in Option A format
> + add dst, dst, count
> 9997: cmp dst, dstin
> b.ne 9998f
> // Before being absolutely sure we couldn't copy anything, try harder
> diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
> index 488df234c49a..7f2f5a0e2fb9 100644
> --- a/arch/arm64/lib/copy_template.S
> +++ b/arch/arm64/lib/copy_template.S
> @@ -40,6 +40,16 @@ D_l .req x13
> D_h .req x14
>
> mov dst, dstin
> +
> +#ifdef CONFIG_AS_HAS_MOPS
> +alternative_if_not ARM64_HAS_MOPS
> + b .Lno_mops
> +alternative_else_nop_endif
> + cpy1 dst, src, count
> + b .Lexitfunc
> +.Lno_mops:
> +#endif
> +
> cmp count, #16
> /*When memory length is less than 16, the accessed are not aligned.*/
> b.lo .Ltiny15
> diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
> index 802231772608..819f2e3fc7a9 100644
> --- a/arch/arm64/lib/copy_to_user.S
> +++ b/arch/arm64/lib/copy_to_user.S
> @@ -51,6 +51,13 @@
> user_stp 9997f, \reg1, \reg2, \ptr, \val
> .endm
>
> + .macro cpy1 dst, src, count
> + .arch_extension mops
> + USER_CPY(9997f, 1, cpyfpwt [\dst]!, [\src]!, \count!)
> + USER_CPY(9996f, 1, cpyfmwt [\dst]!, [\src]!, \count!)
> + USER_CPY(9996f, 1, cpyfewt [\dst]!, [\src]!, \count!)
> + .endm
> +
> end .req x5
> srcin .req x15
> SYM_FUNC_START(__arch_copy_to_user)
> @@ -61,6 +68,9 @@ SYM_FUNC_START(__arch_copy_to_user)
> ret
>
> // Exception fixups
> +9996: b.cs 9997f
> + // Registers are in Option A format
> + add dst, dst, count
> 9997: cmp dst, dstin
> b.ne 9998f
> // Before being absolutely sure we couldn't copy anything, try harder
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-03-06 15:28 ` Robin Murphy
@ 2025-03-07 16:57 ` Catalin Marinas
0 siblings, 0 replies; 14+ messages in thread
From: Catalin Marinas @ 2025-03-07 16:57 UTC (permalink / raw)
To: Robin Murphy
Cc: Kristina Martšenko, linux-arm-kernel, Will Deacon,
Mark Rutland, Tong Tiangen, James Morse
On Thu, Mar 06, 2025 at 03:28:26PM +0000, Robin Murphy wrote:
> On 28/02/2025 5:00 pm, Kristina Martšenko wrote:
> > A subsequent patch will use CPY* instructions to copy between user and
> > kernel memory. Add handling for PAN faults caused by an intended kernel
> > memory access erroneously accessing user memory, in order to make it
> > easier to debug kernel bugs and to keep the same behavior as with
> > regular loads/stores.
>
> I'd be tempted to fold the search_exception_tables() call into
> insn_may_access_user() itself, but either way,
Yeah, I think it looks better folded in. I did it locally.
> Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Thanks.
--
Catalin
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-02-28 17:00 ` [PATCH v2 2/3] arm64: mm: Handle PAN faults on " Kristina Martšenko
2025-03-06 15:28 ` Robin Murphy
@ 2025-03-07 18:45 ` Catalin Marinas
2025-03-07 18:53 ` Robin Murphy
1 sibling, 1 reply; 14+ messages in thread
From: Catalin Marinas @ 2025-03-07 18:45 UTC (permalink / raw)
To: Kristina Martšenko
Cc: linux-arm-kernel, Will Deacon, Mark Rutland, Tong Tiangen,
Robin Murphy, James Morse
On Fri, Feb 28, 2025 at 05:00:05PM +0000, Kristina Martsenko wrote:
> +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
> + unsigned long esr)
> +{
> + switch (ex->type) {
> + case EX_TYPE_UACCESS_CPY:
> + return cpy_faulted_on_uaccess(ex, esr);
> + default:
> + return true;
> + }
> +}
Not a problem with this patch but I wonder whether we should return
false for EX_TYPE_LOAD_UNALIGNED_ZEROPAD for completeness (and remove
the EX_TYPE_KACCESS_ERR_ZERO; it's no longer used).
--
Catalin
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-03-07 18:45 ` Catalin Marinas
@ 2025-03-07 18:53 ` Robin Murphy
2025-03-07 21:37 ` Catalin Marinas
0 siblings, 1 reply; 14+ messages in thread
From: Robin Murphy @ 2025-03-07 18:53 UTC (permalink / raw)
To: Catalin Marinas, Kristina Martšenko
Cc: linux-arm-kernel, Will Deacon, Mark Rutland, Tong Tiangen,
James Morse
On 2025-03-07 6:45 pm, Catalin Marinas wrote:
> On Fri, Feb 28, 2025 at 05:00:05PM +0000, Kristina Martsenko wrote:
>> +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
>> + unsigned long esr)
>> +{
>> + switch (ex->type) {
>> + case EX_TYPE_UACCESS_CPY:
>> + return cpy_faulted_on_uaccess(ex, esr);
>> + default:
>> + return true;
>> + }
>> +}
>
> Not a problem with this patch but I wonder whether we should return
> false for EX_TYPE_LOAD_UNALIGNED_ZEROPAD for completeness
Or maybe rather, true for EX_TYPE_UACCESS_ERR_ZERO and then false in the
default case?
> (and remove
> the EX_TYPE_KACCESS_ERR_ZERO, it's no longer used.
I think that's just hidden in the macro swamp of
__get_mem_asm()/__put_mem_asm()...
Cheers,
Robin.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines
2025-02-28 17:00 [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines Kristina Martšenko
` (2 preceding siblings ...)
2025-02-28 17:00 ` [PATCH v2 3/3] arm64: lib: Use MOPS for usercopy routines Kristina Martšenko
@ 2025-03-07 19:23 ` Catalin Marinas
3 siblings, 0 replies; 14+ messages in thread
From: Catalin Marinas @ 2025-03-07 19:23 UTC (permalink / raw)
To: linux-arm-kernel, Kristina Martšenko
Cc: Will Deacon, Mark Rutland, Tong Tiangen, Robin Murphy,
James Morse
On Fri, 28 Feb 2025 17:00:03 +0000, Kristina Martšenko wrote:
> This is a follow-up to the series using CPY/SET instructions in the kernel's
> memory copy routines [1]. This series extends that to the usercopy routines.
> This required some additions to exception fixups since a CPY instruction
> accesses both user and kernel memory.
>
> Changes in v2:
> - Improved readability of cpy_faulted_on_uaccess()
> - v1: https://lore.kernel.org/all/20250218171430.28227-1-kristina.martsenko@arm.com/
>
> [...]
Applied to arm64 (for-next/uaccess-mops), thanks!
I made some modifications to patch 2 as per Robin's suggestion.
[1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions
https://git.kernel.org/arm64/c/653884f88777
[2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
https://git.kernel.org/arm64/c/04a9f771d81c
[3/3] arm64: lib: Use MOPS for usercopy routines
https://git.kernel.org/arm64/c/fe59e0358d9b
--
Catalin
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-03-07 18:53 ` Robin Murphy
@ 2025-03-07 21:37 ` Catalin Marinas
2025-03-10 14:15 ` Kristina Martšenko
0 siblings, 1 reply; 14+ messages in thread
From: Catalin Marinas @ 2025-03-07 21:37 UTC (permalink / raw)
To: Robin Murphy
Cc: Kristina Martšenko, linux-arm-kernel, Will Deacon,
Mark Rutland, Tong Tiangen, James Morse
On Fri, Mar 07, 2025 at 06:53:37PM +0000, Robin Murphy wrote:
> On 2025-03-07 6:45 pm, Catalin Marinas wrote:
> > On Fri, Feb 28, 2025 at 05:00:05PM +0000, Kristina Martsenko wrote:
> > > +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
> > > + unsigned long esr)
> > > +{
> > > + switch (ex->type) {
> > > + case EX_TYPE_UACCESS_CPY:
> > > + return cpy_faulted_on_uaccess(ex, esr);
> > > + default:
> > > + return true;
> > > + }
> > > +}
> >
> > Not a problem with this patch but I wonder whether we should return
> > false for EX_TYPE_LOAD_UNALIGNED_ZEROPAD for completeness
>
> Or maybe rather, true for EX_TYPE_UACCESS_ERR_ZERO and then false in the
> default case?
Yes.
> > (and remove
> > the EX_TYPE_KACCESS_ERR_ZERO, it's no longer used.
>
> I think that's just hidden in the macro swamp of
> __get_mem_asm()/__put_mem_asm()...
Ah, you are right. I now recall I found them about a year ago but forgot
since.
--
Catalin
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-03-07 21:37 ` Catalin Marinas
@ 2025-03-10 14:15 ` Kristina Martšenko
2025-03-10 15:43 ` Catalin Marinas
0 siblings, 1 reply; 14+ messages in thread
From: Kristina Martšenko @ 2025-03-10 14:15 UTC (permalink / raw)
To: Catalin Marinas
Cc: linux-arm-kernel, Will Deacon, Mark Rutland, Tong Tiangen,
James Morse, Robin Murphy
On 07/03/2025 21:37, Catalin Marinas wrote:
> On Fri, Mar 07, 2025 at 06:53:37PM +0000, Robin Murphy wrote:
>> On 2025-03-07 6:45 pm, Catalin Marinas wrote:
>>> On Fri, Feb 28, 2025 at 05:00:05PM +0000, Kristina Martsenko wrote:
>>>> +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
>>>> + unsigned long esr)
>>>> +{
>>>> + switch (ex->type) {
>>>> + case EX_TYPE_UACCESS_CPY:
>>>> + return cpy_faulted_on_uaccess(ex, esr);
>>>> + default:
>>>> + return true;
>>>> + }
>>>> +}
>>>
>>> Not a problem with this patch but I wonder whether we should return
>>> false for EX_TYPE_LOAD_UNALIGNED_ZEROPAD for completeness
>>
>> Or maybe rather, true for EX_TYPE_UACCESS_ERR_ZERO and then false in the
>> default case?
>
> Yes.
I thought you said in an earlier (off-list) discussion that
EX_TYPE_KACCESS_ERR_ZERO shouldn't return false here because
__get_kernel_nofault() may get called with untrusted addresses? Or did I
misunderstand?
Thanks,
Kristina
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 2/3] arm64: mm: Handle PAN faults on uaccess CPY* instructions
2025-03-10 14:15 ` Kristina Martšenko
@ 2025-03-10 15:43 ` Catalin Marinas
0 siblings, 0 replies; 14+ messages in thread
From: Catalin Marinas @ 2025-03-10 15:43 UTC (permalink / raw)
To: Kristina Martšenko
Cc: linux-arm-kernel, Will Deacon, Mark Rutland, Tong Tiangen,
James Morse, Robin Murphy
On Mon, Mar 10, 2025 at 02:15:53PM +0000, Kristina Martsenko wrote:
> On 07/03/2025 21:37, Catalin Marinas wrote:
> > On Fri, Mar 07, 2025 at 06:53:37PM +0000, Robin Murphy wrote:
> >> On 2025-03-07 6:45 pm, Catalin Marinas wrote:
> >>> On Fri, Feb 28, 2025 at 05:00:05PM +0000, Kristina Martsenko wrote:
> >>>> +bool extable_insn_may_access_user(const struct exception_table_entry *ex,
> >>>> + unsigned long esr)
> >>>> +{
> >>>> + switch (ex->type) {
> >>>> + case EX_TYPE_UACCESS_CPY:
> >>>> + return cpy_faulted_on_uaccess(ex, esr);
> >>>> + default:
> >>>> + return true;
> >>>> + }
> >>>> +}
> >>>
> >>> Not a problem with this patch but I wonder whether we should return
> >>> false for EX_TYPE_LOAD_UNALIGNED_ZEROPAD for completeness
> >>
> >> Or maybe rather, true for EX_TYPE_UACCESS_ERR_ZERO and then false in the
> >> default case?
> >
> > Yes.
>
> I thought you said in an earlier (off-list) discussion that
> EX_TYPE_KACCESS_ERR_ZERO shouldn't return false here because
> __get_kernel_nofault() may get called with untrusted addresses? Or did I
> misunderstand?
TBH, I don't remember. Thinking about it, if we have a bug and someone
exploits __get_kernel_nofault() by giving it a user address, the above
function should indeed return false in principle. Not a big problem
since the actual user access won't happen but we may get into an
infinite loop.
I think something like this should do:
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 6e0528831cd3..ab6775747601 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -28,10 +28,12 @@ bool insn_may_access_user(unsigned long addr, unsigned long esr)
return false;
switch (ex->type) {
+ case EX_TYPE_UACCESS_ERR_ZERO:
+ return true;
case EX_TYPE_UACCESS_CPY:
return cpy_faulted_on_uaccess(ex, esr);
default:
- return true;
+ return false;
}
}
--
Catalin
^ permalink raw reply related [flat|nested] 14+ messages in thread
end of thread, other threads:[~2025-03-10 16:14 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-02-28 17:00 [PATCH v2 0/3] arm64: Use memory copy instructions in usercopy routines Kristina Martšenko
2025-02-28 17:00 ` [PATCH v2 1/3] arm64: extable: Add fixup handling for uaccess CPY* instructions Kristina Martšenko
2025-03-06 15:23 ` Robin Murphy
2025-02-28 17:00 ` [PATCH v2 2/3] arm64: mm: Handle PAN faults on " Kristina Martšenko
2025-03-06 15:28 ` Robin Murphy
2025-03-07 16:57 ` Catalin Marinas
2025-03-07 18:45 ` Catalin Marinas
2025-03-07 18:53 ` Robin Murphy
2025-03-07 21:37 ` Catalin Marinas
2025-03-10 14:15 ` Kristina Martšenko
2025-03-10 15:43 ` Catalin Marinas
2025-02-28 17:00 ` [PATCH v2 3/3] arm64: lib: Use MOPS for usercopy routines Kristina Martšenko
2025-03-06 15:47 ` Robin Murphy
2025-03-07 19:23 ` [PATCH v2 0/3] arm64: Use memory copy instructions in " Catalin Marinas
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).