* [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec()
@ 2024-10-17 21:55 Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 1/6] x86/uaccess: Avoid barrier_nospec() in copy_from_user() Josh Poimboeuf
` (6 more replies)
0 siblings, 7 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
At least for now, continue to assume mask_user_address() is safe on AMD
when combined with STAC/CLAC -- as get_user(), put_user() and
masked_user_access_begin() already do today.
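For reference, the pattern in question looks roughly like this today (a
sketch against the existing uaccess APIs; the surrounding function is
illustrative, not from this series):

#include <linux/uaccess.h>

static int example_read(int __user *from, int *dst)
{
	if (can_do_masked_user_access())
		from = masked_user_access_begin(from);
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;

	unsafe_get_user(*dst, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;
}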
v2:
- Separate copy_to_user() and clear_user() changes out into separate patches
- Add masking to __get_user() and __put_user()
v1:
https://lore.kernel.org/b626840e55d4aa86b4b9b377a4cc2cda7038d33d.1728706156.git.jpoimboe@kernel.org
Josh Poimboeuf (6):
x86/uaccess: Avoid barrier_nospec() in copy_from_user()
x86/uaccess: Avoid barrier_nospec() in __get_user()
x86/uaccess: Rearrange putuser.S
x86/uaccess: Add user pointer masking to __put_user()
x86/uaccess: Add user pointer masking to copy_to_user()
x86/uaccess: Add user pointer masking to clear_user()
arch/powerpc/include/asm/uaccess.h | 2 +
arch/x86/include/asm/uaccess_64.h | 10 ++--
arch/x86/lib/getuser.S | 27 +++++++--
arch/x86/lib/putuser.S | 92 ++++++++++++++++++------------
include/linux/uaccess.h | 6 --
5 files changed, 86 insertions(+), 51 deletions(-)
--
2.47.0
* [PATCH v2 1/6] x86/uaccess: Avoid barrier_nospec() in copy_from_user()
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
@ 2024-10-17 21:55 ` Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 2/6] x86/uaccess: Avoid barrier_nospec() in __get_user() Josh Poimboeuf
` (5 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
For x86-64, the barrier_nospec() in copy_from_user() is overkill and
painfully slow. Instead, use pointer masking to force the user pointer
to a non-kernel value in speculative paths.
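The masking itself is branchless sign-extension arithmetic; a
user-space sketch of what mask_user_address() computes (addresses are
illustrative):

#include <stdio.h>
#include <stdint.h>

/* Bit 63 is sign-extended across the register and OR'd back in: user
 * addresses (bit 63 clear) pass through unchanged, kernel addresses
 * (bit 63 set) become all 1's, which always faults. */
static uint64_t mask_user_address(uint64_t x)
{
	return x | (uint64_t)((int64_t)x >> 63);
}

int main(void)
{
	/* user pointer: unchanged */
	printf("%#llx\n", (unsigned long long)mask_user_address(0x00007fffdeadbeefULL));
	/* kernel pointer: forced to all 1's */
	printf("%#llx\n", (unsigned long long)mask_user_address(0xffff888012345678ULL));
	return 0;
}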
To avoid regressing powerpc, move the barrier_nospec() to the powerpc
raw_copy_from_user() implementation so there's no functional change.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/powerpc/include/asm/uaccess.h | 2 ++
arch/x86/include/asm/uaccess_64.h | 7 ++++---
arch/x86/lib/getuser.S | 2 +-
arch/x86/lib/putuser.S | 2 +-
include/linux/uaccess.h | 6 ------
5 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4f5a46a77fa2..12abb8bf5eda 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>
+#include <asm/barrier.h>
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -341,6 +342,7 @@ static inline unsigned long raw_copy_from_user(void *to,
{
unsigned long ret;
+ barrier_nospec();
allow_read_from_user(from, n);
ret = __copy_tofrom_user((__force void __user *)to, from, n);
prevent_read_from_user(from, n);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index afce8ee5d7b7..61693028ea2b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,11 +54,11 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#define valid_user_address(x) ((__force long)(x) >= 0)
/*
- * Masking the user address is an alternative to a conditional
- * user_access_begin that can avoid the fencing. This only works
- * for dense accesses starting at the address.
+ * If it's a kernel address, force it to all 1's. This prevents a mispredicted
+ * access_ok() from speculatively accessing kernel space.
*/
#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+
#define masked_user_access_begin(x) ({ \
__auto_type __masked_ptr = (x); \
__masked_ptr = mask_user_address(__masked_ptr); \
@@ -133,6 +133,7 @@ copy_user_generic(void *to, const void *from, unsigned long len)
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
+ src = mask_user_address(src);
return copy_user_generic(dst, (__force void *)src, size);
}
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index d066aecf8aeb..094224ec9dca 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -39,7 +39,7 @@
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
- mov %rax, %rdx
+ mov %rax, %rdx /* mask_user_address() */
sar $63, %rdx
or %rdx, %rax
.else
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 975c9c18263d..09b7e37934ab 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -34,7 +34,7 @@
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
- mov %rcx, %rbx
+ mov %rcx, %rbx /* mask_user_address() */
sar $63, %rbx
or %rbx, %rcx
.else
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 39c7cf82b0c2..dda9725a9559 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -160,12 +160,6 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- /*
- * Ensure that bad access_ok() speculation will not
- * lead to nasty side effects *after* the copy is
- * finished:
- */
- barrier_nospec();
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
--
2.47.0
* [PATCH v2 2/6] x86/uaccess: Avoid barrier_nospec() in __get_user()
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 1/6] x86/uaccess: Avoid barrier_nospec() in copy_from_user() Josh Poimboeuf
@ 2024-10-17 21:55 ` Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 3/6] x86/uaccess: Rearrange putuser.S Josh Poimboeuf
` (4 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
On 64-bit, the barrier_nospec() in __get_user() is overkill and
painfully slow. Instead, use pointer masking to force the user pointer
to a non-kernel value in speculative paths.
Doing so makes get_user() and __get_user() identical in behavior, so
converge their implementations.
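In other words, the __get_user() calling convention is unchanged: the
caller still does the access_ok() check itself. A sketch of that
contract (the function is illustrative):

#include <linux/uaccess.h>

static int example(int __user *uptr, int *out)
{
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;

	/* After this patch the 64-bit fast path also masks the pointer,
	 * so a mispredicted branch above can no longer steer the load
	 * at a kernel address -- the same guarantee as get_user(). */
	return __get_user(*out, uptr);
}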
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/lib/getuser.S | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 094224ec9dca..7c9bf8f0b3ac 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -105,6 +105,26 @@ SYM_FUNC_START(__get_user_8)
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
+#ifdef CONFIG_X86_64
+
+/*
+ * On x86-64, get_user() does address masking rather than a conditional
+ * bounds check so there's no functional difference with __get_user().
+ */
+SYM_FUNC_ALIAS(__get_user_nocheck_1, __get_user_1);
+EXPORT_SYMBOL(__get_user_nocheck_1);
+
+SYM_FUNC_ALIAS(__get_user_nocheck_2, __get_user_2);
+EXPORT_SYMBOL(__get_user_nocheck_2);
+
+SYM_FUNC_ALIAS(__get_user_nocheck_4, __get_user_4);
+EXPORT_SYMBOL(__get_user_nocheck_4);
+
+SYM_FUNC_ALIAS(__get_user_nocheck_8, __get_user_8);
+EXPORT_SYMBOL(__get_user_nocheck_8);
+
+#else /* CONFIG_X86_32 */
+
/* .. and the same for __get_user, just without the range checks */
SYM_FUNC_START(__get_user_nocheck_1)
ASM_STAC
@@ -139,19 +159,16 @@ EXPORT_SYMBOL(__get_user_nocheck_4)
SYM_FUNC_START(__get_user_nocheck_8)
ASM_STAC
ASM_BARRIER_NOSPEC
-#ifdef CONFIG_X86_64
- UACCESS movq (%_ASM_AX),%rdx
-#else
xor %ecx,%ecx
UACCESS movl (%_ASM_AX),%edx
UACCESS movl 4(%_ASM_AX),%ecx
-#endif
xor %eax,%eax
ASM_CLAC
RET
SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)
+#endif /* CONFIG_X86_32 */
SYM_CODE_START_LOCAL(__get_user_handle_exception)
ASM_CLAC
--
2.47.0
* [PATCH v2 3/6] x86/uaccess: Rearrange putuser.S
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 1/6] x86/uaccess: Avoid barrier_nospec() in copy_from_user() Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 2/6] x86/uaccess: Avoid barrier_nospec() in __get_user() Josh Poimboeuf
@ 2024-10-17 21:55 ` Josh Poimboeuf
2024-10-18 8:51 ` Kirill A . Shutemov
2024-10-17 21:55 ` [PATCH v2 4/6] x86/uaccess: Add user pointer masking to __put_user() Josh Poimboeuf
` (3 subsequent siblings)
6 siblings, 1 reply; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
Separate __put_user_*() from __put_user_nocheck_*() to make the layout
similar to getuser.S. This will also make it easier to do a subsequent
change. No functional changes.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/lib/putuser.S | 67 ++++++++++++++++++++++--------------------
1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 09b7e37934ab..cb137e0286be 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -54,59 +54,32 @@ SYM_FUNC_START(__put_user_1)
SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
-SYM_FUNC_START(__put_user_nocheck_1)
- ASM_STAC
-2: movb %al,(%_ASM_CX)
- xor %ecx,%ecx
- ASM_CLAC
- RET
-SYM_FUNC_END(__put_user_nocheck_1)
-EXPORT_SYMBOL(__put_user_nocheck_1)
-
SYM_FUNC_START(__put_user_2)
check_range size=2
ASM_STAC
-3: movw %ax,(%_ASM_CX)
+2: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
-SYM_FUNC_START(__put_user_nocheck_2)
- ASM_STAC
-4: movw %ax,(%_ASM_CX)
- xor %ecx,%ecx
- ASM_CLAC
- RET
-SYM_FUNC_END(__put_user_nocheck_2)
-EXPORT_SYMBOL(__put_user_nocheck_2)
-
SYM_FUNC_START(__put_user_4)
check_range size=4
ASM_STAC
-5: movl %eax,(%_ASM_CX)
+3: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
-SYM_FUNC_START(__put_user_nocheck_4)
- ASM_STAC
-6: movl %eax,(%_ASM_CX)
- xor %ecx,%ecx
- ASM_CLAC
- RET
-SYM_FUNC_END(__put_user_nocheck_4)
-EXPORT_SYMBOL(__put_user_nocheck_4)
-
SYM_FUNC_START(__put_user_8)
check_range size=8
ASM_STAC
-7: mov %_ASM_AX,(%_ASM_CX)
+4: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
-8: movl %edx,4(%_ASM_CX)
+5: movl %edx,4(%_ASM_CX)
#endif
xor %ecx,%ecx
ASM_CLAC
@@ -114,6 +87,34 @@ SYM_FUNC_START(__put_user_8)
SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
+/* .. and the same for __put_user, just without the range checks */
+SYM_FUNC_START(__put_user_nocheck_1)
+ ASM_STAC
+6: movb %al,(%_ASM_CX)
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_1)
+EXPORT_SYMBOL(__put_user_nocheck_1)
+
+SYM_FUNC_START(__put_user_nocheck_2)
+ ASM_STAC
+7: movw %ax,(%_ASM_CX)
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_2)
+EXPORT_SYMBOL(__put_user_nocheck_2)
+
+SYM_FUNC_START(__put_user_nocheck_4)
+ ASM_STAC
+8: movl %eax,(%_ASM_CX)
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_4)
+EXPORT_SYMBOL(__put_user_nocheck_4)
+
SYM_FUNC_START(__put_user_nocheck_8)
ASM_STAC
9: mov %_ASM_AX,(%_ASM_CX)
@@ -137,11 +138,13 @@ SYM_CODE_END(__put_user_handle_exception)
_ASM_EXTABLE_UA(2b, __put_user_handle_exception)
_ASM_EXTABLE_UA(3b, __put_user_handle_exception)
_ASM_EXTABLE_UA(4b, __put_user_handle_exception)
+#ifdef CONFIG_X86_32
_ASM_EXTABLE_UA(5b, __put_user_handle_exception)
+#endif
_ASM_EXTABLE_UA(6b, __put_user_handle_exception)
_ASM_EXTABLE_UA(7b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(8b, __put_user_handle_exception)
_ASM_EXTABLE_UA(9b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
- _ASM_EXTABLE_UA(8b, __put_user_handle_exception)
_ASM_EXTABLE_UA(10b, __put_user_handle_exception)
#endif
--
2.47.0
* [PATCH v2 4/6] x86/uaccess: Add user pointer masking to __put_user()
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
` (2 preceding siblings ...)
2024-10-17 21:55 ` [PATCH v2 3/6] x86/uaccess: Rearrange putuser.S Josh Poimboeuf
@ 2024-10-17 21:55 ` Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 5/6] x86/uaccess: Add user pointer masking to copy_to_user() Josh Poimboeuf
` (2 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
Add user pointer masking to __put_user() to mitigate Spectre v1.
A write in a mispredicted access_ok() branch to a user-controlled kernel
address can populate the rest of the affected cache line with kernel
data.
This makes its behavior identical to put_user(), so converge their
implementations.
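A sketch of the scenario being closed (the function is illustrative):

#include <linux/uaccess.h>

static int example(int __user *uptr, int val)
{
	if (!access_ok(uptr, sizeof(*uptr)))	/* may mispredict */
		return -EFAULT;

	/* Without masking, the store in __put_user() can execute
	 * speculatively at a user-controlled kernel address; with
	 * masking it is forced to the all-ones (faulting) address. */
	return __put_user(val, uptr);
}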
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/lib/putuser.S | 27 ++++++++++++++++++++++-----
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index cb137e0286be..1b122261b7aa 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -87,7 +87,26 @@ SYM_FUNC_START(__put_user_8)
SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
-/* .. and the same for __put_user, just without the range checks */
+#ifdef CONFIG_X86_64
+
+/*
+ * On x86-64, put_user() does address masking rather than a conditional
+ * bounds check so there's no functional difference with __put_user().
+ */
+SYM_FUNC_ALIAS(__put_user_nocheck_1, __put_user_1);
+EXPORT_SYMBOL(__put_user_nocheck_1);
+
+SYM_FUNC_ALIAS(__put_user_nocheck_2, __put_user_2);
+EXPORT_SYMBOL(__put_user_nocheck_2);
+
+SYM_FUNC_ALIAS(__put_user_nocheck_4, __put_user_4);
+EXPORT_SYMBOL(__put_user_nocheck_4);
+
+SYM_FUNC_ALIAS(__put_user_nocheck_8, __put_user_8);
+EXPORT_SYMBOL(__put_user_nocheck_8);
+
+#else /* CONFIG_X86_32 */
+
SYM_FUNC_START(__put_user_nocheck_1)
ASM_STAC
6: movb %al,(%_ASM_CX)
@@ -118,15 +137,15 @@ EXPORT_SYMBOL(__put_user_nocheck_4)
SYM_FUNC_START(__put_user_nocheck_8)
ASM_STAC
9: mov %_ASM_AX,(%_ASM_CX)
-#ifdef CONFIG_X86_32
10: movl %edx,4(%_ASM_CX)
-#endif
xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_nocheck_8)
EXPORT_SYMBOL(__put_user_nocheck_8)
+#endif /* CONFIG_X86_32 */
+
SYM_CODE_START_LOCAL(__put_user_handle_exception)
ASM_CLAC
.Lbad_put_user:
@@ -140,11 +159,9 @@ SYM_CODE_END(__put_user_handle_exception)
_ASM_EXTABLE_UA(4b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
_ASM_EXTABLE_UA(5b, __put_user_handle_exception)
-#endif
_ASM_EXTABLE_UA(6b, __put_user_handle_exception)
_ASM_EXTABLE_UA(7b, __put_user_handle_exception)
_ASM_EXTABLE_UA(8b, __put_user_handle_exception)
_ASM_EXTABLE_UA(9b, __put_user_handle_exception)
-#ifdef CONFIG_X86_32
_ASM_EXTABLE_UA(10b, __put_user_handle_exception)
#endif
--
2.47.0
* [PATCH v2 5/6] x86/uaccess: Add user pointer masking to copy_to_user()
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
` (3 preceding siblings ...)
2024-10-17 21:55 ` [PATCH v2 4/6] x86/uaccess: Add user pointer masking to __put_user() Josh Poimboeuf
@ 2024-10-17 21:55 ` Josh Poimboeuf
2024-10-17 21:55 ` [PATCH v2 6/6] x86/uaccess: Add user pointer masking to clear_user() Josh Poimboeuf
2024-10-17 22:31 ` [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Andrew Cooper
6 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
Add user pointer masking to copy_to_user() to mitigate Spectre v1.
A write in a mispredicted access_ok() branch to a user-controlled kernel
address can populate the rest of the affected cache line with kernel
data.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/include/asm/uaccess_64.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 61693028ea2b..0587830a47e1 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -140,6 +140,7 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
+ dst = mask_user_address(dst);
return copy_user_generic((__force void *)dst, src, size);
}
--
2.47.0
* [PATCH v2 6/6] x86/uaccess: Add user pointer masking to clear_user()
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
` (4 preceding siblings ...)
2024-10-17 21:55 ` [PATCH v2 5/6] x86/uaccess: Add user pointer masking to copy_to_user() Josh Poimboeuf
@ 2024-10-17 21:55 ` Josh Poimboeuf
2024-10-17 22:31 ` [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Andrew Cooper
6 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 21:55 UTC (permalink / raw)
To: x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Andrew Cooper,
Mark Rutland, Kirill A . Shutemov
Add user pointer masking to clear_user() to mitigate Spectre v1.
A write in a mispredicted access_ok() branch to a user-controlled kernel
address can populate the rest of the affected cache line with kernel
data.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/include/asm/uaccess_64.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 0587830a47e1..8027db7f68c2 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -199,7 +199,7 @@ static __always_inline __must_check unsigned long __clear_user(void __user *addr
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
if (__access_ok(to, n))
- return __clear_user(to, n);
+ return __clear_user(mask_user_address(to), n);
return n;
}
#endif /* _ASM_X86_UACCESS_64_H */
--
2.47.0
* Re: [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec()
2024-10-17 21:55 [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Josh Poimboeuf
` (5 preceding siblings ...)
2024-10-17 21:55 ` [PATCH v2 6/6] x86/uaccess: Add user pointer masking to clear_user() Josh Poimboeuf
@ 2024-10-17 22:31 ` Andrew Cooper
2024-10-17 22:42 ` Josh Poimboeuf
6 siblings, 1 reply; 11+ messages in thread
From: Andrew Cooper @ 2024-10-17 22:31 UTC (permalink / raw)
To: Josh Poimboeuf, x86
Cc: linux-kernel, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
Linus Torvalds, Michael Ellerman, linuxppc-dev, Mark Rutland,
Kirill A . Shutemov
On 17/10/2024 10:55 pm, Josh Poimboeuf wrote:
> At least for now, continue to assume mask_user_address() is safe on AMD
> when combined with STAC/CLAC -- as get_user(), put_user() and
> masked_user_access_begin() already do today.
Honestly, I find this a very worrying position to take.
It's one thing not to know there's a speculative security vulnerability
with how mask_user_address() is used.
It's totally another to say "let's pretend that it doesn't exist so we
can continue to make things faster".
Even if you can get Intel and AMD to agree that STAC/CLAC are really
LFENCEs (and I think you'll struggle), they'd only confer the safety you
want between a real conditional that excludes the non-canonical range,
and the pointer dereference.
Any path that genuinely dereferences a non-canonical pointer is not safe,
whatever serialisation you put in the way. The attacker wins the moment
the load uop executes.
The final hunk of patch 1 is safe (iff STAC is given extra guarantees)
because it is between the conditional and the dereference. Patch 4 is not
safe (if the comment is correct) because it removes the conditional.
Or state that you intend to disregard this non-canonical speculation
problem; that's fine(ish) too, as long as it's done transparently.
~Andrew
* Re: [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec()
2024-10-17 22:31 ` [PATCH v2 0/6] x86/uaccess: Avoid barrier_nospec() Andrew Cooper
@ 2024-10-17 22:42 ` Josh Poimboeuf
0 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-17 22:42 UTC (permalink / raw)
To: Andrew Cooper
Cc: x86, linux-kernel, Thomas Gleixner, Borislav Petkov,
Peter Zijlstra, Pawan Gupta, Waiman Long, Dave Hansen,
Ingo Molnar, Linus Torvalds, Michael Ellerman, linuxppc-dev,
Mark Rutland, Kirill A . Shutemov
On Thu, Oct 17, 2024 at 11:31:30PM +0100, Andrew Cooper wrote:
> Even if you can get Intel and AMD to agree that STAC/CLAC are really
> LFENCEs (and I think you'll struggle), they'd only confer the safety you
> want between a real conditional that excludes the non-canonical range,
> and the pointer dereference.
>
> Any path that genuinely dereferences a non-canonical pointer is not safe,
> whatever serialisation you put in the way. The attacker wins the moment
> the load uop executes.
>
> The final hunk of patch 1 is safe (iff STAC is given extra guarantees)
> because it is between the conditional and the dereference. Patch 4 is not
> safe (if the comment is correct) because it removes the conditional.
So the naming is confusing:
- put_user() implementation is __put_user_*()
- __put_user() implementation is __put_user_nocheck_*()
Patch 4 only affects __put_user(), for which the user is expected to
call access_ok() beforehand.
The current implementations of get_user(), put_user() and
masked_user_access_begin() avoid the conditional. Those are the ones it
sounds like you're worried about?
None of my patches remove conditional checks.
--
Josh
* Re: [PATCH v2 3/6] x86/uaccess: Rearrange putuser.S
2024-10-17 21:55 ` [PATCH v2 3/6] x86/uaccess: Rearrange putuser.S Josh Poimboeuf
@ 2024-10-18 8:51 ` Kirill A . Shutemov
2024-10-18 15:55 ` Josh Poimboeuf
0 siblings, 1 reply; 11+ messages in thread
From: Kirill A . Shutemov @ 2024-10-18 8:51 UTC (permalink / raw)
To: Josh Poimboeuf
Cc: x86, linux-kernel, Thomas Gleixner, Borislav Petkov,
Peter Zijlstra, Pawan Gupta, Waiman Long, Dave Hansen,
Ingo Molnar, Linus Torvalds, Michael Ellerman, linuxppc-dev,
Andrew Cooper, Mark Rutland
On Thu, Oct 17, 2024 at 02:55:22PM -0700, Josh Poimboeuf wrote:
> SYM_FUNC_START(__put_user_2)
> check_range size=2
> ASM_STAC
> -3: movw %ax,(%_ASM_CX)
> +2: movw %ax,(%_ASM_CX)
> xor %ecx,%ecx
> ASM_CLAC
> RET
> SYM_FUNC_END(__put_user_2)
> EXPORT_SYMBOL(__put_user_2)
This patch provides an opportunity to give these labels more meaningful
names, so that future rearrangements do not require as much boilerplate.
For example, we can rename this label 2: to .Luser_2 or something similar.
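A minimal sketch of that suggestion (the label name and extable
placement are assumptions):

SYM_FUNC_START(__put_user_2)
	check_range size=2
	ASM_STAC
.Luser_2: movw %ax,(%_ASM_CX)
	xor %ecx,%ecx
	ASM_CLAC
	RET
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)

	/* The extable entry then names the label directly, so it no
	 * longer needs renumbering when functions are reordered: */
	_ASM_EXTABLE_UA(.Luser_2, __put_user_handle_exception)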
--
Kiryl Shutsemau / Kirill A. Shutemov
* Re: [PATCH v2 3/6] x86/uaccess: Rearrange putuser.S
2024-10-18 8:51 ` Kirill A . Shutemov
@ 2024-10-18 15:55 ` Josh Poimboeuf
0 siblings, 0 replies; 11+ messages in thread
From: Josh Poimboeuf @ 2024-10-18 15:55 UTC (permalink / raw)
To: Kirill A . Shutemov
Cc: x86, linux-kernel, Thomas Gleixner, Borislav Petkov,
Peter Zijlstra, Pawan Gupta, Waiman Long, Dave Hansen,
Ingo Molnar, Linus Torvalds, Michael Ellerman, linuxppc-dev,
Andrew Cooper, Mark Rutland
On Fri, Oct 18, 2024 at 11:51:06AM +0300, Kirill A . Shutemov wrote:
> On Thu, Oct 17, 2024 at 02:55:22PM -0700, Josh Poimboeuf wrote:
> > SYM_FUNC_START(__put_user_2)
> > check_range size=2
> > ASM_STAC
> > -3: movw %ax,(%_ASM_CX)
> > +2: movw %ax,(%_ASM_CX)
> > xor %ecx,%ecx
> > ASM_CLAC
> > RET
> > SYM_FUNC_END(__put_user_2)
> > EXPORT_SYMBOL(__put_user_2)
>
> This patch provides an opportunity to give these labels more meaningful
> names, so that future rearrangements do not require as much boilerplate.
Yeah, I can add a patch like Linus' patch to getuser.S which
encapsulates it all in a macro:
.macro UACCESS op src dst
1: \op \src,\dst
_ASM_EXTABLE_UA(1b, __get_user_handle_exception)
.endm
.text
SYM_FUNC_START(__get_user_1)
check_range size=1
ASM_STAC
UACCESS movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
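Applied to putuser.S, the same pattern would look roughly like this (a
sketch; the exception handler name is taken from the existing file):

.macro UACCESS op src dst
1:	\op \src,\dst
	_ASM_EXTABLE_UA(1b, __put_user_handle_exception)
.endm

SYM_FUNC_START(__put_user_2)
	check_range size=2
	ASM_STAC
	UACCESS movw %ax,(%_ASM_CX)
	xor %ecx,%ecx
	ASM_CLAC
	RET
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)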
--
Josh