* [PATCH 1/9] arm64: barrier: Add CSDB macros to control data-value prediction
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
For CPUs capable of data value prediction, CSDB waits for any outstanding
predictions to architecturally resolve before allowing speculative execution
to continue. Provide macros to expose it to the arch code.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
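A sketch of where the barrier sits in a speculation-hardened sequence
(names here are illustrative, not from this series; the branchless mask
generation itself is the subject of the next patch):

	/* 'nospec_mask' is a hypothetical branchless helper returning
	 * ~0UL if idx is in bounds, 0 otherwise. */
	unsigned long mask = nospec_mask(idx, sz);

	/*
	 * Wait for any predicted value of 'mask' to architecturally
	 * resolve before the dependent load below may continue
	 * speculatively.
	 */
	csdb();
	val = table[idx & mask];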
arch/arm64/include/asm/assembler.h | 7 +++++++
arch/arm64/include/asm/barrier.h | 1 +
2 files changed, 8 insertions(+)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 794fe8122602..709adde9e80f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -116,6 +116,13 @@
.endm
/*
+ * Value prediction barrier
+ */
+ .macro csdb
+ hint #20
+ .endm
+
+/*
* NOP sequence
*/
.macro nops, num
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 77651c49ef44..c0a846d2c602 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -32,6 +32,7 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define psb_csync() asm volatile("hint #17" : : : "memory")
+#define csdb() asm volatile("hint #20" : : : "memory")
#define mb() dsb(sy)
#define rmb() dsb(ld)
--
2.1.4
* [PATCH 2/9] arm64: Implement array_index_mask_nospec()
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
From: Robin Murphy <robin.murphy@arm.com>
Provide an optimised, assembly implementation of array_index_mask_nospec()
for arm64 so that the compiler is not in a position to transform the code
in ways which affect its ability to inhibit speculation (e.g. by introducing
conditional branches).
This is similar to the sequence used by x86, modulo architectural differences
in the carry/borrow flags.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
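The CMP/SBC pair works as follows: CMP computes idx - sz and leaves the
carry flag clear exactly when the subtraction borrows (idx < sz); SBC
xzr, xzr then yields 0 - 0 - !C, i.e. ~0UL on a borrow and 0 otherwise.
A usage sketch via the generic array_index_nospec() wrapper from
<linux/nospec.h>, which consumes this mask ('entries' and NR_ENTRIES
are hypothetical):

	#include <linux/nospec.h>

	int load_entry(unsigned long idx)
	{
		if (idx >= NR_ENTRIES)
			return -EINVAL;

		/*
		 * Even if the bounds check above is mispredicted, the
		 * masked index is forced to zero under speculation.
		 */
		idx = array_index_nospec(idx, NR_ENTRIES);
		return entries[idx];
	}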
arch/arm64/include/asm/barrier.h | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c0a846d2c602..f11518af96a9 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -41,6 +41,27 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+/*
+ * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
+ * and 0 otherwise.
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ " cmp %1, %2\n"
+ " sbc %0, xzr, xzr\n"
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ csdb();
+ return mask;
+}
+
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
--
2.1.4
* [PATCH 3/9] arm64: Make USER_DS an inclusive limit
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
From: Robin Murphy <robin.murphy@arm.com>
Currently, USER_DS represents an exclusive limit while KERNEL_DS is
inclusive. In order to do some clever trickery for speculation-safe
masking, we need them both to behave equivalently - there aren't enough
bits to make KERNEL_DS exclusive, so we have precisely one option. This
also happens to correct a longstanding false negative for a range
ending on the very top byte of kernel memory.
Mark Rutland points out that we've actually got the semantics of
addresses vs. segments muddled up in most of the places we need to
amend, so shuffle the {USER,KERNEL}_DS definitions around such that we
can correct those properly instead of just pasting "-1"s everywhere.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
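A C reference model of the new, inclusive-limit check (it assumes a
compiler providing __int128; the asm below derives the same result from
the carry flag without any wide arithmetic):

	static bool range_ok_model(unsigned long addr, unsigned long size,
				   unsigned long limit /* now inclusive */)
	{
		return (unsigned __int128)addr + size <=
		       (unsigned __int128)limit + 1;
	}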
arch/arm64/include/asm/processor.h | 3 +++
arch/arm64/include/asm/uaccess.h | 45 ++++++++++++++++++++++----------------
arch/arm64/kernel/entry.S | 4 ++--
arch/arm64/mm/fault.c | 4 ++--
4 files changed, 33 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index cee4ae25a5d1..dfefbf3fd9e3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@
#define TASK_SIZE_64 (UL(1) << VA_BITS)
+#define KERNEL_DS UL(-1)
+#define USER_DS (TASK_SIZE_64 - 1)
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 59fda5292936..f2fc026cffb4 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -35,10 +35,7 @@
#include <asm/compiler.h>
#include <asm/extable.h>
-#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
-
-#define USER_DS TASK_SIZE_64
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
@@ -66,22 +63,32 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
-#define __range_ok(addr, size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
- : "=&r" (flag), "=&r" (roksum) \
- : "1" (__addr), "Ir" (size), \
- "r" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit;
+
+ __chk_user_ptr(addr);
+ asm volatile(
+ // A + B <= C + 1 for all A,B,C, in four easy steps:
+ // 1: X = A + B; X' = X % 2^64
+ " adds %0, %0, %2\n"
+ // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+ " csel %1, xzr, %1, hi\n"
+ // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+ // to compensate for the carry flag being set in step 4. For
+ // X > 2^64, X' merely has to remain nonzero, which it does.
+ " csinv %0, %0, xzr, cc\n"
+ // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+ // comes from the carry in being clear. Otherwise, we are
+ // testing X' - C == 0, subject to the previous adjustments.
+ " sbcs xzr, %0, %1\n"
+ " cset %0, ls\n"
+ : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+ return addr;
+}
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -90,7 +97,7 @@ static inline void set_fs(mm_segment_t fs)
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok(addr, size)
+#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b34e717d7597..4e2eae8c442e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -167,10 +167,10 @@ alternative_else_nop_endif
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
- /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ /* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #TASK_SIZE_64
+ mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0e671ddf4855..af530eb9f2ed 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < USER_DS && system_uses_ttbr0_pan())
+ if (addr < TASK_SIZE && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
+ if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
--
2.1.4
* [PATCH 4/9] arm64: Use pointer masking to limit uaccess speculation
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
From: Robin Murphy <robin.murphy@arm.com>
Similarly to x86, mitigate speculation past an access_ok() check by
masking the pointer against the address limit before use.
Even if we don't expect speculative writes per se, it is plausible that
a CPU may still speculate at least as far as fetching a cache line for
writing, hence we also harden put_user() and clear_user() for peace of
mind.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
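A C reference model of the pointer masking (it relies on addr_limit
always being an inclusive 2^n - 1 value, as arranged by the previous
patch, so that ptr & ~limit is zero exactly when ptr <= limit):

	static void __user *mask_ptr_model(const void __user *ptr,
					   unsigned long limit)
	{
		if ((unsigned long)ptr & ~limit)
			return NULL;	/* above the limit */
		return (void __user *)ptr;
	}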
arch/arm64/include/asm/uaccess.h | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index f2fc026cffb4..e49fe723d72d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -228,6 +228,26 @@ static inline void uaccess_enable_not_uao(void)
}
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+ void __user *safe_ptr;
+
+ asm volatile(
+ " bics xzr, %1, %2\n"
+ " csel %0, %1, xzr, eq\n"
+ : "=&r" (safe_ptr)
+ : "r" (ptr), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
+/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
* call.
@@ -297,7 +317,7 @@ do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
- __get_user((x), __p) : \
+ __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \
((x) = 0, -EFAULT); \
})
@@ -361,7 +381,7 @@ do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
- __put_user((x), __p) : \
+ __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \
-EFAULT; \
})
@@ -377,7 +397,7 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __clear_user(to, n);
+ n = __clear_user(__uaccess_mask_ptr(to), n);
return n;
}
--
2.1.4
* [PATCH 5/9] arm64: entry: Ensure branch through syscall table is bounded under speculation
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
In a similar manner to array_index_mask_nospec, this patch introduces an
assembly macro (mask_nospec64) which can be used to bound a value under
speculation. This macro is then used to ensure that the indirect branch
through the syscall table is bounded under speculation, with out-of-range
addresses speculating as calls to sys_io_setup (0).
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
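A C model of the mask_nospec64 sequence (valid for idx, limit < 2^63,
which trivially holds for syscall numbers; the real macro additionally
ends with a csdb):

	static unsigned long mask_nospec64_model(unsigned long idx,
						 unsigned long limit)
	{
		/* Bit 63 of tmp is set iff idx < limit. */
		unsigned long tmp = (idx - limit) & ~idx;

		/* The arithmetic shift broadcasts that bit into a mask. */
		return idx & (unsigned long)((long)tmp >> 63);
	}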
arch/arm64/include/asm/assembler.h | 11 +++++++++++
arch/arm64/kernel/entry.S | 2 ++
2 files changed, 13 insertions(+)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 709adde9e80f..b709f6aa5d0f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -123,6 +123,17 @@
.endm
/*
+ * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
+ * of bounds.
+ */
+ .macro mask_nospec64, idx, limit, tmp
+ sub \tmp, \idx, \limit
+ bic \tmp, \tmp, \idx
+ and \idx, \idx, \tmp, asr #63
+ csdb
+ .endm
+
+/*
* NOP sequence
*/
.macro nops, num
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 4e2eae8c442e..f4276fa81e02 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -378,6 +378,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
* x7 is reserved for the system call number in 32-bit mode.
*/
wsc_nr .req w25 // number of system calls
+xsc_nr .req x25 // number of system calls (zero-extended)
wscno .req w26 // syscall number
xscno .req x26 // syscall number (zero-extended)
stbl .req x27 // syscall table pointer
@@ -935,6 +936,7 @@ el0_svc_naked: // compat entry point
b.ne __sys_trace
cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
+ mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
b ret_fast_syscall
--
2.1.4
* [PATCH 6/9] arm64: uaccess: Prevent speculative use of the current addr_limit
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
A mispredicted conditional call to set_fs could result in the wrong
addr_limit being forwarded under speculation to a subsequent access_ok
check, potentially forming part of a spectre-v1 attack using uaccess
routines.
This patch prevents this forwarding from taking place by putting heavy
barriers in set_fs after writing the addr_limit.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
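The shape of the hazard being closed, sketched with illustrative code
(not taken from the kernel):

	if (cond)			/* mispredictable branch */
		set_fs(KERNEL_DS);	/* may execute speculatively */
	...
	if (access_ok(VERIFY_READ, ptr, size))	/* must not consume the
						   speculated KERNEL_DS */

With the dsb(nsh)/isb pair after the addr_limit write, a mispredicted
path through set_fs cannot forward the wrong limit to a subsequent
access_ok check.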
arch/arm64/include/asm/uaccess.h | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e49fe723d72d..2057deed7697 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -42,6 +42,13 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+ /*
+ * Prevent a mispredicted conditional call to set_fs from forwarding
+ * the wrong address limit to access_ok under speculation.
+ */
+ dsb(nsh);
+ isb();
+
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
--
2.1.4
* [PATCH 7/9] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
access_ok isn't an expensive operation once the addr_limit for the current
thread has been loaded into the cache. Given that the initial access_ok
check preceding a sequence of __{get,put}_user operations will take
the brunt of the miss, we can make the __* variants identical to the
full-fat versions, which brings with it the benefits of address masking.
The likely cost in these sequences will be from toggling PAN/UAO, which
we can address later by implementing the *_unsafe versions.
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
arch/arm64/include/asm/uaccess.h | 54 ++++++++++++++++++++++++----------------
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 2057deed7697..e5002bc7130d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -306,28 +306,33 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user(x, ptr) \
+#define __get_user_check(x, ptr, err) \
({ \
- int __gu_err = 0; \
- __get_user_err((x), (ptr), __gu_err); \
- __gu_err; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
+ __p = uaccess_mask_ptr(__p); \
+ __get_user_err((x), __p, (err)); \
+ } else { \
+ (x) = 0; (err) = -EFAULT; \
+ } \
})
#define __get_user_error(x, ptr, err) \
({ \
- __get_user_err((x), (ptr), (err)); \
+ __get_user_check((x), (ptr), (err)); \
(void)0; \
})
-#define get_user(x, ptr) \
+#define __get_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
- access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
- __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \
- ((x) = 0, -EFAULT); \
+ int __gu_err = 0; \
+ __get_user_check((x), (ptr), __gu_err); \
+ __gu_err; \
})
+#define get_user __get_user
+
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
asm volatile( \
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
@@ -370,28 +375,33 @@ do { \
uaccess_disable_not_uao(); \
} while (0)
-#define __put_user(x, ptr) \
+#define __put_user_check(x, ptr, err) \
({ \
- int __pu_err = 0; \
- __put_user_err((x), (ptr), __pu_err); \
- __pu_err; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
+ __p = uaccess_mask_ptr(__p); \
+ __put_user_err((x), __p, (err)); \
+ } else { \
+ (err) = -EFAULT; \
+ } \
})
#define __put_user_error(x, ptr, err) \
({ \
- __put_user_err((x), (ptr), (err)); \
+ __put_user_check((x), (ptr), (err)); \
(void)0; \
})
-#define put_user(x, ptr) \
+#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
- access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
- __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \
- -EFAULT; \
+ int __pu_err = 0; \
+ __put_user_check((x), (ptr), __pu_err); \
+ __pu_err; \
})
+#define put_user __put_user
+
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user __arch_copy_from_user
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
--
2.1.4
* [PATCH 8/9] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
Like we've done for get_user and put_user, ensure that user pointers
are masked before invoking the underlying __arch_{clear,copy_*}_user
operations.
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
arch/arm64/include/asm/uaccess.h | 29 ++++++++++++++++++++++-------
arch/arm64/kernel/arm64ksyms.c | 4 ++--
arch/arm64/lib/clear_user.S | 6 +++---
arch/arm64/lib/copy_in_user.S | 5 +++--
4 files changed, 30 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e5002bc7130d..543e11f0f657 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -403,20 +403,35 @@ do { \
#define put_user __put_user
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-#define raw_copy_from_user __arch_copy_from_user
+#define raw_copy_from_user(to, from, n) \
+({ \
+ __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
+})
+
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define raw_copy_to_user __arch_copy_to_user
-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+#define raw_copy_to_user(to, from, n) \
+({ \
+ __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
+})
+
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_in_user(to, from, n) \
+({ \
+ __arch_copy_in_user(__uaccess_mask_ptr(to), \
+ __uaccess_mask_ptr(from), (n)); \
+})
+
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __clear_user(__uaccess_mask_ptr(to), n);
+ n = __arch_clear_user(__uaccess_mask_ptr(to), n);
return n;
}
+#define clear_user __clear_user
extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -430,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
kasan_check_write(dst, size);
- return __copy_user_flushcache(dst, src, size);
+ return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 67368c7329c0..66be504edb6c 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(raw_copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 3d69a8d41fa5..21ba0b29621b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -21,7 +21,7 @@
.text
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
@@ -29,7 +29,7 @@
*
* Alignment fixed up by hardware.
*/
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
@@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
uaccess_disable_not_uao x2, x3
ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index fbb090f431a5..54b75deb1d16 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,15 @@
.endm
end .req x5
-ENTRY(raw_copy_in_user)
+
+ENTRY(__arch_copy_in_user)
uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
uaccess_disable_not_uao x3, x4
mov x0, #0
ret
-ENDPROC(raw_copy_in_user)
+ENDPROC(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
--
2.1.4
* [PATCH 9/9] arm64: futex: Mask __user pointers prior to dereference
From: Will Deacon @ 2018-02-05 15:34 UTC
To: linux-arm-kernel
The arm64 futex code has some explicit dereferencing of user pointers
when performing atomic operations in response to a futex command. This
patch uses masking to limit any speculative futex operations to within
the user address space.
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
arch/arm64/include/asm/futex.h | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 5bb2fd4674e7..07fe2479d310 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,9 +48,10 @@ do { \
} while (0)
static inline int
-arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
int oldval = 0, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
pagefault_disable();
@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 val, tmp;
+ u32 __user *uaddr;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
return -EFAULT;
+ uaddr = __uaccess_mask_ptr(_uaddr);
uaccess_enable();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
--
2.1.4
* [PATCH 0/9] Mitigations against spectre-v1 in the arm64 Linux kernel
From: Catalin Marinas @ 2018-02-06 22:17 UTC
To: linux-arm-kernel
On Mon, Feb 05, 2018 at 03:34:15PM +0000, Will Deacon wrote:
> Hi all,
>
> This series implements some mitigations against spectre-v1 for arm64. It
> broadly follows the work that has been done for x86, by implementing:
>
> * A back-end version of array_index_mask_nospec() to suppress any
> compiler optimisations that could introduce unwanted speculative
> paths
>
> * Masking of the syscall number to restrict speculation through the
> syscall table
>
> * Masking of __user pointers prior to dereference in uaccess routines
>
> The latter introduces complications in access_ok and set_fs which are
> also handled in this series.
Queued for 4.16. Thanks.
--
Catalin