* [MODERATED] [Patch v4] Linux Patch 1/1
@ 2018-04-28 15:21 Tim Chen
0 siblings, 0 replies; 2+ messages in thread
From: Tim Chen @ 2018-04-28 15:21 UTC (permalink / raw)
To: speck
[-- Attachment #1: Type: text/plain, Size: 15484 bytes --]
From 78fbf3237435a14cdebeb354e890029f7243aa58 Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Fri, 27 Apr 2018 03:33:04 -0700
Subject: [PATCH] x86/ssb: Enable option to mitigate SSB by explicit prctl
Processes that are unable to use other software mitigations and execute
untrusted code may want to disable speculative store bypass. This patch
adds a new prctl : PR_SET_SPEC_CTRL_MODE.
The prctl allows the control of speculation mode
to mitigate speculative store bypass vulnerability of the
current process. When PR_SPEC_CTRL_MODE_RDS is set, the reduced data
speculation will be set for the process to provide mitigation.
The prctl mode is inherited by children to handle threads and allow wrappers,
but it is not broadcast to all threads in the same process. It is generally
safe to inherit because it doesn't allow any new attacks, but just prevents
them.
It also adds a matching prctl PR_GET_SPEC_CTRL_MODE to retrieve that state.
We allow using PR_SPEC_CTRL_MODE_NONE to clear the mode after
PR_SPEC_CTRL_MODE_RDS is set. We expect that the attack will be from
code within a JITed sandbox. We want to prevent the JITed code in sandbox
from reading data it shouldn't read from its JITed sandbox. A JIT can
turn on PR_SPEC_CTRL_MODE_RDS mode before running code in the JIT sandbox,
and turn it off when returning. JIT sandboxed code should be unable to do system
calls so there is no danger of it turning off PR_SPEC_CTRL_MODE_RDS.
So allowing the prctl to disable the mode later should be safe. If
the JIT'ed code can do system calls, we have a greater security issue on our hands.
We are not trying to protect native code processes in the same address space,
or threads within the same process. They already can access data of each other.
To Do:
1. For older kernels that do not support this prctl interface, we'll get
EINVAL returned. We would like to return a more informative value
if we issue this prctl on a kernel that supports this interface, but
lacks support on the platform.
v4:
1. Fix incorrect condition for applying RDS in x86_restore_host_spec_ctrl and
x86_set_guest_spec_ctrl.
2. Fix up a few unnecessary vendor checks after checking specctrl_dynamic_rds,
which is set only for Intel cpu.
v3:
1. Get rid of static key usage and code clean up from patch review.
v2:
1. Switch from using RDS on seccomp processes to using an explicit
prctl to enable RDS.
This patch is applied on top of v6 of Konrad's SSB patchset.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
Documentation/admin-guide/kernel-parameters.txt | 4 +
arch/x86/include/asm/msr-index.h | 3 +-
arch/x86/include/asm/nospec-branch.h | 3 +
arch/x86/include/asm/thread_info.h | 4 +-
arch/x86/kernel/cpu/bugs.c | 111 ++++++++++++++++++++++--
arch/x86/kernel/cpu/intel.c | 2 +-
arch/x86/kernel/process.c | 24 +++++
include/uapi/linux/prctl.h | 13 +++
kernel/sys.c | 16 ++++
9 files changed, 172 insertions(+), 8 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a2d337b..387c14a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4053,6 +4053,10 @@
auto - kernel detects whether your CPU model contains a
vulnerable implementation of Speculative Store
Bypass and picks the most appropriate mitigation
+ prctl - disable Speculative Store Bypass for processes
+ via prctl. Processes run normally with
+ Speculative Store Bypass by default unless
+ told otherwise by prctl.
Not specifying this option is equivalent to
spec_store_bypass_disable=auto.
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4c6b8f3..5bee7a2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,7 +42,8 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index b3c6dbf..b5c594e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,6 +7,7 @@
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
+#include <asm/percpu.h>
/*
* Fill the CPU return stack buffer.
@@ -242,6 +243,7 @@ extern void x86_restore_host_spec_ctrl(u64);
enum ssb_mitigation {
SPEC_STORE_BYPASS_NONE,
SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
};
extern char __indirect_thunk_start[];
@@ -310,6 +312,7 @@ do { \
preempt_enable(); \
} while (0)
+extern bool specctrl_dynamic_rds;
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a5d9521..e5c26cc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,6 +79,7 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+#define TIF_RDS 5 /* Reduced data speculation */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -105,6 +106,7 @@ struct thread_info {
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_RDS (1 << TIF_RDS)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -144,7 +146,7 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 191293e..6a2e9d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -12,6 +12,7 @@
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/prctl.h>
#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
@@ -142,7 +143,12 @@ EXPORT_SYMBOL_GPL(x86_get_default_spec_ctrl);
void x86_set_guest_spec_ctrl(u64 guest_spec_ctrl)
{
- if (x86_get_default_spec_ctrl() == guest_spec_ctrl)
+ u64 host_val = x86_get_default_spec_ctrl();
+
+ if (specctrl_dynamic_rds && test_tsk_thread_flag(current, TIF_RDS))
+ host_val |= SPEC_CTRL_RDS;
+
+ if (host_val == guest_spec_ctrl)
return;
else
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -151,10 +157,15 @@ EXPORT_SYMBOL_GPL(x86_set_guest_spec_ctrl);
void x86_restore_host_spec_ctrl(u64 guest_spec_ctrl)
{
- if (x86_get_default_spec_ctrl() == guest_spec_ctrl)
+ u64 host_val = x86_get_default_spec_ctrl();
+
+ if (specctrl_dynamic_rds && test_tsk_thread_flag(current, TIF_RDS))
+ host_val |= SPEC_CTRL_RDS;
+
+ if (host_val == guest_spec_ctrl)
return;
else
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ wrmsrl(MSR_IA32_SPEC_CTRL, host_val);
}
EXPORT_SYMBOL_GPL(x86_restore_host_spec_ctrl);
@@ -379,16 +390,21 @@ static void __init spectre_v2_select_mitigation(void)
static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+/* dynamic reduced data speculation in use */
+bool specctrl_dynamic_rds;
+
/* The kernel command line selection */
enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_NONE,
SPEC_STORE_BYPASS_CMD_AUTO,
SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
};
static const char *ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
};
static const struct {
@@ -398,6 +414,7 @@ static const struct {
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
@@ -458,9 +475,20 @@ static void __init ssb_select_mitigation(void)
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
break;
+ /* Choose prctl as the default mode */
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
case SPEC_STORE_BYPASS_CMD_ON:
mode = SPEC_STORE_BYPASS_DISABLE;
break;
+ case SPEC_STORE_BYPASS_CMD_PRCTL:
+ /*
+ * AMD platforms by default don't need SSB mitigation.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ break;
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
case SPEC_STORE_BYPASS_CMD_NONE:
break;
}
@@ -481,13 +509,86 @@ static void __init ssb_select_mitigation(void)
* a completely different MSR.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+ if (mode == SPEC_STORE_BYPASS_PRCTL)
+ specctrl_dynamic_rds = true;
+ else
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
x86_spec_ctrl_mask &= ~(SPEC_CTRL_RDS);
}
}
}
+static void start_speculative_store_bypass_mitigation(void)
+{
+ if (specctrl_dynamic_rds)
+ wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_RDS | x86_spec_ctrl_base);
+}
+
+static void stop_speculative_store_bypass_mitigation(void)
+{
+ if (specctrl_dynamic_rds)
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+int arch_prctl_set_spec_ctrl(struct task_struct *p, unsigned long val)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ /*
+ * Reduced Data Speculation is set for whole platform,
+ * only valid choice is PR_SPEC_CTRL_MODE_RDS.
+ */
+ if (val == PR_SPEC_CTRL_MODE_RDS)
+ return 0;
+ else
+ return -EINVAL;
+
+ break;
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (val == PR_SPEC_CTRL_MODE_RDS) {
+ set_tsk_thread_flag(p, TIF_RDS);
+ if (p == current)
+ start_speculative_store_bypass_mitigation();
+ } else if (val == PR_SPEC_CTRL_MODE_NONE) {
+ clear_tsk_thread_flag(p, TIF_RDS);
+ if (p == current)
+ stop_speculative_store_bypass_mitigation();
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+ break;
+ case SPEC_STORE_BYPASS_NONE:
+ if (val == PR_SPEC_CTRL_MODE_NONE)
+ return 0;
+ else
+ return -EINVAL;
+ break;
+ }
+ return -EINVAL;
+}
+
+int arch_prctl_get_spec_ctrl(struct task_struct *p)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_CTRL_MODE_RDS;
+ break;
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (test_tsk_thread_flag(p, TIF_RDS))
+ return PR_SPEC_CTRL_MODE_RDS;
+ else
+ return PR_SPEC_CTRL_MODE_NONE;
+ break;
+ case SPEC_STORE_BYPASS_NONE:
+ return PR_SPEC_CTRL_MODE_NONE;
+ break;
+ }
+
+ return PR_SPEC_CTRL_MODE_NONE;
+}
+
#undef pr_fmt
#ifdef CONFIG_SYSFS
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8d6d6f4..d433c0f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -773,7 +773,7 @@ static void init_intel(struct cpuinfo_x86 *c)
init_intel_misc_features(c);
if (cpu_has(c, X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
- x86_set_spec_ctrl(SPEC_CTRL_RDS);
+ x86_set_spec_ctrl(x86_get_default_spec_ctrl());
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 03408b9..2253527 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,6 +38,7 @@
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
+#include <asm/nospec-branch.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,26 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
}
}
+static inline void speculative_bypass_update(unsigned long tifn)
+{
+ u64 msr;
+
+ if (!specctrl_dynamic_rds)
+ return;
+
+ /*
+ * Note: TIF_RDS bit position (5) is greater than
+ * SPEC_CTRL_RDS bit position (2). The shift value
+ * (TIF_RDS - SPEC_CTRL_RDS_SHIFT) used to convert from TIF_RDS
+ * bit position to SPEC_CTRL_RDS bit position
+ * below is positive and valid.
+ */
+ msr = x86_get_default_spec_ctrl() |
+ ((tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT));
+
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
{
@@ -309,6 +330,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+ if ((tifp ^ tifn) & _TIF_RDS)
+ speculative_bypass_update(tifn);
}
/*
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index af5f8c2..1ab361d 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -207,4 +207,17 @@ struct prctl_mm_map {
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+/*
+ * Speculation Control Mode knobs
+ *
+ * Set this on a task to control speculation mode.
+ * Initial use is to mitigate against speculative store bypass attack using
+ * reduced data speculation mode.
+ */
+#define PR_SET_SPEC_CTRL_MODE 52 /* set speculation control mode of process */
+#define PR_SPEC_CTRL_MODE_NONE 0 /* no speculation control mode in use */
+#define PR_SPEC_CTRL_MODE_RDS (1 << 0)/* reduced data speculation mode */
+
+#define PR_GET_SPEC_CTRL_MODE 53 /* get speculation control mode of process */
+
#endif /* _LINUX_PRCTL_H */
diff --git a/kernel/sys.c b/kernel/sys.c
index ad69218..6c8366b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2242,6 +2242,16 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
return 1;
}
+int __weak arch_prctl_set_spec_ctrl(struct task_struct *p, unsigned long arg)
+{
+ return 0;
+}
+
+int __weak arch_prctl_get_spec_ctrl(struct task_struct *p)
+{
+ return 0;
+}
+
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
@@ -2450,6 +2460,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SVE_GET_VL:
error = SVE_GET_VL();
break;
+ case PR_SET_SPEC_CTRL_MODE:
+ error = arch_prctl_set_spec_ctrl(me, arg2);
+ break;
+ case PR_GET_SPEC_CTRL_MODE:
+ error = arch_prctl_get_spec_ctrl(me);
+ break;
default:
error = -EINVAL;
break;
--
2.9.4
[-- Attachment #2: 0001-x86-ssb-Enable-option-to-mitigate-SSB-by-explicit-pr.patch --]
[-- Type: text/plain, Size: 15484 bytes --]
From 78fbf3237435a14cdebeb354e890029f7243aa58 Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Fri, 27 Apr 2018 03:33:04 -0700
Subject: [PATCH] x86/ssb: Enable option to mitigate SSB by explicit prctl
Processes that are unable to use other software mitigations and execute
untrusted code may want to disable speculative store bypass. This patch
adds a new prctl : PR_SET_SPEC_CTRL_MODE.
The prctl allows the control of speculation mode
to mitigate speculative store bypass vulnerability of the
current process. When PR_SPEC_CTRL_MODE_RDS is set, the reduced data
speculation will be set for the process to provide mitigation.
The prctl mode is inherited by children to handle threads and allow wrappers,
but it is not broadcast to all threads in the same process. It is generally
safe to inherit because it doesn't allow any new attacks, but just prevents
them.
It also adds a matching prctl PR_GET_SPEC_CTRL_MODE to retrieve that state.
We allow using PR_SPEC_CTRL_MODE_NONE to clear the mode after
PR_SPEC_CTRL_MODE_RDS is set. We expect that the attack will be from
code within a JITed sandbox. We want to prevent the JITed code in sandbox
from reading data it shouldn't read from its JITed sandbox. A JIT can
turn on PR_SPEC_CTRL_MODE_RDS mode before running code in the JIT sandbox,
and turn it off when returning. JIT sandboxed code should be unable to do system
calls so there is no danger of it turning off PR_SPEC_CTRL_MODE_RDS.
So allowing the prctl to disable the mode later should be safe. If
the JIT'ed code can do system calls, we have a greater security issue on our hands.
We are not trying to protect native code processes in the same address space,
or threads within the same process. They already can access data of each other.
To Do:
1. For older kernels that do not support this prctl interface, we'll get
EINVAL returned. We would like to return a more informative value
if we issue this prctl on a kernel that supports this interface, but
lacks support on the platform.
v4:
1. Fix incorrect condition for applying RDS in x86_restore_host_spec_ctrl and
x86_set_guest_spec_ctrl.
2. Fix up a few unnecessary vendor checks after checking specctrl_dynamic_rds,
which is set only for Intel cpu.
v3:
1. Get rid of static key usage and code clean up from patch review.
v2:
1. Switch from using RDS on seccomp processes to using an explicit
prctl to enable RDS.
This patch is applied on top of v6 of Konrad's SSB patchset.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
Documentation/admin-guide/kernel-parameters.txt | 4 +
arch/x86/include/asm/msr-index.h | 3 +-
arch/x86/include/asm/nospec-branch.h | 3 +
arch/x86/include/asm/thread_info.h | 4 +-
arch/x86/kernel/cpu/bugs.c | 111 ++++++++++++++++++++++--
arch/x86/kernel/cpu/intel.c | 2 +-
arch/x86/kernel/process.c | 24 +++++
include/uapi/linux/prctl.h | 13 +++
kernel/sys.c | 16 ++++
9 files changed, 172 insertions(+), 8 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a2d337b..387c14a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4053,6 +4053,10 @@
auto - kernel detects whether your CPU model contains a
vulnerable implementation of Speculative Store
Bypass and picks the most appropriate mitigation
+ prctl - disable Speculative Store Bypass for processes
+ via prctl. Processes run normally with
+ Speculative Store Bypass by default unless
+ told otherwise by prctl.
Not specifying this option is equivalent to
spec_store_bypass_disable=auto.
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4c6b8f3..5bee7a2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,7 +42,8 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index b3c6dbf..b5c594e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,6 +7,7 @@
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
+#include <asm/percpu.h>
/*
* Fill the CPU return stack buffer.
@@ -242,6 +243,7 @@ extern void x86_restore_host_spec_ctrl(u64);
enum ssb_mitigation {
SPEC_STORE_BYPASS_NONE,
SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
};
extern char __indirect_thunk_start[];
@@ -310,6 +312,7 @@ do { \
preempt_enable(); \
} while (0)
+extern bool specctrl_dynamic_rds;
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a5d9521..e5c26cc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,6 +79,7 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+#define TIF_RDS 5 /* Reduced data speculation */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -105,6 +106,7 @@ struct thread_info {
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_RDS (1 << TIF_RDS)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -144,7 +146,7 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 191293e..6a2e9d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -12,6 +12,7 @@
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/prctl.h>
#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
@@ -142,7 +143,12 @@ EXPORT_SYMBOL_GPL(x86_get_default_spec_ctrl);
void x86_set_guest_spec_ctrl(u64 guest_spec_ctrl)
{
- if (x86_get_default_spec_ctrl() == guest_spec_ctrl)
+ u64 host_val = x86_get_default_spec_ctrl();
+
+ if (specctrl_dynamic_rds && test_tsk_thread_flag(current, TIF_RDS))
+ host_val |= SPEC_CTRL_RDS;
+
+ if (host_val == guest_spec_ctrl)
return;
else
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -151,10 +157,15 @@ EXPORT_SYMBOL_GPL(x86_set_guest_spec_ctrl);
void x86_restore_host_spec_ctrl(u64 guest_spec_ctrl)
{
- if (x86_get_default_spec_ctrl() == guest_spec_ctrl)
+ u64 host_val = x86_get_default_spec_ctrl();
+
+ if (specctrl_dynamic_rds && test_tsk_thread_flag(current, TIF_RDS))
+ host_val |= SPEC_CTRL_RDS;
+
+ if (host_val == guest_spec_ctrl)
return;
else
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ wrmsrl(MSR_IA32_SPEC_CTRL, host_val);
}
EXPORT_SYMBOL_GPL(x86_restore_host_spec_ctrl);
@@ -379,16 +390,21 @@ static void __init spectre_v2_select_mitigation(void)
static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+/* dynamic reduced data speculation in use */
+bool specctrl_dynamic_rds;
+
/* The kernel command line selection */
enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_NONE,
SPEC_STORE_BYPASS_CMD_AUTO,
SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
};
static const char *ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
};
static const struct {
@@ -398,6 +414,7 @@ static const struct {
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
@@ -458,9 +475,20 @@ static void __init ssb_select_mitigation(void)
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
break;
+ /* Choose prctl as the default mode */
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
case SPEC_STORE_BYPASS_CMD_ON:
mode = SPEC_STORE_BYPASS_DISABLE;
break;
+ case SPEC_STORE_BYPASS_CMD_PRCTL:
+ /*
+ * AMD platforms by default don't need SSB mitigation.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ break;
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
case SPEC_STORE_BYPASS_CMD_NONE:
break;
}
@@ -481,13 +509,86 @@ static void __init ssb_select_mitigation(void)
* a completely different MSR.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+ if (mode == SPEC_STORE_BYPASS_PRCTL)
+ specctrl_dynamic_rds = true;
+ else
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
x86_spec_ctrl_mask &= ~(SPEC_CTRL_RDS);
}
}
}
+static void start_speculative_store_bypass_mitigation(void)
+{
+ if (specctrl_dynamic_rds)
+ wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_RDS | x86_spec_ctrl_base);
+}
+
+static void stop_speculative_store_bypass_mitigation(void)
+{
+ if (specctrl_dynamic_rds)
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+int arch_prctl_set_spec_ctrl(struct task_struct *p, unsigned long val)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ /*
+ * Reduced Data Speculation is set for whole platform,
+ * only valid choice is PR_SPEC_CTRL_MODE_RDS.
+ */
+ if (val == PR_SPEC_CTRL_MODE_RDS)
+ return 0;
+ else
+ return -EINVAL;
+
+ break;
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (val == PR_SPEC_CTRL_MODE_RDS) {
+ set_tsk_thread_flag(p, TIF_RDS);
+ if (p == current)
+ start_speculative_store_bypass_mitigation();
+ } else if (val == PR_SPEC_CTRL_MODE_NONE) {
+ clear_tsk_thread_flag(p, TIF_RDS);
+ if (p == current)
+ stop_speculative_store_bypass_mitigation();
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+ break;
+ case SPEC_STORE_BYPASS_NONE:
+ if (val == PR_SPEC_CTRL_MODE_NONE)
+ return 0;
+ else
+ return -EINVAL;
+ break;
+ }
+ return -EINVAL;
+}
+
+int arch_prctl_get_spec_ctrl(struct task_struct *p)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_CTRL_MODE_RDS;
+ break;
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (test_tsk_thread_flag(p, TIF_RDS))
+ return PR_SPEC_CTRL_MODE_RDS;
+ else
+ return PR_SPEC_CTRL_MODE_NONE;
+ break;
+ case SPEC_STORE_BYPASS_NONE:
+ return PR_SPEC_CTRL_MODE_NONE;
+ break;
+ }
+
+ return PR_SPEC_CTRL_MODE_NONE;
+}
+
#undef pr_fmt
#ifdef CONFIG_SYSFS
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8d6d6f4..d433c0f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -773,7 +773,7 @@ static void init_intel(struct cpuinfo_x86 *c)
init_intel_misc_features(c);
if (cpu_has(c, X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
- x86_set_spec_ctrl(SPEC_CTRL_RDS);
+ x86_set_spec_ctrl(x86_get_default_spec_ctrl());
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 03408b9..2253527 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,6 +38,7 @@
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
+#include <asm/nospec-branch.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,26 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
}
}
+static inline void speculative_bypass_update(unsigned long tifn)
+{
+ u64 msr;
+
+ if (!specctrl_dynamic_rds)
+ return;
+
+ /*
+ * Note: TIF_RDS bit position (5) is greater than
+ * SPEC_CTRL_RDS bit position (2). The shift value
+ * (TIF_RDS - SPEC_CTRL_RDS_SHIFT) used to convert from TIF_RDS
+ * bit position to SPEC_CTRL_RDS bit position
+ * below is positive and valid.
+ */
+ msr = x86_get_default_spec_ctrl() |
+ ((tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT));
+
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
{
@@ -309,6 +330,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+ if ((tifp ^ tifn) & _TIF_RDS)
+ speculative_bypass_update(tifn);
}
/*
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index af5f8c2..1ab361d 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -207,4 +207,17 @@ struct prctl_mm_map {
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+/*
+ * Speculation Control Mode knobs
+ *
+ * Set this on a task to control speculation mode.
+ * Initial use is to mitigate against speculative store bypass attack using
+ * reduced data speculation mode.
+ */
+#define PR_SET_SPEC_CTRL_MODE 52 /* set speculation control mode of process */
+#define PR_SPEC_CTRL_MODE_NONE 0 /* no speculation control mode in use */
+#define PR_SPEC_CTRL_MODE_RDS (1 << 0)/* reduced data speculation mode */
+
+#define PR_GET_SPEC_CTRL_MODE 53 /* get speculation control mode of process */
+
#endif /* _LINUX_PRCTL_H */
diff --git a/kernel/sys.c b/kernel/sys.c
index ad69218..6c8366b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2242,6 +2242,16 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
return 1;
}
+int __weak arch_prctl_set_spec_ctrl(struct task_struct *p, unsigned long arg)
+{
+ return 0;
+}
+
+int __weak arch_prctl_get_spec_ctrl(struct task_struct *p)
+{
+ return 0;
+}
+
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
@@ -2450,6 +2460,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SVE_GET_VL:
error = SVE_GET_VL();
break;
+ case PR_SET_SPEC_CTRL_MODE:
+ error = arch_prctl_set_spec_ctrl(me, arg2);
+ break;
+ case PR_GET_SPEC_CTRL_MODE:
+ error = arch_prctl_get_spec_ctrl(me);
+ break;
default:
error = -EINVAL;
break;
--
2.9.4
^ permalink raw reply related [flat|nested] 2+ messages in thread
* [MODERATED] [PATCH v4] Linux Patch 1/1
2018-04-28 4:14 [MODERATED] [PATCH v3] " Tim Chen
@ 2018-04-28 16:07 ` Tim Chen
0 siblings, 0 replies; 2+ messages in thread
From: Tim Chen @ 2018-04-28 16:07 UTC (permalink / raw)
To: speck
[-- Attachment #1.1: Type: text/plain, Size: 15942 bytes --]
From 78fbf3237435a14cdebeb354e890029f7243aa58 Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Fri, 27 Apr 2018 03:33:04 -0700
Subject: [PATCH] x86/ssb: Enable option to mitigate SSB by explicit prctl
Processes that are unable to use other software mitigations and execute
untrusted code may want to disable speculative store bypass. This patch
adds a new prctl : PR_SET_SPEC_CTRL_MODE.
The prctl allows the control of speculation mode
to mitigate speculative store bypass vulnerability of the
current process. When PR_SPEC_CTRL_MODE_RDS is set, the reduced data
speculation will be set for the process to provide mitigation.
The prctl mode is inherited by children to handle threads and allow wrappers,
but it is not broadcast to all threads in the same process. It is generally
safe to inherit because it doesn't allow any new attacks, but just prevents
them.
It also adds a matching prctl PR_GET_SPEC_CTRL_MODE to retrieve that state.
We allow using PR_SPEC_CTRL_MODE_NONE to clear the mode after
PR_SPEC_CTRL_MODE_RDS is set. We expect that the attack will be from
code within a JITed sandbox. We want to prevent the JITed code in sandbox
from reading data it shouldn't read from its JITed sandbox. A JIT can
turn on PR_SPEC_CTRL_MODE_RDS mode before running code in the JIT sandbox,
and turn it off when returning. JIT sandboxed code should be unable to do system
calls so there is no danger of it turning off PR_SPEC_CTRL_MODE_RDS.
So allowing the prctl to disable the mode later should be safe. If
the JITed code can do system calls, we have a greater security issue on our hands.
We are not trying to protect native code processes in the same address space,
or threads within the same process. They already can access data of each other.
To Do:
1. For older kernels that do not support this prctl interface, we'll get
EINVAL returned. We would like to return a more informative value
if we issue this prctl on a kernel that supports this interface but
lacks support on the platform.
v4:
1. Fix incorrect condition for applying RDS in x86_restore_host_spec_ctrl and
x86_set_guest_spec_ctrl.
2. Fix up a few unnecessary vendor checks after checking specctrl_dynamic_rds,
which is set only for Intel cpu.
v3:
1. Get rid of static key usage and code clean up from patch review.
v2:
1. Switch from using RDS on seccomp processes to using an explicit
prctl to enable RDS.
This patch is applied on top of v6 of Konrad's SSB patchset.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
Documentation/admin-guide/kernel-parameters.txt | 4 +
arch/x86/include/asm/msr-index.h | 3 +-
arch/x86/include/asm/nospec-branch.h | 3 +
arch/x86/include/asm/thread_info.h | 4 +-
arch/x86/kernel/cpu/bugs.c | 111 ++++++++++++++++++++++--
arch/x86/kernel/cpu/intel.c | 2 +-
arch/x86/kernel/process.c | 24 +++++
include/uapi/linux/prctl.h | 13 +++
kernel/sys.c | 16 ++++
9 files changed, 172 insertions(+), 8 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a2d337b..387c14a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4053,6 +4053,10 @@
auto - kernel detects whether your CPU model contains a
vulnerable implementation of Speculative Store
Bypass and picks the most appropriate mitigation
+ prctl - disable Speculative Store Bypass for processes
+ via prctl. Processes run normally with
+ Speculative Store Bypass by default unless
+ told otherwise by prctl.
Not specifying this option is equivalent to
spec_store_bypass_disable=auto.
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4c6b8f3..5bee7a2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,7 +42,8 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index b3c6dbf..b5c594e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,6 +7,7 @@
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
+#include <asm/percpu.h>
/*
* Fill the CPU return stack buffer.
@@ -242,6 +243,7 @@ extern void x86_restore_host_spec_ctrl(u64);
enum ssb_mitigation {
SPEC_STORE_BYPASS_NONE,
SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
};
extern char __indirect_thunk_start[];
@@ -310,6 +312,7 @@ do { \
preempt_enable(); \
} while (0)
+extern bool specctrl_dynamic_rds;
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a5d9521..e5c26cc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,6 +79,7 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+#define TIF_RDS 5 /* Reduced data speculation */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -105,6 +106,7 @@ struct thread_info {
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_RDS (1 << TIF_RDS)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -144,7 +146,7 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 191293e..6a2e9d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -12,6 +12,7 @@
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/prctl.h>
#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
@@ -142,7 +143,12 @@ EXPORT_SYMBOL_GPL(x86_get_default_spec_ctrl);
void x86_set_guest_spec_ctrl(u64 guest_spec_ctrl)
{
- if (x86_get_default_spec_ctrl() == guest_spec_ctrl)
+ u64 host_val = x86_get_default_spec_ctrl();
+
+ if (specctrl_dynamic_rds && test_tsk_thread_flag(current, TIF_RDS))
+ host_val |= SPEC_CTRL_RDS;
+
+ if (host_val == guest_spec_ctrl)
return;
else
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -151,10 +157,15 @@ EXPORT_SYMBOL_GPL(x86_set_guest_spec_ctrl);
void x86_restore_host_spec_ctrl(u64 guest_spec_ctrl)
{
- if (x86_get_default_spec_ctrl() == guest_spec_ctrl)
+ u64 host_val = x86_get_default_spec_ctrl();
+
+ if (specctrl_dynamic_rds && test_tsk_thread_flag(current, TIF_RDS))
+ host_val |= SPEC_CTRL_RDS;
+
+ if (host_val == guest_spec_ctrl)
return;
else
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ wrmsrl(MSR_IA32_SPEC_CTRL, host_val);
}
EXPORT_SYMBOL_GPL(x86_restore_host_spec_ctrl);
@@ -379,16 +390,21 @@ static void __init spectre_v2_select_mitigation(void)
static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+/* dynamic reduced data speculation in use */
+bool specctrl_dynamic_rds;
+
/* The kernel command line selection */
enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_NONE,
SPEC_STORE_BYPASS_CMD_AUTO,
SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
};
static const char *ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
};
static const struct {
@@ -398,6 +414,7 @@ static const struct {
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
@@ -458,9 +475,20 @@ static void __init ssb_select_mitigation(void)
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
break;
+ /* Choose prctl as the default mode */
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
case SPEC_STORE_BYPASS_CMD_ON:
mode = SPEC_STORE_BYPASS_DISABLE;
break;
+ case SPEC_STORE_BYPASS_CMD_PRCTL:
+ /*
+ * AMD platforms by default don't need SSB mitigation.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ break;
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
case SPEC_STORE_BYPASS_CMD_NONE:
break;
}
@@ -481,13 +509,86 @@ static void __init ssb_select_mitigation(void)
* a completely different MSR.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+ if (mode == SPEC_STORE_BYPASS_PRCTL)
+ specctrl_dynamic_rds = true;
+ else
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
x86_spec_ctrl_mask &= ~(SPEC_CTRL_RDS);
}
}
}
+static void start_speculative_store_bypass_mitigation(void)
+{
+ if (specctrl_dynamic_rds)
+ wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_RDS | x86_spec_ctrl_base);
+}
+
+static void stop_speculative_store_bypass_mitigation(void)
+{
+ if (specctrl_dynamic_rds)
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+int arch_prctl_set_spec_ctrl(struct task_struct *p, unsigned long val)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ /*
+ * Reduced Data Speculation is set for whole platform,
+ * only valid choice is PR_SPEC_CTRL_MODE_RDS.
+ */
+ if (val == PR_SPEC_CTRL_MODE_RDS)
+ return 0;
+ else
+ return -EINVAL;
+
+ break;
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (val == PR_SPEC_CTRL_MODE_RDS) {
+ set_tsk_thread_flag(p, TIF_RDS);
+ if (p == current)
+ start_speculative_store_bypass_mitigation();
+ } else if (val == PR_SPEC_CTRL_MODE_NONE) {
+ clear_tsk_thread_flag(p, TIF_RDS);
+ if (p == current)
+ stop_speculative_store_bypass_mitigation();
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+ break;
+ case SPEC_STORE_BYPASS_NONE:
+ if (val == PR_SPEC_CTRL_MODE_NONE)
+ return 0;
+ else
+ return -EINVAL;
+ break;
+ }
+ return -EINVAL;
+}
+
+int arch_prctl_get_spec_ctrl(struct task_struct *p)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_CTRL_MODE_RDS;
+ break;
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (test_tsk_thread_flag(p, TIF_RDS))
+ return PR_SPEC_CTRL_MODE_RDS;
+ else
+ return PR_SPEC_CTRL_MODE_NONE;
+ break;
+ case SPEC_STORE_BYPASS_NONE:
+ return PR_SPEC_CTRL_MODE_NONE;
+ break;
+ }
+
+ return PR_SPEC_CTRL_MODE_NONE;
+}
+
#undef pr_fmt
#ifdef CONFIG_SYSFS
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8d6d6f4..d433c0f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -773,7 +773,7 @@ static void init_intel(struct cpuinfo_x86 *c)
init_intel_misc_features(c);
if (cpu_has(c, X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
- x86_set_spec_ctrl(SPEC_CTRL_RDS);
+ x86_set_spec_ctrl(x86_get_default_spec_ctrl());
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 03408b9..2253527 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,6 +38,7 @@
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
+#include <asm/nospec-branch.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,26 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
}
}
+static inline void speculative_bypass_update(unsigned long tifn)
+{
+ u64 msr;
+
+ if (!specctrl_dynamic_rds)
+ return;
+
+ /*
+ * Note: TIF_RDS bit position (5) is greater than
+ * SPEC_CTRL_RDS bit position (2). The shift value
+ * (TIF_RDS - SPEC_CTRL_RDS_SHIFT) used to convert from TIF_RDS
+ * bit position to SPEC_CTRL_RDS bit position
+ * below is positive and valid.
+ */
+ msr = x86_get_default_spec_ctrl() |
+ ((tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT));
+
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
{
@@ -309,6 +330,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+ if ((tifp ^ tifn) & _TIF_RDS)
+ speculative_bypass_update(tifn);
}
/*
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index af5f8c2..1ab361d 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -207,4 +207,17 @@ struct prctl_mm_map {
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+/*
+ * Speculation Control Mode knobs
+ *
+ * Set this on a task to control speculation mode.
+ * Initial use is to mitigate against speculative store bypass attack using
+ * reduced data speculation mode.
+ */
+#define PR_SET_SPEC_CTRL_MODE 52 /* set speculation control mode of process */
+#define PR_SPEC_CTRL_MODE_NONE 0 /* no speculation control mode in use */
+#define PR_SPEC_CTRL_MODE_RDS (1 << 0)/* reduced data speculation mode */
+
+#define PR_GET_SPEC_CTRL_MODE 53 /* get speculation control mode of process */
+
#endif /* _LINUX_PRCTL_H */
diff --git a/kernel/sys.c b/kernel/sys.c
index ad69218..6c8366b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2242,6 +2242,16 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
return 1;
}
+int __weak arch_prctl_set_spec_ctrl(struct task_struct *p, unsigned long arg)
+{
+ return 0;
+}
+
+int __weak arch_prctl_get_spec_ctrl(struct task_struct *p)
+{
+ return 0;
+}
+
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
@@ -2450,6 +2460,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SVE_GET_VL:
error = SVE_GET_VL();
break;
+ case PR_SET_SPEC_CTRL_MODE:
+ error = arch_prctl_set_spec_ctrl(me, arg2);
+ break;
+ case PR_GET_SPEC_CTRL_MODE:
+ error = arch_prctl_get_spec_ctrl(me);
+ break;
default:
error = -EINVAL;
break;
--
2.9.4
^ permalink raw reply related [flat|nested] 2+ messages in thread
end of thread, other threads:[~2018-04-28 16:07 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2018-04-28 15:21 [MODERATED] [Patch v4] Linux Patch 1/1 Tim Chen
-- strict thread matches above, loose matches on Subject: below --
2018-04-28 4:14 [MODERATED] [PATCH v3] " Tim Chen
2018-04-28 16:07 ` [MODERATED] [PATCH v4] " Tim Chen
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.