From: David Kaplan <david.kaplan@amd.com>
To: Thomas Gleixner <tglx@linutronix.de>,
Borislav Petkov <bp@alien8.de>,
Peter Zijlstra <peterz@infradead.org>,
Josh Poimboeuf <jpoimboe@kernel.org>,
Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
Ingo Molnar <mingo@redhat.com>,
Dave Hansen <dave.hansen@linux.intel.com>, <x86@kernel.org>,
"H . Peter Anvin" <hpa@zytor.com>
Cc: <linux-kernel@vger.kernel.org>
Subject: [PATCH v2 18/35] x86/bugs: Restructure srso mitigation
Date: Tue, 5 Nov 2024 15:54:38 -0600 [thread overview]
Message-ID: <20241105215455.359471-19-david.kaplan@amd.com> (raw)
In-Reply-To: <20241105215455.359471-1-david.kaplan@amd.com>
Restructure the SRSO mitigation to use select/update/apply functions to
create consistent vulnerability handling. As with retbleed, the command
line options directly select mitigations, which can later be modified.
Signed-off-by: David Kaplan <david.kaplan@amd.com>
---
arch/x86/kernel/cpu/bugs.c | 182 ++++++++++++++++++-------------------
1 file changed, 89 insertions(+), 93 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 98ef1cbc9e2a..178415d8026a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -84,6 +84,8 @@ static void __init srbds_select_mitigation(void);
static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
+static void __init srso_update_mitigation(void);
+static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init gds_apply_mitigation(void);
static void __init bhi_select_mitigation(void);
@@ -200,11 +202,6 @@ void __init cpu_select_mitigations(void)
rfds_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
-
- /*
- * srso_select_mitigation() depends and must run after
- * retbleed_select_mitigation().
- */
srso_select_mitigation();
gds_select_mitigation();
bhi_select_mitigation();
@@ -220,6 +217,7 @@ void __init cpu_select_mitigations(void)
taa_update_mitigation();
mmio_update_mitigation();
rfds_update_mitigation();
+ srso_update_mitigation();
spectre_v1_apply_mitigation();
spectre_v2_apply_mitigation();
@@ -232,6 +230,7 @@ void __init cpu_select_mitigations(void)
mmio_apply_mitigation();
rfds_apply_mitigation();
srbds_apply_mitigation();
+ srso_apply_mitigation();
gds_apply_mitigation();
bhi_apply_mitigation();
}
@@ -2671,6 +2670,7 @@ early_param("l1tf", l1tf_cmdline);
enum srso_mitigation {
SRSO_MITIGATION_NONE,
+ SRSO_MITIGATION_AUTO,
SRSO_MITIGATION_UCODE_NEEDED,
SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
SRSO_MITIGATION_MICROCODE,
@@ -2679,14 +2679,6 @@ enum srso_mitigation {
SRSO_MITIGATION_IBPB_ON_VMEXIT,
};
-enum srso_mitigation_cmd {
- SRSO_CMD_OFF,
- SRSO_CMD_MICROCODE,
- SRSO_CMD_SAFE_RET,
- SRSO_CMD_IBPB,
- SRSO_CMD_IBPB_ON_VMEXIT,
-};
-
static const char * const srso_strings[] = {
[SRSO_MITIGATION_NONE] = "Vulnerable",
[SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
@@ -2697,8 +2689,7 @@ static const char * const srso_strings[] = {
[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
};
-static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
-static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
static int __init srso_parse_cmdline(char *str)
{
@@ -2706,15 +2697,15 @@ static int __init srso_parse_cmdline(char *str)
return -EINVAL;
if (!strcmp(str, "off"))
- srso_cmd = SRSO_CMD_OFF;
+ srso_mitigation = SRSO_MITIGATION_NONE;
else if (!strcmp(str, "microcode"))
- srso_cmd = SRSO_CMD_MICROCODE;
+ srso_mitigation = SRSO_MITIGATION_MICROCODE;
else if (!strcmp(str, "safe-ret"))
- srso_cmd = SRSO_CMD_SAFE_RET;
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
else if (!strcmp(str, "ibpb"))
- srso_cmd = SRSO_CMD_IBPB;
+ srso_mitigation = SRSO_MITIGATION_IBPB;
else if (!strcmp(str, "ibpb-vmexit"))
- srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
else
pr_err("Ignoring unknown SRSO option (%s).", str);
@@ -2728,13 +2719,15 @@ static void __init srso_select_mitigation(void)
{
bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
- if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
- cpu_mitigations_off() ||
- srso_cmd == SRSO_CMD_OFF) {
- if (boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
+ if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ srso_mitigation = SRSO_MITIGATION_NONE;
+
+ if (srso_mitigation == SRSO_MITIGATION_NONE)
return;
- }
+
+ /* Default mitigation */
+ if (srso_mitigation == SRSO_MITIGATION_AUTO)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
if (has_microcode) {
/*
@@ -2747,94 +2740,97 @@ static void __init srso_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
return;
}
-
- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- srso_mitigation = SRSO_MITIGATION_IBPB;
- goto out;
- }
} else {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
- /* may be overwritten by SRSO_CMD_SAFE_RET below */
- srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
+ /* Fall-back to Safe-RET */
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
}
- switch (srso_cmd) {
- case SRSO_CMD_MICROCODE:
- if (has_microcode) {
- srso_mitigation = SRSO_MITIGATION_MICROCODE;
- pr_warn(SRSO_NOTICE);
- }
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_MICROCODE:
+ pr_warn(SRSO_NOTICE);
break;
- case SRSO_CMD_SAFE_RET:
- if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
- /*
- * Enable the return thunk for generated code
- * like ftrace, static_call, etc.
- */
- setup_force_cpu_cap(X86_FEATURE_RETHUNK);
- setup_force_cpu_cap(X86_FEATURE_UNRET);
-
- if (boot_cpu_data.x86 == 0x19) {
- setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
- x86_return_thunk = srso_alias_return_thunk;
- } else {
- setup_force_cpu_cap(X86_FEATURE_SRSO);
- x86_return_thunk = srso_return_thunk;
- }
- if (has_microcode)
- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
- else
- srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
- } else {
+ case SRSO_MITIGATION_SAFE_RET:
+ case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+ if (!IS_ENABLED(CONFIG_MITIGATION_SRSO))
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
- }
break;
- case SRSO_CMD_IBPB:
- if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- if (has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
- srso_mitigation = SRSO_MITIGATION_IBPB;
-
- /*
- * IBPB on entry already obviates the need for
- * software-based untraining so clear those in case some
- * other mitigation like Retbleed has selected them.
- */
- setup_clear_cpu_cap(X86_FEATURE_UNRET);
- setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
- }
- } else {
+ case SRSO_MITIGATION_IBPB:
+ if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY))
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
- }
break;
- case SRSO_CMD_IBPB_ON_VMEXIT:
- if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
- if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
- srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+ if (!IS_ENABLED(CONFIG_MITIGATION_SRSO))
+ pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void __init srso_update_mitigation(void)
+{
+ /* If retbleed is using IBPB, that works for SRSO as well */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB)
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+
+ if (srso_mitigation != SRSO_MITIGATION_NONE)
+ pr_info("%s\n", srso_strings[srso_mitigation]);
+}
- /*
- * There is no need for RSB filling: entry_ibpb() ensures
- * all predictions, including the RSB, are invalidated,
- * regardless of IBPB implementation.
- */
- setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- }
+static void __init srso_apply_mitigation(void)
+{
+ if (srso_mitigation == SRSO_MITIGATION_NONE) {
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ return;
+ }
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_SAFE_RET:
+ case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+ /*
+ * Enable the return thunk for generated code
+ * like ftrace, static_call, etc.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+ x86_return_thunk = srso_alias_return_thunk;
} else {
- pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
- }
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ x86_return_thunk = srso_return_thunk;
+ }
+ break;
+ case SRSO_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ /*
+ * IBPB on entry already obviates the need for
+ * software-based untraining so clear those in case some
+ * other mitigation like Retbleed has selected them.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ break;
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ /*
+ * There is no need for RSB filling: entry_ibpb() ensures
+ * all predictions, including the RSB, are invalidated,
+ * regardless of IBPB implementation.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
break;
default:
break;
}
-out:
- pr_info("%s\n", srso_strings[srso_mitigation]);
}
#undef pr_fmt
--
2.34.1
next prev parent reply other threads:[~2024-11-05 21:55 UTC|newest]
Thread overview: 78+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-11-05 21:54 [PATCH v2 00/35] x86/bugs: Attack vector controls David Kaplan
2024-11-05 21:54 ` [PATCH v2 01/35] x86/bugs: Add X86_BUG_SPECTRE_V2_USER David Kaplan
2024-11-05 21:54 ` [PATCH v2 02/35] x86/bugs: Relocate mds/taa/mmio/rfds defines David Kaplan
2024-11-05 21:54 ` [PATCH v2 03/35] x86/bugs: Add AUTO mitigations for mds/taa/mmio/rfds David Kaplan
2024-11-14 2:26 ` Pawan Gupta
2024-11-14 14:59 ` Kaplan, David
2024-11-14 17:14 ` Pawan Gupta
2024-11-14 17:17 ` Kaplan, David
2024-11-05 21:54 ` [PATCH v2 04/35] x86/bugs: Restructure mds mitigation David Kaplan
2024-11-14 3:03 ` Pawan Gupta
2024-11-14 15:01 ` Kaplan, David
2024-12-10 15:24 ` Borislav Petkov
2024-11-05 21:54 ` [PATCH v2 05/35] x86/bugs: Restructure taa mitigation David Kaplan
2024-11-14 4:43 ` Pawan Gupta
2024-11-14 15:08 ` Kaplan, David
2024-11-05 21:54 ` [PATCH v2 06/35] x86/bugs: Restructure mmio mitigation David Kaplan
2024-11-14 5:03 ` Pawan Gupta
2024-11-05 21:54 ` [PATCH v2 07/35] x86/bugs: Restructure rfds mitigation David Kaplan
2024-11-14 5:55 ` Pawan Gupta
2024-11-05 21:54 ` [PATCH v2 08/35] x86/bugs: Remove md_clear_*_mitigation() David Kaplan
2024-11-05 21:54 ` [PATCH v2 09/35] x86/bugs: Restructure srbds mitigation David Kaplan
2024-11-05 21:54 ` [PATCH v2 10/35] x86/bugs: Restructure gds mitigation David Kaplan
2024-11-14 6:21 ` Pawan Gupta
2024-11-05 21:54 ` [PATCH v2 11/35] x86/bugs: Restructure spectre_v1 mitigation David Kaplan
2024-11-14 6:57 ` Pawan Gupta
2024-11-14 15:36 ` Kaplan, David
2024-11-14 15:49 ` Kaplan, David
2024-11-14 16:19 ` Borislav Petkov
2024-11-14 16:45 ` Kaplan, David
2024-11-14 23:33 ` Josh Poimboeuf
2024-12-12 10:41 ` Borislav Petkov
2024-11-14 17:41 ` Pawan Gupta
2024-11-14 17:48 ` Kaplan, David
2024-11-05 21:54 ` [PATCH v2 12/35] x86/bugs: Restructure retbleed mitigation David Kaplan
2024-11-05 21:54 ` [PATCH v2 13/35] x86/bugs: Restructure spectre_v2_user mitigation David Kaplan
2024-11-06 18:56 ` kernel test robot
2024-11-05 21:54 ` [PATCH v2 14/35] x86/bugs: Restructure bhi mitigation David Kaplan
2024-11-05 21:54 ` [PATCH v2 15/35] x86/bugs: Restructure spectre_v2 mitigation David Kaplan
2024-11-05 21:54 ` [PATCH v2 16/35] x86/bugs: Restructure ssb mitigation David Kaplan
2024-11-05 21:54 ` [PATCH v2 17/35] x86/bugs: Restructure l1tf mitigation David Kaplan
2024-11-05 21:54 ` David Kaplan [this message]
2025-01-02 14:55 ` [PATCH v2 18/35] x86/bugs: Restructure srso mitigation Borislav Petkov
2024-11-05 21:54 ` [PATCH v2 19/35] Documentation/x86: Document the new attack vector controls David Kaplan
2024-11-06 10:39 ` Borislav Petkov
2024-11-06 14:49 ` Kaplan, David
2024-11-13 3:58 ` Manwaring, Derek
2024-11-13 14:15 ` Brendan Jackman
2024-11-13 15:05 ` Kaplan, David
2024-11-13 15:31 ` Brendan Jackman
2024-11-13 16:00 ` Kaplan, David
2024-11-13 16:19 ` Brendan Jackman
2024-11-14 9:32 ` Brendan Jackman
2024-11-22 16:15 ` Manwaring, Derek
2024-11-22 16:36 ` Brendan Jackman
2024-11-22 17:23 ` Kaplan, David
2024-11-20 0:14 ` Manwaring, Derek
2024-11-13 14:49 ` Kaplan, David
2024-11-13 14:15 ` Brendan Jackman
2024-11-13 15:42 ` Kaplan, David
2024-11-05 21:54 ` [PATCH v2 20/35] x86/bugs: Define attack vectors David Kaplan
2025-01-03 15:19 ` Borislav Petkov
2025-01-03 15:29 ` Kaplan, David
2025-01-03 15:51 ` Borislav Petkov
2024-11-05 21:54 ` [PATCH v2 21/35] x86/bugs: Determine relevant vulnerabilities based on attack vector controls David Kaplan
2024-11-05 21:54 ` [PATCH v2 22/35] x86/bugs: Add attack vector controls for mds David Kaplan
2024-11-05 21:54 ` [PATCH v2 23/35] x86/bugs: Add attack vector controls for taa David Kaplan
2024-11-05 21:54 ` [PATCH v2 24/35] x86/bugs: Add attack vector controls for mmio David Kaplan
2024-11-05 21:54 ` [PATCH v2 25/35] x86/bugs: Add attack vector controls for rfds David Kaplan
2024-11-05 21:54 ` [PATCH v2 26/35] x86/bugs: Add attack vector controls for srbds David Kaplan
2024-11-05 21:54 ` [PATCH v2 27/35] x86/bugs: Add attack vector controls for gds David Kaplan
2024-11-05 21:54 ` [PATCH v2 28/35] x86/bugs: Add attack vector controls for spectre_v1 David Kaplan
2024-11-05 21:54 ` [PATCH v2 29/35] x86/bugs: Add attack vector controls for retbleed David Kaplan
2024-11-05 21:54 ` [PATCH v2 30/35] x86/bugs: Add attack vector controls for spectre_v2_user David Kaplan
2024-11-05 21:54 ` [PATCH v2 31/35] x86/bugs: Add attack vector controls for bhi David Kaplan
2024-11-05 21:54 ` [PATCH v2 32/35] x86/bugs: Add attack vector controls for spectre_v2 David Kaplan
2024-11-05 21:54 ` [PATCH v2 33/35] x86/bugs: Add attack vector controls for l1tf David Kaplan
2024-11-05 21:54 ` [PATCH v2 34/35] x86/bugs: Add attack vector controls for srso David Kaplan
2024-11-05 21:54 ` [PATCH v2 35/35] x86/pti: Add attack vector controls for pti David Kaplan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241105215455.359471-19-david.kaplan@amd.com \
--to=david.kaplan@amd.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=hpa@zytor.com \
--cc=jpoimboe@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pawan.kumar.gupta@linux.intel.com \
--cc=peterz@infradead.org \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox