public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: David Kaplan <david.kaplan@amd.com>
To: Thomas Gleixner <tglx@linutronix.de>,
	Borislav Petkov <bp@alien8.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Josh Poimboeuf <jpoimboe@kernel.org>,
	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
	Ingo Molnar <mingo@redhat.com>,
	Dave Hansen <dave.hansen@linux.intel.com>, <x86@kernel.org>,
	"H . Peter Anvin" <hpa@zytor.com>
Cc: <linux-kernel@vger.kernel.org>
Subject: [PATCH v3 18/35] x86/bugs: Restructure srso mitigation
Date: Wed, 8 Jan 2025 14:24:58 -0600	[thread overview]
Message-ID: <20250108202515.385902-19-david.kaplan@amd.com> (raw)
In-Reply-To: <20250108202515.385902-1-david.kaplan@amd.com>

Restructure the srso mitigation to use select/update/apply functions for
consistent vulnerability handling.  As with retbleed, the command line
options directly select mitigations, which may later be modified.

Signed-off-by: David Kaplan <david.kaplan@amd.com>
---
 arch/x86/kernel/cpu/bugs.c | 188 ++++++++++++++++++-------------------
 1 file changed, 90 insertions(+), 98 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 08ac515df888..aee2945bdef9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -84,6 +84,8 @@ static void __init srbds_select_mitigation(void);
 static void __init srbds_apply_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
 static void __init srso_select_mitigation(void);
+static void __init srso_update_mitigation(void);
+static void __init srso_apply_mitigation(void);
 static void __init gds_select_mitigation(void);
 static void __init gds_apply_mitigation(void);
 static void __init bhi_select_mitigation(void);
@@ -200,11 +202,6 @@ void __init cpu_select_mitigations(void)
 	rfds_select_mitigation();
 	srbds_select_mitigation();
 	l1d_flush_select_mitigation();
-
-	/*
-	 * srso_select_mitigation() depends and must run after
-	 * retbleed_select_mitigation().
-	 */
 	srso_select_mitigation();
 	gds_select_mitigation();
 	bhi_select_mitigation();
@@ -220,6 +217,7 @@ void __init cpu_select_mitigations(void)
 	taa_update_mitigation();
 	mmio_update_mitigation();
 	rfds_update_mitigation();
+	srso_update_mitigation();
 
 	spectre_v1_apply_mitigation();
 	spectre_v2_apply_mitigation();
@@ -232,6 +230,7 @@ void __init cpu_select_mitigations(void)
 	mmio_apply_mitigation();
 	rfds_apply_mitigation();
 	srbds_apply_mitigation();
+	srso_apply_mitigation();
 	gds_apply_mitigation();
 	bhi_apply_mitigation();
 }
@@ -2673,6 +2672,7 @@ early_param("l1tf", l1tf_cmdline);
 
 enum srso_mitigation {
 	SRSO_MITIGATION_NONE,
+	SRSO_MITIGATION_AUTO,
 	SRSO_MITIGATION_UCODE_NEEDED,
 	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
 	SRSO_MITIGATION_MICROCODE,
@@ -2681,14 +2681,6 @@ enum srso_mitigation {
 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
 };
 
-enum srso_mitigation_cmd {
-	SRSO_CMD_OFF,
-	SRSO_CMD_MICROCODE,
-	SRSO_CMD_SAFE_RET,
-	SRSO_CMD_IBPB,
-	SRSO_CMD_IBPB_ON_VMEXIT,
-};
-
 static const char * const srso_strings[] = {
 	[SRSO_MITIGATION_NONE]			= "Vulnerable",
 	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
@@ -2699,8 +2691,7 @@ static const char * const srso_strings[] = {
 	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
 };
 
-static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
-static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
 
 static int __init srso_parse_cmdline(char *str)
 {
@@ -2708,15 +2699,15 @@ static int __init srso_parse_cmdline(char *str)
 		return -EINVAL;
 
 	if (!strcmp(str, "off"))
-		srso_cmd = SRSO_CMD_OFF;
+		srso_mitigation = SRSO_MITIGATION_NONE;
 	else if (!strcmp(str, "microcode"))
-		srso_cmd = SRSO_CMD_MICROCODE;
+		srso_mitigation = SRSO_MITIGATION_MICROCODE;
 	else if (!strcmp(str, "safe-ret"))
-		srso_cmd = SRSO_CMD_SAFE_RET;
+		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 	else if (!strcmp(str, "ibpb"))
-		srso_cmd = SRSO_CMD_IBPB;
+		srso_mitigation = SRSO_MITIGATION_IBPB;
 	else if (!strcmp(str, "ibpb-vmexit"))
-		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
+		srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
 	else
 		pr_err("Ignoring unknown SRSO option (%s).", str);
 
@@ -2730,13 +2721,14 @@ static void __init srso_select_mitigation(void)
 {
 	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
 
-	if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
-	    cpu_mitigations_off() ||
-	    srso_cmd == SRSO_CMD_OFF) {
-		if (boot_cpu_has(X86_FEATURE_SBPB))
-			x86_pred_cmd = PRED_CMD_SBPB;
+	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+		srso_mitigation = SRSO_MITIGATION_NONE;
+
+	if (srso_mitigation == SRSO_MITIGATION_NONE)
 		return;
-	}
+
+	if (srso_mitigation == SRSO_MITIGATION_AUTO)
+		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 
 	if (has_microcode) {
 		/*
@@ -2749,98 +2741,98 @@ static void __init srso_select_mitigation(void)
 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
 			return;
 		}
-
-		if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-			srso_mitigation = SRSO_MITIGATION_IBPB;
-			goto out;
-		}
 	} else {
 		pr_warn("IBPB-extending microcode not applied!\n");
 		pr_warn(SRSO_NOTICE);
 
-		/* may be overwritten by SRSO_CMD_SAFE_RET below */
-		srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
+		/* Fall-back to Safe-RET */
+		srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
 	}
 
-	switch (srso_cmd) {
-	case SRSO_CMD_MICROCODE:
-		if (has_microcode) {
-			srso_mitigation = SRSO_MITIGATION_MICROCODE;
-			pr_warn(SRSO_NOTICE);
-		}
+	switch (srso_mitigation) {
+	case SRSO_MITIGATION_MICROCODE:
 		break;
 
-	case SRSO_CMD_SAFE_RET:
-		if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
-			goto ibpb_on_vmexit;
-
-		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
-			/*
-			 * Enable the return thunk for generated code
-			 * like ftrace, static_call, etc.
-			 */
-			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
-			setup_force_cpu_cap(X86_FEATURE_UNRET);
-
-			if (boot_cpu_data.x86 == 0x19) {
-				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-				x86_return_thunk = srso_alias_return_thunk;
-			} else {
-				setup_force_cpu_cap(X86_FEATURE_SRSO);
-				x86_return_thunk = srso_return_thunk;
-			}
-			if (has_microcode)
-				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
-			else
-				srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
-		} else {
+	case SRSO_MITIGATION_SAFE_RET:
+	case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+		if (!IS_ENABLED(CONFIG_MITIGATION_SRSO))
 			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
-		}
+		else if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
+			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
 		break;
 
-	case SRSO_CMD_IBPB:
-		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
-			if (has_microcode) {
-				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
-				srso_mitigation = SRSO_MITIGATION_IBPB;
-
-				/*
-				 * IBPB on entry already obviates the need for
-				 * software-based untraining so clear those in case some
-				 * other mitigation like Retbleed has selected them.
-				 */
-				setup_clear_cpu_cap(X86_FEATURE_UNRET);
-				setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
-			}
-		} else {
+	case SRSO_MITIGATION_IBPB:
+		if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY))
 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
-		}
 		break;
 
-ibpb_on_vmexit:
-	case SRSO_CMD_IBPB_ON_VMEXIT:
-		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
-			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
-				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
-				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
-
-				/*
-				 * There is no need for RSB filling: entry_ibpb() ensures
-				 * all predictions, including the RSB, are invalidated,
-				 * regardless of IBPB implementation.
-				 */
-				setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
-			}
-		} else {
+	case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+		if (!IS_ENABLED(CONFIG_MITIGATION_SRSO))
 			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
-                }
+		break;
+	default:
+		break;
+	}
+}
+
+static void __init srso_update_mitigation(void)
+{
+	/* If retbleed is using IBPB, that works for SRSO as well */
+	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB)
+		srso_mitigation = SRSO_MITIGATION_IBPB;
+
+	if (srso_mitigation != SRSO_MITIGATION_NONE)
+		pr_info("%s\n", srso_strings[srso_mitigation]);
+}
+
+static void __init srso_apply_mitigation(void)
+{
+	if (srso_mitigation == SRSO_MITIGATION_NONE) {
+		if (boot_cpu_has(X86_FEATURE_SBPB))
+			x86_pred_cmd = PRED_CMD_SBPB;
+		return;
+	}
+	switch (srso_mitigation) {
+	case SRSO_MITIGATION_SAFE_RET:
+	case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+		/*
+		 * Enable the return thunk for generated code
+		 * like ftrace, static_call, etc.
+		 */
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+		if (boot_cpu_data.x86 == 0x19) {
+			setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+			x86_return_thunk = srso_alias_return_thunk;
+		} else {
+			setup_force_cpu_cap(X86_FEATURE_SRSO);
+			x86_return_thunk = srso_return_thunk;
+		}
+		break;
+	case SRSO_MITIGATION_IBPB:
+		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+		/*
+		 * IBPB on entry already obviates the need for
+		 * software-based untraining so clear those in case some
+		 * other mitigation like Retbleed has selected them.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+		break;
+	case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+		/*
+		 * There is no need for RSB filling: entry_ibpb() ensures
+		 * all predictions, including the RSB, are invalidated,
+		 * regardless of IBPB implementation.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
 		break;
 	default:
 		break;
 	}
 
-out:
-	pr_info("%s\n", srso_strings[srso_mitigation]);
 }
 
 #undef pr_fmt
-- 
2.34.1


  parent reply	other threads:[~2025-01-08 20:25 UTC|newest]

Thread overview: 138+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-01-08 20:24 [PATCH v3 00/35] x86/bugs: Attack vector controls David Kaplan
2025-01-08 20:24 ` [PATCH v3 01/35] x86/bugs: Add X86_BUG_SPECTRE_V2_USER David Kaplan
2025-02-28 11:53   ` [tip: x86/bugs] " tip-bot2 for David Kaplan
2025-01-08 20:24 ` [PATCH v3 02/35] x86/bugs: Relocate mds/taa/mmio/rfds defines David Kaplan
2025-02-28 11:53   ` [tip: x86/bugs] " tip-bot2 for David Kaplan
2025-01-08 20:24 ` [PATCH v3 03/35] x86/bugs: Add AUTO mitigations for mds/taa/mmio/rfds David Kaplan
2025-02-28 11:53   ` [tip: x86/bugs] " tip-bot2 for David Kaplan
2025-01-08 20:24 ` [PATCH v3 04/35] x86/bugs: Restructure mds mitigation David Kaplan
2025-02-10 16:13   ` Brendan Jackman
2025-02-10 17:17     ` Kaplan, David
2025-02-10 17:28       ` Brendan Jackman
2025-02-10 22:25   ` Josh Poimboeuf
2025-02-10 22:33     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 05/35] x86/bugs: Restructure taa mitigation David Kaplan
2025-02-10 16:24   ` Brendan Jackman
2025-02-10 17:19     ` Kaplan, David
2025-02-10 22:50   ` Josh Poimboeuf
2025-02-11 17:17     ` Kaplan, David
2025-02-11 19:17       ` Josh Poimboeuf
2025-01-08 20:24 ` [PATCH v3 06/35] x86/bugs: Restructure mmio mitigation David Kaplan
2025-02-10 16:42   ` Brendan Jackman
2025-02-10 17:22     ` Kaplan, David
2025-02-10 17:35       ` Brendan Jackman
2025-02-10 23:29   ` Josh Poimboeuf
2025-02-11 20:35     ` Kaplan, David
2025-02-11 23:18       ` Josh Poimboeuf
2025-02-12 17:28         ` Kaplan, David
2025-02-12 23:16           ` Josh Poimboeuf
2025-02-19 18:20             ` Borislav Petkov
2025-02-21 21:48               ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 07/35] x86/bugs: Restructure rfds mitigation David Kaplan
2025-02-10 23:36   ` Josh Poimboeuf
2025-02-11 22:49     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 08/35] x86/bugs: Remove md_clear_*_mitigation() David Kaplan
2025-01-08 20:24 ` [PATCH v3 09/35] x86/bugs: Restructure srbds mitigation David Kaplan
2025-02-10 23:44   ` Josh Poimboeuf
2025-02-11 22:59     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 10/35] x86/bugs: Restructure gds mitigation David Kaplan
2025-02-10 17:06   ` Brendan Jackman
2025-02-10 17:27     ` Kaplan, David
2025-02-10 17:40       ` Brendan Jackman
2025-02-10 23:52   ` Josh Poimboeuf
2025-02-12 15:36     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 11/35] x86/bugs: Restructure spectre_v1 mitigation David Kaplan
2025-01-08 20:24 ` [PATCH v3 12/35] x86/bugs: Restructure retbleed mitigation David Kaplan
2025-01-09  5:22   ` Pawan Gupta
2025-01-09 15:26     ` Kaplan, David
2025-01-09 16:40       ` Pawan Gupta
2025-01-09 16:42         ` Kaplan, David
2025-01-10 18:45     ` David Laight
2025-01-10 20:30       ` Pawan Gupta
2025-01-10 20:35         ` Borislav Petkov
2025-02-10 18:35   ` Brendan Jackman
2025-02-10 20:50     ` Kaplan, David
2025-02-11  0:10   ` Josh Poimboeuf
2025-02-24 15:45   ` Borislav Petkov
2025-02-24 15:59     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 13/35] x86/bugs: Restructure spectre_v2_user mitigation David Kaplan
2025-02-11  0:53   ` Josh Poimboeuf
2025-02-12 15:59     ` Kaplan, David
2025-02-12 21:35       ` Josh Poimboeuf
2025-01-08 20:24 ` [PATCH v3 14/35] x86/bugs: Restructure bhi mitigation David Kaplan
2025-01-08 20:24 ` [PATCH v3 15/35] x86/bugs: Restructure spectre_v2 mitigation David Kaplan
2025-02-11  1:07   ` Josh Poimboeuf
2025-02-12 16:40     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 16/35] x86/bugs: Restructure ssb mitigation David Kaplan
2025-02-11  1:10   ` Josh Poimboeuf
2025-02-12 16:45     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 17/35] x86/bugs: Restructure l1tf mitigation David Kaplan
2025-02-11  1:21   ` Josh Poimboeuf
2025-02-12 16:47     ` Kaplan, David
2025-01-08 20:24 ` David Kaplan [this message]
2025-02-11 16:39   ` [PATCH v3 18/35] x86/bugs: Restructure srso mitigation Josh Poimboeuf
2025-02-12 17:01     ` Kaplan, David
2025-01-08 20:24 ` [PATCH v3 19/35] Documentation/x86: Document the new attack vector controls David Kaplan
2025-02-11 16:43   ` Josh Poimboeuf
2025-02-11 16:57     ` Kaplan, David
2025-01-08 20:25 ` [PATCH v3 20/35] x86/bugs: Define attack vectors David Kaplan
2025-02-11 18:07   ` Josh Poimboeuf
2025-02-12 17:20     ` Kaplan, David
2025-02-17 17:33       ` Kaplan, David
2025-02-17 20:19         ` Josh Poimboeuf
2025-02-17 20:38           ` Kaplan, David
2025-02-17 23:39             ` Josh Poimboeuf
2025-02-18  2:24               ` Kaplan, David
2025-02-18  7:05                 ` Josh Poimboeuf
2025-02-18  8:52                   ` Borislav Petkov
2025-02-20 22:04                     ` Josh Poimboeuf
2025-02-26 18:57                       ` Kaplan, David
2025-02-26 20:14                         ` Pawan Gupta
2025-02-26 21:01                           ` Borislav Petkov
2025-02-26 21:51                             ` Pawan Gupta
2025-02-27 13:39                               ` Borislav Petkov
2025-02-26 21:03                           ` Kaplan, David
2025-02-26 22:13                             ` Pawan Gupta
2025-02-26 22:18                               ` Kaplan, David
2025-02-26 22:34                                 ` Pawan Gupta
2025-02-26 23:44                               ` Josh Poimboeuf
2025-02-27  0:35                                 ` Pawan Gupta
2025-02-27  1:23                                   ` Josh Poimboeuf
2025-02-27  3:50                                     ` Pawan Gupta
2025-02-27 14:08                                       ` Borislav Petkov
2025-02-27 14:36                                         ` Kaplan, David
2025-02-27 15:01                                           ` Borislav Petkov
2025-02-27 15:22                                             ` Kaplan, David
2025-02-27 15:37                                               ` Borislav Petkov
2025-02-27 16:05                                                 ` Kaplan, David
2025-02-27 17:07                                                   ` Borislav Petkov
2025-01-08 20:25 ` [PATCH v3 21/35] x86/bugs: Determine relevant vulnerabilities based on attack vector controls David Kaplan
2025-01-09  3:43   ` Pawan Gupta
2025-01-09 15:08     ` Kaplan, David
2025-02-11 18:41   ` Josh Poimboeuf
2025-02-11 18:54     ` Josh Poimboeuf
2025-02-11 19:04       ` Kaplan, David
2025-02-11 20:34         ` Josh Poimboeuf
2025-02-11 20:53           ` Kaplan, David
2025-02-11 22:38             ` Josh Poimboeuf
2025-02-11 18:55     ` Kaplan, David
2025-01-08 20:25 ` [PATCH v3 22/35] x86/bugs: Add attack vector controls for mds David Kaplan
2025-01-08 20:25 ` [PATCH v3 23/35] x86/bugs: Add attack vector controls for taa David Kaplan
2025-02-11 19:01   ` Josh Poimboeuf
2025-01-08 20:25 ` [PATCH v3 24/35] x86/bugs: Add attack vector controls for mmio David Kaplan
2025-01-08 20:25 ` [PATCH v3 25/35] x86/bugs: Add attack vector controls for rfds David Kaplan
2025-01-08 20:25 ` [PATCH v3 26/35] x86/bugs: Add attack vector controls for srbds David Kaplan
2025-01-08 20:25 ` [PATCH v3 27/35] x86/bugs: Add attack vector controls for gds David Kaplan
2025-01-08 20:25 ` [PATCH v3 28/35] x86/bugs: Add attack vector controls for spectre_v1 David Kaplan
2025-01-08 20:25 ` [PATCH v3 29/35] x86/bugs: Add attack vector controls for retbleed David Kaplan
2025-01-08 20:25 ` [PATCH v3 30/35] x86/bugs: Add attack vector controls for spectre_v2_user David Kaplan
2025-02-11 19:03   ` Josh Poimboeuf
2025-02-12 17:22     ` Kaplan, David
2025-01-08 20:25 ` [PATCH v3 31/35] x86/bugs: Add attack vector controls for bhi David Kaplan
2025-01-08 20:25 ` [PATCH v3 32/35] x86/bugs: Add attack vector controls for spectre_v2 David Kaplan
2025-01-08 20:25 ` [PATCH v3 33/35] x86/bugs: Add attack vector controls for l1tf David Kaplan
2025-01-08 20:25 ` [PATCH v3 34/35] x86/bugs: Add attack vector controls for srso David Kaplan
2025-01-08 20:25 ` [PATCH v3 35/35] x86/pti: Add attack vector controls for pti David Kaplan
     [not found] ` <20250110083627.xankiqhczr7ksldv@desk>
2025-01-10 15:39   ` [PATCH v3 00/35] x86/bugs: Attack vector controls Borislav Petkov
     [not found]     ` <20250110171410.ttbt7cohzdjwi4hk@desk>
2025-01-12 11:38       ` Borislav Petkov
2025-01-13 17:41         ` Pawan Gupta

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250108202515.385902-19-david.kaplan@amd.com \
    --to=david.kaplan@amd.com \
    --cc=bp@alien8.de \
    --cc=dave.hansen@linux.intel.com \
    --cc=hpa@zytor.com \
    --cc=jpoimboe@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=pawan.kumar.gupta@linux.intel.com \
    --cc=peterz@infradead.org \
    --cc=tglx@linutronix.de \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox