From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	stable@vger.kernel.org, James Morse <james.morse@arm.com>
Subject: [PATCH 4.14 23/27] KVM: arm64: Add templates for BHB mitigation sequences
Date: Fri,  1 Apr 2022 08:36:33 +0200
Message-ID: <20220401063624.888839258@linuxfoundation.org>
In-Reply-To: <20220401063624.232282121@linuxfoundation.org>

From: James Morse <james.morse@arm.com>

KVM writes the Spectre-v2 mitigation template at the beginning of each
vector when a CPU requires a specific sequence to run.

Because the template is copied, it cannot be modified by the alternatives
at runtime. As the KVM template code is intertwined with the bp-hardening
callbacks, all templates must have a bp-hardening callback.

Add templates for calling ARCH_WORKAROUND_3 and one for each value of K
in the branchy loop. Identify these sequences by a new parameter,
template_start, and add a copy of install_bp_hardening_cb() that is able
to install them.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
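The caller that chooses between these templates arrives later in this
series. The sketch below is illustrative only (the helper name and its
parameters are not part of this patch); it shows the intended use,
sitting alongside the extern template symbols in cpu_errata.c: install
either the firmware sequence or the loop template matching the number
of iterations the CPU needs.

	/*
	 * Illustrative only: pick the BHB template for this CPU and hand it
	 * to kvm_setup_bhb_slot(), which copies it into a hyp vectors slot.
	 */
	static void example_install_bhb_template(bool use_fw, unsigned int k)
	{
		if (use_fw) {
			kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
			return;
		}

		switch (k) {
		case 8:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
			break;
		case 24:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
			break;
		case 32:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
			break;
		default:
			/* no template for this loop count; nothing to install */
			break;
		}
	}
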
 arch/arm64/include/asm/cpucaps.h |    3 +
 arch/arm64/include/asm/kvm_mmu.h |    2 -
 arch/arm64/include/asm/mmu.h     |    6 +++
 arch/arm64/kernel/bpi.S          |   50 +++++++++++++++++++++++++++
 arch/arm64/kernel/cpu_errata.c   |   71 +++++++++++++++++++++++++++++++++++++--
 5 files changed, 128 insertions(+), 4 deletions(-)
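
For reference, the limit enforced by the BUG_ON() in kvm_setup_bhb_slot()
can be written as the following helper (illustrative only, not part of
the patch): each template copied by __copy_hyp_vect_bpi() occupies one
2K slot in the hyp vectors area, so the number of distinct templates that
can be installed system-wide is bounded by the size of that area.

	static inline unsigned long bp_hyp_vecs_max_slots(void)
	{
		/* one SZ_2K slot per installed template */
		return (__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) / SZ_2K;
	}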

--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -46,7 +46,8 @@
 #define ARM64_MISMATCHED_CACHE_TYPE		26
 #define ARM64_SSBS				27
 #define ARM64_WORKAROUND_1188873		28
+#define ARM64_SPECTRE_BHB			29
 
-#define ARM64_NCAPS				29
+#define ARM64_NCAPS				30
 
 #endif /* __ASM_CPUCAPS_H */
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -358,7 +358,7 @@ static inline void *kvm_get_hyp_vector(v
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
 	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
 
-	if (data->fn) {
+	if (data->template_start) {
 		vect = __bp_harden_hyp_vecs_start +
 		       data->hyp_vectors_slot * SZ_2K;
 
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -46,6 +46,12 @@ typedef void (*bp_hardening_cb_t)(void);
 struct bp_hardening_data {
 	int			hyp_vectors_slot;
 	bp_hardening_cb_t	fn;
+
+	/*
+	 * template_start is only used by the BHB mitigation to identify the
+	 * hyp_vectors_slot sequence.
+	 */
+	const char *template_start;
 };
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -66,3 +66,53 @@ ENTRY(__smccc_workaround_1_smc_start)
 	ldp	x0, x1, [sp, #(8 * 2)]
 	add	sp, sp, #(8 * 4)
 ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_3_smc_start)
+	sub     sp, sp, #(8 * 4)
+	stp     x2, x3, [sp, #(8 * 0)]
+	stp     x0, x1, [sp, #(8 * 2)]
+	mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+	smc     #0
+	ldp     x2, x3, [sp, #(8 * 0)]
+	ldp     x0, x1, [sp, #(8 * 2)]
+	add     sp, sp, #(8 * 4)
+ENTRY(__smccc_workaround_3_smc_end)
+
+ENTRY(__spectre_bhb_loop_k8_start)
+	sub     sp, sp, #(8 * 2)
+	stp     x0, x1, [sp, #(8 * 0)]
+	mov     x0, #8
+2:	b       . + 4
+	subs    x0, x0, #1
+	b.ne    2b
+	dsb     nsh
+	isb
+	ldp     x0, x1, [sp, #(8 * 0)]
+	add     sp, sp, #(8 * 2)
+ENTRY(__spectre_bhb_loop_k8_end)
+
+ENTRY(__spectre_bhb_loop_k24_start)
+	sub     sp, sp, #(8 * 2)
+	stp     x0, x1, [sp, #(8 * 0)]
+	mov     x0, #24
+2:	b       . + 4
+	subs    x0, x0, #1
+	b.ne    2b
+	dsb     nsh
+	isb
+	ldp     x0, x1, [sp, #(8 * 0)]
+	add     sp, sp, #(8 * 2)
+ENTRY(__spectre_bhb_loop_k24_end)
+
+ENTRY(__spectre_bhb_loop_k32_start)
+	sub     sp, sp, #(8 * 2)
+	stp     x0, x1, [sp, #(8 * 0)]
+	mov     x0, #32
+2:	b       . + 4
+	subs    x0, x0, #1
+	b.ne    2b
+	dsb     nsh
+	isb
+	ldp     x0, x1, [sp, #(8 * 0)]
+	add     sp, sp, #(8 * 2)
+ENTRY(__spectre_bhb_loop_k32_end)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -85,6 +85,14 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_har
 #ifdef CONFIG_KVM
 extern char __smccc_workaround_1_smc_start[];
 extern char __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_3_smc_start[];
+extern char __smccc_workaround_3_smc_end[];
+extern char __spectre_bhb_loop_k8_start[];
+extern char __spectre_bhb_loop_k8_end[];
+extern char __spectre_bhb_loop_k24_start[];
+extern char __spectre_bhb_loop_k24_end[];
+extern char __spectre_bhb_loop_k32_start[];
+extern char __spectre_bhb_loop_k32_end[];
 
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
@@ -98,12 +106,14 @@ static void __copy_hyp_vect_bpi(int slot
 	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
+static DEFINE_SPINLOCK(bp_lock);
+static int last_slot = -1;
+
 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				    const char *hyp_vecs_start,
 				    const char *hyp_vecs_end)
 {
-	static int last_slot = -1;
-	static DEFINE_SPINLOCK(bp_lock);
+
 	int cpu, slot = -1;
 
 	spin_lock(&bp_lock);
@@ -124,6 +134,7 @@ static void install_bp_hardening_cb(bp_h
 
 	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
 	__this_cpu_write(bp_hardening_data.fn, fn);
+	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
 	spin_unlock(&bp_lock);
 }
 #else
@@ -790,3 +801,59 @@ enum mitigation_state arm64_get_spectre_
 {
 	return spectre_bhb_state;
 }
+
+#ifdef CONFIG_KVM
+static const char *kvm_bhb_get_vecs_end(const char *start)
+{
+	if (start == __smccc_workaround_3_smc_start)
+		return __smccc_workaround_3_smc_end;
+	else if (start == __spectre_bhb_loop_k8_start)
+		return __spectre_bhb_loop_k8_end;
+	else if (start == __spectre_bhb_loop_k24_start)
+		return __spectre_bhb_loop_k24_end;
+	else if (start == __spectre_bhb_loop_k32_start)
+		return __spectre_bhb_loop_k32_end;
+
+	return NULL;
+}
+
+void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+{
+	int cpu, slot = -1;
+	const char *hyp_vecs_end;
+
+	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+		return;
+
+	hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
+	if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
+		return;
+
+	spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		last_slot++;
+		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+			/ SZ_2K) <= last_slot);
+		slot = last_slot;
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+	spin_unlock(&bp_lock);
+}
+#else
+#define __smccc_workaround_3_smc_start NULL
+#define __spectre_bhb_loop_k8_start NULL
+#define __spectre_bhb_loop_k24_start NULL
+#define __spectre_bhb_loop_k32_start NULL
+
+void kvm_setup_bhb_slot(const char *hyp_vecs_start) { };
+#endif


