From: alex.shi@linaro.org (Alex Shi)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH 13/39] arm64: Add skeleton to harden the branch predictor against aliasing attacks
Date: Fri, 9 Mar 2018 17:06:56 +0800
Message-ID: <20180309090722.26279-14-alex.shi@linaro.org>
In-Reply-To: <20180309090722.26279-1-alex.shi@linaro.org>
From: Will Deacon <will.deacon@arm.com>
commit 0f15adbb2861 upstream.
Aliasing attacks against CPU branch predictors can allow an attacker to
redirect speculative control flow on some CPUs and potentially divulge
information from one context to another.
This patch adds initial skeleton code behind a new Kconfig option to
enable implementation-specific mitigations against these attacks for
CPUs that are affected.
Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Alex Shi <alex.shi@linaro.org>
---
arch/arm64/Kconfig | 17 +++++++
arch/arm64/include/asm/cpufeature.h | 3 +-
arch/arm64/include/asm/mmu.h | 38 +++++++++++++++
arch/arm64/include/asm/sysreg.h | 1 +
arch/arm64/kernel/Makefile | 4 ++
arch/arm64/kernel/bpi.S | 55 ++++++++++++++++++++++
arch/arm64/kernel/cpu_errata.c | 71 +++++++++++++++++++++++++++++
arch/arm64/kernel/cpufeature.c | 1 +
arch/arm64/kernel/entry.S | 8 ++--
arch/arm64/mm/context.c | 2 +
arch/arm64/mm/fault.c | 17 +++++++
include/linux/mm_types.h | 1 +
12 files changed, 214 insertions(+), 4 deletions(-)
create mode 100644 arch/arm64/kernel/bpi.S
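
[Reviewer note, not part of the upstream commit: the sketch below shows how a later
patch in this series is expected to plug an implementation-specific mitigation into
this skeleton. The names my_bp_invalidate(), __my_vecs_start/__my_vecs_end and
MIDR_MY_CPU are made-up placeholders, and the ->enable prototype is assumed from how
this 4.4 tree uses the capability enable hook elsewhere; only install_bp_hardening_cb()
and ARM64_HARDEN_BRANCH_PREDICTOR come from this patch.]

/*
 * Hypothetical example: register a CPU-specific branch predictor
 * invalidation sequence with the skeleton added to cpu_errata.c below.
 */
extern char __my_vecs_start[], __my_vecs_end[];

static void my_bp_invalidate(void)
{
	/* CPU-specific branch predictor invalidation sequence goes here */
}

/* Assumed to run via the capability ->enable hook on each affected CPU */
static void enable_my_bp_hardening(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	/* install_bp_hardening_cb() bails out if ID_AA64PFR0_EL1.CSV2 says the CPU is safe */
	install_bp_hardening_cb(entry, my_bp_invalidate,
				__my_vecs_start, __my_vecs_end);
}

/* ...with a matching arm64_errata[] entry, e.g.: */
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_RANGE(MIDR_MY_CPU, 0x00, 0xff),
		.enable = enable_my_bp_hardening,
	},

For reference, each ventry in bpi.S is 31 NOPs plus a branch, i.e. 32 instructions or
0x80 bytes, so one 'vectors' expansion is 16 x 0x80 = 2K; the four copies between
__bp_harden_hyp_vecs_start/_end therefore provide four 2K hyp vector slots, and
__copy_hyp_vect_bpi() stamps the CPU's invalidation sequence into every 0x80-byte
entry of the slot it allocates.
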
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 14cdc6dea493..a7cf7dd99c1c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -610,6 +610,23 @@ config FORCE_MAX_ZONEORDER
However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
4M allocations matching the default size used by generic code.
+config HARDEN_BRANCH_PREDICTOR
+ bool "Harden the branch predictor against aliasing attacks" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors rely on
+ being able to manipulate the branch predictor for a victim context by
+ executing aliasing branches in the attacker context. Such attacks
+ can be partially mitigated against by clearing internal branch
+ predictor state and limiting the prediction logic in some situations.
+
+ This config option will take CPU-specific actions to harden the
+ branch predictor against aliasing attacks and may rely on specific
+ instruction sequences or control bits being set by the system
+ firmware.
+
+ If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8884b5d5f48c..ede73d2c09a4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -31,8 +31,9 @@
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
#define ARM64_WORKAROUND_CAVIUM_27456 8
+#define ARM64_HARDEN_BRANCH_PREDICTOR 9
-#define ARM64_NCAPS 9
+#define ARM64_NCAPS 10
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 990124a67eeb..c135532b5f94 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,6 +28,44 @@ typedef struct {
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+ int hyp_vectors_slot;
+ bp_hardening_cb_t fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+ struct bp_hardening_data *d;
+
+ if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ return;
+
+ d = arm64_get_bp_hardening_data();
+ if (d->fn)
+ d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void) { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
extern void paging_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d48ab5b41f52..8373fa1c3036 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -95,6 +95,7 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 474691f8b13a..3404ad0f9d8a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -42,6 +42,10 @@ arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI) += acpi.o
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
+endif
+
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
head-y := head.o
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 000000000000..06a931eb2673
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,55 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+.macro ventry target
+ .rept 31
+ nop
+ .endr
+ b \target
+.endm
+
+.macro vectors target
+ ventry \target + 0x000
+ ventry \target + 0x080
+ ventry \target + 0x100
+ ventry \target + 0x180
+
+ ventry \target + 0x200
+ ventry \target + 0x280
+ ventry \target + 0x300
+ ventry \target + 0x380
+
+ ventry \target + 0x400
+ ventry \target + 0x480
+ ventry \target + 0x500
+ ventry \target + 0x580
+
+ ventry \target + 0x600
+ ventry \target + 0x680
+ ventry \target + 0x700
+ ventry \target + 0x780
+.endm
+
+ .align 11
+ENTRY(__bp_harden_hyp_vecs_start)
+ .rept 4
+ vectors __kvm_hyp_vector
+ .endr
+ENTRY(__bp_harden_hyp_vecs_end)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a3e846a28b05..894333617d4e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -41,6 +41,77 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ static int last_slot = -1;
+ static DEFINE_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+
+ spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ last_slot++;
+ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+ / SZ_2K) <= last_slot);
+ slot = last_slot;
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ spin_unlock(&bp_lock);
+}
+#else
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+ bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ u64 pfr0;
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return;
+
+ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
#define MIDR_RANGE(model, min, max) \
.matches = is_affected_midr_range, \
.midr_model = model, \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2735bf814592..8488a829e24e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -88,6 +88,7 @@ static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
/* Linux doesn't care about the EL3 */
ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 166cd6626ca7..1dfbd9023777 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -495,13 +495,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ msr daifclr, #(8 | 4 | 1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
mov x2, sp
- bl do_mem_abort
+ bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b901cb964a08..66eaa00ce35b 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -193,6 +193,8 @@ asmlinkage void post_ttbr_update_workaround(void)
"ic iallu; dsb nsh; isb",
ARM64_WORKAROUND_CAVIUM_27456,
CONFIG_CAVIUM_ERRATUM_27456));
+
+ arm64_apply_bp_hardening();
}
static int asids_init(void)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 86485415c5f0..8a4e0a317edb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -534,6 +534,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (addr > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
+ local_irq_enable();
+ do_mem_abort(addr, esr, regs);
+}
+
+
/*
* Handle stack alignment exceptions.
*/
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 36f4695aa604..54b317d5d24b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
+#include <linux/percpu.h>
#include <asm/page.h>
#include <asm/mmu.h>
--
2.16.2.440.gc6284da
Thread overview: 40+ messages
2018-03-09 9:06 spectre backport for LTS 4.4 review Alex Shi
2018-03-09 9:06 ` [PATCH 01/39] mm: Introduce lm_alias Alex Shi
2018-03-09 9:06 ` [PATCH 02/39] arm64: barrier: Add CSDB macros to control data-value prediction Alex Shi
2018-03-09 9:06 ` [PATCH 03/39] arm64: Implement array_index_mask_nospec() Alex Shi
2018-03-09 9:06 ` [PATCH 04/39] arm64: move TASK_* definitions to <asm/processor.h> Alex Shi
2018-03-09 9:06 ` [PATCH 05/39] arm64: Make USER_DS an inclusive limit Alex Shi
2018-03-09 9:06 ` [PATCH 06/39] arm64: entry: Ensure branch through syscall table is bounded under speculation Alex Shi
2018-03-09 9:06 ` [PATCH 07/39] arm64: Use pointer masking to limit uaccess speculation Alex Shi
2018-03-09 9:06 ` [PATCH 08/39] arm64: uaccess: Prevent speculative use of the current addr_limit Alex Shi
2018-03-09 9:06 ` [PATCH 09/39] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user Alex Shi
2018-03-09 9:06 ` [PATCH 10/39] arm64: futex: Mask __user pointers prior to dereference Alex Shi
2018-03-09 9:06 ` [PATCH 11/39] drivers/firmware: Expose psci_get_version through psci_ops structure Alex Shi
2018-03-09 9:06 ` [PATCH 12/39] arm64: Move post_ttbr_update_workaround to C code Alex Shi
2018-03-09 9:06 ` Alex Shi [this message]
2018-03-09 9:06 ` [PATCH 14/39] arm64: Move BP hardening to check_and_switch_context Alex Shi
2018-03-09 9:06 ` [PATCH 15/39] arm64: KVM: Use per-CPU vector when BP hardening is enabled Alex Shi
2018-03-09 9:06 ` [PATCH 16/39] arm64: entry: Apply BP hardening for high-priority synchronous exceptions Alex Shi
2018-03-09 9:07 ` [PATCH 17/39] arm64: entry: Apply BP hardening for suspicious interrupts from EL0 Alex Shi
2018-03-09 9:07 ` [PATCH 18/39] arm64: cpu_errata: Allow an erratum to be match for all revisions of a core Alex Shi
2018-03-09 9:07 ` [PATCH 19/39] arm64: prefetch: add alternative pattern for CPUs without a prefetcher Alex Shi
2018-03-09 9:07 ` [PATCH 20/39] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75 Alex Shi
2018-03-09 9:07 ` [PATCH 21/39] arm64: Implement branch predictor hardening for affected Cortex-A CPUs Alex Shi
2018-03-09 9:07 ` [PATCH 22/39] arm64: KVM: Increment PC after handling an SMC trap Alex Shi
2018-03-09 9:07 ` [PATCH 23/39] arm/arm64: KVM: Consolidate the PSCI include files Alex Shi
2018-03-09 9:07 ` [PATCH 24/39] arm/arm64: KVM: Add PSCI_VERSION helper Alex Shi
2018-03-09 9:07 ` [PATCH 25/39] arm/arm64: KVM: Add smccc accessors to PSCI code Alex Shi
2018-03-09 9:07 ` [PATCH 26/39] arm/arm64: KVM: Implement PSCI 1.0 support Alex Shi
2018-03-09 9:07 ` [PATCH 27/39] ARM: 8478/2: arm/arm64: add arm-smccc Alex Shi
2018-03-09 9:07 ` [PATCH 28/39] ARM: 8479/2: add implementation for arm-smccc Alex Shi
2018-03-09 9:07 ` [PATCH 29/39] ARM: 8480/2: arm64: " Alex Shi
2018-03-09 9:07 ` [PATCH 30/39] ARM: 8481/2: drivers: psci: replace psci firmware calls Alex Shi
2018-03-09 9:07 ` [PATCH 31/39] arm/arm64: KVM: Advertise SMCCC v1.1 Alex Shi
2018-03-09 9:07 ` [PATCH 32/39] arm/arm64: KVM: Turn kvm_psci_version into a static inline Alex Shi
2018-03-09 9:07 ` [PATCH 33/39] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support Alex Shi
2018-03-09 9:07 ` [PATCH 34/39] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling Alex Shi
2018-03-09 9:07 ` [PATCH 35/39] firmware/psci: Expose PSCI conduit Alex Shi
2018-03-09 9:07 ` [PATCH 36/39] firmware/psci: Expose SMCCC version through psci_ops Alex Shi
2018-03-09 9:07 ` [PATCH 37/39] arm/arm64: smccc: Make function identifiers an unsigned quantity Alex Shi
2018-03-09 9:07 ` [PATCH 38/39] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive Alex Shi
2018-03-09 9:07 ` [PATCH 39/39] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Alex Shi