* [RFC PATCH 1/2] arm64: introduce infrastructure for emitting veneers at module reloc time
From: Ard Biesheuvel @ 2015-09-17 19:38 UTC
To: linux-arm-kernel
Introduce a framework for arm64 that allows errata fixups to be implemented
by replacing problematic instruction sequences with calls into veneers that
are generated on the fly.
This is based on the module PLT support for ARM.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
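Note for reviewers: the intended usage is that an erratum fixup adds a
sizing case to get_veneers_size() and then claims its slot via
alloc_veneer() while applying relocations. A minimal sketch, assuming a
hypothetical two-instruction veneer that re-executes one instruction and
branches back (R_AARCH64_SOME_RELOC is a placeholder, not a real
relocation type):

	/* in get_veneers_size(): reserve worst-case space up front */
	case R_AARCH64_SOME_RELOC:
		ret += 2 * sizeof(u32);
		break;

	/* in the fixup, called from apply_relocate_add() with the
	 * address of the affected instruction (u32 *insn)
	 */
	u32 *v = alloc_veneer(mod, (u64)insn, 2 * sizeof(u32));

	v[0] = *insn;	/* copy of the instruction being fixed up */
	v[1] = aarch64_insn_gen_branch_imm((unsigned long)&v[1],
					   (unsigned long)(insn + 1),
					   AARCH64_INSN_BRANCH_NOLINK);

Patch 2 wires this up for erratum #843419.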
arch/arm64/Kconfig | 4 +
arch/arm64/Makefile | 4 +
arch/arm64/include/asm/module.h | 9 ++
arch/arm64/kernel/Makefile | 1 +
arch/arm64/kernel/module.lds | 4 +
arch/arm64/kernel/veneers.c | 100 ++++++++++++++++++++
6 files changed, 122 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7d95663c0160..115586d8299b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -68,6 +68,7 @@ config ARM64
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_MOD_ARCH_SPECIFIC if ARM64_MODULE_VENEERS
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -333,6 +334,9 @@ config ARM64_ERRATUM_845719
endmenu
+config ARM64_MODULE_VENEERS
+ bool
+ depends on MODULES
choice
prompt "Page size"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 15ff5b4156fd..ccbcb399b9f1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -41,6 +41,10 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_ARM64_MODULE_VENEERS),y)
+LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e80e232b730e..87e1f097a500 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -20,4 +20,13 @@
#define MODULE_ARCH_VERMAGIC "aarch64"
+#ifdef CONFIG_ARM64_MODULE_VENEERS
+struct mod_arch_specific {
+ struct veneer_section {
+ struct elf64_shdr *veneers;
+ unsigned long size;
+ } core, init;
+};
+#endif
+
#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 22dc9bc781be..0016e042848f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -25,6 +25,7 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
+arm64-obj-$(CONFIG_ARM64_MODULE_VENEERS) += veneers.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
new file mode 100644
index 000000000000..845b75ee0f93
--- /dev/null
+++ b/arch/arm64/kernel/module.lds
@@ -0,0 +1,4 @@
+SECTIONS {
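+ /*
+  * Emit a single byte into each veneer section so that the sections
+  * are always present in the module image, allowing
+  * module_frob_arch_sections() to locate and resize them.
+  */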
+ .core.veneers : { BYTE(0) }
+ .init.veneers : { BYTE(0) }
+}
diff --git a/arch/arm64/kernel/veneers.c b/arch/arm64/kernel/veneers.c
new file mode 100644
index 000000000000..0a33a63a9b46
--- /dev/null
+++ b/arch/arm64/kernel/veneers.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static bool in_init(const struct module *mod, u64 addr)
+{
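+ /*
+  * If addr lies below module_init, the unsigned subtraction wraps
+  * around to a very large value, so this single comparison checks
+  * both bounds of the init region.
+  */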
+ return addr - (u64)mod->module_init < mod->init_size;
+}
+
+static void __maybe_unused *alloc_veneer(struct module *mod, u64 loc, int size)
+{
+ struct veneer_section *vs;
+ void *ret;
+
+ if (in_init(mod, loc))
+ vs = &mod->arch.init;
+ else
+ vs = &mod->arch.core;
+
+ ret = (void *)vs->veneers->sh_addr + vs->size;
+ vs->size += size;
+
+ return ret;
+}
+
+/* estimate the maximum size of the veneer for this relocation */
+static unsigned long get_veneers_size(Elf64_Addr base, const Elf64_Rela *rel,
+ int num)
+{
+ unsigned long ret = 0;
+ int i;
+
+ for (i = 0; i < num; i++)
+ switch (ELF64_R_TYPE(rel[i].r_info)) {
+ }
+ return ret;
+}
+
+int module_frob_arch_sections(Elf64_Ehdr *ehdr, Elf64_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned long core_veneers_maxsize = 0, init_veneers_maxsize = 0;
+ Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+
+ /*
+ * To store the veneers, we expand the .text section for core module
+ * code and the .init.text section for initialization code.
+ */
+ for (s = sechdrs; s < sechdrs_end; ++s)
+ if (strcmp(".core.veneers", secstrings + s->sh_name) == 0)
+ mod->arch.core.veneers = s;
+ else if (strcmp(".init.veneers", secstrings + s->sh_name) == 0)
+ mod->arch.init.veneers = s;
+
+ if (!mod->arch.core.veneers || !mod->arch.init.veneers) {
+ pr_err("%s: sections missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+ for (s = sechdrs + 1; s < sechdrs_end; ++s) {
+ const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
+ int numrels = s->sh_size / sizeof(Elf64_Rela);
+ Elf64_Shdr *dstsec = sechdrs + s->sh_info;
+
+ if (s->sh_type != SHT_RELA)
+ continue;
+
+ if (strstr(secstrings + s->sh_name, ".init"))
+ init_veneers_maxsize += get_veneers_size(
+ dstsec->sh_addr, rels, numrels);
+ else
+ core_veneers_maxsize += get_veneers_size(
+ dstsec->sh_addr, rels, numrels);
+ }
+
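+ /*
+  * Like .bss, the veneer sections occupy no space in the module
+  * file itself (SHT_NOBITS); the loader merely reserves zeroed,
+  * executable memory for them based on sh_size.
+  */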
+ mod->arch.core.veneers->sh_type = SHT_NOBITS;
+ mod->arch.core.veneers->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.core.veneers->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.core.veneers->sh_size = core_veneers_maxsize;
+ mod->arch.core.size = 0;
+
+ mod->arch.init.veneers->sh_type = SHT_NOBITS;
+ mod->arch.init.veneers->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.init.veneers->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.init.veneers->sh_size = init_veneers_maxsize;
+ mod->arch.init.size = 0;
+ pr_debug("%s: core.veneers=%llx, init.veneers=%llx\n",
+ __func__,
+ mod->arch.core.veneers->sh_size,
+ mod->arch.init.veneers->sh_size);
+ return 0;
+}
--
1.9.1
* [RFC PATCH 2/2] arm64: errata: add module load workaround for erratum #843419
From: Ard Biesheuvel @ 2015-09-17 19:38 UTC
To: linux-arm-kernel
In order to work around Cortex-A53 erratum #843419, this patch updates
the module loading logic to either change potentially problematic adrp
instructions into adr instructions (if the symbol turns out to be in
range), or emit a veneer that is guaranteed to be at an offset that does
not trigger the issue.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
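Note for reviewers: in instruction terms, an adrp in one of the last two
slots of a 4 KB page is either rewritten into an adr (when the target
page lies within adr's +/-1 MB range), or redirected through a veneer
placed at a known-safe offset (addresses illustrative):

	before:				after:
	0xffc:	adrp	x0, sym		0xffc:	b	<veneer>

					<veneer>:	/* safe page offset */
						adrp	x0, sym
						b	0x1000	/* insn after the original adrp */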
arch/arm64/Kconfig | 17 ++++++++++
arch/arm64/include/asm/veneers.h | 19 +++++++++++
arch/arm64/kernel/module.c | 33 +++++++++++++++++++
arch/arm64/kernel/veneers.c | 34 ++++++++++++++++++++
4 files changed, 103 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 115586d8299b..57e45e77d7e3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -338,6 +338,23 @@ config ARM64_MODULE_VENEERS
bool
depends on MODULES
+config ARM64_ERRATUM_843419
+ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+ depends on MODULES
+ select ARM64_MODULE_VENEERS
+ default y
+ help
+ This option builds kernel modules using the large memory model in
+ order to avoid the use of the ADRP instruction, which can cause
+ a subsequent memory access to use an incorrect address on Cortex-A53
+ parts up to r0p4.
+
+ Note that the kernel itself must be linked with a version of ld
+ which fixes potentially affected ADRP instructions through the
+ use of veneers.
+
+ If unsure, say Y.
+
choice
prompt "Page size"
default ARM64_4K_PAGES
diff --git a/arch/arm64/include/asm/veneers.h b/arch/arm64/include/asm/veneers.h
new file mode 100644
index 000000000000..4ee6efe4f5a1
--- /dev/null
+++ b/arch/arm64/include/asm/veneers.h
@@ -0,0 +1,19 @@
+
+#include <linux/types.h>
+
+struct veneer_erratum_843419 {
+ u32 adrp;
+ u32 branch;
+};
+
+static inline bool erratum_843419_affects_adrp_insn(void *addr)
+{
+ /*
+ * The workaround for erratum 843419 only needs to be
+ * applied if the adrp instruction appears in either of
+ * the last two instruction slots in the 4 KB page.
+ */
+ return ((u64)addr % SZ_4K) >= (SZ_4K - 8);
+}
+
+void *emit_erratum_843419_veneer(struct module *mod, u32 *insn);
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf4107f6ef..5307d08f15e8 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -28,6 +28,7 @@
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
+#include <asm/veneers.h>
#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
@@ -335,6 +336,38 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
+#ifdef CONFIG_ARM64_ERRATUM_843419
+ /*
+ * TODO check for presence of affected A53 cores
+ */
+ if (erratum_843419_affects_adrp_insn(loc)) {
+ struct veneer_erratum_843419 *v;
+
+ /*
+ * This adrp instruction appears at an offset
+ * that may be problematic on older Cortex-A53
+ * cores. So first, try to convert it into a
+ * simple adr instruction.
+ */
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc,
+ val & ~(SZ_4K - 1), 0, 21,
+ AARCH64_INSN_IMM_ADR);
+ if (ovf == 0) {
+ /* success! convert adrp -> adr */
+ *(u32 *)loc &= 0x7fffffff;
+ break;
+ } else {
+ /* symbol out of range -> emit veneer */
+ v = emit_erratum_843419_veneer(me, loc);
+ *(u32 *)loc = aarch64_insn_gen_branch_imm(
+ (unsigned long)loc,
+ (unsigned long)v,
+ AARCH64_INSN_BRANCH_NOLINK);
+ loc = &v->adrp;
+ }
+ /* fall through */
+ }
+#endif
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
diff --git a/arch/arm64/kernel/veneers.c b/arch/arm64/kernel/veneers.c
index 0a33a63a9b46..1b708d6a021a 100644
--- a/arch/arm64/kernel/veneers.c
+++ b/arch/arm64/kernel/veneers.c
@@ -10,6 +10,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <asm/veneers.h>
+
static bool in_init(const struct module *mod, u64 addr)
{
return addr - (u64)mod->module_init < mod->init_size;
@@ -31,6 +33,30 @@ static void __maybe_unused *alloc_veneer(struct module *mod, u64 loc, int size)
return ret;
}
+#ifdef CONFIG_ARM64_ERRATUM_843419
+void *emit_erratum_843419_veneer(struct module *mod, u32 *insn)
+{
+ struct veneer_erratum_843419 *veneer;
+
+ veneer = alloc_veneer(mod, (u64)insn, 2 * sizeof(*veneer));
+ if (erratum_843419_affects_adrp_insn(&veneer->adrp))
+ /*
+ * We allocated a veneer that is susceptible to the same problem
+ * as the original location. We allocated twice the space, so
+ * just advance to the next slot.
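+ * Since each slot is 8 bytes, the second slot then starts at the
+ * beginning of the following page, which is guaranteed to be safe.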
+ */
+ veneer++;
+
+ veneer->adrp = *insn;
+ veneer->branch = aarch64_insn_gen_branch_imm(
+ (unsigned long)&veneer->branch,
+ (unsigned long)(insn + 1),
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ return veneer;
+}
+#endif
+
/* estimate the maximum size of the veneer for this relocation */
static unsigned long get_veneers_size(Elf64_Addr base, const Elf64_Rela *rel,
int num)
@@ -40,6 +66,14 @@ static unsigned long get_veneers_size(Elf64_Addr base, const Elf64_Rel *rel,
for (i = 0; i < num; i++)
switch (ELF64_R_TYPE(rel[i].r_info)) {
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ case R_AARCH64_ADR_PREL_PG_HI21:
+#ifdef CONFIG_ARM64_ERRATUM_843419
+ if (erratum_843419_affects_adrp_insn((void *)base +
+ rel[i].r_offset))
+ ret += 2 * sizeof(struct veneer_erratum_843419);
+#endif
+ break;
}
return ret;
}
--
1.9.1