From: Ard Biesheuvel <ardb@google.com>
To: linux-arm-kernel@lists.infradead.org
Cc: Ard Biesheuvel <ardb@kernel.org>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Ryan Roberts <ryan.roberts@arm.com>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Kees Cook <keescook@chromium.org>
Subject: [PATCH v6 40/41] arm64: mm: omit redundant remap of kernel image
Date: Wed, 29 Nov 2023 12:16:36 +0100
Message-ID: <20231129111555.3594833-83-ardb@google.com>
In-Reply-To: <20231129111555.3594833-43-ardb@google.com>
From: Ard Biesheuvel <ardb@kernel.org>
Now that the early kernel mapping is created with all the right
attributes and segment boundaries, there is no longer a need to recreate
it in swapper_pg_dir and switch to it later: the root level of the early
page tables can simply be copied into swapper_pg_dir and TTBR1 pointed
at it right away. This also means we no longer have to copy the KASAN
shadow or parts of the fixmap from one set of page tables to the other.
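For clarity, the tail of map_kernel() in the early pi/ code now performs
the switch directly; the hunk added to arch/arm64/kernel/pi/map_kernel.c
(excerpted below) amounts to:

	/* Copy the root page table to its final location */
	memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PGD_SIZE);
	dsb(ishst);
	idmap_cpu_replace_ttbr1(swapper_pg_dir);

so paging_init() no longer needs to rebuild the kernel mapping or free
init_pg_dir's copy of it.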
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/arm64/include/asm/fixmap.h | 1 -
arch/arm64/include/asm/kasan.h | 2 -
arch/arm64/include/asm/mmu.h | 2 +-
arch/arm64/kernel/image-vars.h | 1 +
arch/arm64/kernel/pi/map_kernel.c | 6 +-
arch/arm64/mm/fixmap.c | 34 --------
arch/arm64/mm/kasan_init.c | 15 ----
arch/arm64/mm/mmu.c | 85 ++++----------------
8 files changed, 21 insertions(+), 125 deletions(-)
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 58c294a96676..8aabd45e9a13 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -100,7 +100,6 @@ enum fixed_addresses {
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
void __init early_fixmap_init(void);
-void __init fixmap_copy(pgd_t *pgdir);
#define __early_set_fixmap __set_fixmap
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 12d5f47f7dbe..ab52688ac4bd 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -36,12 +36,10 @@ void kasan_init(void);
#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
-void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);
#else
static inline void kasan_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif
#endif
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d0b8b4b413b6..65977c7783c5 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -110,7 +110,7 @@ static inline bool kaslr_requires_kpti(void)
}
#define INIT_MM_CONTEXT(name) \
- .pgd = init_pg_dir,
+ .pgd = swapper_pg_dir,
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 941a14c05184..e140c5bda90b 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -55,6 +55,7 @@ PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
PROVIDE(__pi_init_idmap_pg_dir = init_idmap_pg_dir);
PROVIDE(__pi_init_pg_dir = init_pg_dir);
PROVIDE(__pi_init_pg_end = init_pg_end);
+PROVIDE(__pi_swapper_pg_dir = swapper_pg_dir);
PROVIDE(__pi__text = _text);
PROVIDE(__pi__stext = _stext);
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index f86e878d366d..4b76a007a50d 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -124,8 +124,12 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
text_prot, true, root_level);
map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
__inittext_end, text_prot, false, root_level);
- dsb(ishst);
}
+
+ /* Copy the root page table to its final location */
+ memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PGD_SIZE);
+ dsb(ishst);
+ idmap_cpu_replace_ttbr1(swapper_pg_dir);
}
static void __init map_fdt(u64 fdt)
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
index c0a3301203bd..9436a12e1882 100644
--- a/arch/arm64/mm/fixmap.c
+++ b/arch/arm64/mm/fixmap.c
@@ -167,37 +167,3 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
-
-/*
- * Copy the fixmap region into a new pgdir.
- */
-void __init fixmap_copy(pgd_t *pgdir)
-{
- if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
- /*
- * The fixmap falls in a separate pgd to the kernel, and doesn't
- * live in the carveout for the swapper_pg_dir. We can simply
- * re-use the existing dir for the fixmap.
- */
- set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
- READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
- } else if (CONFIG_PGTABLE_LEVELS > 3) {
- pgd_t *bm_pgdp;
- p4d_t *bm_p4dp;
- pud_t *bm_pudp;
- /*
- * The fixmap shares its top level pgd entry with the kernel
- * mapping. This can really only occur when we are running
- * with 16k/4 levels, so we can simply reuse the pud level
- * entry instead.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
- bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
- bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
- pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
- pud_clear_fixmap();
- } else {
- BUG();
- }
-}
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 555285ebd5af..dd91f5942fd2 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -184,21 +184,6 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}
-/*
- * Copy the current shadow region into a new pgdir.
- */
-void __init kasan_copy_shadow(pgd_t *pgdir)
-{
- pgd_t *pgdp, *pgdp_new, *pgdp_end;
-
- pgdp = pgd_offset_k(KASAN_SHADOW_START);
- pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
- pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
- do {
- set_pgd(pgdp_new, READ_ONCE(*pgdp));
- } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
-}
-
static void __init clear_pgds(unsigned long start,
unsigned long end)
{
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5996b374ff8a..07da61c1d7c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -648,9 +648,9 @@ void mark_rodata_ro(void)
debug_checkwx();
}
-static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
- pgprot_t prot, struct vm_struct *vma,
- int flags, unsigned long vm_flags)
+static void __init declare_vma(struct vm_struct *vma,
+ void *va_start, void *va_end,
+ unsigned long vm_flags)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
@@ -658,9 +658,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(pa_start));
BUG_ON(!PAGE_ALIGNED(size));
- __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, flags);
-
if (!(vm_flags & VM_NO_GUARD))
size += PAGE_SIZE;
@@ -673,12 +670,12 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
vm_area_add_early(vma);
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static pgprot_t kernel_exec_prot(void)
{
return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
int i;
@@ -710,60 +707,17 @@ core_initcall(map_entry_trampoline);
#endif
/*
- * Open coded check for BTI, only for use to determine configuration
- * for early mappings for before the cpufeature code has run.
- */
-static bool arm64_early_this_cpu_has_bti(void)
-{
- u64 pfr1;
-
- if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
- return false;
-
- pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
- return cpuid_feature_extract_unsigned_field(pfr1,
- ID_AA64PFR1_EL1_BT_SHIFT);
-}
-
-/*
- * Create fine-grained mappings for the kernel.
+ * Declare the VMA areas for the kernel
*/
-static void __init map_kernel(pgd_t *pgdp)
+static void __init declare_kernel_vmas(void)
{
- static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
- vmlinux_initdata, vmlinux_data;
-
- /*
- * External debuggers may need to write directly to the text
- * mapping to install SW breakpoints. Allow this (only) when
- * explicitly requested with rodata=off.
- */
- pgprot_t text_prot = kernel_exec_prot();
-
- /*
- * If we have a CPU that supports BTI and a kernel built for
- * BTI then mark the kernel executable text as guarded pages
- * now so we don't have to rewrite the page tables later.
- */
- if (arm64_early_this_cpu_has_bti())
- text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
+ static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];
- /*
- * Only rodata will be remapped with different permissions later on,
- * all other segments are allowed to use contiguous mappings.
- */
- map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
- VM_NO_GUARD);
- map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
- &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
- map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
- &vmlinux_inittext, 0, VM_NO_GUARD);
- map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
- &vmlinux_initdata, 0, VM_NO_GUARD);
- map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
-
- fixmap_copy(pgdp);
- kasan_copy_shadow(pgdp);
+ declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[4], _data, _end, 0);
}
void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
@@ -799,23 +753,12 @@ static void __init create_idmap(void)
void __init paging_init(void)
{
- pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
- extern pgd_t init_idmap_pg_dir[];
-
- map_kernel(pgdp);
- map_mem(pgdp);
-
- pgd_clear_fixmap();
-
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
- init_mm.pgd = swapper_pg_dir;
-
- memblock_phys_free(__pa_symbol(init_pg_dir),
- __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+ map_mem(swapper_pg_dir);
memblock_allow_resize();
create_idmap();
+ declare_kernel_vmas();
}
#ifdef CONFIG_MEMORY_HOTPLUG
--
2.43.0.rc1.413.gea7ed67945-goog