From: Ard Biesheuvel <ardb@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: Ard Biesheuvel <ardb@kernel.org>, Marc Zyngier <maz@kernel.org>,
	Will Deacon <will@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Kees Cook <keescook@chromium.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Mark Brown <broonie@kernel.org>,
	Anshuman Khandual <anshuman.khandual@arm.com>
Subject: [PATCH v7 29/33] arm64: mm: omit redundant remap of kernel image
Date: Fri, 11 Nov 2022 18:11:57 +0100
Message-ID: <20221111171201.2088501-30-ardb@kernel.org>
In-Reply-To: <20221111171201.2088501-1-ardb@kernel.org>

Now that the early kernel mapping is created with all the right
attributes and segment boundaries, there is no longer a need to recreate
it in swapper_pg_dir and switch to it. This also means we no longer have
to copy the kasan shadow or some parts of the fixmap from one set of
page tables to the other.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
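The net effect on the boot flow is easier to see with the hunks below
combined. Here is a condensed sketch (a summary only, using the names
from this series; everything not visible in the hunks is reduced to
comments rather than quoted):

	/* arch/arm64/kernel/pi/map_kernel.c -- runs from the early C runtime */
	static void __init map_kernel(u64 kaslr_offset, u64 va_offset)
	{
		/* map each kernel segment with its final permissions */
		/* switch to init_pg_dir and apply any permission fixups */
		/* then hand the populated root level over to its final home */
		memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PGD_SIZE);
		dsb(ishst);
		idmap_cpu_replace_ttbr1(swapper_pg_dir);
	}

	/* arch/arm64/mm/mmu.c -- no kernel remap or pgdir switch left here */
	void __init paging_init(void)
	{
		idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
		map_mem(swapper_pg_dir);	/* linear map only */
		memblock_allow_resize();
		create_idmap();
		declare_kernel_vmas();		/* register the kernel's vm_struct areas */
	}

Only the root level moves: the copied entries keep pointing at the page
tables the early code already populated, which is presumably why the
memblock_phys_free() of the init_pg_dir region disappears from
paging_init().
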
 arch/arm64/include/asm/kasan.h    |   2 -
 arch/arm64/include/asm/mmu.h      |   2 +-
 arch/arm64/kernel/image-vars.h    |   2 +-
 arch/arm64/kernel/pi/map_kernel.c |   9 +-
 arch/arm64/mm/kasan_init.c        |  15 ---
 arch/arm64/mm/mmu.c               | 110 +++-----------------
 6 files changed, 22 insertions(+), 118 deletions(-)

diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 12d5f47f7dbec628..ab52688ac4bd43b6 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -36,12 +36,10 @@ void kasan_init(void);
 #define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
 #define KASAN_SHADOW_START      _KASAN_SHADOW_START(vabits_actual)
 
-void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 
 #else
 static inline void kasan_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *pgdir) { }
 #endif
 
 #endif
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 48f8466a4be92ac3..a93d495d6e8c94a2 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -73,7 +73,7 @@ extern void mark_linear_text_alias_ro(void);
 extern bool kaslr_requires_kpti(void);
 
 #define INIT_MM_CONTEXT(name)	\
-	.pgd = init_pg_dir,
+	.pgd = swapper_pg_dir,
 
 #endif	/* !__ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 88f864f28f03630c..5bd878f414d85366 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -56,7 +56,7 @@ PROVIDE(__pi__ctype			= _ctype);
 
 PROVIDE(__pi_init_pg_dir		= init_pg_dir);
 PROVIDE(__pi_init_pg_end		= init_pg_end);
-PROVIDE(__pi__end			= _end);
+PROVIDE(__pi_swapper_pg_dir		= swapper_pg_dir);
 
 PROVIDE(__pi__text			= _text);
 PROVIDE(__pi__stext               	= _stext);
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index c5c6eebef684f81d..4b604b104460c3ef 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -198,7 +198,8 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset)
 	map_segment(&pgdp, va_offset, __start_rodata, __inittext_begin, data_prot, false);
 	map_segment(&pgdp, va_offset, __inittext_begin, __inittext_end, prot, false);
 	map_segment(&pgdp, va_offset, __initdata_begin, __initdata_end, data_prot, false);
-	map_segment(&pgdp, va_offset, _data, _end, data_prot, true);
+	map_segment(&pgdp, va_offset, _data, init_pg_dir, data_prot, true);
+	/* omit [init_pg_dir, _end] - it doesn't need a kernel mapping */
 	dsb(ishst);
 
 	idmap_cpu_replace_ttbr1(init_pg_dir);
@@ -233,8 +234,12 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset)
 		map_segment(NULL, va_offset, _stext, _etext, text_prot, true);
 		map_segment(NULL, va_offset, __inittext_begin, __inittext_end,
 			    text_prot, false);
-		dsb(ishst);
 	}
+
+	/* Copy the root page table to its final location */
+	memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PGD_SIZE);
+	dsb(ishst);
+	idmap_cpu_replace_ttbr1(swapper_pg_dir);
 }
 
 asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index e969e68de005fd2a..df98f496539f0e39 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -184,21 +184,6 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
 	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
 }
 
-/*
- * Copy the current shadow region into a new pgdir.
- */
-void __init kasan_copy_shadow(pgd_t *pgdir)
-{
-	pgd_t *pgdp, *pgdp_new, *pgdp_end;
-
-	pgdp = pgd_offset_k(KASAN_SHADOW_START);
-	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
-	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
-	do {
-		set_pgd(pgdp_new, READ_ONCE(*pgdp));
-	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
-}
-
 static void __init clear_pgds(unsigned long start,
 			unsigned long end)
 {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 68e66b979fc3ac5d..6942255056aed5ae 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -635,9 +635,9 @@ void mark_rodata_ro(void)
 	debug_checkwx();
 }
 
-static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
-				      pgprot_t prot, struct vm_struct *vma,
-				      int flags, unsigned long vm_flags)
+static void __init declare_vma(struct vm_struct *vma,
+			       void *va_start, void *va_end,
+			       unsigned long vm_flags)
 {
 	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;
@@ -645,9 +645,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
 	BUG_ON(!PAGE_ALIGNED(pa_start));
 	BUG_ON(!PAGE_ALIGNED(size));
 
-	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
-			     early_pgtable_alloc, flags);
-
 	if (!(vm_flags & VM_NO_GUARD))
 		size += PAGE_SIZE;
 
@@ -692,87 +689,17 @@ core_initcall(map_entry_trampoline);
 #endif
 
 /*
- * Open coded check for BTI, only for use to determine configuration
- * for early mappings for before the cpufeature code has run.
+ * Declare the VMA areas for the kernel
  */
-static bool arm64_early_this_cpu_has_bti(void)
+static void __init declare_kernel_vmas(void)
 {
-	u64 pfr1;
-
-	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
-		return false;
-
-	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
-	return cpuid_feature_extract_unsigned_field(pfr1,
-						    ID_AA64PFR1_EL1_BT_SHIFT);
-}
-
-/*
- * Create fine-grained mappings for the kernel.
- */
-static void __init map_kernel(pgd_t *pgdp)
-{
-	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
-				vmlinux_initdata, vmlinux_data;
-
-	/*
-	 * External debuggers may need to write directly to the text
-	 * mapping to install SW breakpoints. Allow this (only) when
-	 * explicitly requested with rodata=off.
-	 */
-	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
-
-	/*
-	 * If we have a CPU that supports BTI and a kernel built for
-	 * BTI then mark the kernel executable text as guarded pages
-	 * now so we don't have to rewrite the page tables later.
-	 */
-	if (arm64_early_this_cpu_has_bti())
-		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
+	static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];
 
-	/*
-	 * Only rodata will be remapped with different permissions later on,
-	 * all other segments are allowed to use contiguous mappings.
-	 */
-	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
-			   VM_NO_GUARD);
-	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
-			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
-	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
-			   &vmlinux_inittext, 0, VM_NO_GUARD);
-	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
-			   &vmlinux_initdata, 0, VM_NO_GUARD);
-	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
-
-	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
-		/*
-		 * The fixmap falls in a separate pgd to the kernel, and doesn't
-		 * live in the carveout for the swapper_pg_dir. We can simply
-		 * re-use the existing dir for the fixmap.
-		 */
-		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
-			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
-	} else if (CONFIG_PGTABLE_LEVELS > 3) {
-		pgd_t *bm_pgdp;
-		p4d_t *bm_p4dp;
-		pud_t *bm_pudp;
-		/*
-		 * The fixmap shares its top level pgd entry with the kernel
-		 * mapping. This can really only occur when we are running
-		 * with 16k/4 levels, so we can simply reuse the pud level
-		 * entry instead.
-		 */
-		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
-		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
-		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
-		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
-		pud_clear_fixmap();
-	} else {
-		BUG();
-	}
-
-	kasan_copy_shadow(pgdp);
+	declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
+	declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
+	declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
+	declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
+	declare_vma(&vmlinux_seg[4], _data, _end, 0);
 }
 
 static void __init create_idmap(void)
@@ -807,25 +734,14 @@ static void __init create_idmap(void)
 
 void __init paging_init(void)
 {
-	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
-	extern pgd_t init_idmap_pg_dir[];
-
 	idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
 
-	map_kernel(pgdp);
-	map_mem(pgdp);
-
-	pgd_clear_fixmap();
-
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
-	init_mm.pgd = swapper_pg_dir;
-
-	memblock_phys_free(__pa_symbol(init_pg_dir),
-			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+	map_mem(swapper_pg_dir);
 
 	memblock_allow_resize();
 
 	create_idmap();
+	declare_kernel_vmas();
 }
 
 /*
-- 
2.35.1
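
A note for readers of the mmu.c hunk above: the tail of declare_vma() is
unchanged context and therefore not shown in the diff. Going by the
map_kernel_segment() it is carved out of, it should look roughly like
the sketch below -- an approximation for orientation, not text from the
patch:

	static void __init declare_vma(struct vm_struct *vma,
				       void *va_start, void *va_end,
				       unsigned long vm_flags)
	{
		phys_addr_t pa_start = __pa_symbol(va_start);
		unsigned long size = va_end - va_start;

		BUG_ON(!PAGE_ALIGNED(pa_start));
		BUG_ON(!PAGE_ALIGNED(size));

		if (!(vm_flags & VM_NO_GUARD))
			size += PAGE_SIZE;

		/* describe the region; the mapping itself already exists */
		vma->addr	= va_start;
		vma->phys_addr	= pa_start;
		vma->size	= size;
		vma->flags	= VM_MAP | vm_flags;
		vma->caller	= __builtin_return_address(0);

		/* reserve it in the vmalloc space before vmalloc_init() runs */
		vm_area_add_early(vma);
	}

With __create_pgd_mapping() gone, this is purely bookkeeping: it keeps
the kernel segments reserved in the vmalloc area.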



Thread overview: 35+ messages
2022-11-11 17:11 [PATCH v7 00/33] arm64: robustify boot sequence and add support for WXN Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 01/33] arm64: mm: Avoid SWAPPER_BLOCK_xxx constants in FDT fixmap logic Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 02/33] arm64: mm: Avoid swapper block size when choosing vmemmap granularity Ard Biesheuvel
2022-11-24  5:11   ` Anshuman Khandual
2022-11-11 17:11 ` [PATCH v7 03/33] arm64: kaslr: don't pretend KASLR is enabled if offset < MIN_KIMG_ALIGN Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 04/33] arm64: kaslr: drop special case for ThunderX in kaslr_requires_kpti() Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 05/33] arm64: kernel: Disable latent_entropy GCC plugin in early C runtime Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 06/33] arm64: kernel: Add relocation check to code built under pi/ Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 07/33] arm64: kernel: Don't rely on objcopy to make code under pi/ __init Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 08/33] arm64: head: move relocation handling to C code Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 09/33] arm64: Turn kaslr_feature_override into a generic SW feature override Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 10/33] arm64: idreg-override: Omit non-NULL checks for override pointer Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 11/33] arm64: idreg-override: Use relative references to override variables Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 12/33] arm64: idreg-override: Use relative references to filter routines Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 13/33] arm64: idreg-override: Avoid parameq() and parameqn() Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 14/33] arm64: idreg-override: avoid strlen() to check for empty strings Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 15/33] arm64: idreg-override: Avoid sprintf() for simple string concatenation Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 16/33] arm64: idreg_override: Avoid kstrtou64() to parse a single hex digit Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 17/33] arm64: idreg-override: Move to early mini C runtime Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 18/33] arm64: kernel: Remove early fdt remap code Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 19/33] arm64: head: Clear BSS and the kernel page tables in one go Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 20/33] arm64: Move feature overrides into the BSS section Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 21/33] arm64: head: Run feature override detection before mapping the kernel Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 22/33] arm64: head: move dynamic shadow call stack patching into early C runtime Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 23/33] arm64: kaslr: Use feature override instead of parsing the cmdline again Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 24/33] arm64: idreg-override: Create a pseudo feature for rodata=off Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 25/33] arm64: head: allocate more pages for the kernel mapping Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 26/33] arm64: head: move memstart_offset_seed handling to C code Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 27/33] arm64: head: Move early kernel mapping routines into C code Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 28/33] arm64: mm: avoid fixmap for early swapper_pg_dir updates Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 29/33] arm64: mm: omit redundant remap of kernel image Ard Biesheuvel [this message]
2022-11-11 17:11 ` [PATCH v7 30/33] arm64: Revert "mm: provide idmap pointer to cpu_replace_ttbr1()" Ard Biesheuvel
2022-11-11 17:11 ` [PATCH v7 31/33] arm64: mmu: Retire SWAPPER_BLOCK_xxx and related constants Ard Biesheuvel
2022-11-11 17:12 ` [PATCH v7 32/33] mm: add arch hook to validate mmap() prot flags Ard Biesheuvel
2022-11-11 17:12 ` [PATCH v7 33/33] arm64: mm: add support for WXN memory translation attribute Ard Biesheuvel
