From: Ard Biesheuvel <ardb@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: linux-efi@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>,
	Marc Zyngier <maz@kernel.org>, Will Deacon <will@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Kees Cook <keescook@chromium.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Mark Brown <broonie@kernel.org>
Subject: [PATCH v3 06/30] arm64: head: switch to map_memory macro for the extended ID map
Date: Mon, 11 Apr 2022 11:48:00 +0200
Message-ID: <20220411094824.4176877-7-ardb@kernel.org>
In-Reply-To: <20220411094824.4176877-1-ardb@kernel.org>

In a future patch, we will start using an ID map that covers the entire
image, rather than a single page. This means we need to deal with the
pathological case of an extended ID map, where the kernel image does not
fit neatly inside a single entry at the root level, and so we need to
create additional table entries and map additional pages to hold page
tables.
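
As a rough illustration of that pathological case: the end address of
the 1:1 mapping decides how many VA bits the ID map needs, and any bits
set at or above the range covered by the root level call for the extra
level. A hypothetical C sketch of that check (assumed function name,
not code from this patch; it mirrors the 'tst' that the macro performs
in the diff below):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical sketch, not kernel code: the ID map is a 1:1
     * mapping, so the physical end address of the image decides how
     * many VA bits the ID map needs.  Bits set at or above the range
     * covered by the root level table mean an extra level is needed. */
    static bool idmap_needs_extra_level(uint64_t image_pa_end, int extra_shift)
    {
        /* inclusive end address, as the map_memory macro computes it */
        uint64_t vend = image_pa_end - 1;

        return (vend & ~((1ULL << extra_shift) - 1)) != 0;
    }

    int main(void)
    {
        /* an image ending at 1 TiB needs more than 39 VA bits */
        return idmap_needs_extra_level(1ULL << 40, 39) ? 0 : 1;
    }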

The existing map_memory macro already takes care of most of that, so
let's extend it to deal with this case as well. While at it, drop the
conditional branch on the value of T0SZ: we no longer assign idmap_t0sz
in the entry code, so we can simply let the map_memory macro deal with
the case where the output address exceeds VA_BITS.
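
For reference, T0SZ encodes the TTBR0 address-space size as 64 minus
the number of VA bits in use, so the dropped branch amounted to asking
whether the ID map still fits the default VA_BITS_MIN-sized range. A
hypothetical sketch of that relation (assumed helper name, not kernel
code):

    #include <stdbool.h>

    /* Hypothetical sketch, not kernel code: a larger T0SZ means a
     * smaller address range, so the dropped branch skipped the range
     * extension whenever the programmed T0SZ was at least as large as
     * the default one, i.e., the ID map fit the default range. */
    static bool idmap_fits_default_range(int idmap_va_bits, int va_bits_min)
    {
        return (64 - idmap_va_bits) >= (64 - va_bits_min);
    }

    int main(void)
    {
        /* a 48-bit ID map does not fit a 39-bit default range */
        return idmap_fits_default_range(48, 39) ? 1 : 0;
    }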

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/head.S | 76 ++++++++++----------
 1 file changed, 37 insertions(+), 39 deletions(-)
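
For orientation before the diff: the map_memory machinery computes, per
translation level, the range of table indices that [vstart, vend] spans
(compute_indices) and writes that many entries (populate_entries); the
entry count at one level also determines how many next-level table
pages get allocated. A simplified, hypothetical model of the core index
arithmetic (illustration only, not the kernel's code, and it ignores
the cross-page index offsets the real macro tracks):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model, not kernel code: number of entries needed at
     * a translation level with the given shift to span [vstart, vend]
     * (vend inclusive). */
    static uint64_t entries_at_level(uint64_t vstart, uint64_t vend, int shift)
    {
        return (vend >> shift) - (vstart >> shift) + 1;
    }

    int main(void)
    {
        uint64_t vstart = 0x3d800000;   /* a 40 MiB image ...          */
        uint64_t vend   = 0x3fffffff;   /* ... ending just below 1 GiB */

        /* 20 entries: one per 2 MiB block at the PMD level (shift 21) */
        printf("PMD-level entries: %llu\n",
               (unsigned long long)entries_at_level(vstart, vend, 21));
        return 0;
    }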

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 9fdde2f9cc0f..eb54c0289c8a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -122,29 +122,6 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
 	b	dcache_inval_poc		// tail call
 SYM_CODE_END(preserve_boot_args)
 
-/*
- * Macro to create a table entry to the next page.
- *
- *	tbl:	page table address
- *	virt:	virtual address
- *	shift:	#imm page table shift
- *	ptrs:	#imm pointers per table page
- *
- * Preserves:	virt
- * Corrupts:	ptrs, tmp1, tmp2
- * Returns:	tbl -> next level table page address
- */
-	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
-	add	\tmp1, \tbl, #PAGE_SIZE
-	phys_to_pte \tmp2, \tmp1
-	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
-	lsr	\tmp1, \virt, #\shift
-	sub	\ptrs, \ptrs, #1
-	and	\tmp1, \tmp1, \ptrs		// table index
-	str	\tmp2, [\tbl, \tmp1, lsl #3]
-	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
-	.endm
-
 /*
  * Macro to populate page table entries, these entries can be pointers to the next level
  * or last level entries pointing to physical memory.
@@ -209,15 +186,27 @@ SYM_CODE_END(preserve_boot_args)
  *	phys:	physical address corresponding to vstart - physical memory is contiguous
  *	order:  #imm 2log(number of entries in PGD table)
  *
+ * If extra_shift is set, an extra level will be populated if the end address does
+ * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range.
+ *
  * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
  * Preserves:	vstart, flags
  * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
  */
-	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv
+	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
 	sub \vend, \vend, #1
 	add \rtbl, \tbl, #PAGE_SIZE
 	mov \count, #0
 
+	.ifnb	\extra_shift
+	tst	\vend, #~((1 << (\extra_shift)) - 1)
+	b.eq	.L_\@
+	compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	mov \sv, \rtbl
+	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+	mov \tbl, \sv
+	.endif
+.L_\@:
 	compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
@@ -284,20 +273,32 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
 	/*
-	 * VA_BITS may be too small to allow for an ID mapping to be created
-	 * that covers system RAM if that is located sufficiently high in the
-	 * physical address space. So for the ID map, use an extended virtual
-	 * range in that case, and configure an additional translation level
-	 * if needed.
+	 * The ID map carries a 1:1 mapping of the physical address range
+	 * covered by the loaded image, which could be anywhere in DRAM. This
+	 * means that the required size of the VA (== PA) space is decided at
+	 * boot time, and could be more than the configured size of the VA
+	 * space for ordinary kernel and user space mappings.
+	 *
+	 * There are three cases to consider here:
+	 * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover
+	 *   the placement of the image. In this case, we configure one extra
+	 *   level of translation on the fly for the ID map only. (This case
+	 *   also covers 42-bit VA/52-bit PA on 64k pages).
+	 *
+	 * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can
+	 *   only happen when using 64k pages, in which case we need to extend
+	 *   the root level table rather than add a level. Note that we can
+	 *   treat this case as 'always extended' as long as we take care not
+	 *   to program an unsupported T0SZ value into the TCR register.
+	 *
+	 * - Combinations that would require two additional levels of
+	 *   translation are not supported, e.g., VA_BITS==36 on 16k pages, or
+	 *   VA_BITS==39/4k pages with 5-level paging, where the input address
+	 *   requires more than 47 or 48 bits, respectively.
 	 */
-	idmap_get_t0sz x5
-	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
-	b.ge	1f			// .. then skip VA range extension
-
 #if (VA_BITS < 48)
 #define IDMAP_PGD_ORDER	(VA_BITS - PGDIR_SHIFT)
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
-#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
 
 	/*
 	 * If VA_BITS < 48, we have to configure an additional table level.
@@ -309,20 +310,17 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 #if VA_BITS != EXTRA_SHIFT
 #error "Mismatch between VA_BITS and page size/number of translation levels"
 #endif
-
-	mov	x2, EXTRA_PTRS
-	create_table_entry x0, x3, EXTRA_SHIFT, x2, x5, x6
 #else
 #define IDMAP_PGD_ORDER	(PHYS_MASK_SHIFT - PGDIR_SHIFT)
+#define EXTRA_SHIFT
 	/*
 	 * If VA_BITS == 48, we don't have to configure an additional
 	 * translation level, but the top-level table has more entries.
 	 */
 #endif
-1:
 	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
 
-	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14
+	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
 
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
-- 
2.30.2
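
To make the EXTRA_SHIFT arithmetic concrete: each table level resolves
PAGE_SHIFT - 3 bits of VA, so a level stacked on top of the root starts
resolving at PGDIR_SHIFT + PAGE_SHIFT - 3, and the
'#if VA_BITS != EXTRA_SHIFT' guard requires this to coincide with
VA_BITS. A hypothetical standalone check of the supported
configurations (assumed values, not code from the patch):

    #include <assert.h>

    /* Hypothetical illustration, not kernel code: one extra level on
     * top of the root starts resolving at PGDIR_SHIFT + PAGE_SHIFT - 3,
     * and the patch's "#if VA_BITS != EXTRA_SHIFT" guard requires that
     * to equal VA_BITS, i.e., the extra level extends the VA space
     * contiguously up from the configured range. */
    static void check_extra_shift(int page_shift, int pgdir_shift, int va_bits)
    {
        int extra_shift = pgdir_shift + page_shift - 3;

        assert(extra_shift == va_bits);
    }

    int main(void)
    {
        check_extra_shift(12, 30, 39);  /* 4k pages,  VA_BITS=39 */
        check_extra_shift(14, 36, 47);  /* 16k pages, VA_BITS=47 */
        check_extra_shift(16, 29, 42);  /* 64k pages, VA_BITS=42 */
        return 0;
    }

All three asserts pass, matching the one-extra-level configurations the
comment in the patch enumerates for the 39 <= VA_BITS < 48 case.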



Thread overview: 32+ messages
2022-04-11  9:47 [PATCH v3 00/30] arm64: support WXN and entry with MMU enabled Ard Biesheuvel
2022-04-11  9:47 ` [PATCH v3 01/30] arm64: head: move kimage_vaddr variable into C file Ard Biesheuvel
2022-04-11  9:47 ` [PATCH v3 02/30] arm64: mm: make vabits_actual a build time constant if possible Ard Biesheuvel
2022-04-11  9:47 ` [PATCH v3 03/30] arm64: head: move assignment of idmap_t0sz to C code Ard Biesheuvel
2022-04-11  9:47 ` [PATCH v3 04/30] arm64: head: drop idmap_ptrs_per_pgd Ard Biesheuvel
2022-04-11  9:47 ` [PATCH v3 05/30] arm64: head: simplify page table mapping macros (slightly) Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 06/30] arm64: head: switch to map_memory macro for the extended ID map Ard Biesheuvel [this message]
2022-04-11  9:48 ` [PATCH v3 07/30] arm64: head: split off idmap creation code Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 08/30] arm64: kernel: drop unnecessary PoC cache clean+invalidate Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 09/30] arm64: head: pass ID map root table address to __enable_mmu() Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 10/30] arm64: mm: provide idmap pointer to cpu_replace_ttbr1() Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 11/30] arm64: head: add helper function to remap regions in early page tables Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 12/30] arm64: head: cover entire kernel image in initial ID map Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 13/30] arm64: head: use relative references to the RELA and RELR tables Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 14/30] arm64: head: create a temporary FDT mapping in the initial ID map Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 15/30] arm64: idreg-override: use early FDT mapping in " Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 16/30] arm64: head: factor out TTBR1 assignment into a macro Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 17/30] arm64: head: populate kernel page tables with MMU and caches on Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 18/30] arm64: head: record CPU boot mode after enabling the MMU Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 19/30] arm64: kaslr: deal with init called with VA randomization enabled Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 20/30] arm64: head: relocate kernel only a single time if KASLR is enabled Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 21/30] arm64: head: remap the kernel text/inittext region read-only Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 22/30] arm64: setup: drop early FDT pointer helpers Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 23/30] arm64: mm: move ro_after_init section into the data segment Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 24/30] arm64: mm: add support for WXN memory translation attribute Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 25/30] arm64: head: record the MMU state at primary entry Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 26/30] arm64: head: avoid cache invalidation when entering with the MMU on Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 27/30] arm64: head: clean the ID map page to the PoC Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 28/30] efi: libstub: pass image handle to handle_kernel_image() Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 29/30] efi/arm64: libstub: run image in place if randomized by the loader Ard Biesheuvel
2022-04-11  9:48 ` [PATCH v3 30/30] arm64: efi/libstub: enter with the MMU on if executing in place Ard Biesheuvel
2022-04-12 16:59 ` [PATCH v3 00/30] arm64: support WXN and entry with MMU enabled Kees Cook
