From: Ard Biesheuvel <ardb@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: Ard Biesheuvel <ardb@kernel.org>, Marc Zyngier <maz@kernel.org>,
	Will Deacon <will@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Kees Cook <keescook@chromium.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Mark Brown <broonie@kernel.org>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Richard Henderson <richard.henderson@linaro.org>,
	Ryan Roberts <ryan.roberts@arm.com>
Subject: [PATCH v2 06/19] arm64: head: remove order argument from early mapping routine
Date: Thu, 24 Nov 2022 13:39:19 +0100
Message-ID: <20221124123932.2648991-7-ardb@kernel.org>
In-Reply-To: <20221124123932.2648991-1-ardb@kernel.org>

When creating mappings in the upper region of the address space, it is
important to know the order of the table being created, i.e., the number
of bits that are being translated at the level in question. Bits beyond
that number do not contribute to the virtual address, and need to be
masked out.
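
As a minimal C sketch (with a hypothetical helper name; this mirrors
the ubfx instruction used by compute_indices in the diff below), the
extraction with an explicit order looks like this:

  /* Hypothetical C model of: ubfx \istart, \vstart, \shift, \order
   * Extract 'order' bits of 'vaddr' starting at bit 'shift'; any
   * bits above the translated range are masked away.
   */
  static inline unsigned long tbl_index(unsigned long vaddr,
                                        unsigned int shift,
                                        unsigned int order)
  {
          return (vaddr >> shift) & ((1UL << order) - 1);
  }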

Now that we no longer use the asm kernel page table creation code for
mappings in the upper region, those bits are guaranteed to be zero
anyway, so we don't have to account for them in the masking.
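
For instance (hypothetical values, assuming a 16k granule with 48-bit
VAs, where PGDIR_SHIFT is 47 and the root table translates one bit):

  tbl_index(va, 47, 1);       /* exact root order: 1 bit   */
  tbl_index(va, 47, 14 - 3);  /* PAGE_SHIFT - 3:  11 bits  */

Both calls return the same index for any va below 1 << 48, i.e., for
any TTBR0 address, since bits [57:48] are all zero there.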

This means we can simply use the maximum order for all tables,
including the root level table. Doing so will also allow us to
transparently use the same routines to create the initial ID map
covering 4 levels when the VA space is configured for 5.

Note that the root level tables are always statically allocated as full
pages regardless of how many VA bits they translate.
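
As a worked example under the same hypothetical 16k/48-bit
configuration:

  PAGE_SHIFT       = 14                       /* 16k granule    */
  PGDIR_SHIFT      = 47
  root index bits  = 48 - 47 = 1              /* 2 entries used */
  entries per page = 2^(PAGE_SHIFT - 3) = 2048

so even the widest index the new code can produce (0 .. 2047) stays
within the statically allocated root page.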

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/head.S | 26 +++++++++-----------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3b3c5e8e84af..a37525a5ee34 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -158,7 +158,6 @@ SYM_CODE_END(preserve_boot_args)
  *	vstart:	virtual address of start of range
  *	vend:	virtual address of end of range - we map [vstart, vend]
  *	shift:	shift used to transform virtual address into index
- *	order:  #imm 2log(number of entries in page table)
  *	istart:	index in table corresponding to vstart
  *	iend:	index in table corresponding to vend
  *	count:	On entry: how many extra entries were required in previous level, scales
@@ -168,10 +167,10 @@ SYM_CODE_END(preserve_boot_args)
  * Preserves:	vstart, vend
  * Returns:	istart, iend, count
  */
-	.macro compute_indices, vstart, vend, shift, order, istart, iend, count
-	ubfx	\istart, \vstart, \shift, \order
-	ubfx	\iend, \vend, \shift, \order
-	add	\iend, \iend, \count, lsl \order
+	.macro compute_indices, vstart, vend, shift, istart, iend, count
+	ubfx	\istart, \vstart, \shift, #PAGE_SHIFT - 3
+	ubfx	\iend, \vend, \shift, #PAGE_SHIFT - 3
+	add	\iend, \iend, \count, lsl #PAGE_SHIFT - 3
 	sub	\count, \iend, \istart
 	.endm
 
@@ -186,7 +185,6 @@ SYM_CODE_END(preserve_boot_args)
  *	vend:	virtual address of end of range - we map [vstart, vend - 1]
  *	flags:	flags to use to map last level entries
  *	phys:	physical address corresponding to vstart - physical memory is contiguous
- *	order:  #imm 2log(number of entries in PGD table)
  *
  * If extra_shift is set, an extra level will be populated if the end address does
  * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range.
@@ -195,7 +193,7 @@ SYM_CODE_END(preserve_boot_args)
  * Preserves:	vstart, flags
  * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
  */
-	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
+	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, istart, iend, tmp, count, sv, extra_shift
 	sub \vend, \vend, #1
 	add \rtbl, \tbl, #PAGE_SIZE
 	mov \count, #0
@@ -203,32 +201,32 @@ SYM_CODE_END(preserve_boot_args)
 	.ifnb	\extra_shift
 	tst	\vend, #~((1 << (\extra_shift)) - 1)
 	b.eq	.L_\@
-	compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #\extra_shift, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 	.endif
 .L_\@:
-	compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
+	compute_indices \vstart, \vend, #PGDIR_SHIFT, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 
 #if INIT_IDMAP_TABLE_LEVELS > 3
-	compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #PUD_SHIFT, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 #endif
 
 #if INIT_IDMAP_TABLE_LEVELS > 2
-	compute_indices \vstart, \vend, #INIT_IDMAP_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #INIT_IDMAP_TABLE_SHIFT, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 #endif
 
-	compute_indices \vstart, \vend, #INIT_IDMAP_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #INIT_IDMAP_BLOCK_SHIFT, \istart, \iend, \count
 	bic \rtbl, \phys, #INIT_IDMAP_BLOCK_SIZE - 1
 	populate_entries \tbl, \rtbl, \istart, \iend, \flags, #INIT_IDMAP_BLOCK_SIZE, \tmp
 	.endm
@@ -294,7 +292,6 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	 *   requires more than 47 or 48 bits, respectively.
 	 */
 #if (VA_BITS < 48)
-#define IDMAP_PGD_ORDER	(VA_BITS - PGDIR_SHIFT)
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
 
 	/*
@@ -308,7 +305,6 @@ SYM_FUNC_START_LOCAL(create_idmap)
 #error "Mismatch between VA_BITS and page size/number of translation levels"
 #endif
 #else
-#define IDMAP_PGD_ORDER	(PHYS_MASK_SHIFT - PGDIR_SHIFT)
 #define EXTRA_SHIFT
 	/*
 	 * If VA_BITS == 48, we don't have to configure an additional
@@ -320,7 +316,7 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	adrp	x6, _end + MAX_FDT_SIZE + INIT_IDMAP_BLOCK_SIZE
 	mov	x7, INIT_IDMAP_RX_MMUFLAGS
 
-	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
+	map_memory x0, x1, x3, x6, x7, x3, x10, x11, x12, x13, x14, EXTRA_SHIFT
 
 	/* Remap BSS and the kernel page tables r/w in the ID map */
 	adrp	x1, _text
-- 
2.38.1.584.g0f3c55d4c2-goog

