From: Ard Biesheuvel <ardb@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: Ard Biesheuvel <ardb@kernel.org>, Marc Zyngier <maz@kernel.org>,
Will Deacon <will@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Kees Cook <keescook@chromium.org>,
Catalin Marinas <catalin.marinas@arm.com>,
Mark Brown <broonie@kernel.org>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Richard Henderson <richard.henderson@linaro.org>
Subject: [RFC PATCH 4/7] arm64: mm: Support use of 52-bit pgdirs on 48-bit/16k systems
Date: Thu, 17 Nov 2022 14:24:20 +0100 [thread overview]
Message-ID: <20221117132423.1252942-5-ardb@kernel.org> (raw)
In-Reply-To: <20221117132423.1252942-1-ardb@kernel.org>
On LVA/64k granule configurations, we simply extend the level 1 root
page table to cover 52 bits of VA space, and if the system in question
only supports 48 bits, we point TTBR1 to the pgdir entry that covers the
start of the 48-bit addressable part of the VA space.
Sadly, we cannot use the same trick on LPA2/16k granule configurations.
This is due to the fact that TTBR registers require 64 byte aligned
addresses, while the 48-bit addressable entries in question will not
appear at a 64 byte aligned address if the entire 52-bit VA table is
aligned to its size (which is another requirement for TTBR registers).
Fortunately, we are only dealing with two entries in this case: one that
covers the kernel/vmalloc region and one covering the linear map. This
makes it feasible to simply clone those entries into the start of the
page table after the first mapping into the respective region is
created.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/arm64/include/asm/assembler.h | 17 +++++------------
arch/arm64/include/asm/mmu.h | 18 ++++++++++++++++++
arch/arm64/kernel/cpufeature.c | 1 +
arch/arm64/kernel/pi/map_kernel.c | 2 +-
arch/arm64/mm/mmu.c | 2 ++
5 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 4cb84dc6e2205a91..9fa62f102c1c94e9 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -609,11 +609,15 @@ alternative_endif
* but we have to add an offset so that the TTBR1 address corresponds with the
* pgdir entry that covers the lowest 48-bit addressable VA.
*
+ * Note that this trick only works for 64k pages - 4k pages use an additional
+ * paging level, and on 16k pages, we would end up with a TTBR address that is
+ * not 64 byte aligned.
+ *
* orr is used as it can cover the immediate value (and is idempotent).
* ttbr: Value of ttbr to set, modified.
*/
.macro offset_ttbr1, ttbr, tmp
-#ifdef CONFIG_ARM64_VA_BITS_52
+#if defined(CONFIG_ARM64_VA_BITS_52) && defined(CONFIG_ARM64_64K_PAGES)
mrs \tmp, tcr_el1
and \tmp, \tmp, #TCR_T1SZ_MASK
cmp \tmp, #TCR_T1SZ(VA_BITS_MIN)
@@ -622,17 +626,6 @@ alternative_endif
#endif
.endm
-/*
- * Perform the reverse of offset_ttbr1.
- * bic is used as it can cover the immediate value and, in future, won't need
- * to be nop'ed out when dealing with 52-bit kernel VAs.
- */
- .macro restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_VA_BITS_52
- bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-#endif
- .endm
-
/*
* Arrange a physical address in a TTBR register, taking care of 52-bit
* addresses.
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index a93d495d6e8c94a2..aa9fdefdb8c8b9e6 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,7 @@
#include <linux/refcount.h>
#include <asm/cpufeature.h>
+#include <asm/pgtable-prot.h>
typedef struct {
atomic64_t id;
@@ -72,6 +73,23 @@ extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);
+static inline void sync_kernel_pgdir_root_entries(pgd_t *pgdir)
+{
+ /*
+ * On 16k pages, we cannot advance the TTBR1 address to the pgdir entry
+ * that covers the start of the 48-bit addressable kernel VA space like
+ * we do on 64k pages when the hardware does not support LPA2, since the
+ * resulting address would not be 64 byte aligned. So instead, copy the
+ * pgdir entry that covers the mapping we just created to the start of
+ * the page table.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_16K_PAGES) &&
+ VA_BITS > VA_BITS_MIN && !lpa2_is_enabled()) {
+ pgdir[0] = pgdir[PTRS_PER_PGD - 2];
+ pgdir[1] = pgdir[PTRS_PER_PGD - 1];
+ }
+}
+
#define INIT_MM_CONTEXT(name) \
.pgd = swapper_pg_dir,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4a631a6e7e42b981..d19f9c1a93d9d000 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1768,6 +1768,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc),
KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
kpti_ng_pgd_alloc, 0);
+ sync_kernel_pgdir_root_entries(kpti_ng_temp_pgd);
}
cpu_install_idmap();
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 6c5d78dcb90e55c5..3b0b3fecf2bd533b 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -217,8 +217,8 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset)
map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin,
__initdata_end, data_prot, false);
map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot, true);
+ sync_kernel_pgdir_root_entries(init_pg_dir);
dsb(ishst);
-
idmap_cpu_replace_ttbr1(init_pg_dir);
if (twopass) {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 63fb62e16a1f8873..90733567f0b89a31 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -665,6 +665,7 @@ static int __init map_entry_trampoline(void)
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
entry_tramp_text_size(), prot,
__pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
+ sync_kernel_pgdir_root_entries(tramp_pg_dir);
/* Map both the text and data into the kernel page table */
for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
@@ -729,6 +730,7 @@ void __init paging_init(void)
idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
map_mem(swapper_pg_dir);
+ sync_kernel_pgdir_root_entries(swapper_pg_dir);
memblock_allow_resize();
--
2.35.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
next prev parent reply other threads:[~2022-11-17 13:28 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-17 13:24 [RFC PATCH 0/7] arm64: Enable LPA2 support for 16k pages Ard Biesheuvel
2022-11-17 13:24 ` [RFC PATCH 1/7] arm64: ptdump: Disregard unaddressable VA space Ard Biesheuvel
2022-11-17 13:24 ` [RFC PATCH 2/7] arm64: mm: Disable all 52-bit virtual addressing support with arm64.nolva Ard Biesheuvel
2022-11-17 13:24 ` [RFC PATCH 3/7] arm64: mm: Wire up TCR.DS bit to PTE shareability fields Ard Biesheuvel
2022-11-17 13:24 ` Ard Biesheuvel [this message]
2022-11-17 13:24 ` [RFC PATCH 5/7] arm64: mm: Add LPA2 support to phys<->pte conversion routines Ard Biesheuvel
2022-11-17 13:24 ` [RFC PATCH 6/7] arm64: Enable LPA2 at boot if supported by the system Ard Biesheuvel
2022-11-17 13:24 ` [RFC PATCH 7/7] arm64: Enable 52-bit virtual addressing for 16k granule configs Ard Biesheuvel
2022-11-18 10:38 ` [RFC PATCH 0/7] arm64: Enable LPA2 support for 16k pages Catalin Marinas
2022-11-18 10:50 ` Ard Biesheuvel
2022-11-18 11:04 ` Ryan Roberts
2022-11-18 11:53 ` Anshuman Khandual
2022-11-18 11:18 ` Anshuman Khandual
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221117132423.1252942-5-ardb@kernel.org \
--to=ardb@kernel.org \
--cc=anshuman.khandual@arm.com \
--cc=broonie@kernel.org \
--cc=catalin.marinas@arm.com \
--cc=keescook@chromium.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=mark.rutland@arm.com \
--cc=maz@kernel.org \
--cc=richard.henderson@linaro.org \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox