From: Steve Capper <steve.capper@arm.com>
To: linux-arm-kernel@lists.infradead.org
Cc: crecklin@redhat.com, ard.biesheuvel@linaro.org,
marc.zyngier@arm.com, catalin.marinas@arm.com,
bhsharma@redhat.com, will.deacon@arm.com
Subject: [PATCH v2 12/12] arm64: mm: Introduce 52-bit Kernel VAs
Date: Tue, 28 May 2019 17:10:26 +0100
Message-ID: <20190528161026.13193-13-steve.capper@arm.com>
In-Reply-To: <20190528161026.13193-1-steve.capper@arm.com>
Most of the machinery is now in place to enable 52-bit kernel VAs that
are detectable at boot time.
This patch adds a Kconfig option for 52-bit user and kernel addresses,
plumbs in the requisite CONFIG_ macros, and sets TCR.T1SZ,
physvirt_offset and vmemmap at early boot.
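For reference, TCR_EL1.T0SZ/T1SZ encode 64 minus the VA size, so moving
between 48-bit and 52-bit kernel VAs at boot comes down to programming a
different T1SZ value. A minimal C sketch of that relationship
(illustrative only, not part of the patch):

static inline unsigned int tcr_txsz(unsigned int vabits)
{
        return 64 - vabits;     /* 16 for 48-bit VAs, 12 for 52-bit VAs */
}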
Signed-off-by: Steve Capper <steve.capper@arm.com>
---
arch/arm64/Kconfig | 31 ++++++++++++++++++++++----
arch/arm64/include/asm/assembler.h | 9 +++++++-
arch/arm64/include/asm/memory.h | 2 +-
arch/arm64/include/asm/mmu_context.h | 2 +-
arch/arm64/include/asm/pgtable-hwdef.h | 2 +-
arch/arm64/kernel/head.S | 4 ++--
arch/arm64/mm/init.c | 10 +++++++++
arch/arm64/mm/proc.S | 6 ++++-
8 files changed, 55 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0cedcb4a0a01..d22432c2caca 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -276,11 +276,14 @@ config KERNEL_MODE_NEON
config FIX_EARLYCON_MEM
def_bool y
+config HAS_VA_BITS_52
+ def_bool n
+
config PGTABLE_LEVELS
int
default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
- default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52)
+ default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || HAS_VA_BITS_52)
default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
@@ -294,12 +297,12 @@ config ARCH_PROC_KCORE_TEXT
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && !KASAN_SW_TAGS
+ default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || HAS_VA_BITS_52) && !KASAN_SW_TAGS
default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
- default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && KASAN_SW_TAGS
+ default 0xefff900000000000 if (ARM64_VA_BITS_48 || HAS_VA_BITS_52) && KASAN_SW_TAGS
default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
@@ -739,6 +742,7 @@ config ARM64_VA_BITS_48
config ARM64_USER_VA_BITS_52
bool "52-bit (user)"
depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+ select HAS_VA_BITS_52
help
Enable 52-bit virtual addressing for userspace when explicitly
requested via a hint to mmap(). The kernel will continue to
@@ -751,11 +755,28 @@ config ARM64_USER_VA_BITS_52
If unsure, select 48-bit virtual addressing instead.
+config ARM64_USER_KERNEL_VA_BITS_52
+ bool "52-bit (user & kernel)"
+ depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+ select HAS_VA_BITS_52
+ help
+ Enable 52-bit virtual addressing for userspace when explicitly
+ requested via a hint to mmap(). The kernel will also use 52-bit
+ virtual addresses for its own mappings (provided HW support for
+ this feature is available, otherwise it reverts to 48-bit).
+
+ NOTE: Enabling 52-bit virtual addressing in conjunction with
+ ARMv8.3 Pointer Authentication will result in the PAC being
+ reduced from 7 bits to 3 bits, which may have a significant
+ impact on its susceptibility to brute-force attacks.
+
+ If unsure, select 48-bit virtual addressing instead.
+
endchoice
config ARM64_FORCE_52BIT
bool "Force 52-bit virtual addresses for userspace"
- depends on ARM64_USER_VA_BITS_52 && EXPERT
+ depends on HAS_VA_BITS_52 && EXPERT
help
For systems with 52-bit userspace VAs enabled, the kernel will attempt
to maintain compatibility with older software by providing 48-bit VAs
@@ -773,9 +794,11 @@ config ARM64_VA_BITS
default 42 if ARM64_VA_BITS_42
default 47 if ARM64_VA_BITS_47
default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52
+ default 52 if ARM64_USER_KERNEL_VA_BITS_52
config ARM64_VA_BITS_MIN
int
+ default 48 if ARM64_USER_KERNEL_VA_BITS_52
default ARM64_VA_BITS
choice
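The split above between ARM64_VA_BITS (compile-time maximum) and
ARM64_VA_BITS_MIN (guaranteed minimum) is what lets a 52-bit-configured
kernel still boot on pre-ARMv8.2-LVA hardware. A standalone sketch of the
relationship; cpu_has_lva() is a hypothetical stand-in for the CPU
capability check:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the ARMv8.2-LVA capability check. */
static bool cpu_has_lva(void)
{
        return false;
}

int main(void)
{
        const unsigned int va_bits = 52;     /* CONFIG_ARM64_VA_BITS */
        const unsigned int va_bits_min = 48; /* CONFIG_ARM64_VA_BITS_MIN */
        unsigned int va_bits_actual = cpu_has_lva() ? va_bits : va_bits_min;

        printf("running with %u-bit kernel VAs\n", va_bits_actual);
        return 0;
}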
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index a42c392ed1e1..1f8d7b891ec9 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -356,6 +356,13 @@ alternative_endif
bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm
+/*
+ * tcr_set_t1sz - update TCR.T1SZ
+ */
+ .macro tcr_set_t1sz, valreg, t1sz
+ bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .endm
+
/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
@@ -565,7 +572,7 @@ alternative_endif
* to be nop'ed out when dealing with 52-bit kernel VAs.
*/
.macro restore_ttbr1, ttbr
-#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_KERNEL_VA_BITS_52)
+#ifdef CONFIG_HAS_VA_BITS_52
bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
.endm
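For readers less used to bfi: the new tcr_set_t1sz macro is a plain
bitfield insert into TCR_EL1 bits [21:16]. A C equivalent of the macro
(a sketch only; the OFFSET/WIDTH values match the architectural T1SZ
field):

#include <stdint.h>

#define TCR_T1SZ_OFFSET 16 /* T1SZ occupies TCR_EL1 bits [21:16] */
#define TCR_TxSZ_WIDTH  6

/* C rendering of: bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH */
static uint64_t tcr_set_t1sz(uint64_t tcr, uint64_t t1sz)
{
        uint64_t mask = ((UINT64_C(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T1SZ_OFFSET;

        return (tcr & ~mask) | ((t1sz << TCR_T1SZ_OFFSET) & mask);
}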
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c817ee71e201..8e3c12a3414d 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -77,7 +77,7 @@
#define KERNEL_START _text
#define KERNEL_END _end
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_HAS_VA_BITS_52
#define MAX_USER_VA_BITS 52
#else
#define MAX_USER_VA_BITS VA_BITS
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 133ecb65b602..1ae26b357e1e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -74,7 +74,7 @@ extern u64 idmap_ptrs_per_pgd;
static inline bool __cpu_uses_extended_idmap(void)
{
- if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
+ if (IS_ENABLED(CONFIG_HAS_VA_BITS_52))
return false;
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index a69259cc1f16..73dc4a8c0df3 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -316,7 +316,7 @@
#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
#endif
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_HAS_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \
(UL(1) << (48 - PGDIR_SHIFT))) * 8)
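A worked example of the offset above, assuming 64KB pages with three
page-table levels (so PGDIR_SHIFT = 42): a 52-bit pgd has 1024 entries
and a 48-bit pgd has 64, so the 48-bit table sits (1024 - 64) * 8 = 7680
(0x1e00) bytes into the 52-bit-sized allocation -- a multiple of 64, as
the comment requires. As a checkable sketch:

#include <assert.h>
#include <stdint.h>

#define PGDIR_SHIFT 42 /* assumes 64KB pages, 3 levels */

int main(void)
{
        uint64_t off = ((UINT64_C(1) << (52 - PGDIR_SHIFT)) -
                        (UINT64_C(1) << (48 - PGDIR_SHIFT))) * 8;

        assert(off == 0x1e00); /* 7680 bytes */
        assert(off % 64 == 0); /* TTBR1 adjustment stays 64-byte aligned */
        return 0;
}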
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8bc1b533a912..31c43abf30aa 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -319,7 +319,7 @@ __create_page_tables:
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_HAS_VA_BITS_52
mrs_s x6, SYS_ID_AA64MMFR2_EL1
and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
mov x5, #52
@@ -817,7 +817,7 @@ ENTRY(__enable_mmu)
ENDPROC(__enable_mmu)
ENTRY(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_HAS_VA_BITS_52
ldr_l x0, vabits_user
cmp x0, #52
b.ne 2f
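Both hunks above key off ID_AA64MMFR2_EL1.LVA (bits [19:16]), which
advertises large-VA support; secondaries that cannot match the boot CPU's
choice are rejected. In C terms, roughly as follows (read_id_aa64mmfr2()
is a hypothetical stand-in for the mrs in the asm):

#include <stdint.h>

#define ID_AA64MMFR2_LVA_SHIFT 16

/* Hypothetical stand-in for: mrs_s x6, SYS_ID_AA64MMFR2_EL1 */
extern uint64_t read_id_aa64mmfr2(void);

static unsigned int boot_vabits(void)
{
        uint64_t lva = read_id_aa64mmfr2() &
                       (UINT64_C(0xf) << ID_AA64MMFR2_LVA_SHIFT);

        return lva ? 52 : 48;
}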
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 6844365c0a51..c0c6d2dc39b2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -335,6 +335,16 @@ void __init arm64_memblock_init(void)
vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+ /*
+ * If we are running with a 52-bit kernel VA config on a system that
+ * does not support it, we have to offset our vmemmap and physvirt_offset
+ * such that we avoid the 52-bit portion of the direct linear map
+ */
+ if (IS_ENABLED(CONFIG_ARM64_USER_KERNEL_VA_BITS_52) && (VA_BITS_ACTUAL != 52)) {
+ vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+ physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+ }
+
/*
* Remove the memory that we will not be able to cover with the
* linear mapping. Take care not to clip the kernel which may be
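The adjustment in this hunk follows from _PAGE_OFFSET(va) = -(1 << va)
(see the PAGE_OFFSET rework earlier in this series): the stretch of
linear-map VA that exists only in the 52-bit layout spans
_PAGE_OFFSET(48) - _PAGE_OFFSET(52) = 2^52 - 2^48 bytes. A standalone
sketch of the arithmetic, assuming 64KB pages (PAGE_SHIFT = 16):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_OFFSET(va) (-(UINT64_C(1) << (va)))
#define PAGE_SHIFT 16 /* assumes 64KB pages */

int main(void)
{
        uint64_t delta = _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

        printf("unused VA span: %#llx\n", (unsigned long long)delta);
        printf("vmemmap shift:  %#llx pages\n",
               (unsigned long long)(delta >> PAGE_SHIFT));
        return 0;
}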
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index fdd626d34274..62554d92e903 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -449,7 +449,7 @@ ENTRY(__cpu_setup)
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
tcr_clear_errata_bits x10, x9, x5
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_HAS_VA_BITS_52
ldr_l x9, vabits_user
sub x9, xzr, x9
add x9, x9, #64
@@ -458,6 +458,10 @@ ENTRY(__cpu_setup)
#endif
tcr_set_t0sz x10, x9
+#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
+ tcr_set_t1sz x10, x9
+#endif
+
/*
* Set the IPS bits in TCR_EL1.
*/
--
2.20.1