From: Ard Biesheuvel <ardb+git@google.com>
To: linux-arm-kernel@lists.infradead.org
Cc: Ard Biesheuvel <ardb@kernel.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	 Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	 Ryan Roberts <ryan.roberts@arm.com>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	 Kees Cook <keescook@chromium.org>
Subject: [PATCH v8 43/43] arm64: mm: add support for WXN memory translation attribute
Date: Wed, 14 Feb 2024 13:29:29 +0100
Message-ID: <20240214122845.2033971-88-ardb+git@google.com>
In-Reply-To: <20240214122845.2033971-45-ardb+git@google.com>

From: Ard Biesheuvel <ardb@kernel.org>

The AArch64 virtual memory system supports a global WXN control, which
can be enabled to make all writable mappings implicitly non-executable.
This is a useful hardening feature, as it prevents mistakes in managing
page table permissions from being exploited to attack the system.

When enabled at EL1, the restrictions apply to both EL1 and EL0. EL1 is
completely under our control, and has been cleaned up to allow WXN to be
enabled from boot onwards. EL0 is not under our control, but given that
widely deployed security features such as SELinux or PaX already limit
the ability of user space to create mappings that are writable and
executable at the same time, the impact of enabling this for EL0 is
expected to be limited. (For this reason, common user space libraries
that have a legitimate need for manipulating executable code already
carry fallbacks such as [0].)
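
To make that concrete, the shape of such a fallback is sketched below.
This is illustrative only (memfd_create() and all names here are this
writeup's choice, not code taken from [0]): the same pages are mapped
twice, once writable and once executable, so that no single mapping is
ever writable and executable at the same time.

  #define _GNU_SOURCE
  #include <stddef.h>
  #include <sys/mman.h>
  #include <unistd.h>

  /* Map size bytes twice: *rw for emitting code, *rx for running it. */
  static int alloc_dual_mapping(size_t size, void **rw, void **rx)
  {
          int fd = memfd_create("jit-code", 0); /* anonymous in-memory file */

          if (fd < 0 || ftruncate(fd, size) < 0) {
                  if (fd >= 0)
                          close(fd);
                  return -1;
          }

          *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
          *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
          close(fd); /* the two mappings keep the pages alive */

          return (*rw == MAP_FAILED || *rx == MAP_FAILED) ? -1 : 0;
  }

After emitting code through the writable view, the caller still has to
perform cache maintenance before branching into the executable view,
e.g. __builtin___clear_cache() on the *rx alias.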

If enabled at compile time, the feature can still be disabled at boot
by passing arm64.nowxn on the kernel command line.

[0] https://github.com/libffi/libffi/blob/master/src/closures.c#L440

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
---
 arch/arm64/Kconfig                    | 11 ++++++
 arch/arm64/include/asm/cpufeature.h   |  8 +++++
 arch/arm64/include/asm/mman.h         | 36 ++++++++++++++++++++
 arch/arm64/include/asm/mmu_context.h  | 30 +++++++++++++++-
 arch/arm64/kernel/pi/idreg-override.c |  4 ++-
 arch/arm64/kernel/pi/map_kernel.c     | 23 +++++++++++++
 arch/arm64/mm/proc.S                  |  6 ++++
 7 files changed, 116 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 160856de9bbb..7761ffc6dbcf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1608,6 +1608,17 @@ config RODATA_FULL_DEFAULT_ENABLED
 	  This requires the linear region to be mapped down to pages,
 	  which may adversely affect performance in some cases.
 
+config ARM64_WXN
+	bool "Enable WXN attribute so all writable mappings are non-exec"
+	help
+	  Set the WXN bit in the SCTLR system register so that all writable
+	  mappings are treated as if the PXN/UXN bit is set as well.
+	  If this is set to Y, it can still be disabled at runtime by
+	  passing 'arm64.nowxn' on the kernel command line.
+
+	  Only enable this if you do not need to run software that relies
+	  on being able to execute from writable mappings.
+
 config ARM64_SW_TTBR0_PAN
 	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 	help
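
Note that the Kconfig option sets the default policy: enforcement can
be relaxed per boot, but a kernel built without ARM64_WXN has no way
to turn WXN on later. A typical opt-in (illustrative) looks like:

  # .config fragment: enforce WXN from early boot onwards
  CONFIG_ARM64_WXN=y

  # kernel command line: per-boot escape hatch for workloads that
  # still depend on writable executable mappings
  arm64.nowxn
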
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a8f97690ce1f..ee33b7e52da7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -18,6 +18,7 @@
 #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
 #define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
 #define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8
+#define ARM64_SW_FEATURE_OVERRIDE_NOWXN		12
 
 #ifndef __ASSEMBLY__
 
@@ -962,6 +963,13 @@ static inline bool kaslr_disabled_cmdline(void)
 	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
 }
 
+static inline bool arm64_wxn_enabled(void)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_WXN))
+		return false;
+	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN);
+}
+
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);
 
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a6154..6d4940342ba7 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -35,11 +35,40 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
 }
 #define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
 
+static inline bool arm64_check_wx_prot(unsigned long prot,
+				       struct task_struct *tsk)
+{
+	/*
+	 * When we are running with SCTLR_ELx.WXN==1, writable mappings are
+	 * implicitly non-executable. This means we should reject such mappings
+	 * when user space attempts to create them using mmap() or mprotect().
+	 */
+	if (arm64_wxn_enabled() &&
+	    ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC))) {
+		/*
+		 * User space libraries such as libffi carry elaborate
+		 * heuristics to decide whether it is worth it to even attempt
+		 * to create writable executable mappings, as PaX- or
+		 * SELinux-enabled systems will outright reject it. They will usually
+		 * fall back to something else (e.g., two separate shared
+		 * mmap()s of a temporary file) on failure.
+		 */
+		pr_info_ratelimited(
+			"process %s (%d) attempted to create PROT_WRITE+PROT_EXEC mapping\n",
+			tsk->comm, tsk->pid);
+		return false;
+	}
+	return true;
+}
+
 static inline bool arch_validate_prot(unsigned long prot,
 	unsigned long addr __always_unused)
 {
 	unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;
 
+	if (!arm64_check_wx_prot(prot, current))
+		return false;
+
 	if (system_supports_bti())
 		supported |= PROT_BTI;
 
@@ -50,6 +79,13 @@ static inline bool arch_validate_prot(unsigned long prot,
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
 
+static inline bool arch_validate_mmap_prot(unsigned long prot,
+					   unsigned long addr)
+{
+	return arm64_check_wx_prot(prot, current);
+}
+#define arch_validate_mmap_prot arch_validate_mmap_prot
+
 static inline bool arch_validate_flags(unsigned long vm_flags)
 {
 	if (!system_supports_mte())
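
Together with the do_mmap() hook added in the previous patch, this
makes W+X requests fail outright from user space's point of view. A
quick sanity check (the errno values are inferred from the generic
callers: EACCES for the mmap() case, EINVAL from arch_validate_prot()
for the mprotect() case):

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
          /* direct W+X request: rejected via arch_validate_mmap_prot() */
          void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          printf("mmap(rwx):    %s\n",
                 p == MAP_FAILED ? strerror(errno) : "succeeded");

          /* upgrading a writable mapping: rejected via arch_validate_prot() */
          p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          printf("mprotect(wx): %s\n",
                 mprotect(p, 4096, PROT_WRITE | PROT_EXEC) < 0 ?
                 strerror(errno) : "succeeded");
          return 0;
  }
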
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c768d16b81a4..f0fe2d09d139 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -20,13 +20,41 @@
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
 #include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
 
 extern bool rodata_full;
 
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+}
+
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+		bool write, bool execute, bool foreign)
+{
+	if (IS_ENABLED(CONFIG_ARM64_WXN) && execute &&
+	    (vma->vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
+		pr_warn_ratelimited(
+			"process %s (%d) attempted to execute from writable memory\n",
+			current->comm, current->pid);
+		/* disallow unless the nowxn override is set */
+		return !arm64_wxn_enabled();
+	}
+	return true;
+}
+
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
 	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index aad399796e81..bccfee34f62f 100644
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -189,6 +189,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
 		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
 		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
 		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
+		FIELD("nowxn", ARM64_SW_FEATURE_OVERRIDE_NOWXN, NULL),
 		{}
 	},
 };
@@ -221,8 +222,9 @@ static const struct {
 	{ "arm64.nomops",		"id_aa64isar2.mops=0" },
 	{ "arm64.nomte",		"id_aa64pfr1.mte=0" },
 	{ "nokaslr",			"arm64_sw.nokaslr=1" },
-	{ "rodata=off",			"arm64_sw.rodataoff=1" },
+	{ "rodata=off",			"arm64_sw.rodataoff=1 arm64_sw.nowxn=1" },
 	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
+	{ "arm64.nowxn",		"arm64_sw.nowxn=1" },
 };
 
 static int __init parse_hexdigit(const char *p, u64 *v)
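
Note the change to the rodata=off alias above: with rodata protection
off, the kernel itself keeps mappings that are writable and executable
at the same time, which WXN would silently turn non-executable, so
disabling rodata now has to disable WXN as well. Both spellings end up
setting the same software override field:

  arm64.nowxn    # disable WXN enforcement only
  rodata=off     # implies arm64_sw.nowxn=1 as of this patch
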
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 5fa08e13e17e..cac1e1f63c44 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -132,6 +132,25 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
 	idmap_cpu_replace_ttbr1(swapper_pg_dir);
 }
 
+static void noinline __section(".idmap.text") disable_wxn(void)
+{
+	u64 sctlr = read_sysreg(sctlr_el1) & ~SCTLR_ELx_WXN;
+
+	/*
+	 * We cannot safely clear the WXN bit while the MMU and caches are on,
+	 * so turn the MMU off, flush the TLBs and turn it on again but with
+	 * the WXN bit cleared this time.
+	 */
+	asm("	msr	sctlr_el1, %0		;"
+	    "	isb				;"
+	    "	tlbi    vmalle1			;"
+	    "	dsb     nsh			;"
+	    "	isb				;"
+	    "	msr     sctlr_el1, %1		;"
+	    "	isb				;"
+	    ::	"r"(sctlr & ~SCTLR_ELx_M), "r"(sctlr));
+}
+
 static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
 {
 	u64 sctlr = read_sysreg(sctlr_el1);
@@ -229,6 +248,10 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
 	if (va_bits > VA_BITS_MIN)
 		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));
 
+	if (IS_ENABLED(CONFIG_ARM64_WXN) &&
+	    arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN))
+		disable_wxn();
+
 	/*
 	 * The virtual KASLR displacement modulo 2MiB is decided by the
 	 * physical placement of the image, as otherwise, we might not be able
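
For reference, the disable_wxn() sequence above, annotated (this is a
paraphrase of the same instructions, not extra code). Clearing
SCTLR_ELx.M switches the MMU off, so the CPU keeps fetching from
1:1-mapped physical addresses, which is exactly why the function must
live in .idmap.text:

  msr sctlr_el1, %0   // MMU off and WXN clear (%0 = sctlr & ~SCTLR_ELx_M)
  isb                 // synchronize the SCTLR_EL1 write
  tlbi vmalle1        // discard TLB entries created while WXN was set
  dsb nsh             // wait for the invalidation to complete
  isb                 // ensure ordering before re-enabling translation
  msr sctlr_el1, %1   // MMU back on, WXN still clear (%1 = sctlr)
  isb                 // synchronize before returning
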
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9d40f3ffd8d2..bfd2ad896108 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -546,6 +546,12 @@ alternative_else_nop_endif
 	 * Prepare SCTLR
 	 */
 	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
+#ifdef CONFIG_ARM64_WXN
+	ldr_l	x1, arm64_sw_feature_override + FTR_OVR_VAL_OFFSET
+	tst	x1, #0xf << ARM64_SW_FEATURE_OVERRIDE_NOWXN
+	orr	x1, x0, #SCTLR_ELx_WXN
+	csel	x0, x0, x1, ne
+#endif
 	ret					// return to head.S
 
 	.unreq	mair
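
The tst/orr/csel sequence is compact; in C it would read roughly as
follows (a sketch for clarity only, since __cpu_setup() runs long
before a C environment is available):

  u64 sctlr = INIT_SCTLR_EL1_MMU_ON;
  u64 nowxn = arm64_sw_feature_override.val &
              (0xfUL << ARM64_SW_FEATURE_OVERRIDE_NOWXN);

  if (!nowxn)                 /* no override given: enforce WXN */
          sctlr |= SCTLR_ELx_WXN;
  /* sctlr is handed back to head.S in x0 */
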
-- 
2.43.0.687.g38aa6559b0-goog

