linux-arm-kernel.lists.infradead.org archive mirror
From: Ard Biesheuvel <ardb@kernel.org>
To: linux@armlinux.org.uk, linux-arm-kernel@lists.infradead.org
Cc: linux-hardening@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>,
	Nicolas Pitre <nico@fluxnic.net>, Arnd Bergmann <arnd@arndb.de>,
	Kees Cook <keescook@chromium.org>,
	Keith Packard <keithpac@amazon.com>,
	Linus Walleij <linus.walleij@linaro.org>,
	Nick Desaulniers <ndesaulniers@google.com>,
	Tony Lindgren <tony@atomide.com>, Marc Zyngier <maz@kernel.org>,
	Vladimir Murzin <vladimir.murzin@arm.com>,
	Jesse Taube <mr.bossman075@gmail.com>
Subject: [PATCH v5 31/32] ARM: mm: prepare vmalloc_seq handling for use under SMP
Date: Mon, 24 Jan 2022 18:47:43 +0100
Message-ID: <20220124174744.1054712-32-ardb@kernel.org>
In-Reply-To: <20220124174744.1054712-1-ardb@kernel.org>

Currently, the vmalloc_seq counter is only used to keep track of changes
in the vmalloc region on !SMP builds, which means there is no need to
deal with concurrency explicitly.
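
For reference, the pre-existing pattern is a plain counter comparison
with no explicit ordering; roughly the code that the hunks below
remove:

  /* !SMP: no concurrency to worry about, a plain compare is enough */
  if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
          __check_vmalloc_seq(mm);  /* sync kernel vmalloc PGD entries into mm */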

However, a subsequent patch will wire up this same mechanism to ensure
that vmap'ed stacks are mapped by the active mm before switching to a
task, and there we need to guarantee that changes to the page tables
are visible to other CPUs once they observe a change in the sequence
count.
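
The idiom relied on here is the usual release/acquire pairing: the
writer updates the page tables and then publishes the new counter
value with a store-release, and a reader that observes the new value
with acquire semantics is guaranteed to also observe the page table
update. A minimal sketch (the reader side is illustrative only; the
actual consumer is wired up in the next patch and may use different
primitives):

  /* writer, as in __check_vmalloc_seq() below: sync the PGD entries ... */
  memcpy(pgd_offset(mm, VMALLOC_START), pgd_offset_k(VMALLOC_START),
         sizeof(pgd_t) * (pgd_index(VMALLOC_END) - pgd_index(VMALLOC_START)));
  /* ... then publish the sequence number with release semantics */
  atomic_set_release(&mm->context.vmalloc_seq, seq);

  /* reader (illustrative): acquire pairs with the store-release above */
  if (atomic_read_acquire(&mm->context.vmalloc_seq) ==
      atomic_read(&init_mm.context.vmalloc_seq)) {
          /* the copied PGD entries are guaranteed to be visible here */
  }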

Since LPAE needs none of this, break the vmalloc_seq counter check out
into a separate static inline helper, and fold a check for LPAE into
it.
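
The resulting helper (see the mmu_context.h hunk below) looks like
this; since IS_ENABLED(CONFIG_ARM_LPAE) evaluates to a compile-time
constant, the whole check is elided on LPAE builds:

  static inline void check_vmalloc_seq(struct mm_struct *mm)
  {
          /* IS_ENABLED() folds to a constant: LPAE builds drop this entirely */
          if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
              unlikely(atomic_read(&mm->context.vmalloc_seq) !=
                       atomic_read(&init_mm.context.vmalloc_seq)))
                  __check_vmalloc_seq(mm);
  }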

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm/include/asm/mmu.h         |  2 +-
 arch/arm/include/asm/mmu_context.h | 13 +++++++++++--
 arch/arm/mm/context.c              |  3 +--
 arch/arm/mm/ioremap.c              | 18 +++++++++++-------
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 1592a4264488..e049723840d3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -10,7 +10,7 @@ typedef struct {
 #else
 	int		switch_pending;
 #endif
-	unsigned int	vmalloc_seq;
+	atomic_t	vmalloc_seq;
 	unsigned long	sigpage;
 #ifdef CONFIG_VDSO
 	unsigned long	vdso;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 84e58956fcab..71a26986efb9 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -23,6 +23,16 @@
 
 void __check_vmalloc_seq(struct mm_struct *mm);
 
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+		     atomic_read(&init_mm.context.vmalloc_seq)))
+		__check_vmalloc_seq(mm);
+}
+#endif
+
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
@@ -52,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 48091870db89..4204ffa2d104 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -240,8 +240,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned int cpu = smp_processor_id();
 	u64 asid;
 
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	/*
 	 * We cannot update the pgd and the ASID atomicly with classic
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 197f8eb3a775..aa08bcb72db9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -117,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);
 
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
-	unsigned int seq;
+	int seq;
 
 	do {
-		seq = init_mm.context.vmalloc_seq;
+		seq = atomic_read(&init_mm.context.vmalloc_seq);
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
-		mm->context.vmalloc_seq = seq;
-	} while (seq != init_mm.context.vmalloc_seq);
+		/*
+		 * Use a store-release so that other CPUs that observe the
+		 * counter's new value are guaranteed to see the results of the
+		 * memcpy as well.
+		 */
+		atomic_set_release(&mm->context.vmalloc_seq, seq);
+	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -157,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 			 * Note: this is still racy on SMP machines.
 			 */
 			pmd_clear(pmdp);
-			init_mm.context.vmalloc_seq++;
+			atomic_inc_return_release(&init_mm.context.vmalloc_seq);
 
 			/*
 			 * Free the page table, if there was one.
@@ -174,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
-		__check_vmalloc_seq(current->active_mm);
+	check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
-- 
2.30.2


Thread overview: 38+ messages
2022-01-24 17:47 [PATCH v5 00/32] ARM vmap'ed and IRQ stacks roundup Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 01/32] ARM: riscpc: drop support for IOMD_IRQREQC/IOMD_IRQREQD IRQ groups Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 02/32] ARM: riscpc: use GENERIC_IRQ_MULTI_HANDLER Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 03/32] ARM: footbridge: " Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 04/32] ARM: iop32x: offset IRQ numbers by 1 Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 05/32] ARM: iop32x: use GENERIC_IRQ_MULTI_HANDLER Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 06/32] ARM: remove old-style irq entry Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 07/32] irqchip: nvic: Use GENERIC_IRQ_MULTI_HANDLER Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 08/32] ARM: decompressor: disable stack protector Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 09/32] ARM: stackprotector: prefer compiler for TLS based per-task protector Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 10/32] ARM: entry: preserve thread_info pointer in switch_to Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 11/32] ARM: module: implement support for PC-relative group relocations Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 12/32] ARM: assembler: add optimized ldr/str macros to load variables from memory Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 13/32] ARM: percpu: add SMP_ON_UP support Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 14/32] ARM: use TLS register for 'current' on !SMP as well Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 15/32] ARM: smp: defer TPIDRURO update for SMP v6 configurations too Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 16/32] ARM: implement THREAD_INFO_IN_TASK for uniprocessor systems Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 17/32] ARM: assembler: introduce bl_r macro Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 18/32] ARM: unwind: support unwinding across multiple stacks Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 19/32] ARM: export dump_mem() to other objects Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 20/32] ARM: unwind: dump exception stack from calling frame Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 21/32] ARM: backtrace-clang: avoid crash on bogus frame pointer Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 22/32] ARM: implement IRQ stacks Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 23/32] ARM: call_with_stack: add unwind support Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 24/32] ARM: run softirqs on the per-CPU IRQ stack Ard Biesheuvel
2022-03-22  9:04   ` Sebastian Andrzej Siewior
2022-03-22  9:35     ` Ard Biesheuvel
2022-03-22 11:29       ` Sebastian Andrzej Siewior
2022-01-24 17:47 ` [PATCH v5 25/32] ARM: memcpy: use frame pointer as unwind anchor Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 26/32] ARM: memmove: " Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 27/32] ARM: memset: clean up unwind annotations Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 28/32] ARM: unwind: disregard unwind info before stack frame is set up Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 29/32] ARM: entry: rework stack realignment code in svc_entry Ard Biesheuvel
2022-01-24 17:47 ` [PATCH v5 30/32] ARM: switch_to: clean up Thumb2 code path Ard Biesheuvel
2022-01-24 17:47 ` Ard Biesheuvel [this message]
2022-01-24 17:47 ` [PATCH v5 32/32] ARM: implement support for vmap'ed stacks Ard Biesheuvel
2022-01-24 17:56 ` [PATCH v5 00/32] ARM vmap'ed and IRQ stacks roundup Russell King (Oracle)
2022-01-24 17:57   ` Ard Biesheuvel
