From: Valentin Schneider <vschneid@redhat.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Josh Poimboeuf <jpoimboe@kernel.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Arnd Bergmann <arnd@arndb.de>,
Frederic Weisbecker <frederic@kernel.org>,
"Paul E. McKenney" <paulmck@kernel.org>,
Jason Baron <jbaron@akamai.com>,
Steven Rostedt <rostedt@goodmis.org>,
Ard Biesheuvel <ardb@kernel.org>,
Sami Tolvanen <samitolvanen@google.com>,
"David S. Miller" <davem@davemloft.net>,
Neeraj Upadhyay <neeraj.upadhyay@kernel.org>,
Joel Fernandes <joelagnelf@nvidia.com>,
Josh Triplett <josh@joshtriplett.org>,
Boqun Feng <boqun.feng@gmail.com>,
Uladzislau Rezki <urezki@gmail.com>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Mel Gorman <mgorman@suse.de>,
Andrew Morton <akpm@linux-foundation.org>,
Masahiro Yamada <masahiroy@kernel.org>,
Han Shen <shenhan@google.com>, Rik van Riel <riel@surriel.com>,
Jann Horn <jannh@google.com>,
Dan Carpenter <dan.carpenter@linaro.org>,
Oleg Nesterov <oleg@redhat.com>,
Juri Lelli <juri.lelli@redhat.com>,
Clark Williams <williams@redhat.com>,
Tomas Glozar <tglozar@redhat.com>,
Yair Podemsky <ypodemsk@redhat.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
Daniel Wagner <dwagner@suse.de>, Petr Tesarik <ptesarik@suse.com>,
Shrikanth Hegde <sshegde@linux.ibm.com>
Subject: [RFC PATCH v8 08/10] x86/mm/pti: Introduce a kernel/user CR3 software signal
Date: Tue, 24 Mar 2026 10:47:59 +0100 [thread overview]
Message-ID: <20260324094801.3092968-9-vschneid@redhat.com> (raw)
In-Reply-To: <20260324094801.3092968-1-vschneid@redhat.com>
Later commits will rely on being able to check whether a remote CPU is
using the kernel or the user CR3.
This software signal needs to be updated before the actual CR3 write; in other
words, it always immediately precedes it:
KERNEL_CR3_LOADED := 1
SWITCH_TO_KERNEL_CR3
[...]
KERNEL_CR3_LOADED := 0
SWITCH_TO_USER_CR3
The variable also gets mapped into the user-space-visible pages.
I tried really hard not to do that, and at some point had something mostly
working by aliasing the variable through the cpu_entry_area, accessed like
so before the switch to the kernel CR3:
subq $10, %rsp
sgdt (%rsp)
movq 2(%rsp), \scratch_reg /* GDT address */
addq $10, %rsp
movl $1, CPU_ENTRY_AREA_kernel_cr3(\scratch_reg)
however this explodes when running 64-bit user code that invokes SYSCALL,
since the scratch reg is %rsp itself, and I figured this was headache enough.
This will only really be useful for NOHZ_FULL CPUs, but it should be
cheaper to unconditionally update an otherwise-unused per-CPU variable living
in its own cacheline than to check a shared cpumask such as
housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)
at every entry.
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
arch/x86/Kconfig | 14 +++++++++++++
arch/x86/entry/calling.h | 13 ++++++++++++
arch/x86/entry/syscall_64.c | 4 ++++
arch/x86/include/asm/tlbflush.h | 3 +++
arch/x86/mm/pti.c | 36 ++++++++++++++++++++++-----------
5 files changed, 58 insertions(+), 12 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 80527299f859a..f680e83cd5962 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2192,6 +2192,20 @@ config ADDRESS_MASKING
The capability can be used for efficient address sanitizers (ASAN)
implementation and for optimizations in JITs.
+config TRACK_CR3
+ def_bool n
+ prompt "Track which CR3 is in use"
+ depends on X86_64 && MITIGATION_PAGE_TABLE_ISOLATION && NO_HZ_FULL
+ help
+ This option adds a software signal that allows checking remotely
+ whether a CPU is using the user or the kernel page table.
+
+ This allows further optimizations for NOHZ_FULL CPUs.
+
+ This obviously makes the user<->kernel transition overhead even worse.
+
+ If unsure, say N.
+
config HOTPLUG_CPU
def_bool y
depends on SMP
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 77e2d920a6407..4099b7d86efd9 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -9,6 +9,7 @@
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
+#include <asm/jump_label.h>
/*
@@ -170,8 +171,17 @@ For 32-bit we have the following conventions - kernel is built with
andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm
+.macro NOTE_CR3_SWITCH scratch_reg:req in_kernel:req
+#ifdef CONFIG_TRACK_CR3
+ STATIC_BRANCH_FALSE_LIKELY housekeeping_overridden, .Lend_\@
+ movl \in_kernel, PER_CPU_VAR(kernel_cr3_loaded)
+.Lend_\@:
+#endif // CONFIG_TRACK_CR3
+.endm
+
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ NOTE_CR3_SWITCH \scratch_reg $1
mov %cr3, \scratch_reg
ADJUST_KERNEL_CR3 \scratch_reg
mov \scratch_reg, %cr3
@@ -182,6 +192,7 @@ For 32-bit we have the following conventions - kernel is built with
PER_CPU_VAR(cpu_tlbstate + TLB_STATE_user_pcid_flush_mask)
.macro SWITCH_TO_USER_CR3 scratch_reg:req scratch_reg2:req
+ NOTE_CR3_SWITCH \scratch_reg $0
mov %cr3, \scratch_reg
ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
@@ -229,6 +240,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
+ NOTE_CR3_SWITCH \scratch_reg $1
movq %cr3, \scratch_reg
movq \scratch_reg, \save_reg
/*
@@ -257,6 +269,7 @@ For 32-bit we have the following conventions - kernel is built with
bt $PTI_USER_PGTABLE_BIT, \save_reg
jnc .Lend_\@
+ NOTE_CR3_SWITCH \scratch_reg $0
ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
/*
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index b6e68ea98b839..7583f71978856 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -83,6 +83,10 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
return false;
}
+#ifdef CONFIG_TRACK_CR3
+DEFINE_PER_CPU_PAGE_ALIGNED(bool, kernel_cr3_loaded) = true;
+#endif
+
/* Returns true to return using SYSRET, or false to use IRET */
__visible noinstr bool do_syscall_64(struct pt_regs *regs, int nr)
{
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 00daedfefc1b0..3b3aceee701e6 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -17,6 +17,9 @@
#include <asm/pgtable.h>
DECLARE_PER_CPU(u64, tlbstate_untag_mask);
+#ifdef CONFIG_TRACK_CR3
+DECLARE_PER_CPU_PAGE_ALIGNED(bool, kernel_cr3_loaded);
+#endif
void __flush_tlb_all(void);
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index f7546e9e8e896..e75450cabd3a6 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -440,6 +440,18 @@ static void __init pti_clone_p4d(unsigned long addr)
*user_p4d = *kernel_p4d;
}
+static void __init pti_clone_percpu(unsigned long va)
+{
+ phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+ pte_t *target_pte;
+
+ target_pte = pti_user_pagetable_walk_pte(va, false);
+ if (WARN_ON(!target_pte))
+ return;
+
+ *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
+}
+
/*
* Clone the CPU_ENTRY_AREA and associated data into the user space visible
* page table.
@@ -450,25 +462,25 @@ static void __init pti_clone_user_shared(void)
pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+ /*
+ * This is done for all possible CPUs during boot to ensure that it's
+ * propagated to all mms.
+ */
for_each_possible_cpu(cpu) {
/*
* The SYSCALL64 entry code needs one word of scratch space
* in which to spill a register. It lives in the sp2 slot
* of the CPU's TSS.
- *
- * This is done for all possible CPUs during boot to ensure
- * that it's propagated to all mms.
*/
+ pti_clone_percpu((unsigned long)&per_cpu(cpu_tss_rw, cpu));
- unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
- phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
- pte_t *target_pte;
-
- target_pte = pti_user_pagetable_walk_pte(va, false);
- if (WARN_ON(!target_pte))
- return;
-
- *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
+#ifdef CONFIG_TRACK_CR3
+ /*
+ * The entry code needs access to the @kernel_cr3_loaded percpu
+ * variable before the kernel CR3 is loaded.
+ */
+ pti_clone_percpu((unsigned long)&per_cpu(kernel_cr3_loaded, cpu));
+#endif
}
}
--
2.52.0
next prev parent reply other threads:[~2026-03-24 9:50 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-24 9:47 [RFC PATCH v8 00/10] context_tracking,x86: Defer some IPIs until a user->kernel transition Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 01/10] objtool: Make validate_call() recognize indirect calls to pv_ops[] Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 02/10] objtool: Flesh out warning related to pv_ops[] calls Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 03/10] objtool: Always pass a section to validate_unwind_hints() Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 04/10] x86/retpoline: Make warn_thunk_thunk .noinstr Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 05/10] sched/isolation: Mark housekeeping_overridden key as __ro_after_init Valentin Schneider
2026-03-24 15:17 ` Shrikanth Hegde
2026-03-24 19:46 ` Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 06/10] objtool: Add .entry.text validation for static branches Valentin Schneider
2026-03-24 9:47 ` [RFC PATCH v8 07/10] x86/jump_label: Add ASM support for static_branch_likely() Valentin Schneider
2026-03-24 9:47 ` Valentin Schneider [this message]
2026-03-24 9:48 ` [RFC PATCH v8 09/10] context_tracking,x86: Defer kernel text patching IPIs when tracking CR3 switches Valentin Schneider
2026-03-24 9:48 ` [RFC PATCH v8 10/10] x86/mm, mm/vmalloc: Defer kernel TLB flush " Valentin Schneider
2026-03-24 15:01 ` [syzbot ci] Re: context_tracking,x86: Defer some IPIs until a user->kernel transition syzbot ci
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260324094801.3092968-9-vschneid@redhat.com \
--to=vschneid@redhat.com \
--cc=acme@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=ardb@kernel.org \
--cc=arnd@arndb.de \
--cc=boqun.feng@gmail.com \
--cc=bp@alien8.de \
--cc=dan.carpenter@linaro.org \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=dwagner@suse.de \
--cc=frederic@kernel.org \
--cc=hpa@zytor.com \
--cc=jannh@google.com \
--cc=jbaron@akamai.com \
--cc=joelagnelf@nvidia.com \
--cc=josh@joshtriplett.org \
--cc=jpoimboe@kernel.org \
--cc=juri.lelli@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=masahiroy@kernel.org \
--cc=mathieu.desnoyers@efficios.com \
--cc=mgorman@suse.de \
--cc=mingo@redhat.com \
--cc=mtosatti@redhat.com \
--cc=neeraj.upadhyay@kernel.org \
--cc=oleg@redhat.com \
--cc=paulmck@kernel.org \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=ptesarik@suse.com \
--cc=riel@surriel.com \
--cc=rostedt@goodmis.org \
--cc=samitolvanen@google.com \
--cc=shenhan@google.com \
--cc=sshegde@linux.ibm.com \
--cc=tglozar@redhat.com \
--cc=tglx@linutronix.de \
--cc=urezki@gmail.com \
--cc=williams@redhat.com \
--cc=x86@kernel.org \
--cc=ypodemsk@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox