From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
To: x86@kernel.org, Jon Kohler <jon@nutanix.com>,
Nikolay Borisov <nik.borisov@suse.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Josh Poimboeuf <jpoimboe@kernel.org>,
David Kaplan <david.kaplan@amd.com>,
Sean Christopherson <seanjc@google.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Andrii Nakryiko <andrii@kernel.org>,
KP Singh <kpsingh@kernel.org>, Jiri Olsa <jolsa@kernel.org>,
"David S. Miller" <davem@davemloft.net>,
David Laight <david.laight.linux@gmail.com>,
Andy Lutomirski <luto@kernel.org>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
David Ahern <dsahern@kernel.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
Eduard Zingerman <eddyz87@gmail.com>, Song Liu <song@kernel.org>,
Yonghong Song <yonghong.song@linux.dev>,
John Fastabend <john.fastabend@gmail.com>,
Stanislav Fomichev <sdf@fomichev.me>, Hao Luo <haoluo@google.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Jonathan Corbet <corbet@lwn.net>
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
Asit Mallick <asit.k.mallick@intel.com>,
Tao Zhang <tao1.zhang@intel.com>,
bpf@vger.kernel.org, netdev@vger.kernel.org,
linux-doc@vger.kernel.org
Subject: [PATCH v8 07/10] x86/vmscape: Use static_call() for predictor flush
Date: Tue, 24 Mar 2026 11:18:13 -0700
Message-ID: <20260324-vmscape-bhb-v8-7-68bb524b3ab9@linux.intel.com>
In-Reply-To: <20260324-vmscape-bhb-v8-0-68bb524b3ab9@linux.intel.com>
Adding more VMSCAPE mitigation options at exit-to-userspace would normally
require a chain of checks to decide which mitigation to apply. Here, the
mitigation is simply a function call, and the function is selected once at
boot. Dispatching through a static_call() to the mitigating function
therefore avoids adding more feature flags and per-exit checks.

Replace the flag-based mitigation selector with a static_call(). This also
frees up the X86_FEATURE_IBPB_EXIT_TO_USER feature bit.
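
For reference, the static_call() pattern in play boils down to the
following standalone sketch (illustrative names, not part of this patch):

  #include <linux/init.h>
  #include <linux/static_call.h>

  static void flush_ibpb(void)
  {
          /* ... issue the predictor flush ... */
  }

  /* Default target is NULL: the call site is patched to a NOP. */
  DEFINE_STATIC_CALL_NULL(my_flush, flush_ibpb);

  static void __init pick_mitigation(bool use_ibpb)
  {
          /* Boot-time selection: patch the call site once. */
          if (use_ibpb)
                  static_call_update(my_flush, flush_ibpb);
  }

  static void exit_path(void)
  {
          /* Direct call if a target was installed, NOP otherwise. */
          static_call_cond(my_flush)();
  }

static_call_cond() is usable here because the target returns void;
static_call_query() returns the currently installed target, which is what
vmscape_mitigation_enabled() below tests for.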
Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
---
arch/x86/Kconfig | 1 +
arch/x86/include/asm/cpufeatures.h | 2 +-
arch/x86/include/asm/entry-common.h | 7 +++----
arch/x86/include/asm/nospec-branch.h | 3 +++
arch/x86/include/asm/processor.h | 1 +
arch/x86/kernel/cpu/bugs.c | 14 +++++++++++++-
arch/x86/kvm/x86.c | 2 +-
7 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e2df1b147184..5b8def9ddb98 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2720,6 +2720,7 @@ config MITIGATION_TSA
config MITIGATION_VMSCAPE
bool "Mitigate VMSCAPE"
depends on KVM
+ depends on HAVE_STATIC_CALL
default y
help
Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dbe104df339b..b4d529dd6d30 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -503,7 +503,7 @@
#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
-#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
+/* Free */
#define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */
#define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */
#define X86_FEATURE_SGX_EUPDATESVN (21*32+17) /* Support for ENCLS[EUPDATESVN] instruction */
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 78b143673ca7..783e7cb50cae 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -4,6 +4,7 @@
#include <linux/randomize_kstack.h>
#include <linux/user-return-notifier.h>
+#include <linux/static_call_types.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
@@ -94,10 +95,8 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
*/
choose_random_kstack_offset(rdtsc());
- /* Avoid unnecessary reads of 'x86_predictor_flush_exit_to_user' */
- if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
- this_cpu_read(x86_predictor_flush_exit_to_user)) {
- write_ibpb();
+ if (unlikely(this_cpu_read(x86_predictor_flush_exit_to_user))) {
+ static_call_cond(vmscape_predictor_flush)();
this_cpu_write(x86_predictor_flush_exit_to_user, false);
}
}
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 0a55b1c64741..e45e49f1e0c9 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -542,6 +542,9 @@ static inline void indirect_branch_prediction_barrier(void)
:: "rax", "rcx", "rdx", "memory");
}
+#include <linux/static_call_types.h>
+DECLARE_STATIC_CALL(vmscape_predictor_flush, write_ibpb);
+
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a24c7805acdb..20ab4dd588c6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -753,6 +753,7 @@ enum mds_mitigations {
};
extern bool gds_ucode_mitigated(void);
+extern bool vmscape_mitigation_enabled(void);
/*
* Make previous memory operations globally visible before
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 68e2df3e3bf5..a7dee7ec6ea3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -144,6 +144,12 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
*/
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+/*
+ * Controls how vmscape is mitigated e.g. via IBPB or BHB-clear
+ * sequence. This defaults to no mitigation.
+ */
+DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
+
#undef pr_fmt
#define pr_fmt(fmt) "mitigations: " fmt
@@ -3129,8 +3135,14 @@ static void __init vmscape_update_mitigation(void)
static void __init vmscape_apply_mitigation(void)
{
if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
- setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+ static_call_update(vmscape_predictor_flush, write_ibpb);
+}
+
+bool vmscape_mitigation_enabled(void)
+{
+ return !!static_call_query(vmscape_predictor_flush);
}
+EXPORT_SYMBOL_FOR_KVM(vmscape_mitigation_enabled);
#undef pr_fmt
#define pr_fmt(fmt) fmt
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 45d7cfedc507..e204482e64f3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11463,7 +11463,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* set for the CPU that actually ran the guest, and not the CPU that it
* may migrate to.
*/
- if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
+ if (vmscape_mitigation_enabled())
this_cpu_write(x86_predictor_flush_exit_to_user, true);
/*
--
2.34.1
Thread overview: 31+ messages
2026-03-24 18:16 [PATCH v8 00/10] VMSCAPE optimization for BHI variant Pawan Gupta
2026-03-24 18:16 ` [PATCH v8 01/10] x86/bhi: x86/vmscape: Move LFENCE out of clear_bhb_loop() Pawan Gupta
2026-03-24 20:22 ` Borislav Petkov
2026-03-24 21:30 ` Pawan Gupta
2026-03-24 18:16 ` [PATCH v8 02/10] x86/bhi: Make clear_bhb_loop() effective on newer CPUs Pawan Gupta
2026-03-24 20:59 ` Borislav Petkov
2026-03-24 22:13 ` Pawan Gupta
2026-03-25 20:37 ` Borislav Petkov
2026-03-25 22:40 ` David Laight
2026-03-26 8:39 ` Pawan Gupta
2026-03-26 9:15 ` David Laight
2026-03-26 10:01 ` Borislav Petkov
2026-03-26 10:45 ` David Laight
2026-03-26 20:29 ` Pawan Gupta
2026-03-25 17:50 ` Jim Mattson
2026-03-25 18:44 ` Pawan Gupta
2026-03-25 19:41 ` David Laight
2026-03-25 22:29 ` Pawan Gupta
2026-03-24 18:17 ` [PATCH v8 03/10] x86/bhi: Rename clear_bhb_loop() to clear_bhb_loop_nofence() Pawan Gupta
2026-03-24 18:17 ` [PATCH v8 04/10] x86/vmscape: Rename x86_ibpb_exit_to_user to x86_predictor_flush_exit_to_user Pawan Gupta
2026-03-24 18:17 ` [PATCH v8 05/10] x86/vmscape: Move mitigation selection to a switch() Pawan Gupta
2026-03-24 18:17 ` [PATCH v8 06/10] x86/vmscape: Use write_ibpb() instead of indirect_branch_prediction_barrier() Pawan Gupta
2026-03-24 18:18 ` Pawan Gupta [this message]
2026-03-24 19:09 ` [PATCH v8 07/10] x86/vmscape: Use static_call() for predictor flush bot+bpf-ci
2026-03-24 19:51 ` Pawan Gupta
2026-03-24 18:18 ` [PATCH v8 08/10] x86/vmscape: Deploy BHB clearing mitigation Pawan Gupta
2026-03-24 19:09 ` bot+bpf-ci
2026-03-24 19:46 ` Pawan Gupta
2026-03-24 18:18 ` [PATCH v8 09/10] x86/vmscape: Resolve conflict between attack-vectors and vmscape=force Pawan Gupta
2026-03-24 18:19 ` [PATCH v8 10/10] x86/vmscape: Add cmdline vmscape=on to override attack vector controls Pawan Gupta
2026-03-24 19:09 ` bot+bpf-ci