* [PATCH 1/3] x86/smp: Use static_call for arch_send_call_function_single_ipi()
2025-12-22 20:14 [PATCH 0/3] x86: Use static_call for three smp_ops methods Eric Dumazet
@ 2025-12-22 20:14 ` Eric Dumazet
2026-01-16 21:22 ` [tip: x86/core] " tip-bot2 for Eric Dumazet
2025-12-22 20:14 ` [PATCH 2/3] x86/smp: Use static_call for arch_smp_send_reschedule() Eric Dumazet
` (2 subsequent siblings)
3 siblings, 1 reply; 8+ messages in thread
From: Eric Dumazet @ 2025-12-22 20:14 UTC (permalink / raw)
To: Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen,
Peter Zijlstra, x86, H . Peter Anvin
Cc: linux-kernel, Eric Dumazet, Eric Dumazet
Use static_call to avoid an indirect call, especially expensive
with retpoline.
Signed-off-by: Eric Dumazet <edumazet@google.com>
---
arch/x86/include/asm/smp.h | 10 ++++++++--
arch/x86/kernel/smp.c | 10 ++++++++++
arch/x86/xen/smp_hvm.c | 1 +
arch/x86/xen/smp_pv.c | 1 +
4 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 84951572ab8170578ca5c4a3437c66e9809f34c2..891a5d16bcd52d2da0b71d928d3728afe3d15ade 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
#include <linux/thread_info.h>
+#include <linux/static_call_types.h>
#include <asm/cpumask.h>
@@ -42,6 +43,12 @@ struct smp_ops {
void (*send_call_func_single_ipi)(int cpu);
};
+void x86_smp_ops_static_call_update(void);
+
+void native_send_call_func_single_ipi(int cpu);
+DECLARE_STATIC_CALL(x86_send_call_func_single_ipi,
+ native_send_call_func_single_ipi);
+
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);
@@ -92,7 +99,7 @@ static inline void arch_smp_send_reschedule(int cpu)
static inline void arch_send_call_function_single_ipi(int cpu)
{
- smp_ops.send_call_func_single_ipi(cpu);
+ static_call(x86_send_call_func_single_ipi)(cpu);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -122,7 +129,6 @@ void __noreturn mwait_play_dead(unsigned int eax_hint);
void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
-void native_send_call_func_single_ipi(int cpu);
asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index b014e6d229f9519add6f3fb315f1a2f375675a50..2c8fdf1f59d953e49a6beeba8f76f5fe62bd5db7 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -300,6 +300,16 @@ struct smp_ops smp_ops = {
};
EXPORT_SYMBOL_GPL(smp_ops);
+DEFINE_STATIC_CALL(x86_send_call_func_single_ipi,
+ native_send_call_func_single_ipi);
+
+void x86_smp_ops_static_call_update(void)
+{
+ static_call_update(x86_send_call_func_single_ipi,
+ smp_ops.send_call_func_single_ipi);
+}
+EXPORT_SYMBOL_GPL(x86_smp_ops_static_call_update);
+
int arch_cpu_rescan_dead_smt_siblings(void)
{
enum cpuhp_smt_control old = cpu_smt_control;
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index 485c1d8804f739fbbb7980c98ed56eab59ad8b37..5ea0f53e4ea3f6cd4ad24fedbe2ef28d2dc444b9 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -85,4 +85,5 @@ void __init xen_hvm_smp_init(void)
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+ x86_smp_ops_static_call_update();
}
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 9bb8ff8bff30a6d13a186609e40346203d4feb25..91f9d6854f7448bd1ffcb4e194615ce6336827bb 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -442,6 +442,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
+ x86_smp_ops_static_call_update();
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_mptable = x86_init_noop;
--
2.52.0.351.gbe84eed79e-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread

* [tip: x86/core] x86/smp: Use static_call for arch_send_call_function_single_ipi()
2025-12-22 20:14 ` [PATCH 1/3] x86/smp: Use static_call for arch_send_call_function_single_ipi() Eric Dumazet
@ 2026-01-16 21:22 ` tip-bot2 for Eric Dumazet
0 siblings, 0 replies; 8+ messages in thread
From: tip-bot2 for Eric Dumazet @ 2026-01-16 21:22 UTC (permalink / raw)
To: linux-tip-commits
Cc: Eric Dumazet, Borislav Petkov (AMD), Peter Zijlstra (Intel), x86,
linux-kernel
The following commit has been merged into the x86/core branch of tip:
Commit-ID: 7c76769ce0d985597c189ff9a6194e3151396ee7
Gitweb: https://git.kernel.org/tip/7c76769ce0d985597c189ff9a6194e3151396ee7
Author: Eric Dumazet <edumazet@google.com>
AuthorDate: Mon, 22 Dec 2025 20:14:04
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Fri, 16 Jan 2026 20:26:52 +01:00
x86/smp: Use static_call for arch_send_call_function_single_ipi()
Use static_call to avoid an indirect call, especially expensive
with retpoline.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251222201406.3725665-2-edumazet@google.com
---
arch/x86/include/asm/smp.h | 10 ++++++++--
arch/x86/kernel/smp.c | 10 ++++++++++
arch/x86/xen/smp_hvm.c | 1 +
arch/x86/xen/smp_pv.c | 1 +
4 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 8495157..891a5d1 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
#include <linux/thread_info.h>
+#include <linux/static_call_types.h>
#include <asm/cpumask.h>
@@ -42,6 +43,12 @@ struct smp_ops {
void (*send_call_func_single_ipi)(int cpu);
};
+void x86_smp_ops_static_call_update(void);
+
+void native_send_call_func_single_ipi(int cpu);
+DECLARE_STATIC_CALL(x86_send_call_func_single_ipi,
+ native_send_call_func_single_ipi);
+
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);
@@ -92,7 +99,7 @@ static inline void arch_smp_send_reschedule(int cpu)
static inline void arch_send_call_function_single_ipi(int cpu)
{
- smp_ops.send_call_func_single_ipi(cpu);
+ static_call(x86_send_call_func_single_ipi)(cpu);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -122,7 +129,6 @@ void __noreturn mwait_play_dead(unsigned int eax_hint);
void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
-void native_send_call_func_single_ipi(int cpu);
asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index b014e6d..2c8fdf1 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -300,6 +300,16 @@ struct smp_ops smp_ops = {
};
EXPORT_SYMBOL_GPL(smp_ops);
+DEFINE_STATIC_CALL(x86_send_call_func_single_ipi,
+ native_send_call_func_single_ipi);
+
+void x86_smp_ops_static_call_update(void)
+{
+ static_call_update(x86_send_call_func_single_ipi,
+ smp_ops.send_call_func_single_ipi);
+}
+EXPORT_SYMBOL_GPL(x86_smp_ops_static_call_update);
+
int arch_cpu_rescan_dead_smt_siblings(void)
{
enum cpuhp_smt_control old = cpu_smt_control;
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index 485c1d8..5ea0f53 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -85,4 +85,5 @@ void __init xen_hvm_smp_init(void)
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+ x86_smp_ops_static_call_update();
}
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 9bb8ff8..91f9d68 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -442,6 +442,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
+ x86_smp_ops_static_call_update();
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_mptable = x86_init_noop;
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/3] x86/smp: Use static_call for arch_smp_send_reschedule()
2025-12-22 20:14 [PATCH 0/3] x86: Use static_call for three smp_ops methods Eric Dumazet
2025-12-22 20:14 ` [PATCH 1/3] x86/smp: Use static_call for arch_send_call_function_single_ipi() Eric Dumazet
@ 2025-12-22 20:14 ` Eric Dumazet
2026-01-16 21:22 ` [tip: x86/core] " tip-bot2 for Eric Dumazet
2025-12-22 20:14 ` [PATCH 3/3] x86/smp: Use static_call for arch_send_call_function_ipi() Eric Dumazet
2026-01-08 11:51 ` [PATCH 0/3] x86: Use static_call for three smp_ops methods Peter Zijlstra
3 siblings, 1 reply; 8+ messages in thread
From: Eric Dumazet @ 2025-12-22 20:14 UTC (permalink / raw)
To: Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen,
Peter Zijlstra, x86, H . Peter Anvin
Cc: linux-kernel, Eric Dumazet, Eric Dumazet
Use static_call to avoid an indirect call, especially expensive
with retpoline.
Signed-off-by: Eric Dumazet <edumazet@google.com>
---
arch/x86/include/asm/smp.h | 7 +++++--
arch/x86/kernel/smp.c | 5 +++++
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 891a5d16bcd52d2da0b71d928d3728afe3d15ade..3b3ff77706fb72c18e66fc1785b21896d32c9ac9 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -49,6 +49,10 @@ void native_send_call_func_single_ipi(int cpu);
DECLARE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+void native_smp_send_reschedule(int cpu);
+DECLARE_STATIC_CALL(x86_smp_send_reschedule,
+ native_smp_send_reschedule);
+
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);
@@ -94,7 +98,7 @@ static inline void __noreturn play_dead(void)
static inline void arch_smp_send_reschedule(int cpu)
{
- smp_ops.smp_send_reschedule(cpu);
+ static_call(x86_smp_send_reschedule)(cpu);
}
static inline void arch_send_call_function_single_ipi(int cpu)
@@ -127,7 +131,6 @@ void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);
-void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
asmlinkage __visible void smp_reboot_interrupt(void);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 2c8fdf1f59d953e49a6beeba8f76f5fe62bd5db7..6e25a86f67faf0a30e9b4920b1d4634aa9c64de7 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -302,11 +302,16 @@ EXPORT_SYMBOL_GPL(smp_ops);
DEFINE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+DEFINE_STATIC_CALL(x86_smp_send_reschedule,
+ native_smp_send_reschedule);
+EXPORT_STATIC_CALL(x86_smp_send_reschedule);
void x86_smp_ops_static_call_update(void)
{
static_call_update(x86_send_call_func_single_ipi,
smp_ops.send_call_func_single_ipi);
+ static_call_update(x86_smp_send_reschedule,
+ smp_ops.smp_send_reschedule);
}
EXPORT_SYMBOL_GPL(x86_smp_ops_static_call_update);
--
2.52.0.351.gbe84eed79e-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread

* [tip: x86/core] x86/smp: Use static_call for arch_smp_send_reschedule()
2025-12-22 20:14 ` [PATCH 2/3] x86/smp: Use static_call for arch_smp_send_reschedule() Eric Dumazet
@ 2026-01-16 21:22 ` tip-bot2 for Eric Dumazet
0 siblings, 0 replies; 8+ messages in thread
From: tip-bot2 for Eric Dumazet @ 2026-01-16 21:22 UTC (permalink / raw)
To: linux-tip-commits
Cc: Eric Dumazet, Borislav Petkov (AMD), Peter Zijlstra (Intel), x86,
linux-kernel
The following commit has been merged into the x86/core branch of tip:
Commit-ID: 83408307cf374038c5df199dde2e3fc11b3e27c2
Gitweb: https://git.kernel.org/tip/83408307cf374038c5df199dde2e3fc11b3e27c2
Author: Eric Dumazet <edumazet@google.com>
AuthorDate: Mon, 22 Dec 2025 20:14:05
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Fri, 16 Jan 2026 20:52:41 +01:00
x86/smp: Use static_call for arch_smp_send_reschedule()
Use static_call to avoid an indirect call, especially expensive
with retpoline.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251222201406.3725665-3-edumazet@google.com
---
arch/x86/include/asm/smp.h | 7 +++++--
arch/x86/kernel/smp.c | 5 +++++
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 891a5d1..3b3ff77 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -49,6 +49,10 @@ void native_send_call_func_single_ipi(int cpu);
DECLARE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+void native_smp_send_reschedule(int cpu);
+DECLARE_STATIC_CALL(x86_smp_send_reschedule,
+ native_smp_send_reschedule);
+
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);
@@ -94,7 +98,7 @@ static inline void __noreturn play_dead(void)
static inline void arch_smp_send_reschedule(int cpu)
{
- smp_ops.smp_send_reschedule(cpu);
+ static_call(x86_smp_send_reschedule)(cpu);
}
static inline void arch_send_call_function_single_ipi(int cpu)
@@ -127,7 +131,6 @@ void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);
-void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
asmlinkage __visible void smp_reboot_interrupt(void);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 2c8fdf1..6e25a86 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -302,11 +302,16 @@ EXPORT_SYMBOL_GPL(smp_ops);
DEFINE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+DEFINE_STATIC_CALL(x86_smp_send_reschedule,
+ native_smp_send_reschedule);
+EXPORT_STATIC_CALL(x86_smp_send_reschedule);
void x86_smp_ops_static_call_update(void)
{
static_call_update(x86_send_call_func_single_ipi,
smp_ops.send_call_func_single_ipi);
+ static_call_update(x86_smp_send_reschedule,
+ smp_ops.smp_send_reschedule);
}
EXPORT_SYMBOL_GPL(x86_smp_ops_static_call_update);
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 3/3] x86/smp: Use static_call for arch_send_call_function_ipi()
2025-12-22 20:14 [PATCH 0/3] x86: Use static_call for three smp_ops methods Eric Dumazet
2025-12-22 20:14 ` [PATCH 1/3] x86/smp: Use static_call for arch_send_call_function_single_ipi() Eric Dumazet
2025-12-22 20:14 ` [PATCH 2/3] x86/smp: Use static_call for arch_smp_send_reschedule() Eric Dumazet
@ 2025-12-22 20:14 ` Eric Dumazet
2026-01-16 21:21 ` [tip: x86/core] " tip-bot2 for Eric Dumazet
2026-01-08 11:51 ` [PATCH 0/3] x86: Use static_call for three smp_ops methods Peter Zijlstra
3 siblings, 1 reply; 8+ messages in thread
From: Eric Dumazet @ 2025-12-22 20:14 UTC (permalink / raw)
To: Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen,
Peter Zijlstra, x86, H . Peter Anvin
Cc: linux-kernel, Eric Dumazet, Eric Dumazet
Use static_call to avoid an indirect call, especially expensive
with retpoline.
Signed-off-by: Eric Dumazet <edumazet@google.com>
---
arch/x86/include/asm/smp.h | 6 ++++--
arch/x86/kernel/smp.c | 4 ++++
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 3b3ff77706fb72c18e66fc1785b21896d32c9ac9..f7305c492e6d5b724b0ee3d983ac1ebf45696294 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -49,6 +49,9 @@ void native_send_call_func_single_ipi(int cpu);
DECLARE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+void native_send_call_func_ipi(const struct cpumask *mask);
+DECLARE_STATIC_CALL(x86_send_call_func_ipi, native_send_call_func_ipi);
+
void native_smp_send_reschedule(int cpu);
DECLARE_STATIC_CALL(x86_smp_send_reschedule,
native_smp_send_reschedule);
@@ -108,7 +111,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- smp_ops.send_call_func_ipi(mask);
+ static_call(x86_send_call_func_ipi)(mask);
}
void cpu_disable_common(void);
@@ -131,7 +134,6 @@ void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);
-void native_send_call_func_ipi(const struct cpumask *mask);
asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 6e25a86f67faf0a30e9b4920b1d4634aa9c64de7..4633ade879938eabfa424d36e7835cf3a7192d6a 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -302,6 +302,8 @@ EXPORT_SYMBOL_GPL(smp_ops);
DEFINE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+DEFINE_STATIC_CALL(x86_send_call_func_ipi,
+ native_send_call_func_ipi);
DEFINE_STATIC_CALL(x86_smp_send_reschedule,
native_smp_send_reschedule);
EXPORT_STATIC_CALL(x86_smp_send_reschedule);
@@ -310,6 +312,8 @@ void x86_smp_ops_static_call_update(void)
{
static_call_update(x86_send_call_func_single_ipi,
smp_ops.send_call_func_single_ipi);
+ static_call_update(x86_send_call_func_ipi,
+ smp_ops.send_call_func_ipi);
static_call_update(x86_smp_send_reschedule,
smp_ops.smp_send_reschedule);
}
--
2.52.0.351.gbe84eed79e-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread

* [tip: x86/core] x86/smp: Use static_call for arch_send_call_function_ipi()
2025-12-22 20:14 ` [PATCH 3/3] x86/smp: Use static_call for arch_send_call_function_ipi() Eric Dumazet
@ 2026-01-16 21:21 ` tip-bot2 for Eric Dumazet
0 siblings, 0 replies; 8+ messages in thread
From: tip-bot2 for Eric Dumazet @ 2026-01-16 21:21 UTC (permalink / raw)
To: linux-tip-commits
Cc: Eric Dumazet, Borislav Petkov (AMD), Peter Zijlstra (Intel), x86,
linux-kernel
The following commit has been merged into the x86/core branch of tip:
Commit-ID: 1f60230cdc6342d37e7a9eec261ac3c392131688
Gitweb: https://git.kernel.org/tip/1f60230cdc6342d37e7a9eec261ac3c392131688
Author: Eric Dumazet <edumazet@google.com>
AuthorDate: Mon, 22 Dec 2025 20:14:06
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Fri, 16 Jan 2026 21:32:57 +01:00
x86/smp: Use static_call for arch_send_call_function_ipi()
Use static_call to avoid an indirect call, especially expensive
with retpoline.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251222201406.3725665-4-edumazet@google.com
---
arch/x86/include/asm/smp.h | 6 ++++--
arch/x86/kernel/smp.c | 4 ++++
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 3b3ff77..f7305c4 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -49,6 +49,9 @@ void native_send_call_func_single_ipi(int cpu);
DECLARE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+void native_send_call_func_ipi(const struct cpumask *mask);
+DECLARE_STATIC_CALL(x86_send_call_func_ipi, native_send_call_func_ipi);
+
void native_smp_send_reschedule(int cpu);
DECLARE_STATIC_CALL(x86_smp_send_reschedule,
native_smp_send_reschedule);
@@ -108,7 +111,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- smp_ops.send_call_func_ipi(mask);
+ static_call(x86_send_call_func_ipi)(mask);
}
void cpu_disable_common(void);
@@ -131,7 +134,6 @@ void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);
-void native_send_call_func_ipi(const struct cpumask *mask);
asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 6e25a86..4633ade 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -302,6 +302,8 @@ EXPORT_SYMBOL_GPL(smp_ops);
DEFINE_STATIC_CALL(x86_send_call_func_single_ipi,
native_send_call_func_single_ipi);
+DEFINE_STATIC_CALL(x86_send_call_func_ipi,
+ native_send_call_func_ipi);
DEFINE_STATIC_CALL(x86_smp_send_reschedule,
native_smp_send_reschedule);
EXPORT_STATIC_CALL(x86_smp_send_reschedule);
@@ -310,6 +312,8 @@ void x86_smp_ops_static_call_update(void)
{
static_call_update(x86_send_call_func_single_ipi,
smp_ops.send_call_func_single_ipi);
+ static_call_update(x86_send_call_func_ipi,
+ smp_ops.send_call_func_ipi);
static_call_update(x86_smp_send_reschedule,
smp_ops.smp_send_reschedule);
}
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 0/3] x86: Use static_call for three smp_ops methods
2025-12-22 20:14 [PATCH 0/3] x86: Use static_call for three smp_ops methods Eric Dumazet
` (2 preceding siblings ...)
2025-12-22 20:14 ` [PATCH 3/3] x86/smp: Use static_call for arch_send_call_function_ipi() Eric Dumazet
@ 2026-01-08 11:51 ` Peter Zijlstra
3 siblings, 0 replies; 8+ messages in thread
From: Peter Zijlstra @ 2026-01-08 11:51 UTC (permalink / raw)
To: Eric Dumazet
Cc: Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
H . Peter Anvin, linux-kernel, Eric Dumazet
On Mon, Dec 22, 2025 at 08:14:03PM +0000, Eric Dumazet wrote:
> Busy hosts with RFS enabled can send more than 6,000,000 IPI
> per second. arch_send_call_function_single_ipi() is currently
> using an indirect call (because Xen can override the target).
>
> Indirect calls are expensive on some platforms with retpoline.
>
> This series converts three smp_ops methods to static_call
> infrastructure to remove common indirect calls.
>
> Eric Dumazet (3):
> x86/smp: Use static_call for arch_send_call_function_single_ipi()
> x86/smp: Use static_call for arch_smp_send_reschedule()
> x86/smp: Use static_call for arch_send_call_function_ipi()
>
> arch/x86/include/asm/smp.h | 23 +++++++++++++++++------
> arch/x86/kernel/smp.c | 19 +++++++++++++++++++
> arch/x86/xen/smp_hvm.c | 1 +
> arch/x86/xen/smp_pv.c | 1 +
> 4 files changed, 38 insertions(+), 6 deletions(-)
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
^ permalink raw reply [flat|nested] 8+ messages in thread