* [PATCH] Add smp_ops interface
From: Jeremy Fitzhardinge @ 2007-03-27 22:09 UTC
To: Andrew Morton
Cc: Zachary Amsden, Xen-devel, Linux Kernel Mailing List,
James Bottomley, Virtualization Mailing List, Ingo Molnar
Add an smp_ops interface. This abstracts the API defined by
<linux/smp.h> for use within arch/i386. The primary intent is that it
be used by a paravirtualizing hypervisor to implement SMP, but it
could also be used by non-APIC-using sub-architectures.
This is related to CONFIG_PARAVIRT, but is implemented unconditionally
since it is simpler that way and not a highly performance-sensitive
interface.
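For illustration only (not part of this patch): a paravirtualizing backend
would supply its own implementations and install them in smp_ops during
early boot. The xen_* names below are hypothetical placeholders, not code
from this series.

static void xen_smp_send_reschedule(int cpu)
{
	/* hypothetical: signal the target vcpu via a hypercall
	 * rather than sending an APIC IPI */
}

static void __init xen_setup_smp_ops(void)
{
	/* override only what the hypervisor handles differently;
	 * the remaining ops keep their native_* defaults */
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
}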
[ Andrew: Patch is based on 2.6.21-rc5-mm1 + the pda->percpu patch I
posted the other day. It will have a minor context conflict without
that patch, but is otherwise standalone. ]
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
---
arch/i386/kernel/smp.c | 21 +++++++++++++----
arch/i386/kernel/smpboot.c | 8 +++---
include/asm-i386/smp.h | 53 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 73 insertions(+), 9 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -483,7 +483,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void smp_send_reschedule(int cpu)
+void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -576,9 +576,9 @@ static int __smp_call_function_mask(cpum
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
int ret;
@@ -657,7 +657,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void smp_send_stop(void)
+void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
@@ -733,3 +733,14 @@ int safe_smp_processor_id(void)
return cpuid >= 0 ? cpuid : 0;
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = native_smp_prepare_cpus,
+ .cpu_up = native_cpu_up,
+ .smp_cpus_done = native_smp_cpus_done,
+
+ .smp_send_stop = native_smp_send_stop,
+ .smp_send_reschedule = native_smp_send_reschedule,
+ .smp_call_function_mask = native_smp_call_function_mask,
+};
===================================================================
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1169,7 +1169,7 @@ static void __init smp_boot_cpus(unsigne
/* These are wrappers to interface to the new boot process. Someone
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
smp_commenced_mask = cpumask_of_cpu(0);
cpu_callin_map = cpumask_of_cpu(0);
@@ -1189,7 +1189,7 @@ static inline void switch_to_new_gdt(voi
asm ("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}
-void __init smp_prepare_boot_cpu(void)
+void __init native_smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
@@ -1290,7 +1290,7 @@ void __cpu_die(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
{
unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
@@ -1335,7 +1335,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
return 0;
}
-void __init smp_cpus_done(unsigned int max_cpus)
+void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
===================================================================
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -47,6 +47,59 @@ extern void cpu_exit_clear(void);
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
#endif
+
+struct smp_ops
+{
+ void (*smp_prepare_boot_cpu)(void);
+ void (*smp_prepare_cpus)(unsigned max_cpus);
+ int (*cpu_up)(unsigned cpu);
+ void (*smp_cpus_done)(unsigned max_cpus);
+
+ void (*smp_send_stop)(void);
+ void (*smp_send_reschedule)(int cpu);
+ int (*smp_call_function_mask)(cpumask_t mask,
+ void (*func)(void *info), void *info,
+ int wait);
+};
+
+extern struct smp_ops smp_ops;
+
+static inline void smp_prepare_boot_cpu(void)
+{
+ smp_ops.smp_prepare_boot_cpu();
+}
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_ops.smp_prepare_cpus(max_cpus);
+}
+static inline int __cpu_up(unsigned int cpu)
+{
+ return smp_ops.cpu_up(cpu);
+}
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+ smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline void smp_send_stop(void)
+{
+ smp_ops.smp_send_stop();
+}
+static inline void smp_send_reschedule(int cpu)
+{
+ smp_ops.smp_send_reschedule(cpu);
+}
+static inline int smp_call_function_mask(cpumask_t mask,
+ void (*func) (void *info), void *info,
+ int wait)
+{
+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+void native_smp_cpus_done(unsigned int max_cpus);
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
* Re: [PATCH] Add smp_ops interface
From: Jeremy Fitzhardinge @ 2007-03-27 22:11 UTC
To: Jeremy Fitzhardinge
Cc: Zachary Amsden, Xen-devel, Linux Kernel Mailing List,
James Bottomley, Virtualization Mailing List, Andrew Morton,
Ingo Molnar
Jeremy Fitzhardinge wrote:
> [ Andrew: Patch is based on 2.6.21-rc5-mm1 + the pda->percpu patch I
> posted the other day. It will have a minor context conflict without
> that patch, but is otherwise standalone. ]
>
Sorry, it also depends on simplify-smp_call_function.patch, which I'll
repost shortly.
J
* [PATCH] Simplify smp_call_function*() by using common implementation
From: Jeremy Fitzhardinge @ 2007-03-27 22:13 UTC
To: Andrew Morton
Cc: Virtualization Mailing List, Stephane Eranian, Ingo Molnar,
Jan Beulich, Linux Kernel Mailing List
smp_call_function and smp_call_function_single are almost complete
duplicates of the same logic. This patch combines them by
implementing them in terms of the more general
smp_call_function_mask().
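In outline, both entry points collapse into thin wrappers around the mask
variant (condensed from the diff below):

int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
		      int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, wait);
}

int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
			     int nonatomic, int wait)
{
	int ret;
	int me = get_cpu();	/* disable preemption: we must not migrate onto 'cpu' */

	if (cpu == me) {
		WARN_ON(1);	/* the mask variant only targets *other* CPUs */
		put_cpu();
		return -EBUSY;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
	put_cpu();
	return ret;
}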
[ Jan, Andi: This only changes arch/i386; can x86_64 be changed in the
same way? ]
[ Rebased onto Jan's x86_64-mm-consolidate-smp_send_stop patch ]
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Jan Beulich <jbeulich@novell.com>
Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Andi Kleen <ak@suse.de>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Cc: Ingo Molnar <mingo@elte.hu>
---
arch/i386/kernel/smp.c | 177 +++++++++++++++++++++++-------------------------
1 file changed, 86 insertions(+), 91 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -515,14 +515,26 @@ void unlock_ipi_call_lock(void)
static struct call_data_struct *call_data;
-static void __smp_call_function(void (*func) (void *info), void *info,
- int nonatomic, int wait)
+
+static int __smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
- int cpus = num_online_cpus() - 1;
+ cpumask_t allbutself;
+ int cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+
+ cpus_and(mask, mask, allbutself);
+ cpus = cpus_weight(mask);
if (!cpus)
- return;
+ return 0;
data.func = func;
data.info = info;
@@ -533,9 +545,12 @@ static void __smp_call_function(void (*f
call_data = &data;
mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+
+ /* Send a message to other CPUs */
+ if (cpus_equal(mask, allbutself))
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+ else
+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
/* Wait for response */
while (atomic_read(&data.started) != cpus)
@@ -544,6 +559,34 @@ static void __smp_call_function(void (*f
if (wait)
while (atomic_read(&data.finished) != cpus)
cpu_relax();
+
+ return 0;
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have finished.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
+{
+ int ret;
+
+ spin_lock(&call_lock);
+ ret = __smp_call_function_mask(mask, func, info, wait);
+ spin_unlock(&call_lock);
+
+ return ret;
}
/**
@@ -559,20 +602,43 @@ static void __smp_call_function(void (*f
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
- __smp_call_function(func, info, nonatomic, wait);
- spin_unlock(&call_lock);
-
- return 0;
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
+
+/*
+ * smp_call_function_single - Run a function on another CPU
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Currently unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Retrurns 0 on success, else a negative status code.
+ *
+ * Does not return until the remote CPU is nearly ready to execute <func>
+ * or is or has executed.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ /* prevent preemption and reschedule on another processor */
+ int ret;
+ int me = get_cpu();
+ if (cpu == me) {
+ WARN_ON(1);
+ put_cpu();
+ return -EBUSY;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
static void stop_this_cpu (void * dummy)
{
@@ -598,7 +664,7 @@ void smp_send_stop(void)
unsigned long flags;
local_irq_save(flags);
- __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
if (!nolock)
spin_unlock(&call_lock);
disable_local_APIC();
@@ -641,77 +707,6 @@ fastcall void smp_call_function_interrup
}
}
-/*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
- */
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- struct call_data_struct data;
- int cpus = 1;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (!wait)
- return;
-
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
-}
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- spin_lock_bh(&call_lock);
- __smp_call_function_single(cpu, func, info, nonatomic, wait);
- spin_unlock_bh(&call_lock);
- put_cpu();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
static int convert_apicid_to_cpu(int apic_id)
{
int i;
* Re: [PATCH] Simplify smp_call_function*() by using common implementation
From: Andi Kleen @ 2007-03-28 19:03 UTC
To: Jeremy Fitzhardinge
Cc: Virtualization Mailing List, Stephane Eranian, Andrew Morton,
Ingo Molnar, Jan Beulich, Linux Kernel Mailing List
On Wednesday 28 March 2007 00:13, Jeremy Fitzhardinge wrote:
> smp_call_function and smp_call_function_single are almost complete
> duplicates of the same logic. This patch combines them by
> implementing them in terms of the more general
> smp_call_function_mask().
I think I got those already.
-Andi
* Re: [PATCH] Simplify smp_call_function*() by using common implementation
From: Jeremy Fitzhardinge @ 2007-03-28 19:18 UTC
To: Andi Kleen
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
Andi Kleen wrote:
> On Wednesday 28 March 2007 00:13, Jeremy Fitzhardinge wrote:
>
>> smp_call_function and smp_call_function_single are almost complete
>> duplicates of the same logic. This patch combines them by
>> implementing them in terms of the more general
>> smp_call_function_mask().
>>
>
> I think I got those already.
>
OK. Do you have smp_ops and machine_ops?
There's a doc update patch too:
Subject: Fix smp_call_function* docs
As reported by Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Randy Dunlap <rdunlap@xenotime.net>
---
arch/i386/kernel/smp.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -570,8 +570,10 @@ static int __smp_call_function_mask(cpum
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have finished.
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
@@ -593,11 +595,13 @@ int smp_call_function_mask(cpumask_t mas
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
+ * @nonatomic: Unused.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
@@ -609,17 +613,18 @@ int smp_call_function(void (*func) (void
}
EXPORT_SYMBOL(smp_call_function);
-/*
+/**
* smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
+ * @nonatomic: Unused.
* @wait: If true, wait until function has completed on other CPUs.
*
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
*/
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
* Re: [PATCH] Simplify smp_call_function*() by using common implementation
From: Andi Kleen @ 2007-03-28 19:22 UTC
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
On Wednesday 28 March 2007 21:18, Jeremy Fitzhardinge wrote:
> Andi Kleen wrote:
> > On Wednesday 28 March 2007 00:13, Jeremy Fitzhardinge wrote:
> >
> >> smp_call_function and smp_call_function_single are almost complete
> >> duplicates of the same logic. This patch combines them by
> >> implementing them in terms of the more general
> >> smp_call_function_mask().
> >>
> >
> > I think I got those already.
> >
>
> OK. Do you have smp_ops and machine_ops?
Not yet. Can you resend the current set?
> There's a doc update patch too:
Can you send that separately after I sync out, please?
Thanks
-Andi
* [PATCH] Add smp_ops interface
From: Jeremy Fitzhardinge @ 2007-03-28 19:31 UTC
To: Andi Kleen
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
Subject: Add smp_ops interface
Add an smp_ops interface. This abstracts the API defined by
<linux/smp.h> for use within arch/i386. The primary intent is that it
be used by a paravirtualizing hypervisor to implement SMP, but it
could also be used by non-APIC-using sub-architectures.
This is related to CONFIG_PARAVIRT, but is implemented unconditionally
since it is simpler that way and not a highly performance-sensitive
interface.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
---
arch/i386/kernel/smp.c | 21 +++++++++++++----
arch/i386/kernel/smpboot.c | 8 +++---
include/asm-i386/smp.h | 53 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 73 insertions(+), 9 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -483,7 +483,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void smp_send_reschedule(int cpu)
+void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -576,9 +576,9 @@ static int __smp_call_function_mask(cpum
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
int ret;
@@ -657,7 +657,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void smp_send_stop(void)
+void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
@@ -733,3 +733,14 @@ int safe_smp_processor_id(void)
return cpuid >= 0 ? cpuid : 0;
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = native_smp_prepare_cpus,
+ .cpu_up = native_cpu_up,
+ .smp_cpus_done = native_smp_cpus_done,
+
+ .smp_send_stop = native_smp_send_stop,
+ .smp_send_reschedule = native_smp_send_reschedule,
+ .smp_call_function_mask = native_smp_call_function_mask,
+};
===================================================================
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1169,7 +1169,7 @@ static void __init smp_boot_cpus(unsigne
/* These are wrappers to interface to the new boot process. Someone
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
smp_commenced_mask = cpumask_of_cpu(0);
cpu_callin_map = cpumask_of_cpu(0);
@@ -1189,7 +1189,7 @@ static inline void switch_to_new_gdt(voi
asm ("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}
-void __init smp_prepare_boot_cpu(void)
+void __init native_smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
@@ -1290,7 +1290,7 @@ void __cpu_die(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
{
unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
@@ -1335,7 +1335,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
return 0;
}
-void __init smp_cpus_done(unsigned int max_cpus)
+void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
===================================================================
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -47,6 +47,59 @@ extern void cpu_exit_clear(void);
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
#endif
+
+struct smp_ops
+{
+ void (*smp_prepare_boot_cpu)(void);
+ void (*smp_prepare_cpus)(unsigned max_cpus);
+ int (*cpu_up)(unsigned cpu);
+ void (*smp_cpus_done)(unsigned max_cpus);
+
+ void (*smp_send_stop)(void);
+ void (*smp_send_reschedule)(int cpu);
+ int (*smp_call_function_mask)(cpumask_t mask,
+ void (*func)(void *info), void *info,
+ int wait);
+};
+
+extern struct smp_ops smp_ops;
+
+static inline void smp_prepare_boot_cpu(void)
+{
+ smp_ops.smp_prepare_boot_cpu();
+}
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_ops.smp_prepare_cpus(max_cpus);
+}
+static inline int __cpu_up(unsigned int cpu)
+{
+ return smp_ops.cpu_up(cpu);
+}
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+ smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline void smp_send_stop(void)
+{
+ smp_ops.smp_send_stop();
+}
+static inline void smp_send_reschedule(int cpu)
+{
+ smp_ops.smp_send_reschedule(cpu);
+}
+static inline int smp_call_function_mask(cpumask_t mask,
+ void (*func) (void *info), void *info,
+ int wait)
+{
+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+void native_smp_cpus_done(unsigned int max_cpus);
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \