* [PATCH] Simplify smp_call_function*() by using common implementation
@ 2007-03-27 22:13 Jeremy Fitzhardinge
2007-03-27 22:43 ` Randy Dunlap
2007-03-28 19:03 ` Andi Kleen
0 siblings, 2 replies; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-27 22:13 UTC (permalink / raw)
To: Andrew Morton
Cc: Virtualization Mailing List, Stephane Eranian, Ingo Molnar,
Jan Beulich, Linux Kernel Mailing List
smp_call_function and smp_call_function_single are almost complete
duplicates of the same logic. This patch combines them by
implementing them in terms of the more general
smp_call_function_mask().
[ Jan, Andi: This only changes arch/i386; can x86_64 be changed in the
same way? ]
[ Rebased onto Jan's x86_64-mm-consolidate-smp_send_stop patch ]
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Jan Beulich <jbeulich@novell.com>
Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Andi Kleen <ak@suse.de>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Cc: Ingo Molnar <mingo@elte.hu>
---
arch/i386/kernel/smp.c | 177 +++++++++++++++++++++++-------------------------
1 file changed, 86 insertions(+), 91 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -515,14 +515,26 @@ void unlock_ipi_call_lock(void)
static struct call_data_struct *call_data;
-static void __smp_call_function(void (*func) (void *info), void *info,
- int nonatomic, int wait)
+
+static int __smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
- int cpus = num_online_cpus() - 1;
+ cpumask_t allbutself;
+ int cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+
+ cpus_and(mask, mask, allbutself);
+ cpus = cpus_weight(mask);
if (!cpus)
- return;
+ return 0;
data.func = func;
data.info = info;
@@ -533,9 +545,12 @@ static void __smp_call_function(void (*f
call_data = &data;
mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+
+ /* Send a message to other CPUs */
+ if (cpus_equal(mask, allbutself))
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+ else
+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
/* Wait for response */
while (atomic_read(&data.started) != cpus)
@@ -544,6 +559,34 @@ static void __smp_call_function(void (*f
if (wait)
while (atomic_read(&data.finished) != cpus)
cpu_relax();
+
+ return 0;
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have finished.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
+{
+ int ret;
+
+ spin_lock(&call_lock);
+ ret = __smp_call_function_mask(mask, func, info, wait);
+ spin_unlock(&call_lock);
+
+ return ret;
}
/**
@@ -559,20 +602,43 @@ static void __smp_call_function(void (*f
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
- __smp_call_function(func, info, nonatomic, wait);
- spin_unlock(&call_lock);
-
- return 0;
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
+
+/*
+ * smp_call_function_single - Run a function on another CPU
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Currently unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Retrurns 0 on success, else a negative status code.
+ *
+ * Does not return until the remote CPU is nearly ready to execute <func>
+ * or is or has executed.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ /* prevent preemption and reschedule on another processor */
+ int ret;
+ int me = get_cpu();
+ if (cpu == me) {
+ WARN_ON(1);
+ put_cpu();
+ return -EBUSY;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
static void stop_this_cpu (void * dummy)
{
@@ -598,7 +664,7 @@ void smp_send_stop(void)
unsigned long flags;
local_irq_save(flags);
- __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
if (!nolock)
spin_unlock(&call_lock);
disable_local_APIC();
@@ -641,77 +707,6 @@ fastcall void smp_call_function_interrup
}
}
-/*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
- */
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- struct call_data_struct data;
- int cpus = 1;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (!wait)
- return;
-
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
-}
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- spin_lock_bh(&call_lock);
- __smp_call_function_single(cpu, func, info, nonatomic, wait);
- spin_unlock_bh(&call_lock);
- put_cpu();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
static int convert_apicid_to_cpu(int apic_id)
{
int i;
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH] Simplify smp_call_function*() by using common implementation
2007-03-27 22:13 [PATCH] Simplify smp_call_function*() by using common implementation Jeremy Fitzhardinge
@ 2007-03-27 22:43 ` Randy Dunlap
2007-03-27 22:46 ` Jeremy Fitzhardinge
2007-03-28 19:03 ` Andi Kleen
1 sibling, 1 reply; 15+ messages in thread
From: Randy Dunlap @ 2007-03-27 22:43 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Andi Kleen, Ingo Molnar, Randy.Dunlap
On Tue, 27 Mar 2007 15:13:29 -0700 Jeremy Fitzhardinge wrote:
> smp_call_function and smp_call_function_single are almost complete
> duplicates of the same logic. This patch combines them by
> implementing them in terms of the more general
> smp_call_function_mask().
>
> [ Jan, Andi: This only changes arch/i386; can x86_64 be changed in the
> same way? ]
>
> [ Rebased onto Jan's x86_64-mm-consolidate-smp_send_stop patch ]
>
> Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
> Cc: Jan Beulich <jbeulich@novell.com>
> Cc: Stephane Eranian <eranian@hpl.hp.com>
> Cc: Andrew Morton <akpm@osdl.org>
> Cc: Andi Kleen <ak@suse.de>
> Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
> Cc: Ingo Molnar <mingo@elte.hu>
>
> ---
> arch/i386/kernel/smp.c | 177 +++++++++++++++++++++++-------------------------
> 1 file changed, 86 insertions(+), 91 deletions(-)
>
> ===================================================================
> --- a/arch/i386/kernel/smp.c
> +++ b/arch/i386/kernel/smp.c
> @@ -544,6 +559,34 @@ static void __smp_call_function(void (*f
> if (wait)
> while (atomic_read(&data.finished) != cpus)
> cpu_relax();
> +
> + return 0;
> +}
> +
> +/**
> + * smp_call_function_mask(): Run a function on a set of other CPUs.
> + * @mask: The set of cpus to run on. Must not include the current cpu.
> + * @func: The function to run. This must be fast and non-blocking.
> + * @info: An arbitrary pointer to pass to the function.
> + * @wait: If true, wait (atomically) until function has completed on other CPUs.
> + *
> + * Returns 0 on success, else a negative status code. Does not return until
> + * remote CPUs are nearly ready to execute <<func>> or are or have finished.
func() or <what>??
and what does "nearly ready" mean?
> + *
> + * You must not call this function with disabled interrupts or from a
> + * hardware interrupt handler or from a bottom half handler.
> + */
> +int smp_call_function_mask(cpumask_t mask,
> + void (*func)(void *), void *info,
> + int wait)
> +{
> + int ret;
> +
> + spin_lock(&call_lock);
> + ret = __smp_call_function_mask(mask, func, info, wait);
> + spin_unlock(&call_lock);
> +
> + return ret;
> }
>
> /**
> @@ -559,20 +602,43 @@ static void __smp_call_function(void (*f
> * You must not call this function with disabled interrupts or from a
> * hardware interrupt handler or from a bottom half handler.
> */
> -int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
> - int wait)
> -{
> - /* Can deadlock when called with interrupts disabled */
> - WARN_ON(irqs_disabled());
> -
> - /* Holding any lock stops cpus from going down. */
> - spin_lock(&call_lock);
> - __smp_call_function(func, info, nonatomic, wait);
> - spin_unlock(&call_lock);
> -
> - return 0;
> +int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
> + int wait)
> +{
> + return smp_call_function_mask(cpu_online_map, func, info, wait);
> }
> EXPORT_SYMBOL(smp_call_function);
> +
> +/*
/**
> + * smp_call_function_single - Run a function on another CPU
> + * @func: The function to run. This must be fast and non-blocking.
> + * @info: An arbitrary pointer to pass to the function.
> + * @nonatomic: Currently unused.
> + * @wait: If true, wait until function has completed on other CPUs.
> + *
> + * Retrurns 0 on success, else a negative status code.
Returns
> + *
> + * Does not return until the remote CPU is nearly ready to execute <func>
Same comments: what is "nearly ready"? and s/<func>/func()/
> + * or is or has executed.
Huh? or what??
> + */
> +int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
> + int nonatomic, int wait)
> +{
> + /* prevent preemption and reschedule on another processor */
> + int ret;
> + int me = get_cpu();
> + if (cpu == me) {
> + WARN_ON(1);
> + put_cpu();
> + return -EBUSY;
> + }
> +
> + ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
> +
> + put_cpu();
> + return ret;
> +}
> +EXPORT_SYMBOL(smp_call_function_single);
>
> static void stop_this_cpu (void * dummy)
> {
> @@ -641,77 +707,6 @@ fastcall void smp_call_function_interrup
> }
> }
>
> -/*
> - * this function sends a 'generic call function' IPI to one other CPU
> - * in the system.
> - *
> - * cpu is a standard Linux logical CPU number.
> - */
> -static void
> -__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
> - int nonatomic, int wait)
> -{
> - struct call_data_struct data;
> - int cpus = 1;
> -
> - data.func = func;
> - data.info = info;
> - atomic_set(&data.started, 0);
> - data.wait = wait;
> - if (wait)
> - atomic_set(&data.finished, 0);
> -
> - call_data = &data;
> - wmb();
> - /* Send a message to all other CPUs and wait for them to respond */
> - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
> -
> - /* Wait for response */
> - while (atomic_read(&data.started) != cpus)
> - cpu_relax();
> -
> - if (!wait)
> - return;
> -
> - while (atomic_read(&data.finished) != cpus)
> - cpu_relax();
> -}
> -
> -/*
> - * smp_call_function_single - Run a function on another CPU
> - * @func: The function to run. This must be fast and non-blocking.
> - * @info: An arbitrary pointer to pass to the function.
> - * @nonatomic: Currently unused.
> - * @wait: If true, wait until function has completed on other CPUs.
> - *
> - * Retrurns 0 on success, else a negative status code.
> - *
> - * Does not return until the remote CPU is nearly ready to execute <func>
> - * or is or has executed.
> - */
> -
> -int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
> - int nonatomic, int wait)
> -{
> - /* prevent preemption and reschedule on another processor */
> - int me = get_cpu();
> - if (cpu == me) {
> - WARN_ON(1);
> - put_cpu();
> - return -EBUSY;
> - }
> -
> - /* Can deadlock when called with interrupts disabled */
> - WARN_ON(irqs_disabled());
> -
> - spin_lock_bh(&call_lock);
> - __smp_call_function_single(cpu, func, info, nonatomic, wait);
> - spin_unlock_bh(&call_lock);
> - put_cpu();
> - return 0;
> -}
> -EXPORT_SYMBOL(smp_call_function_single);
> -
> static int convert_apicid_to_cpu(int apic_id)
> {
> int i;
---
~Randy
*** Remember to use Documentation/SubmitChecklist when testing your code ***
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH] Simplify smp_call_function*() by using common implementation
2007-03-27 22:13 [PATCH] Simplify smp_call_function*() by using common implementation Jeremy Fitzhardinge
2007-03-27 22:43 ` Randy Dunlap
@ 2007-03-28 19:03 ` Andi Kleen
2007-03-28 19:18 ` Jeremy Fitzhardinge
1 sibling, 1 reply; 15+ messages in thread
From: Andi Kleen @ 2007-03-28 19:03 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Virtualization Mailing List, Stephane Eranian, Andrew Morton,
Ingo Molnar, Jan Beulich, Linux Kernel Mailing List
On Wednesday 28 March 2007 00:13, Jeremy Fitzhardinge wrote:
> smp_call_function and smp_call_function_single are almost complete
> duplicates of the same logic. This patch combines them by
> implementing them in terms of the more general
> smp_call_function_mask().
I think I got those already.
-Andi
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH] Simplify smp_call_function*() by using common implementation
2007-03-28 19:03 ` Andi Kleen
@ 2007-03-28 19:18 ` Jeremy Fitzhardinge
2007-03-28 19:22 ` Andi Kleen
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-28 19:18 UTC (permalink / raw)
To: Andi Kleen
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
Andi Kleen wrote:
> On Wednesday 28 March 2007 00:13, Jeremy Fitzhardinge wrote:
>
>> smp_call_function and smp_call_function_single are almost complete
>> duplicates of the same logic. This patch combines them by
>> implementing them in terms of the more general
>> smp_call_function_mask().
>>
>
> I think I got those already.
>
OK. Do you have smp_ops and machine_ops?
There's a doc update patch too:
Subject: Fix smp_call_function* docs
As reported by Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Randy Dunlap <rdunlap@xenotime.net>
---
arch/i386/kernel/smp.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -570,8 +570,10 @@ static int __smp_call_function_mask(cpum
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have finished.
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
@@ -593,11 +595,13 @@ int smp_call_function_mask(cpumask_t mas
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
+ * @nonatomic: Unused.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
@@ -609,17 +613,18 @@ int smp_call_function(void (*func) (void
}
EXPORT_SYMBOL(smp_call_function);
-/*
+/**
* smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
+ * @nonatomic: Unused.
* @wait: If true, wait until function has completed on other CPUs.
*
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
*/
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH] Simplify smp_call_function*() by using common implementation
2007-03-28 19:18 ` Jeremy Fitzhardinge
@ 2007-03-28 19:22 ` Andi Kleen
2007-03-28 19:31 ` [PATCH] Add smp_ops interface Jeremy Fitzhardinge
2007-03-28 19:32 ` [PATCH] Add machine_ops interface to abstract halting and rebooting Jeremy Fitzhardinge
0 siblings, 2 replies; 15+ messages in thread
From: Andi Kleen @ 2007-03-28 19:22 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
On Wednesday 28 March 2007 21:18, Jeremy Fitzhardinge wrote:
> Andi Kleen wrote:
> > On Wednesday 28 March 2007 00:13, Jeremy Fitzhardinge wrote:
> >
> >> smp_call_function and smp_call_function_single are almost complete
> >> duplicates of the same logic. This patch combines them by
> >> implementing them in terms of the more general
> >> smp_call_function_mask().
> >>
> >
> > I think I got those already.
> >
>
> OK. Do you have smp_ops and machine_ops?
Not yet. Can you resend the current set?
> There's a doc update patch too:
Can you send that separately after i sync out please?
Thanks
-Andi
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH] Add smp_ops interface
2007-03-28 19:22 ` Andi Kleen
@ 2007-03-28 19:31 ` Jeremy Fitzhardinge
2007-03-28 19:32 ` [PATCH] Add machine_ops interface to abstract halting and rebooting Jeremy Fitzhardinge
1 sibling, 0 replies; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-28 19:31 UTC (permalink / raw)
To: Andi Kleen
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
Subject: Add smp_ops interface
Add a smp_ops interface. This abstracts the API defined by
<linux/smp.h> for use within arch/i386. The primary intent is that it
be used by a paravirtualizing hypervisor to implement SMP, but it
could also be used by non-APIC-using sub-architectures.
This is related to CONFIG_PARAVIRT, but is implemented unconditionally
since it is simpler that way and not a highly performance-sensitive
interface.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
---
arch/i386/kernel/smp.c | 21 +++++++++++++----
arch/i386/kernel/smpboot.c | 8 +++---
include/asm-i386/smp.h | 53 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 73 insertions(+), 9 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -483,7 +483,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void smp_send_reschedule(int cpu)
+void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -576,9 +576,9 @@ static int __smp_call_function_mask(cpum
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
int ret;
@@ -657,7 +657,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void smp_send_stop(void)
+void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
@@ -733,3 +733,14 @@ int safe_smp_processor_id(void)
return cpuid >= 0 ? cpuid : 0;
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = native_smp_prepare_cpus,
+ .cpu_up = native_cpu_up,
+ .smp_cpus_done = native_smp_cpus_done,
+
+ .smp_send_stop = native_smp_send_stop,
+ .smp_send_reschedule = native_smp_send_reschedule,
+ .smp_call_function_mask = native_smp_call_function_mask,
+};
===================================================================
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1169,7 +1169,7 @@ static void __init smp_boot_cpus(unsigne
/* These are wrappers to interface to the new boot process. Someone
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
smp_commenced_mask = cpumask_of_cpu(0);
cpu_callin_map = cpumask_of_cpu(0);
@@ -1189,7 +1189,7 @@ static inline void switch_to_new_gdt(voi
asm ("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}
-void __init smp_prepare_boot_cpu(void)
+void __init native_smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
@@ -1290,7 +1290,7 @@ void __cpu_die(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
{
unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
@@ -1335,7 +1335,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
return 0;
}
-void __init smp_cpus_done(unsigned int max_cpus)
+void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
===================================================================
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -47,6 +47,59 @@ extern void cpu_exit_clear(void);
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
#endif
+
+struct smp_ops
+{
+ void (*smp_prepare_boot_cpu)(void);
+ void (*smp_prepare_cpus)(unsigned max_cpus);
+ int (*cpu_up)(unsigned cpu);
+ void (*smp_cpus_done)(unsigned max_cpus);
+
+ void (*smp_send_stop)(void);
+ void (*smp_send_reschedule)(int cpu);
+ int (*smp_call_function_mask)(cpumask_t mask,
+ void (*func)(void *info), void *info,
+ int wait);
+};
+
+extern struct smp_ops smp_ops;
+
+static inline void smp_prepare_boot_cpu(void)
+{
+ smp_ops.smp_prepare_boot_cpu();
+}
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_ops.smp_prepare_cpus(max_cpus);
+}
+static inline int __cpu_up(unsigned int cpu)
+{
+ return smp_ops.cpu_up(cpu);
+}
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+ smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline void smp_send_stop(void)
+{
+ smp_ops.smp_send_stop();
+}
+static inline void smp_send_reschedule(int cpu)
+{
+ smp_ops.smp_send_reschedule(cpu);
+}
+static inline int smp_call_function_mask(cpumask_t mask,
+ void (*func) (void *info), void *info,
+ int wait)
+{
+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+void native_smp_cpus_done(unsigned int max_cpus);
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
^ permalink raw reply [flat|nested] 15+ messages in thread* [PATCH] Add machine_ops interface to abstract halting and rebooting
2007-03-28 19:22 ` Andi Kleen
2007-03-28 19:31 ` [PATCH] Add smp_ops interface Jeremy Fitzhardinge
@ 2007-03-28 19:32 ` Jeremy Fitzhardinge
2007-03-28 19:43 ` Andi Kleen
1 sibling, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-28 19:32 UTC (permalink / raw)
To: Andi Kleen
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
machine_ops is an interface for the machine_* functions defined in
<linux/reboot.h>. This is intended to allow hypervisors to intercept
the reboot process, but it could be used to implement other x86
subarchitecture reboots.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
arch/i386/kernel/apm.c | 3 --
arch/i386/kernel/reboot.c | 49 +++++++++++++++++++++++++++++++++++++--------
include/asm-i386/reboot.h | 20 ++++++++++++++++++
3 files changed, 62 insertions(+), 10 deletions(-)
===================================================================
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -233,10 +233,9 @@
#include <asm/desc.h>
#include <asm/i8253.h>
#include <asm/paravirt.h>
+#include <asm/reboot.h>
#include "io_ports.h"
-
-extern void machine_real_restart(unsigned char *, int);
#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
extern int (*console_blank_hook)(int);
===================================================================
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -18,6 +18,7 @@
#include <asm/desc.h>
#include "mach_reboot.h"
#include <asm/reboot_fixups.h>
+#include <asm/reboot.h>
/*
* Power off function, if any
@@ -280,7 +281,7 @@ EXPORT_SYMBOL(machine_real_restart);
EXPORT_SYMBOL(machine_real_restart);
#endif
-void machine_shutdown(void)
+static void native_machine_shutdown(void)
{
#ifdef CONFIG_SMP
int reboot_cpu_id;
@@ -320,7 +321,7 @@ void __attribute__((weak)) mach_reboot_f
{
}
-void machine_emergency_restart(void)
+static void native_machine_emergency_restart(void)
{
if (!reboot_thru_bios) {
if (efi_enabled) {
@@ -344,17 +345,17 @@ void machine_emergency_restart(void)
machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
}
-void machine_restart(char * __unused)
+static void native_machine_restart(char * __unused)
{
machine_shutdown();
machine_emergency_restart();
}
-void machine_halt(void)
-{
-}
-
-void machine_power_off(void)
+static void native_machine_halt(void)
+{
+}
+
+static void native_machine_power_off(void)
{
if (pm_power_off) {
machine_shutdown();
@@ -363,3 +364,35 @@ void machine_power_off(void)
}
+struct machine_ops machine_ops = {
+ .power_off = native_machine_power_off,
+ .shutdown = native_machine_shutdown,
+ .emergency_restart = native_machine_emergency_restart,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt,
+};
+
+void machine_power_off(void)
+{
+ machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+ machine_ops.shutdown();
+}
+
+void machine_emergency_restart(void)
+{
+ machine_ops.emergency_restart();
+}
+
+void machine_restart(char *cmd)
+{
+ machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+ machine_ops.halt();
+}
===================================================================
--- /dev/null
+++ b/include/asm-i386/reboot.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_REBOOT_H
+#define _ASM_REBOOT_H
+
+struct pt_regs;
+
+struct machine_ops
+{
+ void (*restart)(char *cmd);
+ void (*halt)(void);
+ void (*power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+ void (*emergency_restart)(void);
+};
+
+extern struct machine_ops machine_ops;
+
+void machine_real_restart(unsigned char *code, int length);
+
+#endif /* _ASM_REBOOT_H */
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH] Add machine_ops interface to abstract halting and rebooting
2007-03-28 19:32 ` [PATCH] Add machine_ops interface to abstract halting and rebooting Jeremy Fitzhardinge
@ 2007-03-28 19:43 ` Andi Kleen
2007-03-28 19:47 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 15+ messages in thread
From: Andi Kleen @ 2007-03-28 19:43 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
On Wednesday 28 March 2007 21:32, Jeremy Fitzhardinge wrote:
> machine_ops is an interface for the machine_* functions defined in
> <linux/reboot.h>. This is intended to allow hypervisors to intercept
> the reboot process, but it could be used to implement other x86
> subarchitecture reboots.
Both patches added thanks
-Andi
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH] Add machine_ops interface to abstract halting and rebooting
2007-03-28 19:43 ` Andi Kleen
@ 2007-03-28 19:47 ` Jeremy Fitzhardinge
2007-03-28 19:54 ` Andi Kleen
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-28 19:47 UTC (permalink / raw)
To: Andi Kleen
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
Andi Kleen wrote:
> On Wednesday 28 March 2007 21:32, Jeremy Fitzhardinge wrote:
>
>> machine_ops is an interface for the machine_* functions defined in
>> <linux/reboot.h>. This is intended to allow hypervisors to intercept
>> the reboot process, but it could be used to implement other x86
>> subarchitecture reboots.
>>
>
> Both patches added thanks
>
I presume you've also got the reboot-fixups cleanup patch?
J
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH] Add machine_ops interface to abstract halting and rebooting
2007-03-28 19:47 ` Jeremy Fitzhardinge
@ 2007-03-28 19:54 ` Andi Kleen
2007-03-28 20:11 ` [PATCH] Clean up mach_reboot_fixups Jeremy Fitzhardinge
0 siblings, 1 reply; 15+ messages in thread
From: Andi Kleen @ 2007-03-28 19:54 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linux Kernel Mailing List,
Virtualization Mailing List, Jan Beulich, Stephane Eranian,
Ingo Molnar, Randy.Dunlap
On Wednesday 28 March 2007 21:47, Jeremy Fitzhardinge wrote:
> Andi Kleen wrote:
> > On Wednesday 28 March 2007 21:32, Jeremy Fitzhardinge wrote:
> >
> >> machine_ops is an interface for the machine_* functions defined in
> >> <linux/reboot.h>. This is intended to allow hypervisors to intercept
> >> the reboot process, but it could be used to implement other x86
> >> subarchitecture reboots.
> >>
> >
> > Both patches added thanks
> >
>
> I presume you've also got the reboot-fixups cleanup patch?
No, please resend too
-Andi
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH] Clean up mach_reboot_fixups
2007-03-28 19:54 ` Andi Kleen
@ 2007-03-28 20:11 ` Jeremy Fitzhardinge
2007-03-28 20:14 ` Andi Kleen
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-28 20:11 UTC (permalink / raw)
To: Andi Kleen
Cc: Jeremy Fitzhardinge, Virtualization Mailing List,
Stephane Eranian, Andrew Morton, Ingo Molnar, Jan Beulich,
Linux Kernel Mailing List
Two cleanups:
- reboot_fixups.h is entirely i386-dependent, so put it in asm-i386
- use a weak version rather than ifdeffery
[ Andi - the machine_ops probably depends on this, but only in a minor
context-clash way. ]
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Jaya Kumar <jayalk@intworks.biz>
---
arch/i386/kernel/reboot.c | 6 +++++-
arch/i386/kernel/reboot_fixups.c | 2 +-
include/asm-i386/reboot_fixups.h | 6 ++++++
include/linux/reboot_fixups.h | 10 ----------
4 files changed, 12 insertions(+), 12 deletions(-)
===================================================================
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -17,7 +17,7 @@
#include <asm/apic.h>
#include <asm/desc.h>
#include "mach_reboot.h"
-#include <linux/reboot_fixups.h>
+#include <asm/reboot_fixups.h>
/*
* Power off function, if any
@@ -314,6 +314,10 @@ void machine_shutdown(void)
#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
#endif
+}
+
+void __attribute__((weak)) mach_reboot_fixups(void)
+{
}
void machine_emergency_restart(void)
===================================================================
--- a/arch/i386/kernel/reboot_fixups.c
+++ b/arch/i386/kernel/reboot_fixups.c
@@ -10,7 +10,7 @@
#include <asm/delay.h>
#include <linux/pci.h>
-#include <linux/reboot_fixups.h>
+#include <asm/reboot_fixups.h>
static void cs5530a_warm_reset(struct pci_dev *dev)
{
===================================================================
--- /dev/null
+++ b/include/asm-i386/reboot_fixups.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_REBOOT_FIXUPS_H
+#define _LINUX_REBOOT_FIXUPS_H
+
+extern void mach_reboot_fixups(void);
+
+#endif /* _LINUX_REBOOT_FIXUPS_H */
===================================================================
--- a/include/linux/reboot_fixups.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_REBOOT_FIXUPS_H
-#define _LINUX_REBOOT_FIXUPS_H
-
-#ifdef CONFIG_X86_REBOOTFIXUPS
-extern void mach_reboot_fixups(void);
-#else
-#define mach_reboot_fixups() ((void)(0))
-#endif
-
-#endif /* _LINUX_REBOOT_FIXUPS_H */
^ permalink raw reply [flat|nested] 15+ messages in thread

* Re: [PATCH] Clean up mach_reboot_fixups
2007-03-28 20:11 ` [PATCH] Clean up mach_reboot_fixups Jeremy Fitzhardinge
@ 2007-03-28 20:14 ` Andi Kleen
0 siblings, 0 replies; 15+ messages in thread
From: Andi Kleen @ 2007-03-28 20:14 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Virtualization Mailing List, Stephane Eranian, Andrew Morton,
Ingo Molnar, Linux Kernel Mailing List, Jan Beulich
On Wednesday 28 March 2007 22:11, Jeremy Fitzhardinge wrote:
> Two cleanups:
> - reboot_fixups.h is entirely i386-dependent, so put it in asm-i386
> - use a weak version rather than ifdeffery
>
> [ Andi - the machine_ops probably depends on this, but only in a minor
> context-clash way. ]
Sorry I actually had it already, but forgot about it
-Andi
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH] Add smp_ops interface
@ 2007-03-27 22:09 Jeremy Fitzhardinge
2007-03-27 22:11 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-27 22:09 UTC (permalink / raw)
To: Andrew Morton
Cc: Zachary Amsden, Xen-devel, Linux Kernel Mailing List,
James Bottomley, Virtualization Mailing List, Ingo Molnar
Add a smp_ops interface. This abstracts the API defined by
<linux/smp.h> for use within arch/i386. The primary intent is that it
be used by a paravirtualizing hypervisor to implement SMP, but it
could also be used by non-APIC-using sub-architectures.
This is related to CONFIG_PARAVIRT, but is implemented unconditionally
since it is simpler that way and not a highly performance-sensitive
interface.
[ Andrew: Patch is based on 2.6.21-rc5-mm1 + the pda->percpu patch I
posted the other day. It will have a minor context conflict without
that patch, but is otherwise standalone. ]
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
---
arch/i386/kernel/smp.c | 21 +++++++++++++----
arch/i386/kernel/smpboot.c | 8 +++---
include/asm-i386/smp.h | 53 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 73 insertions(+), 9 deletions(-)
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -483,7 +483,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void smp_send_reschedule(int cpu)
+void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -576,9 +576,9 @@ static int __smp_call_function_mask(cpum
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
int ret;
@@ -657,7 +657,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void smp_send_stop(void)
+void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
@@ -733,3 +733,14 @@ int safe_smp_processor_id(void)
return cpuid >= 0 ? cpuid : 0;
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = native_smp_prepare_cpus,
+ .cpu_up = native_cpu_up,
+ .smp_cpus_done = native_smp_cpus_done,
+
+ .smp_send_stop = native_smp_send_stop,
+ .smp_send_reschedule = native_smp_send_reschedule,
+ .smp_call_function_mask = native_smp_call_function_mask,
+};
===================================================================
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1169,7 +1169,7 @@ static void __init smp_boot_cpus(unsigne
/* These are wrappers to interface to the new boot process. Someone
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
smp_commenced_mask = cpumask_of_cpu(0);
cpu_callin_map = cpumask_of_cpu(0);
@@ -1189,7 +1189,7 @@ static inline void switch_to_new_gdt(voi
asm ("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}
-void __init smp_prepare_boot_cpu(void)
+void __init native_smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
@@ -1290,7 +1290,7 @@ void __cpu_die(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
{
unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
@@ -1335,7 +1335,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
return 0;
}
-void __init smp_cpus_done(unsigned int max_cpus)
+void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
===================================================================
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -47,6 +47,59 @@ extern void cpu_exit_clear(void);
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
#endif
+
+struct smp_ops
+{
+ void (*smp_prepare_boot_cpu)(void);
+ void (*smp_prepare_cpus)(unsigned max_cpus);
+ int (*cpu_up)(unsigned cpu);
+ void (*smp_cpus_done)(unsigned max_cpus);
+
+ void (*smp_send_stop)(void);
+ void (*smp_send_reschedule)(int cpu);
+ int (*smp_call_function_mask)(cpumask_t mask,
+ void (*func)(void *info), void *info,
+ int wait);
+};
+
+extern struct smp_ops smp_ops;
+
+static inline void smp_prepare_boot_cpu(void)
+{
+ smp_ops.smp_prepare_boot_cpu();
+}
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_ops.smp_prepare_cpus(max_cpus);
+}
+static inline int __cpu_up(unsigned int cpu)
+{
+ return smp_ops.cpu_up(cpu);
+}
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+ smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline void smp_send_stop(void)
+{
+ smp_ops.smp_send_stop();
+}
+static inline void smp_send_reschedule(int cpu)
+{
+ smp_ops.smp_send_reschedule(cpu);
+}
+static inline int smp_call_function_mask(cpumask_t mask,
+ void (*func) (void *info), void *info,
+ int wait)
+{
+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+void native_smp_cpus_done(unsigned int max_cpus);
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
^ permalink raw reply [flat|nested] 15+ messages in thread

* Re: [PATCH] Add smp_ops interface
2007-03-27 22:09 [PATCH] Add smp_ops interface Jeremy Fitzhardinge
@ 2007-03-27 22:11 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2007-03-27 22:11 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Zachary Amsden, Xen-devel, Linux Kernel Mailing List,
James Bottomley, Virtualization Mailing List, Andrew Morton,
Ingo Molnar
Jeremy Fitzhardinge wrote:
> [ Andrew: Patch is based on 2.6.21-rc5-mm1 + the pda->percpu patch I
> posted the other day. It will have a minor context conflict without
> that patch, but is otherwise standalone. ]
>
Sorry, it also depends on simplify-smp_call_function.patch, which I'll
repost shortly.
J
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2007-03-28 20:14 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-03-27 22:13 [PATCH] Simplify smp_call_function*() by using common implementation Jeremy Fitzhardinge
2007-03-27 22:43 ` Randy Dunlap
2007-03-27 22:46 ` Jeremy Fitzhardinge
2007-03-28 19:03 ` Andi Kleen
2007-03-28 19:18 ` Jeremy Fitzhardinge
2007-03-28 19:22 ` Andi Kleen
2007-03-28 19:31 ` [PATCH] Add smp_ops interface Jeremy Fitzhardinge
2007-03-28 19:32 ` [PATCH] Add machine_ops interface to abstract halting and rebooting Jeremy Fitzhardinge
2007-03-28 19:43 ` Andi Kleen
2007-03-28 19:47 ` Jeremy Fitzhardinge
2007-03-28 19:54 ` Andi Kleen
2007-03-28 20:11 ` [PATCH] Clean up mach_reboot_fixups Jeremy Fitzhardinge
2007-03-28 20:14 ` Andi Kleen
-- strict thread matches above, loose matches on Subject: below --
2007-03-27 22:09 [PATCH] Add smp_ops interface Jeremy Fitzhardinge
2007-03-27 22:11 ` Jeremy Fitzhardinge
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).