* [VOYAGER] fix build broken by shift to smp_ops
@ 2007-05-14 16:08 James Bottomley
2007-05-14 17:10 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 22+ messages in thread
From: James Bottomley @ 2007-05-14 16:08 UTC (permalink / raw)
To: Andrew Morton, Linus Torvalds; +Cc: linux-kernel, Andi Kleen, Eric W. Biederman
From: Jeremy Fitzhardinge <jeremy@xensource.com>
Subject: [VOYAGER] fix build broken by shift to smp_ops
This adds an smp_ops for voyager, and hooks things up appropriately.
This is the first baby-step to making subarch runtime switchable.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Rediffed to work in the absence of the smp consolidation patch
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Andi Kleen <ak@suse.de>
---
Following conversations with Andi, we've agreed that the best way to fix
the currently broken voyager port is to do a local fix converting it to
SMP operations, and then fix the invalid conversions in generic x86 by
re-abstracting the operations so they (and several other functions) can
be shared between x86.
With this, voyager builds and boots on 2.6.22-rc1.
James
---
voyager_smp.c | 99 ++++++++++++++++++++++++++++++++++++++++------------------
1 file changed, 69 insertions(+), 30 deletions(-)
Index: BUILD-voyager/arch/i386/mach-voyager/voyager_smp.c
===================================================================
--- BUILD-voyager.orig/arch/i386/mach-voyager/voyager_smp.c 2007-05-14 10:05:33.000000000 -0500
+++ BUILD-voyager/arch/i386/mach-voyager/voyager_smp.c 2007-05-14 10:46:35.000000000 -0500
@@ -27,7 +27,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
-#include <asm/pda.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -40,6 +39,9 @@ static unsigned long cpu_irq_affinity[NR
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
@@ -202,6 +204,28 @@ ack_CPI(__u8 cpi)
/* local variables */
+/* FIXME: this is a local copy of this function cut and paste from
+ * ../kernel/smpboot.c ... these need to be consolidated properly.
+ *
+ * Initialize the CPU's GDT. This is either the boot CPU doing itself
+ * (still using the master per-cpu area), or a CPU doing it for a
+ * secondary which will soon come up. */
+static __devinit void init_gdt(int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+ (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+ __per_cpu_offset[cpu], 0xFFFFF,
+ 0x80 | DESCTYPE_S | 0x2, 0x8);
+
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ per_cpu(cpu_number, cpu) = cpu;
+}
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+/* end of FIXME */
+
/* The VIC IRQ descriptors -- these look almost identical to the
* 8259 IRQs except that masks and things must be kept per processor
*/
@@ -422,7 +446,7 @@ find_smp_config(void)
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
- write_pda(cpu_number, boot_cpu_id);
+ x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
@@ -435,7 +459,7 @@ smp_store_cpu_info(int id)
*c = boot_cpu_data;
- identify_cpu(c);
+ identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
@@ -459,7 +483,7 @@ start_secondary(void *unused)
/* external functions not defined in the headers */
extern void calibrate_delay(void);
- secondary_cpu_init();
+ cpu_init();
/* OK, we're in the routine */
ack_CPI(VIC_CPU_BOOT_CPI);
@@ -572,7 +596,9 @@ do_boot_cpu(__u8 cpu)
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
- init_gdt(cpu, idle);
+ init_gdt(cpu);
+ per_cpu(current_task, cpu) = idle;
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
irq_ctx_init(cpu);
/* Note: Don't modify initial ss override */
@@ -859,8 +885,8 @@ smp_invalidate_interrupt(void)
/* This routine is called with a physical cpu mask */
static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
{
int stuck = 50000;
@@ -912,7 +938,7 @@ flush_tlb_current_task(void)
cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
local_flush_tlb();
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -934,7 +960,7 @@ flush_tlb_mm (struct mm_struct * mm)
leave_mm(smp_processor_id());
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -955,7 +981,7 @@ void flush_tlb_page(struct vm_area_struc
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, va);
+ voyager_flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
@@ -1044,11 +1070,12 @@ smp_call_function_interrupt(void)
}
static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
- int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask, void (*func) (void *info),
+ void *info, int wait)
{
struct call_data_struct data;
+ u32 mask = cpus_addr(cpumask)[0];
mask &= ~(1<<smp_processor_id());
if (!mask)
@@ -1095,9 +1122,7 @@ int
smp_call_function(void (*func) (void *info), void *info, int retry,
int wait)
{
- __u32 mask = cpus_addr(cpu_online_map)[0];
-
- return __smp_call_function_mask(func, info, retry, wait, mask);
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
@@ -1118,9 +1143,9 @@ int
smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
{
- __u32 mask = 1 << cpu;
+ cpumask_t mask = cpumask_of_cpu(cpu);
- return __smp_call_function_mask(func, info, nonatomic, wait, mask);
+ return smp_call_function_mask(mask, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function_single);
@@ -1138,7 +1163,7 @@ EXPORT_SYMBOL(smp_call_function_single);
* no local APIC, so I can't do this
*
* This function is currently a placeholder and is unused in the code */
-fastcall void
+fastcall void
smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1237,8 +1262,8 @@ smp_alloc_memory(void)
}
/* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
{
send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
@@ -1267,8 +1292,8 @@ safe_smp_processor_id(void)
}
/* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
{
smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
@@ -1930,23 +1955,26 @@ smp_voyager_power_off(void *dummy)
smp_stop_cpu_function(NULL);
}
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
{
/* FIXME: ignore max_cpus for now */
smp_boot_cpus();
}
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
{
+ init_gdt(smp_processor_id());
+ switch_to_new_gdt();
+
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
cpu_set(smp_processor_id(), cpu_present_map);
}
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
{
/* This only works at boot for x86. See "rewrite" above. */
if (cpu_isset(cpu, smp_commenced_mask))
@@ -1962,8 +1990,8 @@ __cpu_up(unsigned int cpu)
return 0;
}
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
{
zap_low_mappings();
}
@@ -1972,5 +2000,16 @@ void __init
smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- write_pda(cpu_number, hard_smp_processor_id());
+ x86_write_percpu(cpu_number, hard_smp_processor_id());
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = voyager_smp_prepare_cpus,
+ .cpu_up = voyager_cpu_up,
+ .smp_cpus_done = voyager_smp_cpus_done,
+
+ .smp_send_stop = voyager_smp_send_stop,
+ .smp_send_reschedule = voyager_smp_send_reschedule,
+ .smp_call_function_mask = voyager_smp_call_function_mask,
+};
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 16:08 [VOYAGER] fix build broken by shift to smp_ops James Bottomley
@ 2007-05-14 17:10 ` Jeremy Fitzhardinge
2007-05-14 17:22 ` James Bottomley
2007-05-14 19:59 ` Andrew Morton
0 siblings, 2 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 17:10 UTC (permalink / raw)
To: James Bottomley
Cc: Andrew Morton, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
James Bottomley wrote:
> From: Jeremy Fitzhardinge <jeremy@xensource.com>
> Subject: [VOYAGER] fix build broken by shift to smp_ops
>
> This adds an smp_ops for voyager, and hooks things up appropriately.
> This is the first baby-step to making subarch runtime switchable.
>
> Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
>
> Rediffed to work in the absence of the smp consolidation patch
>
I think Andrew is carrying that in -mm. If you're proposing this for
git, then we may as well pull in that patch too.
J
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 17:10 ` Jeremy Fitzhardinge
@ 2007-05-14 17:22 ` James Bottomley
2007-05-14 19:03 ` Jeremy Fitzhardinge
2007-05-14 19:59 ` Andrew Morton
1 sibling, 1 reply; 22+ messages in thread
From: James Bottomley @ 2007-05-14 17:22 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 2007-05-14 at 10:10 -0700, Jeremy Fitzhardinge wrote:
> James Bottomley wrote:
> > From: Jeremy Fitzhardinge <jeremy@xensource.com>
> > Subject: [VOYAGER] fix build broken by shift to smp_ops
> >
> > This adds an smp_ops for voyager, and hooks things up appropriately.
> > This is the first baby-step to making subarch runtime switchable.
> >
> > Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
> >
> > Rediffed to work in the absence of the smp consolidation patch
> >
>
> I think Andrew is carrying that in -mm. If you're proposing this for
> git, then we may as well pull in that patch too.
No, I'm proposing this for 2.6.22-rc1 ... Andi has already said he won't
push the smp consolidation patch for 2.6.22.
Without this patch, voyager won't even build, since the smp_ops broke
it, so it needs to be fixed *now*.
James
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 17:22 ` James Bottomley
@ 2007-05-14 19:03 ` Jeremy Fitzhardinge
2007-05-14 20:09 ` Andi Kleen
0 siblings, 1 reply; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 19:03 UTC (permalink / raw)
To: James Bottomley
Cc: Andrew Morton, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
James Bottomley wrote:
> No, I'm proposing this for 2.6.22-rc1 ... Andi has already said he won't
> push the smp consolidation patch for 2.6.22.
>
> Without this patch, voyager won't even build, since the smp_ops broke
> it, so it needs to be fixed *now*.
>
Sure, I suppose. But given that the patch which fixes this is a simple
no-functional-changes code-motion patch, it seems odd to not use it in
favour of putting in a big chunk of duplicated code.
J
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 17:10 ` Jeremy Fitzhardinge
2007-05-14 17:22 ` James Bottomley
@ 2007-05-14 19:59 ` Andrew Morton
2007-05-14 20:02 ` Jeremy Fitzhardinge
1 sibling, 1 reply; 22+ messages in thread
From: Andrew Morton @ 2007-05-14 19:59 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 14 May 2007 10:10:48 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> James Bottomley wrote:
> > From: Jeremy Fitzhardinge <jeremy@xensource.com>
> > Subject: [VOYAGER] fix build broken by shift to smp_ops
> >
> > This adds an smp_ops for voyager, and hooks things up appropriately.
> > This is the first baby-step to making subarch runtime switchable.
> >
> > Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
> >
> > Rediffed to work in the absence of the smp consolidation patch
> >
>
> I think Andrew is carrying that in -mm. If you're proposing this for
> git, then we may as well pull in that patch too.
>
Does "that" have a name? I can find no patch in -mm which appears to have
anything to do with SMP consolidation, and this patch applies cleanly to
the current -mm lineup.
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 19:59 ` Andrew Morton
@ 2007-05-14 20:02 ` Jeremy Fitzhardinge
2007-05-14 20:37 ` Andrew Morton
0 siblings, 1 reply; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 20:02 UTC (permalink / raw)
To: Andrew Morton
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
Andrew Morton wrote:
> Does "that" have a name? I can find no patch in -mm which appears to have
> anything to do with SMP consolidation, and this patch applies cleanly to
> the current -mm lineup.
>
Sorry, I thought you'd picked this up:
Subject: i386: move common parts of smp into their own file
Several parts of kernel/smp.c and smpboot.c are generally useful for
other subarchitectures and paravirt_ops implementations, so make them
available for reuse.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
---
arch/i386/kernel/Makefile | 1
arch/i386/kernel/smp.c | 65 +++-------------------------------
arch/i386/kernel/smpboot.c | 22 -----------
arch/i386/kernel/smpcommon.c | 79 ++++++++++++++++++++++++++++++++++++++++++
include/asm-i386/processor.h | 4 ++
5 files changed, 90 insertions(+), 81 deletions(-)
===================================================================
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP) += smpcommon.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -468,7 +468,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -547,9 +547,10 @@ static void __smp_call_function(void (*f
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int native_smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
cpumask_t allbutself;
@@ -600,60 +601,6 @@ int native_smp_call_function_mask(cpumas
return 0;
}
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
static void stop_this_cpu (void * dummy)
{
local_irq_disable();
@@ -671,7 +618,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
===================================================================
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -99,9 +99,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
u8 apicid_2_node[MAX_APICID];
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
/*
* Trampoline 80x86 program as an array.
*/
@@ -766,25 +763,6 @@ static inline struct task_struct * alloc
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
- (still using the master per-cpu area), or a CPU doing it for a
- secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
- pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
- (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
- __per_cpu_offset[cpu], 0xFFFFF,
- 0x80 | DESCTYPE_S | 0x2, 0x8);
-
- per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
===================================================================
--- /dev/null
+++ b/arch/i386/kernel/smpcommon.c
@@ -0,0 +1,79 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT. This is either the boot CPU doing itself
+ (still using the master per-cpu area), or a CPU doing it for a
+ secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+ (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+ __per_cpu_offset[cpu], 0xFFFFF,
+ 0x80 | DESCTYPE_S | 0x2, 0x8);
+
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ /* prevent preemption and reschedule on another processor */
+ int ret;
+ int me = get_cpu();
+ if (cpu == me) {
+ WARN_ON(1);
+ put_cpu();
+ return -EBUSY;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
===================================================================
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -749,9 +749,13 @@ extern void enable_sep_cpu(void);
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
+extern void init_gdt(int cpu);
extern int force_mwait;
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 19:03 ` Jeremy Fitzhardinge
@ 2007-05-14 20:09 ` Andi Kleen
0 siblings, 0 replies; 22+ messages in thread
From: Andi Kleen @ 2007-05-14 20:09 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: James Bottomley, Andrew Morton, Linus Torvalds, linux-kernel,
Eric W. Biederman
On Monday 14 May 2007 21:03, Jeremy Fitzhardinge wrote:
> James Bottomley wrote:
> > No, I'm proposing this for 2.6.22-rc1 ... Andi has already said he won't
> > push the smp consolidation patch for 2.6.22.
> >
> > Without this patch, voyager won't even build, since the smp_ops broke
> > it, so it needs to be fixed *now*.
>
> Sure, I suppose. But given that the patch which fixes this is a simple
> no-functional-changes code-motion patch, it seems odd to not use it in
> favour of putting in a big chunk of duplicated code.
Ok. If it's just the code movement patch that's needed perhaps it's better
to use that.
-Andi
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 20:02 ` Jeremy Fitzhardinge
@ 2007-05-14 20:37 ` Andrew Morton
2007-05-14 20:48 ` Jeremy Fitzhardinge
2007-05-14 20:54 ` James Bottomley
0 siblings, 2 replies; 22+ messages in thread
From: Andrew Morton @ 2007-05-14 20:37 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 14 May 2007 13:02:42 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> Andrew Morton wrote:
> > Does "that" have name? I can find no patch in -mm which appears to have
> > anything to do with SMP consolidation, and this patch applies cleanly to
> > the current -mm lineup.
> >
> Sorry, I thought you'd picked this up:
>
>
> Subject: i386: move common parts of smp into their own file
>
> Several parts of kernel/smp.c and smpboot.c are generally useful for
> other subarchitectures and paravirt_ops implementations, so make them
> available for reuse.
Confused. This patch conflicts a lot with James's one (which I named
voyager-fix-build-broken-by-shift-to-smp_ops.patch).
If your "i386: move common parts of smp into their own file" also fixes
Voyager and is preferred then cool, but a) the changelog should tell us
that and b) could James please test it?
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 20:37 ` Andrew Morton
@ 2007-05-14 20:48 ` Jeremy Fitzhardinge
2007-05-14 20:54 ` James Bottomley
1 sibling, 0 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 20:48 UTC (permalink / raw)
To: Andrew Morton
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
Andrew Morton wrote:
> Confused. This patch conflicts a lot with James's one (which I named
> voyager-fix-build-broken-by-shift-to-smp_ops.patch).
>
> If your "i386: move common parts of smp into their own file" also fixes
> Voyager and is preferred then cool, but a) the changelog should tell us
> that and b) could James please test it?
>
The first version of the patch did both, which I guess is the one you've
got. The one I just sent has no Voyager pieces, and has a
Voyager-specific companion patch, which is the basis of the one that
James posted (with pieces of this patch pasted into it).
J
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 20:37 ` Andrew Morton
2007-05-14 20:48 ` Jeremy Fitzhardinge
@ 2007-05-14 20:54 ` James Bottomley
2007-05-14 21:05 ` Andrew Morton
1 sibling, 1 reply; 22+ messages in thread
From: James Bottomley @ 2007-05-14 20:54 UTC (permalink / raw)
To: Andrew Morton
Cc: Jeremy Fitzhardinge, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 2007-05-14 at 13:37 -0700, Andrew Morton wrote:
> On Mon, 14 May 2007 13:02:42 -0700
> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>
> > Andrew Morton wrote:
> > > Does "that" have a name? I can find no patch in -mm which appears to have
> > > anything to do with SMP consolidation, and this patch applies cleanly to
> > > the current -mm lineup.
> > >
> > Sorry, I thought you'd picked this up:
> >
> >
> > Subject: i386: move common parts of smp into their own file
> >
> > Several parts of kernel/smp.c and smpboot.c are generally useful for
> > other subarchitectures and paravirt_ops implementations, so make them
> > available for reuse.
>
> Confused. This patch conflicts a lot with James's one (which I named
> voyager-fix-build-broken-by-shift-to-smp_ops.patch).
> If your "i386: move common parts of smp into their own file" also fixes
> Voyager and is preferred then cool, but a) the changelog should tell us
> that and b) could James please test it?
OK, let me try a brief history. A while ago Eric pointed out that the
smp ops patch in -mm would break voyager. So we worked on (and tested a
fix for it). Part of the fix was the prerequisite patch "i386: move
common parts of smp into their own file". The fix on top of this was
called "i386: fix voyager build" which actually fixed the voyager build.
I've been nagging Andi for a couple of weeks now to get these two
upstream. Finally he replied that he wasn't planning on sending the
precursor "i386: move common parts of smp into their own file" upstream
for 2.6.22. So I had to do a patch that would fix the voyager build
without this ... which is what you have.
So, you either need the single patch you have, or the other two entitled
"i386: move common parts of smp into their own file".
"i386: fix voyager build"
James
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 20:54 ` James Bottomley
@ 2007-05-14 21:05 ` Andrew Morton
2007-05-14 21:08 ` James Bottomley
2007-05-14 21:10 ` Jeremy Fitzhardinge
0 siblings, 2 replies; 22+ messages in thread
From: Andrew Morton @ 2007-05-14 21:05 UTC (permalink / raw)
To: James Bottomley
Cc: Jeremy Fitzhardinge, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 14 May 2007 15:54:18 -0500
James Bottomley <James.Bottomley@HansenPartnership.com> wrote:
> On Mon, 2007-05-14 at 13:37 -0700, Andrew Morton wrote:
> > On Mon, 14 May 2007 13:02:42 -0700
> > Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> >
> > > Andrew Morton wrote:
> > > > Does "that" have a name? I can find no patch in -mm which appears to have
> > > > anything to do with SMP consolidation, and this patch applies cleanly to
> > > > the current -mm lineup.
> > > >
> > > Sorry, I thought you'd picked this up:
> > >
> > >
> > > Subject: i386: move common parts of smp into their own file
> > >
> > > Several parts of kernel/smp.c and smpboot.c are generally useful for
> > > other subarchitectures and paravirt_ops implementations, so make them
> > > available for reuse.
> >
> > Confused. This patch conflicts a lot with James's one (which I named
> > voyager-fix-build-broken-by-shift-to-smp_ops.patch).
>
> > If your "i386: move common parts of smp into their own file" also fixes
> > Voyager and is preferred then cool, but a) the changelog should tell us
> > that and b) could James please test it?
>
> OK, let me try a brief history. A while ago Eric pointed out that the
> smp ops patch in -mm would break voyager. So we worked on (and tested a
> fix for it). Part of the fix was the prerequisite patch "i386: move
> common parts of smp into their own file". The fix on top of this was
> called "i386: fix voyager build" which actually fixed the voyager build.
>
> I've been nagging Andi for a couple of weeks now to get these two
> upstream. Finally he replied that he wasn't planning on sending the
> precursor "i386: move common parts of smp into their own file" upstream
> for 2.6.22. So I had to do a patch that would fix the voyager build
> without this ... which is what you have.
uh, I suspected it was something like that.
> So, you either need the single patch you have, or the other two entitled
>
> "i386: move common parts of smp into their own file".
> "i386: fix voyager build"
OK, thanks. I hereby propose that I send the below
(voyager-fix-build-broken-by-shift-to-smp_ops.patch) to Linus later today,
provided it passes local testing.
All those in favour say aye?
From: Jeremy Fitzhardinge <jeremy@xensource.com>
This adds an smp_ops for voyager, and hooks things up appropriately.
This is the first baby-step to making subarch runtime switchable.
Rediffed to work in the absence of the smp consolidation patch
Following conversations with Andi, we've agreed that the best way to fix
the currently broken voyager port is to do a local fix converting it to
SMP operations, and then fix the invalid conversions in generic x86 by
re-abstracting the operations so they (and several other functions) can
be shared between x86.
With this, voyager builds and boots on 2.6.22-rc1.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
arch/i386/mach-voyager/voyager_smp.c | 99 +++++++++++++++++--------
1 files changed, 69 insertions(+), 30 deletions(-)
diff -puN arch/i386/mach-voyager/voyager_smp.c~voyager-fix-build-broken-by-shift-to-smp_ops arch/i386/mach-voyager/voyager_smp.c
--- a/arch/i386/mach-voyager/voyager_smp.c~voyager-fix-build-broken-by-shift-to-smp_ops
+++ a/arch/i386/mach-voyager/voyager_smp.c
@@ -27,7 +27,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
-#include <asm/pda.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -40,6 +39,9 @@ static unsigned long cpu_irq_affinity[NR
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
@@ -202,6 +204,28 @@ ack_CPI(__u8 cpi)
/* local variables */
+/* FIXME: this is a local copy of this function cut and paste from
+ * ../kernel/smpboot.c ... these need to be consolidated properly.
+ *
+ * Initialize the CPU's GDT. This is either the boot CPU doing itself
+ * (still using the master per-cpu area), or a CPU doing it for a
+ * secondary which will soon come up. */
+static __devinit void init_gdt(int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+ (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+ __per_cpu_offset[cpu], 0xFFFFF,
+ 0x80 | DESCTYPE_S | 0x2, 0x8);
+
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ per_cpu(cpu_number, cpu) = cpu;
+}
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+/* end of FIXME */
+
/* The VIC IRQ descriptors -- these look almost identical to the
* 8259 IRQs except that masks and things must be kept per processor
*/
@@ -422,7 +446,7 @@ find_smp_config(void)
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
- write_pda(cpu_number, boot_cpu_id);
+ x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
@@ -435,7 +459,7 @@ smp_store_cpu_info(int id)
*c = boot_cpu_data;
- identify_cpu(c);
+ identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
@@ -459,7 +483,7 @@ start_secondary(void *unused)
/* external functions not defined in the headers */
extern void calibrate_delay(void);
- secondary_cpu_init();
+ cpu_init();
/* OK, we're in the routine */
ack_CPI(VIC_CPU_BOOT_CPI);
@@ -572,7 +596,9 @@ do_boot_cpu(__u8 cpu)
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
- init_gdt(cpu, idle);
+ init_gdt(cpu);
+ per_cpu(current_task, cpu) = idle;
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
irq_ctx_init(cpu);
/* Note: Don't modify initial ss override */
@@ -859,8 +885,8 @@ smp_invalidate_interrupt(void)
/* This routine is called with a physical cpu mask */
static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
{
int stuck = 50000;
@@ -912,7 +938,7 @@ flush_tlb_current_task(void)
cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
local_flush_tlb();
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -934,7 +960,7 @@ flush_tlb_mm (struct mm_struct * mm)
leave_mm(smp_processor_id());
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -955,7 +981,7 @@ void flush_tlb_page(struct vm_area_struc
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, va);
+ voyager_flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
@@ -1044,11 +1070,12 @@ smp_call_function_interrupt(void)
}
static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
- int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask, void (*func) (void *info),
+ void *info, int wait)
{
struct call_data_struct data;
+ u32 mask = cpus_addr(cpumask)[0];
mask &= ~(1<<smp_processor_id());
if (!mask)
@@ -1095,9 +1122,7 @@ int
smp_call_function(void (*func) (void *info), void *info, int retry,
int wait)
{
- __u32 mask = cpus_addr(cpu_online_map)[0];
-
- return __smp_call_function_mask(func, info, retry, wait, mask);
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
@@ -1118,9 +1143,9 @@ int
smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
{
- __u32 mask = 1 << cpu;
+ cpumask_t mask = cpumask_of_cpu(cpu);
- return __smp_call_function_mask(func, info, nonatomic, wait, mask);
+ return smp_call_function_mask(mask, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function_single);
@@ -1138,7 +1163,7 @@ EXPORT_SYMBOL(smp_call_function_single);
* no local APIC, so I can't do this
*
* This function is currently a placeholder and is unused in the code */
-fastcall void
+fastcall void
smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1237,8 +1262,8 @@ smp_alloc_memory(void)
}
/* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
{
send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
@@ -1267,8 +1292,8 @@ safe_smp_processor_id(void)
}
/* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
{
smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
@@ -1930,23 +1955,26 @@ smp_voyager_power_off(void *dummy)
smp_stop_cpu_function(NULL);
}
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
{
/* FIXME: ignore max_cpus for now */
smp_boot_cpus();
}
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
{
+ init_gdt(smp_processor_id());
+ switch_to_new_gdt();
+
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
cpu_set(smp_processor_id(), cpu_present_map);
}
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
{
/* This only works at boot for x86. See "rewrite" above. */
if (cpu_isset(cpu, smp_commenced_mask))
@@ -1962,8 +1990,8 @@ __cpu_up(unsigned int cpu)
return 0;
}
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
{
zap_low_mappings();
}
@@ -1972,5 +2000,16 @@ void __init
smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- write_pda(cpu_number, hard_smp_processor_id());
+ x86_write_percpu(cpu_number, hard_smp_processor_id());
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = voyager_smp_prepare_cpus,
+ .cpu_up = voyager_cpu_up,
+ .smp_cpus_done = voyager_smp_cpus_done,
+
+ .smp_send_stop = voyager_smp_send_stop,
+ .smp_send_reschedule = voyager_smp_send_reschedule,
+ .smp_call_function_mask = voyager_smp_call_function_mask,
+};
_
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 21:05 ` Andrew Morton
@ 2007-05-14 21:08 ` James Bottomley
2007-05-14 21:10 ` Jeremy Fitzhardinge
1 sibling, 0 replies; 22+ messages in thread
From: James Bottomley @ 2007-05-14 21:08 UTC (permalink / raw)
To: Andrew Morton
Cc: Jeremy Fitzhardinge, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 2007-05-14 at 14:05 -0700, Andrew Morton wrote:
> OK, thanks. I hereby propose that I send the below
> (voyager-fix-build-broken-by-shift-to-smp_ops.patch) to Linus later
> today,
> provided it passes local testing.
>
> All those in favour say aye?
Boots for me on my voyagers, so aye.
James
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 21:05 ` Andrew Morton
2007-05-14 21:08 ` James Bottomley
@ 2007-05-14 21:10 ` Jeremy Fitzhardinge
2007-05-14 21:51 ` Andrew Morton
2007-05-14 21:58 ` Andi Kleen
1 sibling, 2 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 21:10 UTC (permalink / raw)
To: Andrew Morton
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
Andrew Morton wrote:
> On Mon, 14 May 2007 15:54:18 -0500
> James Bottomley <James.Bottomley@HansenPartnership.com> wrote:
>
>
>> On Mon, 2007-05-14 at 13:37 -0700, Andrew Morton wrote:
>>
>>> On Mon, 14 May 2007 13:02:42 -0700
>>> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>>>
>>>
>>>> Andrew Morton wrote:
>>>>
>>>>> Does "that" have name? I can find no patch in -mm which appears to have
>>>>> anything to do with SMP consolidation, and this patch applies cleanly to
>>>>> the current -mm lineup.
>>>>>
>>>>>
>>>> Sorry, I thought you'd picked this up:
>>>>
>>>>
>>>> Subject: i386: move common parts of smp into their own file
>>>>
>>>> Several parts of kernel/smp.c and smpboot.c are generally useful for
>>>> other subarchitectures and paravirt_ops implementations, so make them
>>>> available for reuse.
>>>>
>>> Confused. This patch conflicts a lot with James's one (which I named
>>> voyager-fix-build-broken-by-shift-to-smp_ops.patch).
>>>
>>> If your "i386: move common parts of smp into their own file" also fixes
>>> Voyager and is preferred then cool, but a) the changelog should tell us
>>> that and b) could James please test it?
>>>
>> OK, let me try a brief history. A while ago Eric pointed out that the
>> smp ops patch in -mm would break voyager. So we worked on (and tested a
>> fix for it). Part of the fix was the prerequisite patch "i386: move
>> common parts of smp into their own file". The fix on top of this was
>> called "i386: fix voyager build" which actually fixed the voyager build.
>>
>> I've been nagging Andi for a couple of weeks now to get these two
>> upstream. Finally he replied that he wasn't planning on sending the
>> precursor "i386: move common parts of smp into their own file" upstream
>> for 2.6.22. So I had to do a patch that would fix the voyager build
>> without this ... which is what you have.
>>
>
> uh, I suspected it was something like that.
>
>
>> So, you either need the single patch you have, or the other two entitled
>>
>> "i386: move common parts of smp into their own file".
>> "i386: fix voyager build"
>>
>
> OK, thanks. I hereby propose that I send the below
> (voyager-fix-build-broken-by-shift-to-smp_ops.patch) to Linus later today,
> provided it passes local testing.
>
> All those in favour say aye?
>
OK, but only if you don't want to put "i386: move common parts of smp
into their own file" in front of it, and remove the duplicated code. I
could send you a third copy if you like.
J
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 21:10 ` Jeremy Fitzhardinge
@ 2007-05-14 21:51 ` Andrew Morton
2007-05-14 22:00 ` Jeremy Fitzhardinge
2007-05-14 21:58 ` Andi Kleen
1 sibling, 1 reply; 22+ messages in thread
From: Andrew Morton @ 2007-05-14 21:51 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 14 May 2007 14:10:23 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> > OK, thanks. I hereby propose that I send the below
> > (voyager-fix-build-broken-by-shift-to-smp_ops.patch) to Linus later today,
> > provided it passes local testing.
> >
> > All those in favour say aye?
> >
>
> OK, but only if you don't want to put "i386: move common parts of smp
> into their own file" in front of it, and remove the duplicated code. I
> could send you a third copy if you like.
Well, let's get the build fix in place first and then we can feed
the cleanup in later on, in a more leisurely fashion?
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 21:10 ` Jeremy Fitzhardinge
2007-05-14 21:51 ` Andrew Morton
@ 2007-05-14 21:58 ` Andi Kleen
2007-05-14 22:29 ` Andrew Morton
1 sibling, 1 reply; 22+ messages in thread
From: Andi Kleen @ 2007-05-14 21:58 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, James Bottomley, Linus Torvalds, linux-kernel,
Eric W. Biederman
> OK, but only if you don't want to put "i386: move common parts of smp
> into their own file" in front of it, and remove the duplicated code. I
> could send you a third copy if you like.
Using Jeremy's patch is better than James'
-Andi
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 21:51 ` Andrew Morton
@ 2007-05-14 22:00 ` Jeremy Fitzhardinge
2007-05-14 22:27 ` James Bottomley
0 siblings, 1 reply; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 22:00 UTC (permalink / raw)
To: Andrew Morton
Cc: James Bottomley, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
Andrew Morton wrote:
> Well, let's get the build fix in place first and then we can feed
> the cleanup in later on, in a more leisurely fashion?
>
If you like. Are all the Voyager users standing outside your office
with pitchforks, demanding satisfaction?
J
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 22:00 ` Jeremy Fitzhardinge
@ 2007-05-14 22:27 ` James Bottomley
2007-05-14 22:55 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 22+ messages in thread
From: James Bottomley @ 2007-05-14 22:27 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Andrew Morton, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
On Mon, 2007-05-14 at 15:00 -0700, Jeremy Fitzhardinge wrote:
> Andrew Morton wrote:
> > Well, let's get the build fix in place first and then we can feed
> > the cleanup in later on, in a more leisurely fashion?
> >
>
> If you like. Are all the Voyager users standing outside your office
> with pitchforks, demanding satisfaction?
That could be arranged ... The Xensource offices are just up the road
from Google, you know if I wanted to kill two birds with one stone.
Seriously, though, although I tolerate voyager breakage through the
merge window, I actively try to clean it up and have a working voyager
for the next release. There's no real excuse for not fixing build
breakage. How it's done, I don't really care. The only blocker for the
two patch smp consolidation + voyager fix was Andi ... if he's relaxing
that, then we can go that route.
Until this is fixed, there are going to be -rc testers firing off build
failure reports, which I think we'd all like to avoid.
James
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 21:58 ` Andi Kleen
@ 2007-05-14 22:29 ` Andrew Morton
2007-05-14 22:40 ` Jeremy Fitzhardinge
2007-05-14 22:42 ` James Bottomley
0 siblings, 2 replies; 22+ messages in thread
From: Andrew Morton @ 2007-05-14 22:29 UTC (permalink / raw)
To: Andi Kleen
Cc: Jeremy Fitzhardinge, James Bottomley, Linus Torvalds,
linux-kernel, Eric W. Biederman
On Mon, 14 May 2007 23:58:09 +0200
Andi Kleen <ak@suse.de> wrote:
>
> > OK, but only if you don't want to put "i386: move common parts of smp
> > into their own file" in front of it, and remove the duplicated code. I
> > could send you a third copy if you like.
>
> Using Jeremy's patch is better than James'
>
This is getting comical.
According to my records, the patch
voyager-fix-build-broken-by-shift-to-smp_ops.patch _is_ Jeremy's patch.
James forwarded it.
I take it from your statement that we should merge some Jeremy-patch other
than this Jeremy-patch?
If "yes", then could Jeremy please resend the other patch(es) (I believe there
are multiple patches involved) and then can James please test them?
If "no" then I think I'll just go shopping.
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 22:29 ` Andrew Morton
@ 2007-05-14 22:40 ` Jeremy Fitzhardinge
2007-05-14 22:42 ` James Bottomley
1 sibling, 0 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 22:40 UTC (permalink / raw)
To: Andrew Morton
Cc: Andi Kleen, James Bottomley, Linus Torvalds, linux-kernel,
Eric W. Biederman
[-- Attachment #1: Type: text/plain, Size: 755 bytes --]
Andrew Morton wrote:
> This is getting comical.
>
> According to my records, the patch
> voyager-fix-build-broken-by-shift-to-smp_ops.patch _is_ Jeremy's patch.
> James forwarded it.
>
> I take it from your statement that we should merge some Jeremy-patch other
> than this Jeremy-patch?
>
> If "yes", then could Jeremy please resend the other patch(es) (I believe there
> are multiple patches involved) and then can James please test them?
>
> If "no" then I think I'll just go shopping.
>
Maybe we should just refer to all patches by their SHA1 hashes.
I'm attaching the two I have, which build for me, and I think James has
tested as working.
(21583c0234c7cc1cd6c4f312ce7a2456086e2323 and
e8862935d0e773f12d73a9502f4bc7de06a6cedd, BTW.)
J
[-- Attachment #2: i386-common-smp.patch --]
[-- Type: text/x-patch, Size: 8834 bytes --]
Subject: i386: move common parts of smp into their own file
Several parts of kernel/smp.c and smpboot.c are generally useful for
other subarchitectures and paravirt_ops implementations, so make them
available for reuse.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
---
arch/i386/kernel/Makefile | 1
arch/i386/kernel/smp.c | 65 +++-------------------------------
arch/i386/kernel/smpboot.c | 22 -----------
arch/i386/kernel/smpcommon.c | 79 ++++++++++++++++++++++++++++++++++++++++++
include/asm-i386/processor.h | 4 ++
5 files changed, 90 insertions(+), 81 deletions(-)
===================================================================
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP) += smpcommon.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
===================================================================
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -468,7 +468,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -547,9 +547,10 @@ static void __smp_call_function(void (*f
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int native_smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
cpumask_t allbutself;
@@ -600,60 +601,6 @@ int native_smp_call_function_mask(cpumas
return 0;
}
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
static void stop_this_cpu (void * dummy)
{
local_irq_disable();
@@ -671,7 +618,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
===================================================================
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -99,9 +99,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
u8 apicid_2_node[MAX_APICID];
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
/*
* Trampoline 80x86 program as an array.
*/
@@ -766,25 +763,6 @@ static inline struct task_struct * alloc
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
- (still using the master per-cpu area), or a CPU doing it for a
- secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
- pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
- (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
- __per_cpu_offset[cpu], 0xFFFFF,
- 0x80 | DESCTYPE_S | 0x2, 0x8);
-
- per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
===================================================================
--- /dev/null
+++ b/arch/i386/kernel/smpcommon.c
@@ -0,0 +1,79 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT. This is either the boot CPU doing itself
+ (still using the master per-cpu area), or a CPU doing it for a
+ secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+ (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+ __per_cpu_offset[cpu], 0xFFFFF,
+ 0x80 | DESCTYPE_S | 0x2, 0x8);
+
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ /* prevent preemption and reschedule on another processor */
+ int ret;
+ int me = get_cpu();
+ if (cpu == me) {
+ WARN_ON(1);
+ put_cpu();
+ return -EBUSY;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
===================================================================
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -749,9 +749,13 @@ extern void enable_sep_cpu(void);
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
+extern void init_gdt(int cpu);
extern int force_mwait;
[-- Attachment #3: i386-fix-voyager-build.patch --]
[-- Type: text/x-patch, Size: 7187 bytes --]
Subject: i386: fix voyager build
This adds an smp_ops for voyager, and hooks things up appropriately.
This is the first baby-step to making subarch runtime switchable.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
---
arch/i386/mach-voyager/voyager_smp.c | 110 +++++++++++++---------------------
1 file changed, 43 insertions(+), 67 deletions(-)
===================================================================
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -28,7 +28,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
-#include <asm/pda.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -423,7 +422,7 @@ find_smp_config(void)
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
- write_pda(cpu_number, boot_cpu_id);
+ x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
@@ -436,7 +435,7 @@ smp_store_cpu_info(int id)
*c = boot_cpu_data;
- identify_cpu(c);
+ identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
@@ -460,7 +459,7 @@ start_secondary(void *unused)
/* external functions not defined in the headers */
extern void calibrate_delay(void);
- secondary_cpu_init();
+ cpu_init();
/* OK, we're in the routine */
ack_CPI(VIC_CPU_BOOT_CPI);
@@ -573,7 +572,9 @@ do_boot_cpu(__u8 cpu)
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
- init_gdt(cpu, idle);
+ init_gdt(cpu);
+ per_cpu(current_task, cpu) = idle;
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
irq_ctx_init(cpu);
/* Note: Don't modify initial ss override */
@@ -860,8 +861,8 @@ smp_invalidate_interrupt(void)
/* This routine is called with a physical cpu mask */
static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
{
int stuck = 50000;
@@ -913,7 +914,7 @@ flush_tlb_current_task(void)
cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
local_flush_tlb();
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -935,7 +936,7 @@ flush_tlb_mm (struct mm_struct * mm)
leave_mm(smp_processor_id());
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -956,7 +957,7 @@ void flush_tlb_page(struct vm_area_struc
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, va);
+ voyager_flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
@@ -1045,10 +1046,12 @@ smp_call_function_interrupt(void)
}
static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
- int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask,
+ void (*func) (void *info), void *info,
+ int wait)
{
struct call_data_struct data;
+ u32 mask = cpus_addr(cpumask)[0];
mask &= ~(1<<smp_processor_id());
@@ -1083,47 +1086,6 @@ __smp_call_function_mask (void (*func) (
return 0;
}
-
-/* Call this function on all CPUs using the function_interrupt above
- <func> The function to run. This must be fast and non-blocking.
- <info> An arbitrary pointer to pass to the function.
- <retry> If true, keep retrying until ready.
- <wait> If true, wait until function has completed on other CPUs.
- [RETURNS] 0 on success, else a negative status code. Does not return until
- remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function(void (*func) (void *info), void *info, int retry,
- int wait)
-{
- __u32 mask = cpus_addr(cpu_online_map)[0];
-
- return __smp_call_function_mask(func, info, retry, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- __u32 mask = 1 << cpu;
-
- return __smp_call_function_mask(func, info, nonatomic, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function_single);
/* Sorry about the name. In an APIC based system, the APICs
* themselves are programmed to send a timer interrupt. This is used
@@ -1238,8 +1200,8 @@ smp_alloc_memory(void)
}
/* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
{
send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
@@ -1268,8 +1230,8 @@ safe_smp_processor_id(void)
}
/* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
{
smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
@@ -1931,23 +1893,26 @@ smp_voyager_power_off(void *dummy)
smp_stop_cpu_function(NULL);
}
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
{
/* FIXME: ignore max_cpus for now */
smp_boot_cpus();
}
-void __devinit smp_prepare_boot_cpu(void)
-{
+static void __devinit voyager_smp_prepare_boot_cpu(void)
+{
+ init_gdt(smp_processor_id());
+ switch_to_new_gdt();
+
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
cpu_set(smp_processor_id(), cpu_present_map);
}
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
{
/* This only works at boot for x86. See "rewrite" above. */
if (cpu_isset(cpu, smp_commenced_mask))
@@ -1963,8 +1928,8 @@ __cpu_up(unsigned int cpu)
return 0;
}
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
{
zap_low_mappings();
}
@@ -1973,5 +1938,16 @@ smp_setup_processor_id(void)
smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- write_pda(cpu_number, hard_smp_processor_id());
-}
+ x86_write_percpu(cpu_number, hard_smp_processor_id());
+}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = voyager_smp_prepare_cpus,
+ .cpu_up = voyager_cpu_up,
+ .smp_cpus_done = voyager_smp_cpus_done,
+
+ .smp_send_stop = voyager_smp_send_stop,
+ .smp_send_reschedule = voyager_smp_send_reschedule,
+ .smp_call_function_mask = voyager_smp_call_function_mask,
+};
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 22:29 ` Andrew Morton
2007-05-14 22:40 ` Jeremy Fitzhardinge
@ 2007-05-14 22:42 ` James Bottomley
2007-05-14 22:57 ` Jeremy Fitzhardinge
1 sibling, 1 reply; 22+ messages in thread
From: James Bottomley @ 2007-05-14 22:42 UTC (permalink / raw)
To: Andrew Morton
Cc: Andi Kleen, Jeremy Fitzhardinge, Linus Torvalds, linux-kernel,
Eric W. Biederman
[-- Attachment #1: Type: text/plain, Size: 1070 bytes --]
On Mon, 2007-05-14 at 15:29 -0700, Andrew Morton wrote:
> On Mon, 14 May 2007 23:58:09 +0200
> Andi Kleen <ak@suse.de> wrote:
>
> >
> > > OK, but only if you don't want to put "i386: move common parts of smp
> > > into their own file" in front of it, and remove the duplicated code. I
> > > could send you a third copy if you like.
> >
> > Using Jeremy's patch is better than James'
> >
>
> This is getting comical.
>
> According to my records, the patch
> voyager-fix-build-broken-by-shift-to-smp_ops.patch _is_ Jeremy's patch.
> James forwarded it.
It's Jeremy's patch modified to work in the absence of the smp ops
consolidation patch.
> I take it from your statement that we should merge some Jeremy-patch other
> than this Jeremy-patch?
>
> If "yes", then could Jeremy please resend the other patch(es) (I believe there
> are multiple patches involved) and then can James please test them?
Let me do it.
These are the two patches, tested and working on Voyager.
The order of application is
i386-common-smp.patch
i386-fix-voyager-build.patch
James
[-- Attachment #2: i386-common-smp.patch --]
[-- Type: message/rfc822, Size: 9800 bytes --]
From: Jeremy Fitzhardinge <jeremy@xensource.com>
Subject: i386: move common parts of smp into their own file
Date: Mon, 14 May 2007 17:42:35 -0500
Message-ID: <1179182555.3703.103.camel@mulgrave.il.steeleye.com>
Several parts of kernel/smp.c and smpboot.c are generally useful for
other subarchitectures and paravirt_ops implementations, so make them
available for reuse.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
---
arch/i386/kernel/Makefile | 1
arch/i386/kernel/smp.c | 65 +++-------------------------------
arch/i386/kernel/smpboot.c | 22 -----------
arch/i386/kernel/smpcommon.c | 79 ++++++++++++++++++++++++++++++++++++++++++
include/asm-i386/processor.h | 4 ++
5 files changed, 90 insertions(+), 81 deletions(-)
===================================================================
Index: BUILD-voyager/arch/i386/kernel/Makefile
===================================================================
--- BUILD-voyager.orig/arch/i386/kernel/Makefile 2007-05-08 17:00:34.000000000 -0500
+++ BUILD-voyager/arch/i386/kernel/Makefile 2007-05-08 17:00:45.000000000 -0500
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP) += smpcommon.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
Index: BUILD-voyager/arch/i386/kernel/smp.c
===================================================================
--- BUILD-voyager.orig/arch/i386/kernel/smp.c 2007-05-08 17:00:34.000000000 -0500
+++ BUILD-voyager/arch/i386/kernel/smp.c 2007-05-08 17:00:45.000000000 -0500
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,9 +546,10 @@ static void __smp_call_function(void (*f
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int native_smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
cpumask_t allbutself;
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumas
return 0;
}
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
static void stop_this_cpu (void * dummy)
{
local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
Index: BUILD-voyager/arch/i386/kernel/smpboot.c
===================================================================
--- BUILD-voyager.orig/arch/i386/kernel/smpboot.c 2007-05-08 17:00:34.000000000 -0500
+++ BUILD-voyager/arch/i386/kernel/smpboot.c 2007-05-08 17:00:45.000000000 -0500
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
u8 apicid_2_node[MAX_APICID];
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
/*
* Trampoline 80x86 program as an array.
*/
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
- (still using the master per-cpu area), or a CPU doing it for a
- secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
- pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
- (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
- __per_cpu_offset[cpu], 0xFFFFF,
- 0x80 | DESCTYPE_S | 0x2, 0x8);
-
- per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
Index: BUILD-voyager/arch/i386/kernel/smpcommon.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ BUILD-voyager/arch/i386/kernel/smpcommon.c 2007-05-08 17:00:45.000000000 -0500
@@ -0,0 +1,79 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT. This is either the boot CPU doing itself
+ (still using the master per-cpu area), or a CPU doing it for a
+ secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+ (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+ __per_cpu_offset[cpu], 0xFFFFF,
+ 0x80 | DESCTYPE_S | 0x2, 0x8);
+
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ /* prevent preemption and reschedule on another processor */
+ int ret;
+ int me = get_cpu();
+ if (cpu == me) {
+ WARN_ON(1);
+ put_cpu();
+ return -EBUSY;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
Index: BUILD-voyager/include/asm-i386/processor.h
===================================================================
--- BUILD-voyager.orig/include/asm-i386/processor.h 2007-05-08 16:59:35.000000000 -0500
+++ BUILD-voyager/include/asm-i386/processor.h 2007-05-08 17:00:45.000000000 -0500
@@ -749,9 +749,13 @@ extern unsigned long boot_option_idle_ov
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
+extern void init_gdt(int cpu);
extern int force_mwait;
[-- Attachment #3: i386-fix-voyager-build.patch --]
[-- Type: message/rfc822, Size: 7680 bytes --]
From: Jeremy Fitzhardinge <jeremy@xensource.com>
Subject: i386: fix voyager build
Date: Mon, 14 May 2007 17:42:35 -0500
Message-ID: <1179182555.3703.104.camel@mulgrave.il.steeleye.com>
This adds an smp_ops for voyager, and hooks things up appropriately.
This is the first baby-step to making subarch runtime switchable.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
---
arch/i386/mach-voyager/voyager_smp.c | 73 ++++++++++++++++++++--------------
1 file changed, 44 insertions(+), 29 deletions(-)
===================================================================
Index: BUILD-voyager/arch/i386/mach-voyager/voyager_smp.c
===================================================================
--- BUILD-voyager.orig/arch/i386/mach-voyager/voyager_smp.c 2007-05-08 17:00:34.000000000 -0500
+++ BUILD-voyager/arch/i386/mach-voyager/voyager_smp.c 2007-05-08 17:00:53.000000000 -0500
@@ -27,7 +27,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
-#include <asm/pda.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,7 +421,7 @@ find_smp_config(void)
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
- write_pda(cpu_number, boot_cpu_id);
+ x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
@@ -435,7 +434,7 @@ smp_store_cpu_info(int id)
*c = boot_cpu_data;
- identify_cpu(c);
+ identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
@@ -459,7 +458,7 @@ start_secondary(void *unused)
/* external functions not defined in the headers */
extern void calibrate_delay(void);
- secondary_cpu_init();
+ cpu_init();
/* OK, we're in the routine */
ack_CPI(VIC_CPU_BOOT_CPI);
@@ -572,7 +571,9 @@ do_boot_cpu(__u8 cpu)
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
- init_gdt(cpu, idle);
+ init_gdt(cpu);
+ per_cpu(current_task, cpu) = idle;
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
irq_ctx_init(cpu);
/* Note: Don't modify initial ss override */
@@ -859,8 +860,8 @@ smp_invalidate_interrupt(void)
/* This routine is called with a physical cpu mask */
static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
{
int stuck = 50000;
@@ -912,7 +913,7 @@ flush_tlb_current_task(void)
cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
local_flush_tlb();
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -934,7 +935,7 @@ flush_tlb_mm (struct mm_struct * mm)
leave_mm(smp_processor_id());
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
@@ -955,7 +956,7 @@ void flush_tlb_page(struct vm_area_struc
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, va);
+ voyager_flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
@@ -1044,11 +1045,12 @@ smp_call_function_interrupt(void)
}
static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
- int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask, void (*func) (void *info),
+ void *info, int wait)
{
struct call_data_struct data;
+ u32 mask = cpus_addr(cpumask)[0];
mask &= ~(1<<smp_processor_id());
if (!mask)
@@ -1083,47 +1085,6 @@ __smp_call_function_mask (void (*func) (
return 0;
}
-/* Call this function on all CPUs using the function_interrupt above
- <func> The function to run. This must be fast and non-blocking.
- <info> An arbitrary pointer to pass to the function.
- <retry> If true, keep retrying until ready.
- <wait> If true, wait until function has completed on other CPUs.
- [RETURNS] 0 on success, else a negative status code. Does not return until
- remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function(void (*func) (void *info), void *info, int retry,
- int wait)
-{
- __u32 mask = cpus_addr(cpu_online_map)[0];
-
- return __smp_call_function_mask(func, info, retry, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- __u32 mask = 1 << cpu;
-
- return __smp_call_function_mask(func, info, nonatomic, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
/* Sorry about the name. In an APIC based system, the APICs
* themselves are programmed to send a timer interrupt. This is used
* by linux to reschedule the processor. Voyager doesn't have this,
@@ -1237,8 +1198,8 @@ smp_alloc_memory(void)
}
/* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
{
send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
@@ -1267,8 +1228,8 @@ safe_smp_processor_id(void)
}
/* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
{
smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
@@ -1930,23 +1891,26 @@ smp_voyager_power_off(void *dummy)
smp_stop_cpu_function(NULL);
}
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
{
/* FIXME: ignore max_cpus for now */
smp_boot_cpus();
}
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
{
+ init_gdt(smp_processor_id());
+ switch_to_new_gdt();
+
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
cpu_set(smp_processor_id(), cpu_present_map);
}
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
{
/* This only works at boot for x86. See "rewrite" above. */
if (cpu_isset(cpu, smp_commenced_mask))
@@ -1962,8 +1926,8 @@ __cpu_up(unsigned int cpu)
return 0;
}
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
{
zap_low_mappings();
}
@@ -1972,5 +1936,16 @@ void __init
smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- write_pda(cpu_number, hard_smp_processor_id());
+ x86_write_percpu(cpu_number, hard_smp_processor_id());
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = voyager_smp_prepare_cpus,
+ .cpu_up = voyager_cpu_up,
+ .smp_cpus_done = voyager_smp_cpus_done,
+
+ .smp_send_stop = voyager_smp_send_stop,
+ .smp_send_reschedule = voyager_smp_send_reschedule,
+ .smp_call_function_mask = voyager_smp_call_function_mask,
+};
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 22:27 ` James Bottomley
@ 2007-05-14 22:55 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 22:55 UTC (permalink / raw)
To: James Bottomley
Cc: Andrew Morton, Linus Torvalds, linux-kernel, Andi Kleen,
Eric W. Biederman
James Bottomley wrote:
> Seriously, though, although I tolerate voyager breakage through the
> merge window, I actively try to clean it up and have a working voyager
> for the next release. There's no real excuse for not fixing build
> breakage. How it's done, I don't really care. The only blocker for the
> two patch smp consolidation + voyager fix was Andi ... if he's relaxing
> that, then we can go that route.
>
> Until this is fixed, there are going to be -rc testers firing off build
> failure reports, which I think we'd all like to
Sure. I'm just surprised this is causing so much fuss, since I thought
we'd resolved it all last month.
J
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [VOYAGER] fix build broken by shift to smp_ops
2007-05-14 22:42 ` James Bottomley
@ 2007-05-14 22:57 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2007-05-14 22:57 UTC (permalink / raw)
To: James Bottomley
Cc: Andrew Morton, Andi Kleen, Linus Torvalds, linux-kernel,
Eric W. Biederman
James Bottomley wrote:
> Let me do it.
>
> These are the two patches, tested and working on Voyager.
>
> The order of application is
>
> i386-common-smp.patch
> i386-fix-voyager-build.patch
>
Yep, looks fine to me. The only difference from the ones I just posted
appears to be a little bit of whitespace.
J
^ permalink raw reply [flat|nested] 22+ messages in thread
end of thread, other threads:[~2007-05-14 22:57 UTC | newest]
Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-05-14 16:08 [VOYAGER] fix build broken by shift to smp_ops James Bottomley
2007-05-14 17:10 ` Jeremy Fitzhardinge
2007-05-14 17:22 ` James Bottomley
2007-05-14 19:03 ` Jeremy Fitzhardinge
2007-05-14 20:09 ` Andi Kleen
2007-05-14 19:59 ` Andrew Morton
2007-05-14 20:02 ` Jeremy Fitzhardinge
2007-05-14 20:37 ` Andrew Morton
2007-05-14 20:48 ` Jeremy Fitzhardinge
2007-05-14 20:54 ` James Bottomley
2007-05-14 21:05 ` Andrew Morton
2007-05-14 21:08 ` James Bottomley
2007-05-14 21:10 ` Jeremy Fitzhardinge
2007-05-14 21:51 ` Andrew Morton
2007-05-14 22:00 ` Jeremy Fitzhardinge
2007-05-14 22:27 ` James Bottomley
2007-05-14 22:55 ` Jeremy Fitzhardinge
2007-05-14 21:58 ` Andi Kleen
2007-05-14 22:29 ` Andrew Morton
2007-05-14 22:40 ` Jeremy Fitzhardinge
2007-05-14 22:42 ` James Bottomley
2007-05-14 22:57 ` Jeremy Fitzhardinge
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox