* [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-21 17:36 ` Dave Jones
@ 2003-02-22 3:23 ` Thomas Schlichter
0 siblings, 0 replies; 14+ messages in thread
From: Thomas Schlichter @ 2003-02-22 3:23 UTC (permalink / raw)
To: Dave Jones; +Cc: Hugh Dickins, Andrew Morton, Linux Kernel
[-- Attachment #1.1: body text --]
[-- Type: text/plain, Size: 567 bytes --]
On Fri, Feb 21, 2003 at 18:36, Dave Jones wrote:
> Ok, here's a first stab at an implementation. Compiles, but is untested..
> Fixes up a few preemption races Thomas highlighted, and converts
> a few smp_call_function() users over to on_each_cpu(), which
> saves quite a bit of code.
Your patch was really fine, I just modified it a bit and fixed some more
preempt-issues with the smp_call_function() calls. It compiles and works
with no problems so far... I hope I did not make any big mistakes... ;-)
I hope you like this one, too...
Thomas
[-- Attachment #1.2: preempt_fix.patch --]
[-- Type: text/x-diff, Size: 35354 bytes --]
diff -urP linux-2.5.62/arch/alpha/kernel/process.c linux-2.5.62_patched/arch/alpha/kernel/process.c
--- linux-2.5.62/arch/alpha/kernel/process.c Mon Feb 17 23:56:54 2003
+++ linux-2.5.62_patched/arch/alpha/kernel/process.c Sat Feb 22 02:02:17 2003
@@ -155,10 +155,7 @@
struct halt_info args;
args.mode = mode;
args.restart_cmd = restart_cmd;
-#ifdef CONFIG_SMP
- smp_call_function(common_shutdown_1, &args, 1, 0);
-#endif
- common_shutdown_1(&args);
+ on_each_cpu(common_shutdown_1, &args, 1, 0);
}
void
diff -urP linux-2.5.62/arch/alpha/kernel/smp.c linux-2.5.62_patched/arch/alpha/kernel/smp.c
--- linux-2.5.62/arch/alpha/kernel/smp.c Mon Feb 17 23:56:10 2003
+++ linux-2.5.62_patched/arch/alpha/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -899,10 +899,8 @@
smp_imb(void)
{
/* Must wait other processors to flush their icache before continue. */
- if (smp_call_function(ipi_imb, NULL, 1, 1))
+ if (on_each_cpu(ipi_imb, NULL, 1, 1))
printk(KERN_CRIT "smp_imb: timed out\n");
-
- imb();
}
static void
@@ -916,11 +914,9 @@
{
/* Although we don't have any data to pass, we do want to
synchronize with the other processors. */
- if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
+ if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
printk(KERN_CRIT "flush_tlb_all: timed out\n");
}
-
- tbia();
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -938,6 +934,8 @@
void
flush_tlb_mm(struct mm_struct *mm)
{
+ preempt_disable();
+
if (mm == current->active_mm) {
flush_tlb_current(mm);
if (atomic_read(&mm->mm_users) <= 1) {
@@ -948,6 +946,7 @@
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
@@ -955,6 +954,8 @@
if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
printk(KERN_CRIT "flush_tlb_mm: timed out\n");
}
+
+ preempt_enable();
}
struct flush_tlb_page_struct {
@@ -981,6 +982,8 @@
struct flush_tlb_page_struct data;
struct mm_struct *mm = vma->vm_mm;
+ preempt_disable();
+
if (mm == current->active_mm) {
flush_tlb_current_page(mm, vma, addr);
if (atomic_read(&mm->mm_users) <= 1) {
@@ -991,6 +994,7 @@
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
@@ -1002,6 +1006,8 @@
if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
printk(KERN_CRIT "flush_tlb_page: timed out\n");
}
+
+ preempt_enable();
}
void
@@ -1030,6 +1036,8 @@
if ((vma->vm_flags & VM_EXEC) == 0)
return;
+ preempt_disable();
+
if (mm == current->active_mm) {
__load_new_mm_context(mm);
if (atomic_read(&mm->mm_users) <= 1) {
@@ -1040,6 +1048,7 @@
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
@@ -1047,6 +1056,8 @@
if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
printk(KERN_CRIT "flush_icache_page: timed out\n");
}
+
+ preempt_enable();
}
\f
#ifdef CONFIG_DEBUG_SPINLOCK
diff -urP linux-2.5.62/arch/i386/kernel/cpuid.c linux-2.5.62_patched/arch/i386/kernel/cpuid.c
--- linux-2.5.62/arch/i386/kernel/cpuid.c Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/arch/i386/kernel/cpuid.c Sat Feb 22 02:02:17 2003
@@ -44,8 +44,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-#ifdef CONFIG_SMP
-
struct cpuid_command {
int cpu;
u32 reg;
@@ -64,24 +62,12 @@
{
struct cpuid_command cmd;
- if ( cpu == smp_processor_id() ) {
- cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
- } else {
- cmd.cpu = cpu;
- cmd.reg = reg;
- cmd.data = data;
-
- smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
- }
-}
-#else /* ! CONFIG_SMP */
-
-static inline void do_cpuid(int cpu, u32 reg, u32 *data)
-{
- cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+ cmd.data = data;
+
+ on_each_cpu(cpuid_smp_cpuid, &cmd, 1, 1);
}
-
-#endif /* ! CONFIG_SMP */
static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
{
diff -urP linux-2.5.62/arch/i386/kernel/io_apic.c linux-2.5.62_patched/arch/i386/kernel/io_apic.c
--- linux-2.5.62/arch/i386/kernel/io_apic.c Mon Feb 17 23:56:10 2003
+++ linux-2.5.62_patched/arch/i386/kernel/io_apic.c Sat Feb 22 02:02:17 2003
@@ -1376,8 +1376,7 @@
void print_all_local_APICs (void)
{
- smp_call_function(print_local_APIC, NULL, 1, 1);
- print_local_APIC(NULL);
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
}
void /*__init*/ print_PIC(void)
@@ -1855,8 +1854,7 @@
*/
printk(KERN_INFO "activating NMI Watchdog ...");
- smp_call_function(enable_NMI_through_LVT0, NULL, 1, 1);
- enable_NMI_through_LVT0(NULL);
+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
printk(" done.\n");
}
diff -urP linux-2.5.62/arch/i386/kernel/ldt.c linux-2.5.62_patched/arch/i386/kernel/ldt.c
--- linux-2.5.62/arch/i386/kernel/ldt.c Mon Feb 17 23:56:25 2003
+++ linux-2.5.62_patched/arch/i386/kernel/ldt.c Sat Feb 22 02:02:17 2003
@@ -55,13 +55,13 @@
wmb();
if (reload) {
+ preempt_disable();
load_LDT(pc);
#ifdef CONFIG_SMP
- preempt_disable();
if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
- preempt_enable();
#endif
+ preempt_enable();
}
if (oldsize) {
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -urP linux-2.5.62/arch/i386/kernel/microcode.c linux-2.5.62_patched/arch/i386/kernel/microcode.c
--- linux-2.5.62/arch/i386/kernel/microcode.c Mon Feb 17 23:56:02 2003
+++ linux-2.5.62_patched/arch/i386/kernel/microcode.c Sat Feb 22 02:02:17 2003
@@ -183,11 +183,8 @@
int i, error = 0, err;
struct microcode *m;
- if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
- printk(KERN_ERR "microcode: IPI timeout, giving up\n");
+ if (on_each_cpu(do_update_one, NULL, 1, 1) != 0)
return -EIO;
- }
- do_update_one(NULL);
for (i=0; i<NR_CPUS; i++) {
err = update_req[i].err;
diff -urP linux-2.5.62/arch/i386/kernel/msr.c linux-2.5.62_patched/arch/i386/kernel/msr.c
--- linux-2.5.62/arch/i386/kernel/msr.c Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/arch/i386/kernel/msr.c Sat Feb 22 02:02:17 2003
@@ -116,36 +116,28 @@
{
struct msr_command cmd;
- if ( cpu == smp_processor_id() ) {
- return wrmsr_eio(reg, eax, edx);
- } else {
- cmd.cpu = cpu;
- cmd.reg = reg;
- cmd.data[0] = eax;
- cmd.data[1] = edx;
-
- smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
- return cmd.err;
- }
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+ cmd.data[0] = eax;
+ cmd.data[1] = edx;
+
+ on_each_cpu(msr_smp_wrmsr, &cmd, 1, 1);
+ return cmd.err;
}
static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
{
struct msr_command cmd;
- if ( cpu == smp_processor_id() ) {
- return rdmsr_eio(reg, eax, edx);
- } else {
- cmd.cpu = cpu;
- cmd.reg = reg;
-
- smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
-
- *eax = cmd.data[0];
- *edx = cmd.data[1];
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+
+ on_each_cpu(msr_smp_rdmsr, &cmd, 1, 1);
+
+ *eax = cmd.data[0];
+ *edx = cmd.data[1];
- return cmd.err;
- }
+ return cmd.err;
}
#else /* ! CONFIG_SMP */
diff -urP linux-2.5.62/arch/i386/kernel/smp.c linux-2.5.62_patched/arch/i386/kernel/smp.c
--- linux-2.5.62/arch/i386/kernel/smp.c Mon Feb 17 23:55:52 2003
+++ linux-2.5.62_patched/arch/i386/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -436,7 +436,7 @@
preempt_enable();
}
-static inline void do_flush_tlb_all_local(void)
+void do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
@@ -445,18 +445,9 @@
leave_mm(cpu);
}
-static void flush_tlb_all_ipi(void* info)
-{
- do_flush_tlb_all_local();
-}
-
void flush_tlb_all(void)
{
- preempt_disable();
- smp_call_function (flush_tlb_all_ipi,0,1,1);
-
- do_flush_tlb_all_local();
- preempt_enable();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/*
diff -urP linux-2.5.62/arch/i386/kernel/sysenter.c linux-2.5.62_patched/arch/i386/kernel/sysenter.c
--- linux-2.5.62/arch/i386/kernel/sysenter.c Mon Feb 17 23:57:19 2003
+++ linux-2.5.62_patched/arch/i386/kernel/sysenter.c Sat Feb 22 02:02:17 2003
@@ -95,8 +95,7 @@
return 0;
memcpy((void *) page, sysent, sizeof(sysent));
- enable_sep_cpu(NULL);
- smp_call_function(enable_sep_cpu, NULL, 1, 1);
+ on_each_cpu(enable_sep_cpu, NULL, 1, 1);
return 0;
}
diff -urP linux-2.5.62/arch/i386/mach-voyager/voyager_smp.c linux-2.5.62_patched/arch/i386/mach-voyager/voyager_smp.c
--- linux-2.5.62/arch/i386/mach-voyager/voyager_smp.c Mon Feb 17 23:56:12 2003
+++ linux-2.5.62_patched/arch/i386/mach-voyager/voyager_smp.c Sat Feb 22 02:02:17 2003
@@ -1209,8 +1209,8 @@
smp_call_function_interrupt();
}
-static inline void
-do_flush_tlb_all_local(void)
+void
+do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
@@ -1220,19 +1220,11 @@
}
-static void
-flush_tlb_all_function(void* info)
-{
- do_flush_tlb_all_local();
-}
-
/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
- smp_call_function (flush_tlb_all_function, 0, 1, 1);
-
- do_flush_tlb_all_local();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/* used to set up the trampoline for other CPUs when the memory manager
diff -urP linux-2.5.62/arch/i386/mm/pageattr.c linux-2.5.62_patched/arch/i386/mm/pageattr.c
--- linux-2.5.62/arch/i386/mm/pageattr.c Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/arch/i386/mm/pageattr.c Sat Feb 22 02:02:17 2003
@@ -130,11 +130,8 @@
}
static inline void flush_map(void)
-{
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, NULL, 1, 1);
-#endif
- flush_kernel_map(NULL);
+{
+ on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
struct deferred_page {
diff -urP linux-2.5.62/arch/i386/oprofile/nmi_int.c linux-2.5.62_patched/arch/i386/oprofile/nmi_int.c
--- linux-2.5.62/arch/i386/oprofile/nmi_int.c Mon Feb 17 23:56:59 2003
+++ linux-2.5.62_patched/arch/i386/oprofile/nmi_int.c Sat Feb 22 02:02:17 2003
@@ -95,8 +95,7 @@
* without actually triggering any NMIs as this will
* break the core code horrifically.
*/
- smp_call_function(nmi_cpu_setup, NULL, 0, 1);
- nmi_cpu_setup(0);
+ on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
set_nmi_callback(nmi_callback);
oprofile_pmdev = set_nmi_pm_callback(oprofile_pm_callback);
return 0;
@@ -148,8 +147,7 @@
{
unset_nmi_pm_callback(oprofile_pmdev);
unset_nmi_callback();
- smp_call_function(nmi_cpu_shutdown, NULL, 0, 1);
- nmi_cpu_shutdown(0);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
}
@@ -162,8 +160,7 @@
static int nmi_start(void)
{
- smp_call_function(nmi_cpu_start, NULL, 0, 1);
- nmi_cpu_start(0);
+ on_each_cpu(nmi_cpu_start, NULL, 0, 1);
return 0;
}
@@ -177,8 +174,7 @@
static void nmi_stop(void)
{
- smp_call_function(nmi_cpu_stop, NULL, 0, 1);
- nmi_cpu_stop(0);
+ on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}
diff -urP linux-2.5.62/arch/ia64/kernel/smp.c linux-2.5.62_patched/arch/ia64/kernel/smp.c
--- linux-2.5.62/arch/ia64/kernel/smp.c Mon Feb 17 23:57:19 2003
+++ linux-2.5.62_patched/arch/ia64/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -206,18 +206,18 @@
void
smp_flush_tlb_all (void)
{
- smp_call_function((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
- local_flush_tlb_all();
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
}
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
- local_finish_flush_tlb_mm(mm);
-
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+ {
+ local_finish_flush_tlb_mm(mm);
return;
+ }
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
@@ -226,7 +226,7 @@
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
- smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+ on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
/*
diff -urP linux-2.5.62/arch/mips64/kernel/smp.c linux-2.5.62_patched/arch/mips64/kernel/smp.c
--- linux-2.5.62/arch/mips64/kernel/smp.c Mon Feb 17 23:56:27 2003
+++ linux-2.5.62_patched/arch/mips64/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -195,8 +195,7 @@
void flush_tlb_all(void)
{
- smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
- _flush_tlb_all();
+ on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -219,6 +218,8 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ preempt_disable();
+
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
} else {
@@ -228,6 +229,8 @@
CPU_CONTEXT(i, mm) = 0;
}
_flush_tlb_mm(mm);
+
+ preempt_enable();
}
struct flush_tlb_data {
@@ -246,6 +249,8 @@
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
+ preempt_disable();
+
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd;
@@ -260,6 +265,8 @@
CPU_CONTEXT(i, mm) = 0;
}
_flush_tlb_range(mm, start, end);
+
+ preempt_enable();
}
static void flush_tlb_page_ipi(void *info)
@@ -271,6 +278,8 @@
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ preempt_disable();
+
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
@@ -284,5 +293,7 @@
CPU_CONTEXT(i, vma->vm_mm) = 0;
}
_flush_tlb_page(vma, page);
+
+ preempt_enable();
}
diff -urP linux-2.5.62/arch/parisc/kernel/cache.c linux-2.5.62_patched/arch/parisc/kernel/cache.c
--- linux-2.5.62/arch/parisc/kernel/cache.c Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/arch/parisc/kernel/cache.c Sat Feb 22 02:02:17 2003
@@ -39,8 +39,7 @@
void
flush_data_cache(void)
{
- smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
- flush_data_cache_local();
+ on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
#endif
diff -urP linux-2.5.62/arch/parisc/kernel/irq.c linux-2.5.62_patched/arch/parisc/kernel/irq.c
--- linux-2.5.62/arch/parisc/kernel/irq.c Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/arch/parisc/kernel/irq.c Sat Feb 22 02:02:17 2003
@@ -61,20 +61,17 @@
static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED; /* protect IRQ regions */
-#ifdef CONFIG_SMP
static void cpu_set_eiem(void *info)
{
set_eiem((unsigned long) info);
}
-#endif
static inline void disable_cpu_irq(void *unused, int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem &= ~eirr_bit;
- set_eiem(cpu_eiem);
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
static void enable_cpu_irq(void *unused, int irq)
@@ -83,8 +80,7 @@
mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
- set_eiem(cpu_eiem);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
/* mask and disable are the same at the CPU level
@@ -100,8 +96,7 @@
** handle *any* unmasked pending interrupts.
** ie We don't need to check for pending interrupts here.
*/
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
- set_eiem(cpu_eiem);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
/*
diff -urP linux-2.5.62/arch/parisc/kernel/smp.c linux-2.5.62_patched/arch/parisc/kernel/smp.c
--- linux-2.5.62/arch/parisc/kernel/smp.c Mon Feb 17 23:57:18 2003
+++ linux-2.5.62_patched/arch/parisc/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -401,7 +401,7 @@
__setup("maxcpus=", maxcpus);
/*
- * Flush all other CPU's tlb and then mine. Do this with smp_call_function()
+ * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
* as we want to ensure all TLB's flushed before proceeding.
*/
@@ -410,8 +410,7 @@
void
smp_flush_tlb_all(void)
{
- smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
- flush_tlb_all_local();
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}
diff -urP linux-2.5.62/arch/parisc/mm/init.c linux-2.5.62_patched/arch/parisc/mm/init.c
--- linux-2.5.62/arch/parisc/mm/init.c Mon Feb 17 23:56:14 2003
+++ linux-2.5.62_patched/arch/parisc/mm/init.c Sat Feb 22 02:02:17 2003
@@ -974,8 +974,7 @@
do_recycle++;
}
spin_unlock(&sid_lock);
- smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
- flush_tlb_all_local();
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
if (do_recycle) {
spin_lock(&sid_lock);
recycle_sids(recycle_ndirty,recycle_dirty_array);
diff -urP linux-2.5.62/arch/ppc/kernel/temp.c linux-2.5.62_patched/arch/ppc/kernel/temp.c
--- linux-2.5.62/arch/ppc/kernel/temp.c Mon Feb 17 23:56:16 2003
+++ linux-2.5.62_patched/arch/ppc/kernel/temp.c Sat Feb 22 02:02:17 2003
@@ -194,10 +194,7 @@
/* schedule ourselves to be run again */
mod_timer(&tau_timer, jiffies + shrink_timer) ;
-#ifdef CONFIG_SMP
- smp_call_function(tau_timeout, NULL, 1, 0);
-#endif
- tau_timeout(NULL);
+ on_each_cpu(tau_timeout, NULL, 1, 0);
}
/*
@@ -239,10 +236,7 @@
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
-#ifdef CONFIG_SMP
- smp_call_function(TAU_init_smp, NULL, 1, 0);
-#endif
- TAU_init_smp(NULL);
+ on_each_cpu(TAU_init_smp, NULL, 1, 0);
printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
diff -urP linux-2.5.62/arch/s390/kernel/smp.c linux-2.5.62_patched/arch/s390/kernel/smp.c
--- linux-2.5.62/arch/s390/kernel/smp.c Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/arch/s390/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -228,8 +228,7 @@
void machine_restart_smp(char * __unused)
{
cpu_restart_map = cpu_online_map;
- smp_call_function(do_machine_restart, NULL, 0, 0);
- do_machine_restart(NULL);
+ on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
@@ -247,8 +246,7 @@
void machine_halt_smp(void)
{
- smp_call_function(do_machine_halt, NULL, 0, 0);
- do_machine_halt(NULL);
+ on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
@@ -266,8 +264,7 @@
void machine_power_off_smp(void)
{
- smp_call_function(do_machine_power_off, NULL, 0, 0);
- do_machine_power_off(NULL);
+ on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
@@ -339,8 +336,7 @@
void smp_ptlb_all(void)
{
- smp_call_function(smp_ptlb_callback, NULL, 0, 1);
- local_flush_tlb();
+ on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
/*
@@ -400,8 +396,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = 0xFFFFFFFF;
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_set_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
@@ -414,8 +409,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 0x00000000;
parms.andvals[cr] = ~(1 << bit);
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_clear_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
diff -urP linux-2.5.62/arch/s390x/kernel/smp.c linux-2.5.62_patched/arch/s390x/kernel/smp.c
--- linux-2.5.62/arch/s390x/kernel/smp.c Mon Feb 17 23:56:15 2003
+++ linux-2.5.62_patched/arch/s390x/kernel/smp.c Sat Feb 22 02:02:17 2003
@@ -227,8 +227,7 @@
void machine_restart_smp(char * __unused)
{
cpu_restart_map = cpu_online_map;
- smp_call_function(do_machine_restart, NULL, 0, 0);
- do_machine_restart(NULL);
+ on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
@@ -246,8 +245,7 @@
void machine_halt_smp(void)
{
- smp_call_function(do_machine_halt, NULL, 0, 0);
- do_machine_halt(NULL);
+ on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
@@ -265,8 +263,7 @@
void machine_power_off_smp(void)
{
- smp_call_function(do_machine_power_off, NULL, 0, 0);
- do_machine_power_off(NULL);
+ on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
@@ -383,8 +380,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = -1L;
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_set_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
@@ -397,8 +393,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 0;
parms.andvals[cr] = ~(1L << bit);
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_clear_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
diff -urP linux-2.5.62/arch/x86_64/kernel/bluesmoke.c linux-2.5.62_patched/arch/x86_64/kernel/bluesmoke.c
--- linux-2.5.62/arch/x86_64/kernel/bluesmoke.c Mon Feb 17 23:56:55 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/bluesmoke.c Sat Feb 22 02:02:18 2003
@@ -111,11 +111,7 @@
{
u32 low, high;
int i;
- unsigned int *cpu = info;
- BUG_ON (*cpu != smp_processor_id());
-
- preempt_disable();
for (i=0; i<banks; i++) {
rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
@@ -130,20 +126,12 @@
wmb();
}
}
- preempt_enable();
}
static void mce_timerfunc (unsigned long data)
{
- unsigned int i;
-
- for (i=0; i<smp_num_cpus; i++) {
- if (i == smp_processor_id())
- mce_checkregs(&i);
- else
- smp_call_function (mce_checkregs, &i, 1, 1);
- }
+ on_each_cpu (mce_checkregs, NULL, 1, 1);
/* Refresh the timer. */
mce_timer.expires = jiffies + MCE_RATE;
diff -urP linux-2.5.62/arch/x86_64/kernel/cpuid.c linux-2.5.62_patched/arch/x86_64/kernel/cpuid.c
--- linux-2.5.62/arch/x86_64/kernel/cpuid.c Mon Feb 17 23:56:02 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/cpuid.c Sat Feb 22 02:02:18 2003
@@ -63,16 +63,12 @@
static inline void do_cpuid(int cpu, u32 reg, u32 *data)
{
struct cpuid_command cmd;
-
- if ( cpu == smp_processor_id() ) {
- cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
- } else {
- cmd.cpu = cpu;
- cmd.reg = reg;
- cmd.data = data;
-
- smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
- }
+
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+ cmd.data = data;
+
+ on_each_cpu(cpuid_smp_cpuid, &cmd, 1, 1);
}
#else /* ! CONFIG_SMP */
diff -urP linux-2.5.62/arch/x86_64/kernel/io_apic.c linux-2.5.62_patched/arch/x86_64/kernel/io_apic.c
--- linux-2.5.62/arch/x86_64/kernel/io_apic.c Mon Feb 17 23:56:15 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/io_apic.c Sat Feb 22 02:02:18 2003
@@ -926,8 +926,7 @@
void print_all_local_APICs (void)
{
- smp_call_function(print_local_APIC, NULL, 1, 1);
- print_local_APIC(NULL);
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
}
void /*__init*/ print_PIC(void)
diff -urP linux-2.5.62/arch/x86_64/kernel/ldt.c linux-2.5.62_patched/arch/x86_64/kernel/ldt.c
--- linux-2.5.62/arch/x86_64/kernel/ldt.c Mon Feb 17 23:56:58 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/ldt.c Sat Feb 22 02:02:18 2003
@@ -60,13 +60,13 @@
pc->size = mincount;
wmb();
if (reload) {
+ preempt_disable();
load_LDT(pc);
#ifdef CONFIG_SMP
- preempt_disable();
if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
- preempt_enable();
#endif
+ preempt_enable();
}
if (oldsize) {
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -urP linux-2.5.62/arch/x86_64/kernel/msr.c linux-2.5.62_patched/arch/x86_64/kernel/msr.c
--- linux-2.5.62/arch/x86_64/kernel/msr.c Mon Feb 17 23:55:57 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/msr.c Sat Feb 22 02:02:18 2003
@@ -120,36 +120,28 @@
{
struct msr_command cmd;
- if ( cpu == smp_processor_id() ) {
- return wrmsr_eio(reg, eax, edx);
- } else {
- cmd.cpu = cpu;
- cmd.reg = reg;
- cmd.data[0] = eax;
- cmd.data[1] = edx;
-
- smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
- return cmd.err;
- }
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+ cmd.data[0] = eax;
+ cmd.data[1] = edx;
+
+ on_each_cpu(msr_smp_wrmsr, &cmd, 1, 1);
+ return cmd.err;
}
static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
{
struct msr_command cmd;
- if ( cpu == smp_processor_id() ) {
- return rdmsr_eio(reg, eax, edx);
- } else {
- cmd.cpu = cpu;
- cmd.reg = reg;
-
- smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
-
- *eax = cmd.data[0];
- *edx = cmd.data[1];
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+
+ on_each_cpu(msr_smp_rdmsr, &cmd, 1, 1);
+
+ *eax = cmd.data[0];
+ *edx = cmd.data[1];
- return cmd.err;
- }
+ return cmd.err;
}
#else /* ! CONFIG_SMP */
diff -urP linux-2.5.62/arch/x86_64/kernel/smp.c linux-2.5.62_patched/arch/x86_64/kernel/smp.c
--- linux-2.5.62/arch/x86_64/kernel/smp.c Mon Feb 17 23:56:09 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/smp.c Sat Feb 22 02:02:18 2003
@@ -328,7 +328,7 @@
preempt_enable();
}
-static inline void do_flush_tlb_all_local(void)
+void do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
@@ -337,16 +337,9 @@
leave_mm(cpu);
}
-static void flush_tlb_all_ipi(void* info)
-{
- do_flush_tlb_all_local();
-}
-
void flush_tlb_all(void)
{
- smp_call_function (flush_tlb_all_ipi,0,1,1);
-
- do_flush_tlb_all_local();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
void smp_kdb_stop(void)
diff -urP linux-2.5.62/arch/x86_64/mm/pageattr.c linux-2.5.62_patched/arch/x86_64/mm/pageattr.c
--- linux-2.5.62/arch/x86_64/mm/pageattr.c Mon Feb 17 23:56:14 2003
+++ linux-2.5.62_patched/arch/x86_64/mm/pageattr.c Sat Feb 22 02:02:18 2003
@@ -122,11 +122,8 @@
}
static inline void flush_map(unsigned long address)
-{
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, (void *)address, 1, 1);
-#endif
- flush_kernel_map((void *)address);
+{
+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
struct deferred_page {
diff -urP linux-2.5.62/drivers/char/agp/agp.h linux-2.5.62_patched/drivers/char/agp/agp.h
--- linux-2.5.62/drivers/char/agp/agp.h Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/drivers/char/agp/agp.h Sat Feb 22 02:02:18 2003
@@ -34,24 +34,10 @@
#define PFX "agpgart: "
-#ifdef CONFIG_SMP
-static void ipi_handler(void *null)
-{
- flush_agp_cache();
-}
-
-static void __attribute__((unused)) global_cache_flush(void)
-{
- if (smp_call_function(ipi_handler, NULL, 1, 1) != 0)
- panic(PFX "timed out waiting for the other CPUs!\n");
- flush_agp_cache();
-}
-#else
static inline void global_cache_flush(void)
{
- flush_agp_cache();
+ on_each_cpu(flush_agp_cache, NULL, 1, 1);
}
-#endif /* !CONFIG_SMP */
enum aper_size_type {
U8_APER_SIZE,
diff -urP linux-2.5.62/drivers/s390/char/sclp.c linux-2.5.62_patched/drivers/s390/char/sclp.c
--- linux-2.5.62/drivers/s390/char/sclp.c Mon Feb 17 23:56:20 2003
+++ linux-2.5.62_patched/drivers/s390/char/sclp.c Sat Feb 22 02:02:18 2003
@@ -481,8 +481,7 @@
do_machine_quiesce(void)
{
cpu_quiesce_map = cpu_online_map;
- smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
- do_load_quiesce_psw(NULL);
+ on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
diff -urP linux-2.5.62/drivers/s390/net/iucv.c linux-2.5.62_patched/drivers/s390/net/iucv.c
--- linux-2.5.62/drivers/s390/net/iucv.c Mon Feb 17 23:55:52 2003
+++ linux-2.5.62_patched/drivers/s390/net/iucv.c Sat Feb 22 02:02:18 2003
@@ -617,10 +617,7 @@
ulong b2f0_result = 0x0deadbeef;
iucv_debug(1, "entering");
- if (smp_processor_id() == 0)
- iucv_declare_buffer_cpu0(&b2f0_result);
- else
- smp_call_function(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
+ on_each_cpu(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
if (b2f0_result == 0x0deadbeef)
b2f0_result = 0xaa;
@@ -639,10 +636,7 @@
{
iucv_debug(1, "entering");
if (declare_flag) {
- if (smp_processor_id() == 0)
- iucv_retrieve_buffer_cpu0(0);
- else
- smp_call_function(iucv_retrieve_buffer_cpu0, 0, 0, 1);
+ on_each_cpu(iucv_retrieve_buffer_cpu0, 0, 0, 1);
declare_flag = 0;
}
iucv_debug(1, "exiting");
diff -urP linux-2.5.62/fs/buffer.c linux-2.5.62_patched/fs/buffer.c
--- linux-2.5.62/fs/buffer.c Mon Feb 17 23:56:17 2003
+++ linux-2.5.62_patched/fs/buffer.c Sat Feb 22 02:02:18 2003
@@ -1404,10 +1404,7 @@
static void invalidate_bh_lrus(void)
{
- preempt_disable();
- invalidate_bh_lru(NULL);
- smp_call_function(invalidate_bh_lru, NULL, 1, 1);
- preempt_enable();
+ on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
void set_bh_page(struct buffer_head *bh,
diff -urP linux-2.5.62/include/asm-alpha/agp.h linux-2.5.62_patched/include/asm-alpha/agp.h
--- linux-2.5.62/include/asm-alpha/agp.h Mon Feb 17 23:55:50 2003
+++ linux-2.5.62_patched/include/asm-alpha/agp.h Sat Feb 22 02:02:18 2003
@@ -8,6 +8,10 @@
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_mappings()
-#define flush_agp_cache() mb()
+
+static void flush_agp_cache(void* info)
+{
+ mb();
+}
#endif
diff -urP linux-2.5.62/include/asm-i386/agp.h linux-2.5.62_patched/include/asm-i386/agp.h
--- linux-2.5.62/include/asm-i386/agp.h Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/include/asm-i386/agp.h Sat Feb 22 02:02:18 2003
@@ -18,6 +18,9 @@
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+static void flush_agp_cache(void *info)
+{
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+}
#endif
diff -urP linux-2.5.62/include/asm-ia64/agp.h linux-2.5.62_patched/include/asm-ia64/agp.h
--- linux-2.5.62/include/asm-ia64/agp.h Mon Feb 17 23:56:15 2003
+++ linux-2.5.62_patched/include/asm-ia64/agp.h Sat Feb 22 02:02:18 2003
@@ -16,7 +16,14 @@
#define map_page_into_agp(page) /* nothing */
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_mappings() /* nothing */
-#define flush_agp_cache() mb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+ need to be called for each cacheline of the whole page so it may not be
+ worth it. Would need a page for it. */
+static void flush_agp_cache(void* info)
+{
+ mb();
+}
/* Page-protection value to be used for AGP memory mapped into kernel space. */
#define PAGE_AGP PAGE_KERNEL
diff -urP linux-2.5.62/include/asm-parisc/cacheflush.h linux-2.5.62_patched/include/asm-parisc/cacheflush.h
--- linux-2.5.62/include/asm-parisc/cacheflush.h Mon Feb 17 23:57:01 2003
+++ linux-2.5.62_patched/include/asm-parisc/cacheflush.h Sat Feb 22 02:02:18 2003
@@ -25,16 +25,10 @@
extern void flush_cache_all_local(void);
-#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
- smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
- flush_cache_all_local();
+ on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
}
-#else
-#define flush_cache_all flush_cache_all_local
-#endif
-
/* The following value needs to be tuned and probably scaled with the
* cache size.
diff -urP linux-2.5.62/include/asm-sparc64/agp.h linux-2.5.62_patched/include/asm-sparc64/agp.h
--- linux-2.5.62/include/asm-sparc64/agp.h Mon Feb 17 23:55:55 2003
+++ linux-2.5.62_patched/include/asm-sparc64/agp.h Sat Feb 22 02:02:18 2003
@@ -6,6 +6,13 @@
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_mappings()
-#define flush_agp_cache() mb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+ need to be called for each cacheline of the whole page so it may not be
+ worth it. Would need a page for it. */
+static void flush_agp_cache(void* info)
+{
+ mb();
+}
#endif
diff -urP linux-2.5.62/include/asm-x86_64/agp.h linux-2.5.62_patched/include/asm-x86_64/agp.h
--- linux-2.5.62/include/asm-x86_64/agp.h Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/include/asm-x86_64/agp.h Sat Feb 22 02:02:18 2003
@@ -18,6 +18,9 @@
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+static void flush_agp_cache(void* info)
+{
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+}
#endif
diff -urP linux-2.5.62/include/linux/smp.h linux-2.5.62_patched/include/linux/smp.h
--- linux-2.5.62/include/linux/smp.h Mon Feb 17 23:56:16 2003
+++ linux-2.5.62_patched/include/linux/smp.h Sat Feb 22 02:06:57 2003
@@ -10,9 +10,10 @@
#ifdef CONFIG_SMP
+#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
-#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/smp.h>
#include <asm/bug.h>
@@ -54,6 +55,31 @@
int retry, int wait);
/*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ int ret = 0;
+
+ preempt_disable();
+
+ if(num_online_cpus() == 1)
+ goto only_one;
+
+ ret = smp_call_function(func, info, retry, wait);
+ if(ret != 0)
+ printk(KERN_ERR "%p: IPI timeout, giving up\n",
+ __builtin_return_address(0));
+
+only_one:
+ func(info);
+ preempt_enable();
+
+ return ret;
+}
+
+/*
* True once the per process idle is forked
*/
extern int smp_threads_ready;
@@ -96,6 +122,7 @@
#define hard_smp_processor_id() 0
#define smp_threads_ready 1
#define smp_call_function(func,info,retry,wait) ({ 0; })
+#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
static inline void smp_send_reschedule(int cpu) { }
static inline void smp_send_reschedule_all(void) { }
#define cpu_online_map 1
diff -urP linux-2.5.62/mm/slab.c linux-2.5.62_patched/mm/slab.c
--- linux-2.5.62/mm/slab.c Mon Feb 17 23:56:45 2003
+++ linux-2.5.62_patched/mm/slab.c Sat Feb 22 02:02:18 2003
@@ -1116,12 +1116,16 @@
static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
{
check_irq_on();
+ preempt_disable();
+
local_irq_disable();
func(arg);
local_irq_enable();
if (smp_call_function(func, arg, 1, 1))
BUG();
+
+ preempt_enable();
}
static void free_block (kmem_cache_t* cachep, void** objpp, int len);
[-- Attachment #2: signature --]
[-- Type: application/pgp-signature, Size: 189 bytes --]
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH][2.5] fix preempt-issues with smp_call_function()
@ 2003-02-25 18:08 Thomas Schlichter
2003-02-26 9:27 ` Andrew Morton
2003-02-26 10:37 ` Dave Jones
0 siblings, 2 replies; 14+ messages in thread
From: Thomas Schlichter @ 2003-02-25 18:08 UTC (permalink / raw)
To: Linus Torvalds; +Cc: Dave Jones, Hugh Dickins, Andrew Morton, Linux Kernel
[-- Attachment #1.1: body text --]
[-- Type: text/plain, Size: 429 bytes --]
Hello,
here is a patch to solve all (I hope I missed none) possible problems that
could occur on SMP machines running a preemptible kernel when
smp_call_function() calls a function which should also be executed on the
current processor.
This patch is based on the one Dave Jones sent to the LKML last friday and
applies to the linux kernel version 2.5.63.
Thank you for any response...
Thomas Schlichter
[-- Attachment #1.2: preempt_fix.patch --]
[-- Type: text/x-diff, Size: 26527 bytes --]
diff -urP linux-2.5.63/arch/alpha/kernel/process.c linux-2.5.63_patched/arch/alpha/kernel/process.c
--- linux-2.5.63/arch/alpha/kernel/process.c Mon Feb 24 20:05:42 2003
+++ linux-2.5.63_patched/arch/alpha/kernel/process.c Mon Feb 24 23:02:43 2003
@@ -155,10 +155,7 @@
struct halt_info args;
args.mode = mode;
args.restart_cmd = restart_cmd;
-#ifdef CONFIG_SMP
- smp_call_function(common_shutdown_1, &args, 1, 0);
-#endif
- common_shutdown_1(&args);
+ on_each_cpu(common_shutdown_1, &args, 1, 0);
}
void
diff -urP linux-2.5.63/arch/alpha/kernel/smp.c linux-2.5.63_patched/arch/alpha/kernel/smp.c
--- linux-2.5.63/arch/alpha/kernel/smp.c Mon Feb 24 20:05:14 2003
+++ linux-2.5.63_patched/arch/alpha/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -899,10 +899,8 @@
smp_imb(void)
{
/* Must wait other processors to flush their icache before continue. */
- if (smp_call_function(ipi_imb, NULL, 1, 1))
+ if (on_each_cpu(ipi_imb, NULL, 1, 1))
printk(KERN_CRIT "smp_imb: timed out\n");
-
- imb();
}
static void
@@ -916,11 +914,9 @@
{
/* Although we don't have any data to pass, we do want to
synchronize with the other processors. */
- if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
+ if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
printk(KERN_CRIT "flush_tlb_all: timed out\n");
}
-
- tbia();
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -938,6 +934,8 @@
void
flush_tlb_mm(struct mm_struct *mm)
{
+ preempt_disable();
+
if (mm == current->active_mm) {
flush_tlb_current(mm);
if (atomic_read(&mm->mm_users) <= 1) {
@@ -948,6 +946,7 @@
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
@@ -955,6 +954,8 @@
if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
printk(KERN_CRIT "flush_tlb_mm: timed out\n");
}
+
+ preempt_enable();
}
struct flush_tlb_page_struct {
@@ -981,6 +982,8 @@
struct flush_tlb_page_struct data;
struct mm_struct *mm = vma->vm_mm;
+ preempt_disable();
+
if (mm == current->active_mm) {
flush_tlb_current_page(mm, vma, addr);
if (atomic_read(&mm->mm_users) <= 1) {
@@ -991,6 +994,7 @@
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
@@ -1002,6 +1006,8 @@
if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
printk(KERN_CRIT "flush_tlb_page: timed out\n");
}
+
+ preempt_enable();
}
void
@@ -1030,6 +1036,8 @@
if ((vma->vm_flags & VM_EXEC) == 0)
return;
+ preempt_disable();
+
if (mm == current->active_mm) {
__load_new_mm_context(mm);
if (atomic_read(&mm->mm_users) <= 1) {
@@ -1040,6 +1048,7 @@
if (mm->context[cpu])
mm->context[cpu] = 0;
}
+ preempt_enable();
return;
}
}
@@ -1047,6 +1056,8 @@
if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
printk(KERN_CRIT "flush_icache_page: timed out\n");
}
+
+ preempt_enable();
}
\f
#ifdef CONFIG_DEBUG_SPINLOCK
diff -urP linux-2.5.63/arch/i386/kernel/io_apic.c linux-2.5.63_patched/arch/i386/kernel/io_apic.c
--- linux-2.5.63/arch/i386/kernel/io_apic.c Mon Feb 24 20:05:15 2003
+++ linux-2.5.63_patched/arch/i386/kernel/io_apic.c Mon Feb 24 23:02:43 2003
@@ -1376,8 +1376,7 @@
void print_all_local_APICs (void)
{
- smp_call_function(print_local_APIC, NULL, 1, 1);
- print_local_APIC(NULL);
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
}
void /*__init*/ print_PIC(void)
@@ -1843,8 +1842,7 @@
*/
printk(KERN_INFO "activating NMI Watchdog ...");
- smp_call_function(enable_NMI_through_LVT0, NULL, 1, 1);
- enable_NMI_through_LVT0(NULL);
+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
printk(" done.\n");
}
diff -urP linux-2.5.63/arch/i386/kernel/ldt.c linux-2.5.63_patched/arch/i386/kernel/ldt.c
--- linux-2.5.63/arch/i386/kernel/ldt.c Mon Feb 24 20:05:38 2003
+++ linux-2.5.63_patched/arch/i386/kernel/ldt.c Mon Feb 24 23:02:43 2003
@@ -55,13 +55,13 @@
wmb();
if (reload) {
+ preempt_disable();
load_LDT(pc);
#ifdef CONFIG_SMP
- preempt_disable();
if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
- preempt_enable();
#endif
+ preempt_enable();
}
if (oldsize) {
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -urP linux-2.5.63/arch/i386/kernel/microcode.c linux-2.5.63_patched/arch/i386/kernel/microcode.c
--- linux-2.5.63/arch/i386/kernel/microcode.c Mon Feb 24 20:05:12 2003
+++ linux-2.5.63_patched/arch/i386/kernel/microcode.c Mon Feb 24 23:02:32 2003
@@ -183,11 +183,10 @@
int i, error = 0, err;
struct microcode *m;
- if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
+ if (on_each_cpu(do_update_one, NULL, 1, 1) != 0) {
printk(KERN_ERR "microcode: IPI timeout, giving up\n");
return -EIO;
}
- do_update_one(NULL);
for (i=0; i<NR_CPUS; i++) {
err = update_req[i].err;
diff -urP linux-2.5.63/arch/i386/kernel/smp.c linux-2.5.63_patched/arch/i386/kernel/smp.c
--- linux-2.5.63/arch/i386/kernel/smp.c Mon Feb 24 20:05:06 2003
+++ linux-2.5.63_patched/arch/i386/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -436,7 +436,7 @@
preempt_enable();
}
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
@@ -445,18 +445,9 @@
leave_mm(cpu);
}
-static void flush_tlb_all_ipi(void* info)
-{
- do_flush_tlb_all_local();
-}
-
void flush_tlb_all(void)
{
- preempt_disable();
- smp_call_function (flush_tlb_all_ipi,0,1,1);
-
- do_flush_tlb_all_local();
- preempt_enable();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/*
diff -urP linux-2.5.63/arch/i386/kernel/sysenter.c linux-2.5.63_patched/arch/i386/kernel/sysenter.c
--- linux-2.5.63/arch/i386/kernel/sysenter.c Mon Feb 24 20:06:02 2003
+++ linux-2.5.63_patched/arch/i386/kernel/sysenter.c Mon Feb 24 23:02:43 2003
@@ -95,8 +95,7 @@
return 0;
memcpy((void *) page, sysent, sizeof(sysent));
- enable_sep_cpu(NULL);
- smp_call_function(enable_sep_cpu, NULL, 1, 1);
+ on_each_cpu(enable_sep_cpu, NULL, 1, 1);
return 0;
}
diff -urP linux-2.5.63/arch/i386/mach-voyager/voyager_smp.c linux-2.5.63_patched/arch/i386/mach-voyager/voyager_smp.c
--- linux-2.5.63/arch/i386/mach-voyager/voyager_smp.c Mon Feb 24 20:05:16 2003
+++ linux-2.5.63_patched/arch/i386/mach-voyager/voyager_smp.c Mon Feb 24 23:02:43 2003
@@ -1209,8 +1209,8 @@
smp_call_function_interrupt();
}
-static inline void
-do_flush_tlb_all_local(void)
+static void
+do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
@@ -1220,19 +1220,11 @@
}
-static void
-flush_tlb_all_function(void* info)
-{
- do_flush_tlb_all_local();
-}
-
/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
- smp_call_function (flush_tlb_all_function, 0, 1, 1);
-
- do_flush_tlb_all_local();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/* used to set up the trampoline for other CPUs when the memory manager
diff -urP linux-2.5.63/arch/i386/mm/pageattr.c linux-2.5.63_patched/arch/i386/mm/pageattr.c
--- linux-2.5.63/arch/i386/mm/pageattr.c Mon Feb 24 20:05:29 2003
+++ linux-2.5.63_patched/arch/i386/mm/pageattr.c Mon Feb 24 23:02:43 2003
@@ -130,11 +130,8 @@
}
static inline void flush_map(void)
-{
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, NULL, 1, 1);
-#endif
- flush_kernel_map(NULL);
+{
+ on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
struct deferred_page {
diff -urP linux-2.5.63/arch/i386/oprofile/nmi_int.c linux-2.5.63_patched/arch/i386/oprofile/nmi_int.c
--- linux-2.5.63/arch/i386/oprofile/nmi_int.c Mon Feb 24 20:05:44 2003
+++ linux-2.5.63_patched/arch/i386/oprofile/nmi_int.c Mon Feb 24 23:02:43 2003
@@ -95,8 +95,7 @@
* without actually triggering any NMIs as this will
* break the core code horrifically.
*/
- smp_call_function(nmi_cpu_setup, NULL, 0, 1);
- nmi_cpu_setup(0);
+ on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
set_nmi_callback(nmi_callback);
oprofile_pmdev = set_nmi_pm_callback(oprofile_pm_callback);
return 0;
@@ -148,8 +147,7 @@
{
unset_nmi_pm_callback(oprofile_pmdev);
unset_nmi_callback();
- smp_call_function(nmi_cpu_shutdown, NULL, 0, 1);
- nmi_cpu_shutdown(0);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
}
@@ -162,8 +160,7 @@
static int nmi_start(void)
{
- smp_call_function(nmi_cpu_start, NULL, 0, 1);
- nmi_cpu_start(0);
+ on_each_cpu(nmi_cpu_start, NULL, 0, 1);
return 0;
}
@@ -177,8 +174,7 @@
static void nmi_stop(void)
{
- smp_call_function(nmi_cpu_stop, NULL, 0, 1);
- nmi_cpu_stop(0);
+ on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}
diff -urP linux-2.5.63/arch/ia64/kernel/smp.c linux-2.5.63_patched/arch/ia64/kernel/smp.c
--- linux-2.5.63/arch/ia64/kernel/smp.c Mon Feb 24 20:06:01 2003
+++ linux-2.5.63_patched/arch/ia64/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -206,18 +206,18 @@
void
smp_flush_tlb_all (void)
{
- smp_call_function((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
- local_flush_tlb_all();
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
}
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
- local_finish_flush_tlb_mm(mm);
-
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+ {
+ local_finish_flush_tlb_mm(mm);
return;
+ }
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
@@ -226,7 +226,7 @@
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
- smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+ on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
/*
diff -urP linux-2.5.63/arch/mips64/kernel/smp.c linux-2.5.63_patched/arch/mips64/kernel/smp.c
--- linux-2.5.63/arch/mips64/kernel/smp.c Mon Feb 24 20:05:38 2003
+++ linux-2.5.63_patched/arch/mips64/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -195,8 +195,7 @@
void flush_tlb_all(void)
{
- smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
- _flush_tlb_all();
+ on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -219,6 +218,8 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ preempt_disable();
+
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
} else {
@@ -228,6 +229,8 @@
CPU_CONTEXT(i, mm) = 0;
}
_flush_tlb_mm(mm);
+
+ preempt_enable();
}
struct flush_tlb_data {
@@ -246,6 +249,8 @@
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
+ preempt_disable();
+
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd;
@@ -260,6 +265,8 @@
CPU_CONTEXT(i, mm) = 0;
}
_flush_tlb_range(mm, start, end);
+
+ preempt_enable();
}
static void flush_tlb_page_ipi(void *info)
@@ -271,6 +278,8 @@
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ preempt_disable();
+
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
@@ -284,5 +293,7 @@
CPU_CONTEXT(i, vma->vm_mm) = 0;
}
_flush_tlb_page(vma, page);
+
+ preempt_enable();
}
diff -urP linux-2.5.63/arch/parisc/kernel/cache.c linux-2.5.63_patched/arch/parisc/kernel/cache.c
--- linux-2.5.63/arch/parisc/kernel/cache.c Mon Feb 24 20:05:29 2003
+++ linux-2.5.63_patched/arch/parisc/kernel/cache.c Mon Feb 24 23:02:43 2003
@@ -39,8 +39,7 @@
void
flush_data_cache(void)
{
- smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
- flush_data_cache_local();
+ on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
#endif
diff -urP linux-2.5.63/arch/parisc/kernel/irq.c linux-2.5.63_patched/arch/parisc/kernel/irq.c
--- linux-2.5.63/arch/parisc/kernel/irq.c Mon Feb 24 20:05:16 2003
+++ linux-2.5.63_patched/arch/parisc/kernel/irq.c Mon Feb 24 23:02:43 2003
@@ -61,20 +61,17 @@
static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED; /* protect IRQ regions */
-#ifdef CONFIG_SMP
static void cpu_set_eiem(void *info)
{
set_eiem((unsigned long) info);
}
-#endif
static inline void disable_cpu_irq(void *unused, int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem &= ~eirr_bit;
- set_eiem(cpu_eiem);
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
static void enable_cpu_irq(void *unused, int irq)
@@ -83,8 +80,7 @@
mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
- set_eiem(cpu_eiem);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
/* mask and disable are the same at the CPU level
@@ -100,8 +96,7 @@
** handle *any* unmasked pending interrupts.
** ie We don't need to check for pending interrupts here.
*/
- smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
- set_eiem(cpu_eiem);
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
/*
diff -urP linux-2.5.63/arch/parisc/kernel/smp.c linux-2.5.63_patched/arch/parisc/kernel/smp.c
--- linux-2.5.63/arch/parisc/kernel/smp.c Mon Feb 24 20:06:01 2003
+++ linux-2.5.63_patched/arch/parisc/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -401,7 +401,7 @@
__setup("maxcpus=", maxcpus);
/*
- * Flush all other CPU's tlb and then mine. Do this with smp_call_function()
+ * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
* as we want to ensure all TLB's flushed before proceeding.
*/
@@ -410,8 +410,7 @@
void
smp_flush_tlb_all(void)
{
- smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
- flush_tlb_all_local();
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}
diff -urP linux-2.5.63/arch/parisc/mm/init.c linux-2.5.63_patched/arch/parisc/mm/init.c
--- linux-2.5.63/arch/parisc/mm/init.c Mon Feb 24 20:05:32 2003
+++ linux-2.5.63_patched/arch/parisc/mm/init.c Mon Feb 24 23:02:43 2003
@@ -974,8 +974,7 @@
do_recycle++;
}
spin_unlock(&sid_lock);
- smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
- flush_tlb_all_local();
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
if (do_recycle) {
spin_lock(&sid_lock);
recycle_sids(recycle_ndirty,recycle_dirty_array);
diff -urP linux-2.5.63/arch/ppc/kernel/temp.c linux-2.5.63_patched/arch/ppc/kernel/temp.c
--- linux-2.5.63/arch/ppc/kernel/temp.c Mon Feb 24 20:05:33 2003
+++ linux-2.5.63_patched/arch/ppc/kernel/temp.c Mon Feb 24 23:02:43 2003
@@ -194,10 +194,7 @@
/* schedule ourselves to be run again */
mod_timer(&tau_timer, jiffies + shrink_timer) ;
-#ifdef CONFIG_SMP
- smp_call_function(tau_timeout, NULL, 1, 0);
-#endif
- tau_timeout(NULL);
+ on_each_cpu(tau_timeout, NULL, 1, 0);
}
/*
@@ -239,10 +236,7 @@
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
-#ifdef CONFIG_SMP
- smp_call_function(TAU_init_smp, NULL, 1, 0);
-#endif
- TAU_init_smp(NULL);
+ on_each_cpu(TAU_init_smp, NULL, 1, 0);
printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
diff -urP linux-2.5.63/arch/s390/kernel/smp.c linux-2.5.63_patched/arch/s390/kernel/smp.c
--- linux-2.5.63/arch/s390/kernel/smp.c Mon Feb 24 20:05:05 2003
+++ linux-2.5.63_patched/arch/s390/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -228,8 +228,7 @@
void machine_restart_smp(char * __unused)
{
cpu_restart_map = cpu_online_map;
- smp_call_function(do_machine_restart, NULL, 0, 0);
- do_machine_restart(NULL);
+ on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
@@ -247,8 +246,7 @@
void machine_halt_smp(void)
{
- smp_call_function(do_machine_halt, NULL, 0, 0);
- do_machine_halt(NULL);
+ on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
@@ -266,8 +264,7 @@
void machine_power_off_smp(void)
{
- smp_call_function(do_machine_power_off, NULL, 0, 0);
- do_machine_power_off(NULL);
+ on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
@@ -339,8 +336,7 @@
void smp_ptlb_all(void)
{
- smp_call_function(smp_ptlb_callback, NULL, 0, 1);
- local_flush_tlb();
+ on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
/*
@@ -400,8 +396,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = 0xFFFFFFFF;
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_set_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
@@ -414,8 +409,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 0x00000000;
parms.andvals[cr] = ~(1 << bit);
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_clear_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
diff -urP linux-2.5.63/arch/s390x/kernel/smp.c linux-2.5.63_patched/arch/s390x/kernel/smp.c
--- linux-2.5.63/arch/s390x/kernel/smp.c Mon Feb 24 20:05:32 2003
+++ linux-2.5.63_patched/arch/s390x/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -227,8 +227,7 @@
void machine_restart_smp(char * __unused)
{
cpu_restart_map = cpu_online_map;
- smp_call_function(do_machine_restart, NULL, 0, 0);
- do_machine_restart(NULL);
+ on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
@@ -246,8 +245,7 @@
void machine_halt_smp(void)
{
- smp_call_function(do_machine_halt, NULL, 0, 0);
- do_machine_halt(NULL);
+ on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
@@ -265,8 +263,7 @@
void machine_power_off_smp(void)
{
- smp_call_function(do_machine_power_off, NULL, 0, 0);
- do_machine_power_off(NULL);
+ on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
@@ -383,8 +380,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = -1L;
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_set_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
@@ -397,8 +393,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 0;
parms.andvals[cr] = ~(1L << bit);
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_clear_bit(cr, bit);
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
diff -urP linux-2.5.63/arch/x86_64/kernel/bluesmoke.c linux-2.5.63_patched/arch/x86_64/kernel/bluesmoke.c
--- linux-2.5.63/arch/x86_64/kernel/bluesmoke.c Mon Feb 24 20:05:43 2003
+++ linux-2.5.63_patched/arch/x86_64/kernel/bluesmoke.c Mon Feb 24 23:02:43 2003
@@ -111,11 +111,7 @@
{
u32 low, high;
int i;
- unsigned int *cpu = info;
- BUG_ON (*cpu != smp_processor_id());
-
- preempt_disable();
for (i=0; i<banks; i++) {
rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
@@ -130,20 +126,12 @@
wmb();
}
}
- preempt_enable();
}
static void mce_timerfunc (unsigned long data)
{
- unsigned int i;
-
- for (i=0; i<smp_num_cpus; i++) {
- if (i == smp_processor_id())
- mce_checkregs(&i);
- else
- smp_call_function (mce_checkregs, &i, 1, 1);
- }
+ on_each_cpu (mce_checkregs, NULL, 1, 1);
/* Refresh the timer. */
mce_timer.expires = jiffies + MCE_RATE;
diff -urP linux-2.5.63/arch/x86_64/kernel/io_apic.c linux-2.5.63_patched/arch/x86_64/kernel/io_apic.c
--- linux-2.5.63/arch/x86_64/kernel/io_apic.c Mon Feb 24 20:05:32 2003
+++ linux-2.5.63_patched/arch/x86_64/kernel/io_apic.c Mon Feb 24 23:02:43 2003
@@ -928,8 +928,7 @@
void print_all_local_APICs (void)
{
- smp_call_function(print_local_APIC, NULL, 1, 1);
- print_local_APIC(NULL);
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
}
void /*__init*/ print_PIC(void)
diff -urP linux-2.5.63/arch/x86_64/kernel/ldt.c linux-2.5.63_patched/arch/x86_64/kernel/ldt.c
--- linux-2.5.63/arch/x86_64/kernel/ldt.c Mon Feb 24 20:05:44 2003
+++ linux-2.5.63_patched/arch/x86_64/kernel/ldt.c Mon Feb 24 23:02:43 2003
@@ -60,13 +60,13 @@
pc->size = mincount;
wmb();
if (reload) {
+ preempt_disable();
load_LDT(pc);
#ifdef CONFIG_SMP
- preempt_disable();
if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
- preempt_enable();
#endif
+ preempt_enable();
}
if (oldsize) {
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -urP linux-2.5.63/arch/x86_64/kernel/smp.c linux-2.5.63_patched/arch/x86_64/kernel/smp.c
--- linux-2.5.63/arch/x86_64/kernel/smp.c Mon Feb 24 20:05:13 2003
+++ linux-2.5.63_patched/arch/x86_64/kernel/smp.c Mon Feb 24 23:02:43 2003
@@ -328,7 +328,7 @@
preempt_enable();
}
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
{
unsigned long cpu = smp_processor_id();
@@ -337,18 +337,9 @@
leave_mm(cpu);
}
-static void flush_tlb_all_ipi(void* info)
-{
- do_flush_tlb_all_local();
-}
-
void flush_tlb_all(void)
{
- preempt_disable();
- smp_call_function (flush_tlb_all_ipi,0,1,1);
-
- do_flush_tlb_all_local();
- preempt_enable();
+ on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
void smp_kdb_stop(void)
diff -urP linux-2.5.63/arch/x86_64/mm/pageattr.c linux-2.5.63_patched/arch/x86_64/mm/pageattr.c
--- linux-2.5.63/arch/x86_64/mm/pageattr.c Mon Feb 24 20:05:32 2003
+++ linux-2.5.63_patched/arch/x86_64/mm/pageattr.c Mon Feb 24 23:02:43 2003
@@ -123,12 +123,7 @@
static inline void flush_map(unsigned long address)
{
- preempt_disable();
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, (void *)address, 1, 1);
-#endif
- flush_kernel_map((void *)address);
- preempt_enable();
+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
struct deferred_page {
diff -urP linux-2.5.63/drivers/char/agp/agp.h linux-2.5.63_patched/drivers/char/agp/agp.h
--- linux-2.5.63/drivers/char/agp/agp.h Mon Feb 24 20:05:29 2003
+++ linux-2.5.63_patched/drivers/char/agp/agp.h Mon Feb 24 23:02:43 2003
@@ -42,9 +42,8 @@
static void __attribute__((unused)) global_cache_flush(void)
{
- if (smp_call_function(ipi_handler, NULL, 1, 1) != 0)
+ if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
panic(PFX "timed out waiting for the other CPUs!\n");
- flush_agp_cache();
}
#else
static inline void global_cache_flush(void)
diff -urP linux-2.5.63/drivers/s390/char/sclp.c linux-2.5.63_patched/drivers/s390/char/sclp.c
--- linux-2.5.63/drivers/s390/char/sclp.c Mon Feb 24 20:05:35 2003
+++ linux-2.5.63_patched/drivers/s390/char/sclp.c Mon Feb 24 23:02:43 2003
@@ -481,8 +481,7 @@
do_machine_quiesce(void)
{
cpu_quiesce_map = cpu_online_map;
- smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
- do_load_quiesce_psw(NULL);
+ on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
diff -urP linux-2.5.63/fs/buffer.c linux-2.5.63_patched/fs/buffer.c
--- linux-2.5.63/fs/buffer.c Mon Feb 24 20:05:34 2003
+++ linux-2.5.63_patched/fs/buffer.c Mon Feb 24 23:02:43 2003
@@ -1403,10 +1403,7 @@
static void invalidate_bh_lrus(void)
{
- preempt_disable();
- invalidate_bh_lru(NULL);
- smp_call_function(invalidate_bh_lru, NULL, 1, 1);
- preempt_enable();
+ on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
void set_bh_page(struct buffer_head *bh,
diff -urP linux-2.5.63/include/asm-parisc/cacheflush.h linux-2.5.63_patched/include/asm-parisc/cacheflush.h
--- linux-2.5.63/include/asm-parisc/cacheflush.h Mon Feb 24 20:05:47 2003
+++ linux-2.5.63_patched/include/asm-parisc/cacheflush.h Mon Feb 24 23:02:43 2003
@@ -25,16 +25,10 @@
extern void flush_cache_all_local(void);
-#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
- smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
- flush_cache_all_local();
+ on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
}
-#else
-#define flush_cache_all flush_cache_all_local
-#endif
-
/* The following value needs to be tuned and probably scaled with the
* cache size.
diff -urP linux-2.5.63/include/linux/smp.h linux-2.5.63_patched/include/linux/smp.h
--- linux-2.5.63/include/linux/smp.h Mon Feb 24 20:05:33 2003
+++ linux-2.5.63_patched/include/linux/smp.h Mon Feb 24 23:02:44 2003
@@ -10,9 +10,10 @@
#ifdef CONFIG_SMP
+#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
-#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/smp.h>
#include <asm/bug.h>
@@ -54,6 +55,25 @@
int retry, int wait);
/*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ int ret = 0;
+
+ preempt_disable();
+
+ if(num_online_cpus() > 1)
+ ret = smp_call_function(func, info, retry, wait);
+ func(info);
+
+ preempt_enable();
+
+ return ret;
+}
+
+/*
* True once the per process idle is forked
*/
extern int smp_threads_ready;
@@ -96,6 +116,7 @@
#define hard_smp_processor_id() 0
#define smp_threads_ready 1
#define smp_call_function(func,info,retry,wait) ({ 0; })
+#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
static inline void smp_send_reschedule(int cpu) { }
static inline void smp_send_reschedule_all(void) { }
#define cpu_online_map 1
diff -urP linux-2.5.63/mm/slab.c linux-2.5.63_patched/mm/slab.c
--- linux-2.5.63/mm/slab.c Mon Feb 24 20:05:39 2003
+++ linux-2.5.63_patched/mm/slab.c Mon Feb 24 23:02:44 2003
@@ -1116,12 +1116,16 @@
static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
{
check_irq_on();
+ preempt_disable();
+
local_irq_disable();
func(arg);
local_irq_enable();
if (smp_call_function(func, arg, 1, 1))
BUG();
+
+ preempt_enable();
}
static void free_block (kmem_cache_t* cachep, void** objpp, int len);
[-- Attachment #2: signature --]
[-- Type: application/pgp-signature, Size: 189 bytes --]
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-25 18:08 [PATCH][2.5] fix preempt-issues with smp_call_function() Thomas Schlichter
@ 2003-02-26 9:27 ` Andrew Morton
2003-02-26 10:37 ` Dave Jones
1 sibling, 0 replies; 14+ messages in thread
From: Andrew Morton @ 2003-02-26 9:27 UTC (permalink / raw)
To: Thomas Schlichter; +Cc: torvalds, davej, hugh, linux-kernel
Thomas Schlichter <schlicht@uni-mannheim.de> wrote:
>
> Hello,
>
> here is a patch to solve all (I hope I missed none) possible problems that
> could occur on SMP machines running a preemptible kernel when
> smp_call_function() calls a function which should be also executed on the
> current processor.
>
Patch looks pretty good, thanks. Fixes a real bug.
I worry a little about the s390/s390x change: smp_ctl_set_bit() and
smp_ctl_clear_bit(). The functions being called locally and
remotely are fairly different. I'm sure it's OK, but... I changed that bit
to open-code the preempt_disable/enable.
The arch/x86_64/kernel/bluesmoke.c change looks right. Seems that someone
didn't understand the smp_call_function() API in there.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 10:37 ` Dave Jones
@ 2003-02-26 9:54 ` Andrew Morton
2003-02-26 10:14 ` Thomas Schlichter
2003-02-26 11:19 ` Dave Jones
0 siblings, 2 replies; 14+ messages in thread
From: Andrew Morton @ 2003-02-26 9:54 UTC (permalink / raw)
To: Dave Jones; +Cc: schlicht, torvalds, hugh, linux-kernel
Dave Jones <davej@codemonkey.org.uk> wrote:
>
> On Tue, Feb 25, 2003 at 07:08:48PM +0100, Thomas Schlichter wrote:
>
> > here is a patch to solve all (I hope I missed none) possible problems that
> > could occur on SMP machines running a preemptible kernel when
> > smp_call_function() calls a function which should be also executed on the
> > current processor.
> >
> > This patch is based on the one Dave Jones sent to the LKML last friday and
> > applies to the linux kernel version 2.5.63.
>
> Just one comment. You moved quite a few of the preempt_disable/enable
> pairs outside of the CONFIG_SMP checks. The issue we're working against
> here is to try and prevent preemption and ending up on a different CPU.
> As this cannot happen if CONFIG_SMP=n, I don't see why you've done this.
>
Just in two places.
arch/i386/kernel/ldt.c | 6 ++++--
arch/x86_64/kernel/ldt.c | 6 ++++--
2 files changed, 8 insertions(+), 4 deletions(-)
diff -puN arch/i386/kernel/ldt.c~on_each_cpu-ldt-cleanup arch/i386/kernel/ldt.c
--- 25/arch/i386/kernel/ldt.c~on_each_cpu-ldt-cleanup 2003-02-26 01:51:27.000000000 -0800
+++ 25-akpm/arch/i386/kernel/ldt.c 2003-02-26 01:52:21.000000000 -0800
@@ -55,13 +55,15 @@ static int alloc_ldt(mm_context_t *pc, i
wmb();
if (reload) {
+#ifdef CONFIG_SMP
preempt_disable();
load_LDT(pc);
-#ifdef CONFIG_SMP
if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
-#endif
preempt_enable();
+#else
+ load_LDT(pc);
+#endif
}
if (oldsize) {
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -puN arch/x86_64/kernel/ldt.c~on_each_cpu-ldt-cleanup arch/x86_64/kernel/ldt.c
--- 25/arch/x86_64/kernel/ldt.c~on_each_cpu-ldt-cleanup 2003-02-26 01:51:36.000000000 -0800
+++ 25-akpm/arch/x86_64/kernel/ldt.c 2003-02-26 01:52:37.000000000 -0800
@@ -60,13 +60,15 @@ static int alloc_ldt(mm_context_t *pc, i
pc->size = mincount;
wmb();
if (reload) {
+#ifdef CONFIG_SMP
preempt_disable();
load_LDT(pc);
-#ifdef CONFIG_SMP
if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
-#endif
preempt_enable();
+#else
+ load_LDT(pc);
+#endif
}
if (oldsize) {
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
_
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 9:54 ` Andrew Morton
@ 2003-02-26 10:14 ` Thomas Schlichter
2003-02-26 11:19 ` Dave Jones
1 sibling, 0 replies; 14+ messages in thread
From: Thomas Schlichter @ 2003-02-26 10:14 UTC (permalink / raw)
To: Andrew Morton, Dave Jones; +Cc: torvalds, hugh, linux-kernel
[-- Attachment #1: signed data --]
[-- Type: text/plain, Size: 513 bytes --]
Dave Jones <davej@codemonkey.org.uk> wrote:
> Just one comment. You moved quite a few of the preempt_disable/enable
> pairs outside of the CONFIG_SMP checks. The issue we're working against
> here is to try and prevent preemption and ending up on a different CPU.
> As this cannot happen if CONFIG_SMP=n, I don't see why you've done this.
Andrew Morton wrote:
> Just in two places.
[snip]
Yes, thanks for delivering this better patch!
My approach was just meant to be the simplest possible one... ;-)
Thomas
[-- Attachment #2: signature --]
[-- Type: application/pgp-signature, Size: 189 bytes --]
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 11:19 ` Dave Jones
@ 2003-02-26 10:28 ` Andrew Morton
2003-02-26 10:52 ` Thomas Schlichter
` (3 more replies)
0 siblings, 4 replies; 14+ messages in thread
From: Andrew Morton @ 2003-02-26 10:28 UTC (permalink / raw)
To: Dave Jones; +Cc: schlicht, torvalds, hugh, linux-kernel
Dave Jones <davej@codemonkey.org.uk> wrote:
>
> btw, (unrelated) shouldn't smp_call_function be doing magick checks
> with cpu_online() ?
Looks OK? It sprays the IPI out to all the other CPUs in cpu_online_map,
and waits for num_online_cpus()-1 CPUs to answer.
All very racy in the presence of CPUs going offline, but that's all over
the place. Depends how the offlining will be done I guess.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-25 18:08 [PATCH][2.5] fix preempt-issues with smp_call_function() Thomas Schlichter
2003-02-26 9:27 ` Andrew Morton
@ 2003-02-26 10:37 ` Dave Jones
2003-02-26 9:54 ` Andrew Morton
1 sibling, 1 reply; 14+ messages in thread
From: Dave Jones @ 2003-02-26 10:37 UTC (permalink / raw)
To: Thomas Schlichter
Cc: Linus Torvalds, Hugh Dickins, Andrew Morton, Linux Kernel
On Tue, Feb 25, 2003 at 07:08:48PM +0100, Thomas Schlichter wrote:
> here is a patch to solve all (I hope I missed none) possible problems that
> could occur on SMP machines running a preemptible kernel when
> smp_call_function() calls a function which should be also executed on the
> current processor.
>
> This patch is based on the one Dave Jones sent to the LKML last friday and
> applies to the linux kernel version 2.5.63.
Just one comment. You moved quite a few of the preempt_disable/enable
pairs outside of the CONFIG_SMP checks. The issue we're working against
here is to try and prevent preemption and ending up on a different CPU.
As this cannot happen if CONFIG_SMP=n, I don't see why you've done this.
Dave
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 10:28 ` Andrew Morton
@ 2003-02-26 10:52 ` Thomas Schlichter
2003-02-26 11:28 ` Dave Jones
` (2 subsequent siblings)
3 siblings, 0 replies; 14+ messages in thread
From: Thomas Schlichter @ 2003-02-26 10:52 UTC (permalink / raw)
To: Andrew Morton, Dave Jones; +Cc: torvalds, hugh, linux-kernel
[-- Attachment #1: signed data --]
[-- Type: text/plain, Size: 2002 bytes --]
Dave Jones <davej@codemonkey.org.uk> wrote:
> btw, (unrelated) shouldn't smp_call_function be doing magick checks
> with cpu_online() ?
Andrew Morton wrote:
> Looks OK? It sprays the IPI out to all the other CPUs in cpu_online_map,
> and waits for num_online_cpus()-1 CPUs to answer.
>
> All very racy in the presence of CPUs going offline, but that's all over
> the place. Depends how the offlining will be done I guess.
Well, now I see the check for num_online_cpus() != 1 in smp_call_function(),
too, so the check in on_each_cpu() is not needed and possibly better this
patch should apply to the include/linux/smp.h file...
Thomas
--- linux-2.5.63/include/linux/smp.h.orig Mon Feb 24 20:05:33 2003
+++ linux-2.5.63/include/linux/smp.h Wed Feb 26 11:41:45 2003
@@ -10,9 +10,10 @@
#ifdef CONFIG_SMP
+#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
-#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/smp.h>
#include <asm/bug.h>
@@ -54,6 +55,24 @@
int retry, int wait);
/*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ int ret;
+
+ preempt_disable();
+
+ ret = smp_call_function(func, info, retry, wait);
+ func(info);
+
+ preempt_enable();
+
+ return ret;
+}
+
+/*
* True once the per process idle is forked
*/
extern int smp_threads_ready;
@@ -96,6 +115,7 @@
#define hard_smp_processor_id() 0
#define smp_threads_ready 1
#define smp_call_function(func,info,retry,wait) ({ 0; })
+#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
static inline void smp_send_reschedule(int cpu) { }
static inline void smp_send_reschedule_all(void) { }
#define cpu_online_map 1
[-- Attachment #2: signature --]
[-- Type: application/pgp-signature, Size: 189 bytes --]
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 9:54 ` Andrew Morton
2003-02-26 10:14 ` Thomas Schlichter
@ 2003-02-26 11:19 ` Dave Jones
2003-02-26 10:28 ` Andrew Morton
1 sibling, 1 reply; 14+ messages in thread
From: Dave Jones @ 2003-02-26 11:19 UTC (permalink / raw)
To: Andrew Morton; +Cc: schlicht, torvalds, hugh, linux-kernel
On Wed, Feb 26, 2003 at 01:54:09AM -0800, Andrew Morton wrote:
> > Just one comment. You moved quite a few of the preempt_disable/enable
> > pairs outside of the CONFIG_SMP checks. The issue we're working against
> > here is to try and prevent preemption and ending up on a different CPU.
> > As this cannot happen if CONFIG_SMP=n, I don't see why you've done this.
> Just in two places.
Ok, slight exaggeration 8-) Looks good to me.
btw, (unrelated) shouldn't smp_call_function be doing magick checks
with cpu_online() ?
Dave
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 10:28 ` Andrew Morton
2003-02-26 10:52 ` Thomas Schlichter
@ 2003-02-26 11:28 ` Dave Jones
2003-02-26 13:39 ` Alan Cox
2003-02-26 16:00 ` Martin J. Bligh
3 siblings, 0 replies; 14+ messages in thread
From: Dave Jones @ 2003-02-26 11:28 UTC (permalink / raw)
To: Andrew Morton; +Cc: schlicht, torvalds, hugh, linux-kernel
On Wed, Feb 26, 2003 at 02:28:19AM -0800, Andrew Morton wrote:
> > btw, (unrelated) shouldn't smp_call_function be doing magick checks
> > with cpu_online() ?
> Looks OK? It sprays the IPI out to all the other CPUs in cpu_online_map,
> and waits for num_online_cpus()-1 CPUs to answer.
Doh, of course.
Ugh, mornings.
Dave
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 10:28 ` Andrew Morton
2003-02-26 10:52 ` Thomas Schlichter
2003-02-26 11:28 ` Dave Jones
@ 2003-02-26 13:39 ` Alan Cox
2003-02-26 18:47 ` Andrew Morton
2003-02-26 16:00 ` Martin J. Bligh
3 siblings, 1 reply; 14+ messages in thread
From: Alan Cox @ 2003-02-26 13:39 UTC (permalink / raw)
To: Andrew Morton
Cc: Dave Jones, schlicht, Linus Torvalds, hugh,
Linux Kernel Mailing List
On Wed, 2003-02-26 at 10:28, Andrew Morton wrote:
> Dave Jones <davej@codemonkey.org.uk> wrote:
> >
> > btw, (unrelated) shouldn't smp_call_function be doing magick checks
> > with cpu_online() ?
>
> Looks OK? It sprays the IPI out to all the other CPUs in cpu_online_map,
> and waits for num_online_cpus()-1 CPUs to answer.
You cannot do that by counting without a lot of care. IPI messages do not have
guaranteed "once only" semantics. On an error a resend can and has been observed
to cause a reissue of an IPI on PII/PIII setups
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 10:28 ` Andrew Morton
` (2 preceding siblings ...)
2003-02-26 13:39 ` Alan Cox
@ 2003-02-26 16:00 ` Martin J. Bligh
3 siblings, 0 replies; 14+ messages in thread
From: Martin J. Bligh @ 2003-02-26 16:00 UTC (permalink / raw)
To: Andrew Morton, Dave Jones; +Cc: schlicht, torvalds, hugh, linux-kernel
>> btw, (unrelated) shouldn't smp_call_function be doing magick checks
>> with cpu_online() ?
>
> Looks OK? It sprays the IPI out to all the other CPUs in cpu_online_map,
> and waits for num_online_cpus()-1 CPUs to answer.
Incidentally, would be nice if this thing had a timeout and bugged out when
one didn't reply, rather than just wedging the whole CPU for ever. Yes,
it's normally some other bug that makes the other CPU not reply, but would
make diags a damned sight easier.
M.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 13:39 ` Alan Cox
@ 2003-02-26 18:47 ` Andrew Morton
2003-02-26 20:11 ` Alan Cox
0 siblings, 1 reply; 14+ messages in thread
From: Andrew Morton @ 2003-02-26 18:47 UTC (permalink / raw)
To: Alan Cox; +Cc: davej, schlicht, torvalds, hugh, linux-kernel
Alan Cox <alan@lxorguk.ukuu.org.uk> wrote:
>
> On Wed, 2003-02-26 at 10:28, Andrew Morton wrote:
> > Dave Jones <davej@codemonkey.org.uk> wrote:
> > >
> > > btw, (unrelated) shouldn't smp_call_function be doing magick checks
> > > with cpu_online() ?
> >
> > Looks OK? It sprays the IPI out to all the other CPUs in cpu_online_map,
> > and waits for num_online_cpus()-1 CPUs to answer.
>
> You cannot do that by counting without a lot of care. IPI messages do not have
> guaranteed "once only" semantics. On an error a resend can and has been observed
> to cause a reissue of an IPI on PII/PIII setups
If that resend results in delivery of an actual extra interrupt, the
resent-to CPU can start playing with stuff which used to be on the sender's
stack and the box goes splat.
Didn't sct have a fix for that?
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH][2.5] fix preempt-issues with smp_call_function()
2003-02-26 18:47 ` Andrew Morton
@ 2003-02-26 20:11 ` Alan Cox
0 siblings, 0 replies; 14+ messages in thread
From: Alan Cox @ 2003-02-26 20:11 UTC (permalink / raw)
To: Andrew Morton
Cc: davej, schlicht, Linus Torvalds, hugh, Linux Kernel Mailing List
On Wed, 2003-02-26 at 18:47, Andrew Morton wrote:
> If that resend results in delivery of an actual extra interrupt, the
> resent-to CPU can start playing with stuff which used to be on the sender's
> stack and the box goes splat.
>
> Didn't sct have a fix for that?
Yes but it was never merged mainstream for some reason. I think it kind of
got away
^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2003-02-26 18:59 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2003-02-25 18:08 [PATCH][2.5] fix preempt-issues with smp_call_function() Thomas Schlichter
2003-02-26 9:27 ` Andrew Morton
2003-02-26 10:37 ` Dave Jones
2003-02-26 9:54 ` Andrew Morton
2003-02-26 10:14 ` Thomas Schlichter
2003-02-26 11:19 ` Dave Jones
2003-02-26 10:28 ` Andrew Morton
2003-02-26 10:52 ` Thomas Schlichter
2003-02-26 11:28 ` Dave Jones
2003-02-26 13:39 ` Alan Cox
2003-02-26 18:47 ` Andrew Morton
2003-02-26 20:11 ` Alan Cox
2003-02-26 16:00 ` Martin J. Bligh
-- strict thread matches above, loose matches on Subject: below --
2003-02-21 14:20 [PATCH][2.5] replace flush_map() in arch/i386/mm/pageattr.c with flush_tlb_all() Dave Jones
2003-02-21 17:36 ` Dave Jones
2003-02-22 3:23 ` [PATCH][2.5] fix preempt-issues with smp_call_function() Thomas Schlichter
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox