* [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch, linux-kernel
Cc: npiggin, torvalds, Jens Axboe
This adds kernel/smp.c, which contains helpers for IPI function calls. In
addition to supporting the existing smp_call_function() in a more efficient
manner, it also adds a more scalable variant called smp_call_function_single()
for calling a given function on a single CPU only.

The core of this is based on the x86-64 patch from Nick Piggin, with many
changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
contributed many fixes and suggestions as well.
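For illustration only (this snippet is not part of the patch, and
bump_counter()/hits are made-up names), intended usage looks like:

	#include <linux/smp.h>
	#include <asm/atomic.h>

	static atomic_t hits = ATOMIC_INIT(0);

	/* must be fast, non-blocking and IRQ-safe */
	static void bump_counter(void *info)
	{
		atomic_inc((atomic_t *) info);
	}

	static void example(void)
	{
		/* run bump_counter() on CPU 1 and wait for completion
		 * (the retry argument is unused) */
		smp_call_function_single(1, bump_counter, &hits, 0, 1);

		/* run it on all other online CPUs, without waiting */
		smp_call_function(bump_counter, &hits, 0, 0);
	}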
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
include/linux/smp.h | 27 ++++-
init/main.c | 3 +
kernel/Makefile | 1 +
kernel/smp.c | 366 +++++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 395 insertions(+), 2 deletions(-)
create mode 100644 kernel/smp.c
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232cc..d7f7a9f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,19 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
extern void cpu_idle(void);
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ unsigned int flags;
+};
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -53,9 +63,23 @@ extern void smp_cpus_done(unsigned int max_cpus);
* Call a function on all other processors
*/
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int retry, int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi(cpumask_t mask);
+extern spinlock_t call_function_lock;
+#endif
/*
* Call a function on all processors
@@ -112,7 +136,6 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
-
#endif /* !SMP */
/*
diff --git a/init/main.c b/init/main.c
index 833a67d..9804503 100644
--- a/init/main.c
+++ b/init/main.c
@@ -773,6 +773,9 @@ static void __init do_pre_smp_initcalls(void)
{
extern int spawn_ksoftirqd(void);
+#ifdef CONFIG_GENERIC_SMP_HELPERS
+ init_call_single_data();
+#endif
migration_init();
spawn_ksoftirqd();
if (!nosoftlockup)
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c5f081..55c62b1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_GENERIC_SMP_HELPERS) += smp.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 0000000..9fc8b98
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,366 @@
+/*
+ * Generic helpers for smp ipi calls
+ *
+ * (C) Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> 2008
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+
+static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
+static LIST_HEAD(call_function_queue);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+enum {
+ CSD_FLAG_WAIT = 0x01, /* caller is waiting for the call to complete */
+ CSD_FLAG_ALLOC = 0x02, /* data was dynamically allocated, free when done */
+ CSD_FLAG_FALLBACK = 0x04, /* data is the shared static fallback slot */
+};
+
+struct call_function_data {
+ struct call_single_data csd;
+ spinlock_t lock;
+ unsigned int refs;
+ cpumask_t cpumask;
+ struct rcu_head rcu_head;
+};
+
+struct call_single_queue {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+/*
+ * Fallback data to use if the dynamic allocation fails
+ */
+static struct call_function_data call_data_fallback;
+static unsigned long call_fallback_used;
+
+void __cpuinit init_call_single_data(void)
+{
+ int i;
+
+ for_each_cpu_mask(i, cpu_possible_map) {
+ struct call_single_queue *q = &per_cpu(call_single_queue, i);
+
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->list);
+ }
+}
+
+static inline void csd_flag_wait(struct call_single_data *data)
+{
+ /* Wait for response */
+ do {
+ /*
+ * We need to see the flags store in the IPI handler
+ */
+ smp_mb();
+ if (!(data->flags & CSD_FLAG_WAIT))
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+/*
+ * Insert a previously allocated call_single_data element for execution
+ * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ */
+static void generic_exec_single(int cpu, struct call_single_data *data)
+{
+ struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
+ int wait = data->flags & CSD_FLAG_WAIT, ipi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dst->lock, flags);
+ ipi = list_empty(&dst->list);
+ list_add_tail(&data->list, &dst->list);
+ spin_unlock_irqrestore(&dst->lock, flags);
+
+ if (ipi)
+ arch_send_call_function_single_ipi(cpu);
+
+ if (wait)
+ csd_flag_wait(data);
+}
+
+static void rcu_free_call_data(struct rcu_head *head)
+{
+ struct call_function_data *cfd;
+
+ cfd = container_of(head, struct call_function_data, rcu_head);
+ kfree(cfd);
+}
+
+static void call_func_data_free(struct call_function_data *data)
+{
+ if (data->csd.flags & CSD_FLAG_ALLOC)
+ call_rcu(&data->rcu_head, rcu_free_call_data);
+ else
+ clear_bit_unlock(0, &call_fallback_used);
+}
+
+static struct call_function_data *call_func_data_alloc(gfp_t gfp, int wait_done)
+{
+ struct call_function_data *data;
+
+ data = kmalloc(sizeof(*data), gfp);
+ if (likely(data))
+ data->csd.flags = CSD_FLAG_ALLOC;
+ else {
+ while (test_and_set_bit_lock(0, &call_fallback_used))
+ cpu_relax();
+
+ data = &call_data_fallback;
+ data->csd.flags = CSD_FLAG_FALLBACK;
+ }
+
+ if (wait_done)
+ data->csd.flags |= CSD_FLAG_WAIT;
+
+ spin_lock_init(&data->lock);
+ return data;
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function. Must be called with
+ * interrupts disabled.
+ */
+void generic_smp_call_function_interrupt(void)
+{
+ struct list_head *pos;
+ int cpu = get_cpu();
+
+ /*
+ * It's ok to use list_for_each_rcu() here even though we may delete
+ * 'pos', since list_del_rcu() doesn't clear ->next
+ */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &call_function_queue) {
+ struct call_function_data *data;
+ int refs;
+
+ data = list_entry(pos, struct call_function_data, csd.list);
+ if (!cpu_isset(cpu, data->cpumask))
+ continue;
+
+ data->csd.func(data->csd.info);
+
+ spin_lock(&data->lock);
+ cpu_clear(cpu, data->cpumask);
+ WARN_ON(data->refs == 0);
+ data->refs--;
+ refs = data->refs;
+ spin_unlock(&data->lock);
+
+ if (refs)
+ continue;
+
+ WARN_ON(cpus_weight(data->cpumask));
+ spin_lock(&call_function_lock);
+ list_del_rcu(&data->csd.list);
+ spin_unlock(&call_function_lock);
+
+ if (data->csd.flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->csd.flags &= ~CSD_FLAG_WAIT;
+ } else
+ call_func_data_free(data);
+ }
+ rcu_read_unlock();
+
+ put_cpu();
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function single. Must be called
+ * from the arch with interrupts disabled.
+ */
+void generic_smp_call_function_single_interrupt(void)
+{
+ struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+ LIST_HEAD(list);
+
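+ /*
+ * Ensure we see entries queued on our list before the IPI that got
+ * us here (the sender queues the data in generic_exec_single()).
+ */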
+ smp_mb();
+ while (!list_empty(&q->list)) {
+ unsigned int data_flags;
+
+ spin_lock(&q->lock);
+ list_replace_init(&q->list, &list);
+ spin_unlock(&q->lock);
+
+ while (!list_empty(&list)) {
+ struct call_single_data *data;
+
+ data = list_entry(list.next, struct call_single_data,
+ list);
+ list_del(&data->list);
+
+ /*
+ * 'data' can be invalid after this call if flags == 0
+ * (i.e. when called through generic_exec_single()), so
+ * save the flags away before making the call.
+ */
+ data_flags = data->flags;
+
+ data->func(data->info);
+
+ if (data_flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->flags &= ~CSD_FLAG_WAIT;
+ } else if (data_flags & CSD_FLAG_ALLOC)
+ kfree(data);
+ }
+ smp_mb();
+ }
+}
+
+/**
+ * smp_call_function_single - Run a function on a specific CPU
+ * @cpu: The CPU to run @func on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @retry: Unused
+ * @wait: If true, wait until @func has completed on @cpu.
+ *
+ * Returns 0 on success, else a negative status code.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ unsigned long flags;
+ /* prevent preemption and reschedule on another processor */
+ int me = get_cpu();
+ int ret = 0;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ if (cpu == me) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ } else {
+ struct call_single_data d;
+ struct call_single_data *data;
+
+ if (!wait) {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (unlikely(!data)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->flags = CSD_FLAG_ALLOC;
+ } else {
+ data = &d;
+ data->flags = CSD_FLAG_WAIT;
+ }
+
+ data->func = func;
+ data->info = info;
+ generic_exec_single(cpu, data);
+ }
+out:
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
+/**
+ * __smp_call_function_single(): Run a function on another CPU
+ * @cpu: The CPU to run on.
+ * @data: Pre-allocated and set-up data structure
+ *
+ * Like smp_call_function_single(), but allows the caller to pass in a
+ * pre-allocated data structure. Useful for embedding @data inside other
+ * structures, for instance.
+ */
+void __smp_call_function_single(int cpu, struct call_single_data *data)
+{
+ generic_exec_single(cpu, data);
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+ int wait)
+{
+ struct call_function_data *data;
+ cpumask_t allbutself;
+ unsigned long flags;
+ int num_cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+ cpus_and(mask, mask, allbutself);
+ num_cpus = cpus_weight(mask);
+
+ if (!num_cpus)
+ return 0;
+
+ data = call_func_data_alloc(GFP_ATOMIC, wait);
+ data->csd.func = func;
+ data->csd.info = info;
+ data->refs = num_cpus;
+ data->cpumask = mask;
+
+ spin_lock_irqsave(&call_function_lock, flags);
+ list_add_tail_rcu(&data->csd.list, &call_function_queue);
+ spin_unlock_irqrestore(&call_function_lock, flags);
+
+ /* Send a message to all CPUs in the map */
+ arch_send_call_function_ipi(mask);
+
+ /* optionally wait for the CPUs to complete */
+ if (wait)
+ csd_flag_wait(&data->csd);
+
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @natomic: Unused
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+{
+ int ret;
+
+ preempt_disable();
+ ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function);
--
1.5.5.49.gf43e2
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Avi Kivity @ 2008-04-22 9:16 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch, linux-kernel, npiggin, torvalds
Jens Axboe wrote:
> This adds kernel/smp.c which contains helpers for IPI function calls. In
> addition to supporting the existing smp_call_function() in a more efficient
> manner, it also adds a more scalable variant called smp_call_function_single()
> for calling a given function on a single CPU only.
>
> The core of this is based on the x86-64 patch from Nick Piggin, lots of
> changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
> contributed lots of fixes and suggestions as well.
>
> +int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
> + int retry, int wait)
> +{
> + unsigned long flags;
> + /* prevent preemption and reschedule on another processor */
> + int me = get_cpu();
> + int ret = 0;
> +
> + /* Can deadlock when called with interrupts disabled */
> + WARN_ON(wait && irqs_disabled());
> +
> + if (cpu == me) {
> + local_irq_save(flags);
> + func(info);
> + local_irq_restore(flags);
> + } else {
> + struct call_single_data d;
> + struct call_single_data *data;
> +
> + if (!wait) {
> + data = kmalloc(sizeof(*data), GFP_ATOMIC);
> + if (unlikely(!data)) {
> + ret = -ENOMEM;
> + goto out;
> + }
> + data->flags = CSD_FLAG_ALLOC;
> + } else {
> + data = &d;
> + data->flags = CSD_FLAG_WAIT;
> + }
> +
>
Instead of introducing a rare error case, how about falling back to the
wait case if the allocation fails?
Of course, if the called function relies on the calling cpu doing
something else, then this fails, but I don't think anybody would do
that? On the other hand, there is at least one use of
smp_call_function_single() with !wait, which doesn't check the error return.
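A sketch of that fallback (illustration only, not code from the thread;
the variables are as in smp_call_function_single() above):

	struct call_single_data d;
	struct call_single_data *data = NULL;

	if (!wait)
		data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (data)
		data->flags = CSD_FLAG_ALLOC;
	else {
		/* allocation failed (or wait was requested): fall back
		 * to the on-stack copy and wait for completion */
		data = &d;
		data->flags = CSD_FLAG_WAIT;
	}

	data->func = func;
	data->info = info;
	generic_exec_single(cpu, data);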
--
error compiling committee.c: too many arguments to function
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Jens Axboe @ 2008-04-22 9:22 UTC (permalink / raw)
To: Avi Kivity
Cc: linux-arch, linux-kernel, npiggin, torvalds
On Tue, Apr 22 2008, Avi Kivity wrote:
> Jens Axboe wrote:
> >This adds kernel/smp.c which contains helpers for IPI function calls. In
> >addition to supporting the existing smp_call_function() in a more efficient
> >manner, it also adds a more scalable variant called
> >smp_call_function_single()
> >for calling a given function on a single CPU only.
> >
> >The core of this is based on the x86-64 patch from Nick Piggin, lots of
> >changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
> >contributed lots of fixes and suggestions as well.
> >
> >+int smp_call_function_single(int cpu, void (*func) (void *info), void
> >*info,
> >+ int retry, int wait)
> >+{
> >+ unsigned long flags;
> >+ /* prevent preemption and reschedule on another processor */
> >+ int me = get_cpu();
> >+ int ret = 0;
> >+
> >+ /* Can deadlock when called with interrupts disabled */
> >+ WARN_ON(wait && irqs_disabled());
> >+
> >+ if (cpu == me) {
> >+ local_irq_save(flags);
> >+ func(info);
> >+ local_irq_restore(flags);
> >+ } else {
> >+ struct call_single_data d;
> >+ struct call_single_data *data;
> >+
> >+ if (!wait) {
> >+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
> >+ if (unlikely(!data)) {
> >+ ret = -ENOMEM;
> >+ goto out;
> >+ }
> >+ data->flags = CSD_FLAG_ALLOC;
> >+ } else {
> >+ data = &d;
> >+ data->flags = CSD_FLAG_WAIT;
> >+ }
> >+
> >
>
> Instead of introducing a rare error case, how about falling back to the
> wait case if the allocation fails?
>
> Of course, if the called function relies on the calling cpu doing
> something else, then this fails, but I don't think anybody would do
> that? On the other hand, there is at least one use of
> smp_call_function_single() with !wait, which doesn't check the error return.
Sure, either falling back to waiting, or adding a static call_single_data
like the one that exists for smp_call_function(). In reality it'll never
happen, so the static fallback structure appeals the most to me.
--
Jens Axboe
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Jens Axboe @ 2008-04-22 11:14 UTC (permalink / raw)
To: Avi Kivity
Cc: linux-arch, linux-kernel, npiggin, torvalds
On Tue, Apr 22 2008, Jens Axboe wrote:
> On Tue, Apr 22 2008, Avi Kivity wrote:
> > Jens Axboe wrote:
> > >This adds kernel/smp.c which contains helpers for IPI function calls. In
> > >addition to supporting the existing smp_call_function() in a more efficient
> > >manner, it also adds a more scalable variant called
> > >smp_call_function_single()
> > >for calling a given function on a single CPU only.
> > >
> > >The core of this is based on the x86-64 patch from Nick Piggin, lots of
> > >changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
> > >contributed lots of fixes and suggestions as well.
> > >
> > >+int smp_call_function_single(int cpu, void (*func) (void *info), void
> > >*info,
> > >+ int retry, int wait)
> > >+{
> > >+ unsigned long flags;
> > >+ /* prevent preemption and reschedule on another processor */
> > >+ int me = get_cpu();
> > >+ int ret = 0;
> > >+
> > >+ /* Can deadlock when called with interrupts disabled */
> > >+ WARN_ON(wait && irqs_disabled());
> > >+
> > >+ if (cpu == me) {
> > >+ local_irq_save(flags);
> > >+ func(info);
> > >+ local_irq_restore(flags);
> > >+ } else {
> > >+ struct call_single_data d;
> > >+ struct call_single_data *data;
> > >+
> > >+ if (!wait) {
> > >+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
> > >+ if (unlikely(!data)) {
> > >+ ret = -ENOMEM;
> > >+ goto out;
> > >+ }
> > >+ data->flags = CSD_FLAG_ALLOC;
> > >+ } else {
> > >+ data = &d;
> > >+ data->flags = CSD_FLAG_WAIT;
> > >+ }
> > >+
> > >
> >
> > Instead of introducing a rare error case, how about falling back to the
> > wait case if the allocation fails?
> >
> > Of course, if the called function relies on the calling cpu doing
> > something else, then this fails, but I don't think anybody would do
> > that? On the other hand, there is at least one use of
> > smp_call_function_single() with !wait, which doesn't check the error return.
>
> Sure, either failling back to waiting, or add a static call_single_data
> like it exists for smp_call_function(). In reality it'll never happen,
> so the fallback static structure appeals the most to me.
We don't need any extra statically allocated data, we can just reuse the
'csd' element of the existing call_data_fallback. So that is what I did.
Once all archs are converted, we can change smp_call_function_single() to
a void return, since it then always succeeds.
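Roughly like this, as a sketch (the updated patch may differ in detail):

	if (!wait) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (data)
			data->flags = CSD_FLAG_ALLOC;
		else {
			/* grab the shared fallback slot, spinning until
			 * the previous user has released it */
			while (test_and_set_bit_lock(0, &call_fallback_used))
				cpu_relax();
			data = &call_data_fallback.csd;
			data->flags = CSD_FLAG_FALLBACK;
		}
	} else {
		data = &d;
		data->flags = CSD_FLAG_WAIT;
	}

The single-call IPI handler then releases the slot with clear_bit_unlock()
when it sees CSD_FLAG_FALLBACK, instead of calling kfree().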
--
Jens Axboe
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Peter Zijlstra @ 2008-04-22 13:00 UTC (permalink / raw)
To: Jens Axboe; +Cc: Avi Kivity, linux-arch, linux-kernel, npiggin, torvalds
On Tue, 2008-04-22 at 13:14 +0200, Jens Axboe wrote:
> On Tue, Apr 22 2008, Jens Axboe wrote:
> > On Tue, Apr 22 2008, Avi Kivity wrote:
> > > Jens Axboe wrote:
> > > >This adds kernel/smp.c which contains helpers for IPI function calls. In
> > > >addition to supporting the existing smp_call_function() in a more efficient
> > > >manner, it also adds a more scalable variant called
> > > >smp_call_function_single()
> > > >for calling a given function on a single CPU only.
> > > >
> > > >The core of this is based on the x86-64 patch from Nick Piggin, lots of
> > > >changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
> > > >contributed lots of fixes and suggestions as well.
> > > >
> > > >+int smp_call_function_single(int cpu, void (*func) (void *info), void
> > > >*info,
> > > >+ int retry, int wait)
> > > >+{
> > > >+ unsigned long flags;
> > > >+ /* prevent preemption and reschedule on another processor */
> > > >+ int me = get_cpu();
> > > >+ int ret = 0;
> > > >+
> > > >+ /* Can deadlock when called with interrupts disabled */
> > > >+ WARN_ON(wait && irqs_disabled());
> > > >+
> > > >+ if (cpu == me) {
> > > >+ local_irq_save(flags);
> > > >+ func(info);
> > > >+ local_irq_restore(flags);
> > > >+ } else {
> > > >+ struct call_single_data d;
> > > >+ struct call_single_data *data;
> > > >+
> > > >+ if (!wait) {
> > > >+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
> > > >+ if (unlikely(!data)) {
> > > >+ ret = -ENOMEM;
> > > >+ goto out;
> > > >+ }
> > > >+ data->flags = CSD_FLAG_ALLOC;
> > > >+ } else {
> > > >+ data = &d;
> > > >+ data->flags = CSD_FLAG_WAIT;
> > > >+ }
> > > >+
> > > >
> > >
> > > Instead of introducing a rare error case, how about falling back to the
> > > wait case if the allocation fails?
> > >
> > > Of course, if the called function relies on the calling cpu doing
> > > something else, then this fails, but I don't think anybody would do
> > > that? On the other hand, there is at least one use of
> > > smp_call_function_single() with !wait, which doesn't check the error return.
> >
> > Sure, either failling back to waiting, or add a static call_single_data
> > like it exists for smp_call_function(). In reality it'll never happen,
> > so the fallback static structure appeals the most to me.
>
> We don't need any extra statically allocated data, we can just reuse the
> 'csd' element of the existing call_data_fallback. So that is what I did.
> Once all archs are converted, we can now change
> smp_call_function_single() to a void return, as it always succeeds now.
Introducing this fallback will make any usage from irq disabled context
deadlock prone.
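Sketch of the problem case (assuming two CPUs hit the fallback path with
interrupts disabled):

	/* CPU 0, irqs off */			/* CPU 1, irqs off */
	smp_call_function_single(1, ...)	smp_call_function_single(0, ...)
	  kmalloc() fails			  kmalloc() fails
	  falls back to waiting			  falls back to waiting
	  spins in csd_flag_wait()		  spins in csd_flag_wait()

Neither CPU can service the call function IPI while it spins with
interrupts off, so neither CSD_FLAG_WAIT is ever cleared.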
I rather like the current interface.
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Jens Axboe @ 2008-04-22 14:25 UTC (permalink / raw)
To: Peter Zijlstra
Cc: Avi Kivity, linux-arch, linux-kernel, npiggin, torvalds
On Tue, Apr 22 2008, Peter Zijlstra wrote:
> On Tue, 2008-04-22 at 13:14 +0200, Jens Axboe wrote:
> > On Tue, Apr 22 2008, Jens Axboe wrote:
> > > On Tue, Apr 22 2008, Avi Kivity wrote:
> > > > Jens Axboe wrote:
> > > > >This adds kernel/smp.c which contains helpers for IPI function calls. In
> > > > >addition to supporting the existing smp_call_function() in a more efficient
> > > > >manner, it also adds a more scalable variant called
> > > > >smp_call_function_single()
> > > > >for calling a given function on a single CPU only.
> > > > >
> > > > >The core of this is based on the x86-64 patch from Nick Piggin, lots of
> > > > >changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
> > > > >contributed lots of fixes and suggestions as well.
> > > > >
> > > > >+int smp_call_function_single(int cpu, void (*func) (void *info), void
> > > > >*info,
> > > > >+ int retry, int wait)
> > > > >+{
> > > > >+ unsigned long flags;
> > > > >+ /* prevent preemption and reschedule on another processor */
> > > > >+ int me = get_cpu();
> > > > >+ int ret = 0;
> > > > >+
> > > > >+ /* Can deadlock when called with interrupts disabled */
> > > > >+ WARN_ON(wait && irqs_disabled());
> > > > >+
> > > > >+ if (cpu == me) {
> > > > >+ local_irq_save(flags);
> > > > >+ func(info);
> > > > >+ local_irq_restore(flags);
> > > > >+ } else {
> > > > >+ struct call_single_data d;
> > > > >+ struct call_single_data *data;
> > > > >+
> > > > >+ if (!wait) {
> > > > >+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
> > > > >+ if (unlikely(!data)) {
> > > > >+ ret = -ENOMEM;
> > > > >+ goto out;
> > > > >+ }
> > > > >+ data->flags = CSD_FLAG_ALLOC;
> > > > >+ } else {
> > > > >+ data = &d;
> > > > >+ data->flags = CSD_FLAG_WAIT;
> > > > >+ }
> > > > >+
> > > > >
> > > >
> > > > Instead of introducing a rare error case, how about falling back to the
> > > > wait case if the allocation fails?
> > > >
> > > > Of course, if the called function relies on the calling cpu doing
> > > > something else, then this fails, but I don't think anybody would do
> > > > that? On the other hand, there is at least one use of
> > > > smp_call_function_single() with !wait, which doesn't check the error return.
> > >
> > > Sure, either failling back to waiting, or add a static call_single_data
> > > like it exists for smp_call_function(). In reality it'll never happen,
> > > so the fallback static structure appeals the most to me.
> >
> > We don't need any extra statically allocated data, we can just reuse the
> > 'csd' element of the existing call_data_fallback. So that is what I did.
> > Once all archs are converted, we can now change
> > smp_call_function_single() to a void return, as it always succeeds now.
>
> Introducing this fallback will make any usage from irq disabled context
> deadlock prone.
>
> I rather like the current interface.
Hmm good point, I'll back that bit out again.
--
Jens Axboe
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
From: Avi Kivity @ 2008-04-22 14:38 UTC (permalink / raw)
To: Jens Axboe
Cc: Peter Zijlstra, linux-arch, linux-kernel, npiggin, torvalds
Jens Axboe wrote:
>> Introducing this fallback will make any usage from irq disabled context
>> deadlock prone.
>>
>> I rather like the current interface.
>>
>
> Hmm good point, I'll back that bit out again.
>
But then you need to fix all callers to handle errors.
Perhaps we need a new flag for fallible async IPIs (or better, a new API).
I'm pretty sure no one uses s_c_f_s() from irq disabled context on x86,
since it will WARN_ON(irqs_disabled()).
--
error compiling committee.c: too many arguments to function
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <480DF861.6000705-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2008-04-22 14:43 ` Peter Zijlstra
2008-04-22 14:43 ` Peter Zijlstra
2008-04-22 14:47 ` Avi Kivity
0 siblings, 2 replies; 139+ messages in thread
From: Peter Zijlstra @ 2008-04-22 14:43 UTC (permalink / raw)
To: Avi Kivity
Cc: Jens Axboe, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
On Tue, 2008-04-22 at 17:38 +0300, Avi Kivity wrote:
> Jens Axboe wrote:
> >> Introducing this fallback will make any usage from irq disabled context
> >> deadlock prone.
> >>
> >> I rather like the current interface.
> >>
> >
> > Hmm good point, I'll back that bit out again.
> >
>
> But then you need to fix all callers to handle errors.
Sure.
> Perhaps we need a new flag for failable async IPIs (or better, API).
> I'm pretty sure no one uses s_c_f_s() from irq disabled context on x86,
> since it will WARN_ON(irqs_disabled()).
Yeah, no current users, but that doesn't say I don't want to add new
ones ;-)
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-22 14:43 ` Peter Zijlstra
2008-04-22 14:43 ` Peter Zijlstra
@ 2008-04-22 14:47 ` Avi Kivity
2008-04-22 14:47 ` Avi Kivity
1 sibling, 1 reply; 139+ messages in thread
From: Avi Kivity @ 2008-04-22 14:47 UTC (permalink / raw)
To: Peter Zijlstra
Cc: Jens Axboe, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
Peter Zijlstra wrote:
>> Perhaps we need a new flag for failable async IPIs (or better, API).
>> I'm pretty sure no one uses s_c_f_s() from irq disabled context on x86,
>> since it will WARN_ON(irqs_disabled()).
>>
>
> Yeah, no current users, but that doesn't say I don't want to add new
> ones ;-)
I suggest having a new API for that; I dislike long tails of flag arguments.
smp_call_function_single_async()?
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 139+ messages in thread
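A sketch of the API shape being proposed here, hypothetical and not part of
the posted series:

	/*
	 * Hypothetical: queue func on cpu or fail, never wait. Because it
	 * cannot block or spin, it would be callable with irqs disabled.
	 */
	int smp_call_function_single_async(int cpu, void (*func)(void *info),
					   void *info);

	/* Returns 0 if the IPI was queued, -ENOMEM if allocation failed. */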
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-22 14:38 ` Avi Kivity
2008-04-22 14:38 ` Avi Kivity
[not found] ` <480DF861.6000705-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2008-04-22 14:53 ` Jens Axboe
2008-04-22 14:53 ` Jens Axboe
2 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 14:53 UTC (permalink / raw)
To: Avi Kivity; +Cc: Peter Zijlstra, linux-arch, linux-kernel, npiggin, torvalds
On Tue, Apr 22 2008, Avi Kivity wrote:
> Jens Axboe wrote:
> >>Introducing this fallback will make any usage from irq disabled context
> >>deadlock prone.
> >>
> >>I rather like the current interface.
> >>
> >
> >Hmm good point, I'll back that bit out again.
> >
>
> But then you need to fix all callers to handle errors.
They should already; there were various error conditions before,
depending on the arch. I'm aware that some do not, so I'd consider
fixing them a general cleanup (which must also be done, agreed).
> Perhaps we need a new flag for failable async IPIs (or better, API).
> I'm pretty sure no one uses s_c_f_s() from irq disabled context on x86,
> since it will WARN_ON(irqs_disabled()).
That'd work. I tried to keep things really simple here; I have other
ideas for cleanups and improvements once this batch goes in.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <1208851058-8500-2-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 9:16 ` Avi Kivity
@ 2008-04-22 14:43 ` Linus Torvalds
2008-04-22 14:43 ` Linus Torvalds
[not found] ` <alpine.LFD.1.10.0804220735350.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
2008-04-22 23:12 ` Mark Lord
2 siblings, 2 replies; 139+ messages in thread
From: Linus Torvalds @ 2008-04-22 14:43 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, 22 Apr 2008, Jens Axboe wrote:
> +
> +static inline void csd_flag_wait(struct call_single_data *data)
> +{
> + /* Wait for response */
> + do {
> + /*
> + * We need to see the flags store in the IPI handler
> + */
> + smp_mb();
> + if (!(data->flags & CSD_FLAG_WAIT))
> + break;
> + cpu_relax();
> + } while (1);
> +}
You forgot to free the "data" here? The waiter must also free the object,
since now the callee does not.
Linus
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <alpine.LFD.1.10.0804220735350.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
@ 2008-04-22 14:51 ` Jens Axboe
2008-04-22 14:51 ` Jens Axboe
2008-04-22 15:01 ` Linus Torvalds
2008-04-22 14:58 ` Linus Torvalds
1 sibling, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 14:51 UTC (permalink / raw)
To: Linus Torvalds
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, Apr 22 2008, Linus Torvalds wrote:
>
>
> On Tue, 22 Apr 2008, Jens Axboe wrote:
> > +
> > +static inline void csd_flag_wait(struct call_single_data *data)
> > +{
> > + /* Wait for response */
> > + do {
> > + /*
> > + * We need to see the flags store in the IPI handler
> > + */
> > + smp_mb();
> > + if (!(data->flags & CSD_FLAG_WAIT))
> > + break;
> > + cpu_relax();
> > + } while (1);
> > +}
>
> You forgot to free the "data" here? The waiter must also free the object,
> since now the callee does not.
The ipi interrupt handler does that, see kfree() in
generic_smp_call_function_single_interrupt() or call_func_data_free() in
generic_smp_call_function_interrupt().
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-22 14:51 ` Jens Axboe
2008-04-22 14:51 ` Jens Axboe
@ 2008-04-22 15:01 ` Linus Torvalds
2008-04-22 15:01 ` Linus Torvalds
2008-04-22 16:49 ` Jens Axboe
1 sibling, 2 replies; 139+ messages in thread
From: Linus Torvalds @ 2008-04-22 15:01 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-arch, linux-kernel, npiggin
On Tue, 22 Apr 2008, Jens Axboe wrote:
> >
> > You forgot to free the "data" here? The waiter must also free the object,
> > since now the callee does not.
>
> The ipi interrupt handler does that, see kfree() in
> generic_smp_call_function_single_interrupt() or call_func_data_free() in
> generic_smp_call_function_interrupt().
Hell no, it does *not*.
Doing that for the waiting case would be a *huge* bug, since the waiter
needs to wait until the flag is clear - and if the waitee free's the
allocation, that will never happen.
So the rule *must* be:
- waiter frees
- ipi interrupt frees non-waiting ones.
because anything else cannot work.
And you must have known that, because the code you pointed me to does
*not* free the data at all. It just clears the FLAG_WAIT flag:
+ if (data->csd.flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->csd.flags &= ~CSD_FLAG_WAIT;
+ } else
+ call_func_data_free(data);
So please think about this some more.
Linus
^ permalink raw reply [flat|nested] 139+ messages in thread
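Condensed, the ownership rule Linus is spelling out is exactly what the IPI
handler in the follow-up patch below implements:

	if (data->flags & CSD_FLAG_WAIT) {
		/* Caller owns 'data' (it is on the caller's stack) and is
		 * spinning on this flag, so just clear it; never free. */
		smp_wmb();
		data->flags &= ~CSD_FLAG_WAIT;
	} else {
		/* Async: the caller gave up ownership, the handler frees. */
		kfree(data);
	}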
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-22 15:01 ` Linus Torvalds
2008-04-22 15:01 ` Linus Torvalds
@ 2008-04-22 16:49 ` Jens Axboe
2008-04-22 16:49 ` Jens Axboe
[not found] ` <20080422164947.GN12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
1 sibling, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 16:49 UTC (permalink / raw)
To: Linus Torvalds; +Cc: linux-arch, linux-kernel, npiggin
On Tue, Apr 22 2008, Linus Torvalds wrote:
>
>
> On Tue, 22 Apr 2008, Jens Axboe wrote:
> > >
> > > You forgot to free the "data" here? The waiter must also free the object,
> > > since now the callee does not.
> >
> > The ipi interrupt handler does that, see kfree() in
> > generic_smp_call_function_single_interrupt() or call_func_data_free() in
> > generic_smp_call_function_interrupt().
>
> Hell no, it does *not*.
>
> Doing that for the waiting case would be a *huge* bug, since the waiter
> needs to wait until the flag is clear - and if the waitee free's the
> allocation, that will never happen.
>
> So the rule *must* be:
> - waiter frees
> - ipi interrupt frees non-waiting ones.
> because anything else cannot work.
>
> And you must have known that, because the code you pointed me to does
> *not* free the data at all. It just clears the FLAG_WAIT flag:
>
> + if (data->csd.flags & CSD_FLAG_WAIT) {
> + smp_wmb();
> + data->csd.flags &= ~CSD_FLAG_WAIT;
> + } else
> + call_func_data_free(data);
>
> So please think about this some more.
Yep, sorry, pre-dinner sugar low. So how about this? It implements what
you and I detailed:
- If wait == 1, then we always use on-stack allocation
- If wait == 0 and alloc fails, set wait = 1 and repeat.
- For wait == 1, there's never anything to free.
- For wait == 0, the ipi interrupt handler frees the structure if
  CSD_FLAG_ALLOC is set. This COULD be an else without checking
  CSD_FLAG_ALLOC for smp_call_function_mask(); only
  smp_call_function_single() really needs the check. But I left it there
  in case we allow pre-allocated insert for that path as well in the
  future.
OK?
From 94bbe959d98add7fa23c270afbedea08917c5fd6 Mon Sep 17 00:00:00 2001
From: Jens Axboe <jens.axboe@oracle.com>
Date: Tue, 22 Apr 2008 18:42:25 +0200
Subject: [PATCH] Add generic helpers for arch IPI function calls
This adds kernel/smp.c which contains helpers for IPI function calls. In
addition to supporting the existing smp_call_function() in a more efficient
manner, it also adds a more scalable variant called smp_call_function_single()
for calling a given function on a single CPU only.
The core of this is based on the x86-64 patch from Nick Piggin, lots of
changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
contributed lots of fixes and suggestions as well.
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
arch/Kconfig | 3 +
include/linux/smp.h | 27 ++++-
init/main.c | 3 +
kernel/Makefile | 1 +
kernel/smp.c | 343 +++++++++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 375 insertions(+), 2 deletions(-)
create mode 100644 kernel/smp.c
diff --git a/arch/Kconfig b/arch/Kconfig
index 694c9af..a5a0184 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -36,3 +36,6 @@ config HAVE_KPROBES
config HAVE_KRETPROBES
def_bool n
+
+config USE_GENERIC_SMP_HELPERS
+ def_bool n
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232cc..4a5418b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,19 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
extern void cpu_idle(void);
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ unsigned int flags;
+};
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -53,9 +63,23 @@ extern void smp_cpus_done(unsigned int max_cpus);
* Call a function on all other processors
*/
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int retry, int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi(cpumask_t mask);
+extern spinlock_t call_function_lock;
+#endif
/*
* Call a function on all processors
@@ -112,7 +136,6 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
-
#endif /* !SMP */
/*
diff --git a/init/main.c b/init/main.c
index 833a67d..0b7578c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -773,6 +773,9 @@ static void __init do_pre_smp_initcalls(void)
{
extern int spawn_ksoftirqd(void);
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+ init_call_single_data();
+#endif
migration_init();
spawn_ksoftirqd();
if (!nosoftlockup)
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c5f081..7e275d4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 0000000..7b44820
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,343 @@
+/*
+ * Generic helpers for smp ipi calls
+ *
+ * (C) Jens Axboe <jens.axboe@oracle.com> 2008
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+
+static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
+static LIST_HEAD(call_function_queue);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+enum {
+ CSD_FLAG_WAIT = 0x01,
+ CSD_FLAG_ALLOC = 0x02,
+};
+
+struct call_function_data {
+ struct call_single_data csd;
+ spinlock_t lock;
+ unsigned int refs;
+ cpumask_t cpumask;
+ struct rcu_head rcu_head;
+};
+
+struct call_single_queue {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+void __cpuinit init_call_single_data(void)
+{
+ int i;
+
+ for_each_cpu_mask(i, cpu_possible_map) {
+ struct call_single_queue *q = &per_cpu(call_single_queue, i);
+
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->list);
+ }
+}
+
+static inline void csd_flag_wait(struct call_single_data *data)
+{
+ /* Wait for response */
+ do {
+ /*
+ * We need to see the flags store in the IPI handler
+ */
+ smp_mb();
+ if (!(data->flags & CSD_FLAG_WAIT))
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+/*
+ * Insert a previously allocated call_single_data element for execution
+ * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ */
+static void generic_exec_single(int cpu, struct call_single_data *data)
+{
+ struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
+ int wait = data->flags & CSD_FLAG_WAIT, ipi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dst->lock, flags);
+ ipi = list_empty(&dst->list);
+ list_add_tail(&data->list, &dst->list);
+ spin_unlock_irqrestore(&dst->lock, flags);
+
+ if (ipi)
+ arch_send_call_function_single_ipi(cpu);
+
+ if (wait)
+ csd_flag_wait(data);
+}
+
+static void rcu_free_call_data(struct rcu_head *head)
+{
+ struct call_function_data *cfd;
+
+ cfd = container_of(head, struct call_function_data, rcu_head);
+ kfree(cfd);
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function. Must be called with
+ * interrupts disabled.
+ */
+void generic_smp_call_function_interrupt(void)
+{
+ struct list_head *pos;
+ int cpu = get_cpu();
+
+ /*
+ * It's ok to use list_for_each_rcu() here even though we may delete
+ * 'pos', since list_del_rcu() doesn't clear ->next
+ */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &call_function_queue) {
+ struct call_function_data *data;
+ int refs;
+
+ data = list_entry(pos, struct call_function_data, csd.list);
+ if (!cpu_isset(cpu, data->cpumask))
+ continue;
+
+ data->csd.func(data->csd.info);
+
+ spin_lock(&data->lock);
+ cpu_clear(cpu, data->cpumask);
+ WARN_ON(data->refs == 0);
+ data->refs--;
+ refs = data->refs;
+ spin_unlock(&data->lock);
+
+ if (refs)
+ continue;
+
+ WARN_ON(cpus_weight(data->cpumask));
+ spin_lock(&call_function_lock);
+ list_del_rcu(&data->csd.list);
+ spin_unlock(&call_function_lock);
+
+ if (data->csd.flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->csd.flags &= ~CSD_FLAG_WAIT;
+ } else if (data->csd.flags & CSD_FLAG_ALLOC)
+ call_rcu(&data->rcu_head, rcu_free_call_data);
+ }
+ rcu_read_unlock();
+
+ put_cpu();
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function single. Must be called
+ * from the arch with interrupts disabled.
+ */
+void generic_smp_call_function_single_interrupt(void)
+{
+ struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+ LIST_HEAD(list);
+
+ smp_mb();
+ while (!list_empty(&q->list)) {
+ unsigned int data_flags;
+
+ spin_lock(&q->lock);
+ list_replace_init(&q->list, &list);
+ spin_unlock(&q->lock);
+
+ while (!list_empty(&list)) {
+ struct call_single_data *data;
+
+ data = list_entry(list.next, struct call_single_data,
+ list);
+ list_del(&data->list);
+
+ /*
+ * 'data' can be invalid after this call if
+ * flags == 0 (when called through
+ * generic_exec_single()), so save them away before
+ * making the call.
+ */
+ data_flags = data->flags;
+
+ data->func(data->info);
+
+ if (data_flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->flags &= ~CSD_FLAG_WAIT;
+ } else if (data_flags & CSD_FLAG_ALLOC)
+ kfree(data);
+ }
+ smp_mb();
+ }
+}
+
+/*
+ * smp_call_function_single - Run a function on a specific CPU
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @retry: Unused
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ unsigned long flags;
+ /* prevent preemption and reschedule on another processor */
+ int me = get_cpu();
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ if (cpu == me) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ } else {
+ struct call_single_data *data;
+
+ if (wait) {
+ struct call_single_data d;
+do_wait:
+ data = &d;
+ data->flags = CSD_FLAG_WAIT;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (unlikely(!data)) {
+ wait = 1;
+ goto do_wait;
+ }
+ data->flags = CSD_FLAG_ALLOC;
+ }
+
+ data->func = func;
+ data->info = info;
+ generic_exec_single(cpu, data);
+ }
+
+ put_cpu();
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
+/**
+ * __smp_call_function_single(): Run a function on another CPU
+ * @cpu: The CPU to run on.
+ * @data: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
+ * data structure. Useful for embedding @data inside other structures, for
+ * instance.
+ *
+ */
+void __smp_call_function_single(int cpu, struct call_single_data *data)
+{
+ generic_exec_single(cpu, data);
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+ int wait)
+{
+ struct call_function_data *data;
+ cpumask_t allbutself;
+ unsigned long flags;
+ int num_cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+ cpus_and(mask, mask, allbutself);
+ num_cpus = cpus_weight(mask);
+
+ if (!num_cpus)
+ return 0;
+
+ if (wait) {
+ struct call_function_data d;
+do_wait:
+ data = &d;
+ data->csd.flags = CSD_FLAG_WAIT;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (!unlikely(!data)) {
+ wait = 1;
+ goto do_wait;
+ }
+ data->csd.flags = CSD_FLAG_ALLOC;
+ }
+
+ spin_lock_init(&data->lock);
+ data->csd.func = func;
+ data->csd.info = info;
+ data->refs = num_cpus;
+ data->cpumask = mask;
+
+ spin_lock_irqsave(&call_function_lock, flags);
+ list_add_tail_rcu(&data->csd.list, &call_function_queue);
+ spin_unlock_irqrestore(&call_function_lock, flags);
+
+ /* Send a message to all CPUs in the map */
+ arch_send_call_function_ipi(mask);
+
+ /* optionally wait for the CPUs to complete */
+ if (wait)
+ csd_flag_wait(&data->csd);
+
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @natomic: Unused
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+{
+ int ret;
+
+ preempt_disable();
+ ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function);
--
1.5.5.1.57.g5909c
--
Jens Axboe
^ permalink raw reply related [flat|nested] 139+ messages in thread
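As an aside, the __smp_call_function_single() helper in the patch above
exists for the embedding pattern its comment describes. A minimal sketch,
with struct remote_work and its functions as hypothetical names:

	struct remote_work {
		struct call_single_data csd;	/* embedded, no allocation */
		int payload;
	};

	static void remote_work_fn(void *info)
	{
		struct remote_work *rw = info;
		/* runs on the target CPU; rw->payload is valid here */
	}

	static void queue_remote_work(int cpu, struct remote_work *rw)
	{
		rw->csd.func = remote_work_fn;
		rw->csd.info = rw;
		rw->csd.flags = 0;	/* async; the handler won't kfree() */
		__smp_call_function_single(cpu, &rw->csd);
		/* rw must stay valid until remote_work_fn() has run */
	}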
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <20080422164947.GN12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 17:04 ` Jens Axboe
2008-04-22 17:04 ` Jens Axboe
[not found] ` <20080422170405.GO12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 17:04 UTC (permalink / raw)
To: Linus Torvalds
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, Apr 22 2008, Jens Axboe wrote:
> + data = kmalloc(sizeof(*data), GFP_ATOMIC);
> + if (!unlikely(!data)) {
Woops. I'll test everything and send out the full series; at least the
intention should have been clear.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
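The typo being owned up to is the double negation in
smp_call_function_mask()'s allocation check; judging by the
smp_call_function_single() path in the same patch, the intended test is:

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (unlikely(!data)) {	/* not: if (!unlikely(!data)) */
		wait = 1;
		goto do_wait;
	}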
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <20080422170405.GO12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 17:13 ` Jens Axboe
2008-04-22 17:13 ` Jens Axboe
[not found] ` <20080422171324.GP12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 17:13 UTC (permalink / raw)
To: Linus Torvalds
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, Apr 22 2008, Jens Axboe wrote:
> On Tue, Apr 22 2008, Jens Axboe wrote:
> > + data = kmalloc(sizeof(*data), GFP_ATOMIC);
> > + if (!unlikely(!data)) {
>
> Woops. I'll test everything and send out the full series, at least the
> intention should have been clear.
But wait, falling back to wait cannot always be safe if we are called
with interrupts disabled. I don't see any way around using the global
fallback for that case.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
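A sketch of the deadlock Jens is pointing at, with some_lock, func and info
as hypothetical caller-side names:

	spin_lock_irqsave(&some_lock, flags);		/* irqs now off */

	/*
	 * Async intent, but if kmalloc() fails and we silently fall back
	 * to wait == 1, we spin in csd_flag_wait() with interrupts
	 * disabled. If the target CPU is itself waiting on an IPI to
	 * this CPU, we never service it and neither side makes progress.
	 */
	smp_call_function_single(cpu, func, info, 0, 0);

	spin_unlock_irqrestore(&some_lock, flags);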
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <20080422171324.GP12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 17:29 ` Linus Torvalds
2008-04-22 17:29 ` Linus Torvalds
[not found] ` <alpine.LFD.1.10.0804221027210.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Linus Torvalds @ 2008-04-22 17:29 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, 22 Apr 2008, Jens Axboe wrote:
>
> But wait, falling back to wait cannot always be safe if we are called
> with interrupts disabled. I don't see any way around using the global
> fallback for that case.
Hmm. Good point. It does look like you need three cases.
Linus
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <alpine.LFD.1.10.0804221027210.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
@ 2008-04-22 18:23 ` Jens Axboe
2008-04-22 18:23 ` Jens Axboe
[not found] ` <20080422182337.GQ12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 18:23 UTC (permalink / raw)
To: Linus Torvalds
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, Apr 22 2008, Linus Torvalds wrote:
>
>
> On Tue, 22 Apr 2008, Jens Axboe wrote:
> >
> > But wait, falling back to wait cannot always be safe if we are called
> > with interrupts disabled. I don't see any way around using the global
> > fallback for that case.
>
> Hmm. Good point. It does look like you need three cases.
OK, so how about this then? All three cases for both
smp_call_function_single() and smp_call_function_mask(). Data is always
freed in the ipi interrupt handler, since the 'wait' case is always
allocated on the stack.
From 98cafbfd86760ccd3b0498b83b5b9ca044534b04 Mon Sep 17 00:00:00 2001
From: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
Date: Tue, 22 Apr 2008 20:14:09 +0200
Subject: [PATCH] Add generic helpers for arch IPI function calls
This adds kernel/smp.c which contains helpers for IPI function calls. In
addition to supporting the existing smp_call_function() in a more efficient
manner, it also adds a more scalable variant called smp_call_function_single()
for calling a given function on a single CPU only.
The core of this is based on the x86-64 patch from Nick Piggin, lots of
changes since then. "Alan D. Brunelle" <Alan.Brunelle-VXdhtT5mjnY@public.gmane.org> has
contributed lots of fixes and suggestions as well.
Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/Kconfig | 3 +
include/linux/smp.h | 27 ++++-
init/main.c | 3 +
kernel/Makefile | 1 +
kernel/smp.c | 366 +++++++++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 398 insertions(+), 2 deletions(-)
create mode 100644 kernel/smp.c
diff --git a/arch/Kconfig b/arch/Kconfig
index 694c9af..a5a0184 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -36,3 +36,6 @@ config HAVE_KPROBES
config HAVE_KRETPROBES
def_bool n
+
+config USE_GENERIC_SMP_HELPERS
+ def_bool n
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232cc..4a5418b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,19 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
extern void cpu_idle(void);
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ unsigned int flags;
+};
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -53,9 +63,23 @@ extern void smp_cpus_done(unsigned int max_cpus);
* Call a function on all other processors
*/
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int retry, int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi(cpumask_t mask);
+extern spinlock_t call_function_lock;
+#endif
/*
* Call a function on all processors
@@ -112,7 +136,6 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
-
#endif /* !SMP */
/*
diff --git a/init/main.c b/init/main.c
index 833a67d..0b7578c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -773,6 +773,9 @@ static void __init do_pre_smp_initcalls(void)
{
extern int spawn_ksoftirqd(void);
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+ init_call_single_data();
+#endif
migration_init();
spawn_ksoftirqd();
if (!nosoftlockup)
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c5f081..7e275d4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 0000000..a177a0d
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,366 @@
+/*
+ * Generic helpers for smp ipi calls
+ *
+ * (C) Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> 2008
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+
+static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
+static LIST_HEAD(call_function_queue);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+enum {
+ CSD_FLAG_WAIT = 0x01,
+ CSD_FLAG_ALLOC = 0x02,
+ CSD_FLAG_FALLBACK = 0x04,
+};
+
+struct call_function_data {
+ struct call_single_data csd;
+ spinlock_t lock;
+ unsigned int refs;
+ cpumask_t cpumask;
+ struct rcu_head rcu_head;
+};
+
+struct call_single_queue {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+static struct call_function_data cfd_fallback;
+static unsigned long cfd_fallback_used;
+
+void __cpuinit init_call_single_data(void)
+{
+ int i;
+
+ for_each_cpu_mask(i, cpu_possible_map) {
+ struct call_single_queue *q = &per_cpu(call_single_queue, i);
+
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->list);
+ }
+}
+
+static inline void csd_flag_wait(struct call_single_data *data)
+{
+ /* Wait for response */
+ do {
+ /*
+ * We need to see the flags store in the IPI handler
+ */
+ smp_mb();
+ if (!(data->flags & CSD_FLAG_WAIT))
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+/*
+ * Insert a previously allocated call_single_data element for execution
+ * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ */
+static void generic_exec_single(int cpu, struct call_single_data *data)
+{
+ struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
+ int wait = data->flags & CSD_FLAG_WAIT, ipi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dst->lock, flags);
+ ipi = list_empty(&dst->list);
+ list_add_tail(&data->list, &dst->list);
+ spin_unlock_irqrestore(&dst->lock, flags);
+
+ if (ipi)
+ arch_send_call_function_single_ipi(cpu);
+
+ if (wait)
+ csd_flag_wait(data);
+}
+
+static void rcu_free_call_data(struct rcu_head *head)
+{
+ struct call_function_data *cfd;
+
+ cfd = container_of(head, struct call_function_data, rcu_head);
+ kfree(cfd);
+}
+
+static void call_func_data_free(struct call_function_data *data)
+{
+ if (data->csd.flags & CSD_FLAG_ALLOC)
+ call_rcu(&data->rcu_head, rcu_free_call_data);
+ else
+ clear_bit_unlock(0, &cfd_fallback_used);
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function. Must be called with
+ * interrupts disabled.
+ */
+void generic_smp_call_function_interrupt(void)
+{
+ struct list_head *pos;
+ int cpu = get_cpu();
+
+ /*
+ * It's ok to use list_for_each_rcu() here even though we may delete
+ * 'pos', since list_del_rcu() doesn't clear ->next
+ */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &call_function_queue) {
+ struct call_function_data *data;
+ int refs;
+
+ data = list_entry(pos, struct call_function_data, csd.list);
+ if (!cpu_isset(cpu, data->cpumask))
+ continue;
+
+ data->csd.func(data->csd.info);
+
+ spin_lock(&data->lock);
+ cpu_clear(cpu, data->cpumask);
+ WARN_ON(data->refs == 0);
+ data->refs--;
+ refs = data->refs;
+ spin_unlock(&data->lock);
+
+ if (refs)
+ continue;
+
+ WARN_ON(cpus_weight(data->cpumask));
+ spin_lock(&call_function_lock);
+ list_del_rcu(&data->csd.list);
+ spin_unlock(&call_function_lock);
+
+ if (data->csd.flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->csd.flags &= ~CSD_FLAG_WAIT;
+ } else
+ call_func_data_free(data);
+ }
+ rcu_read_unlock();
+
+ put_cpu();
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function single. Must be called
+ * from the arch with interrupts disabled.
+ */
+void generic_smp_call_function_single_interrupt(void)
+{
+ struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+ LIST_HEAD(list);
+
+ smp_mb();
+ while (!list_empty(&q->list)) {
+ unsigned int data_flags;
+
+ spin_lock(&q->lock);
+ list_replace_init(&q->list, &list);
+ spin_unlock(&q->lock);
+
+ while (!list_empty(&list)) {
+ struct call_single_data *data;
+
+ data = list_entry(list.next, struct call_single_data,
+ list);
+ list_del(&data->list);
+
+ /*
+ * 'data' can be invalid after this call if
+ * flags == 0 (when called through
+ * generic_exec_single()), so save the flags
+ * before making the call.
+ */
+ data_flags = data->flags;
+
+ data->func(data->info);
+
+ if (data_flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->flags &= ~CSD_FLAG_WAIT;
+ } else if (data_flags & CSD_FLAG_ALLOC)
+ kfree(data);
+ else if (data_flags & CSD_FLAG_FALLBACK)
+ clear_bit_unlock(0, &cfd_fallback_used);
+ }
+ smp_mb();
+ }
+}
+
+/**
+ * smp_call_function_single - Run a function on a specific CPU
+ * @cpu: The CPU to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @retry: Unused
+ * @wait: If true, wait until the function has completed on the other CPU.
+ *
+ * Returns 0 on success, else a negative status code.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ unsigned long flags;
+ /* prevent preemption and reschedule on another processor */
+ int me = get_cpu();
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ if (cpu == me) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ } else {
+ struct call_single_data *data;
+
+ if (wait) {
+ struct call_single_data d;
+
+ data = &d;
+ data->flags = CSD_FLAG_WAIT;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (data)
+ data->flags = CSD_FLAG_ALLOC;
+ else {
+ while (test_and_set_bit_lock(0,
+ &cfd_fallback_used))
+ cpu_relax();
+
+ data = &cfd_fallback.csd;
+ data->flags = CSD_FLAG_FALLBACK;
+ }
+ }
+
+ data->func = func;
+ data->info = info;
+ generic_exec_single(cpu, data);
+ }
+
+ put_cpu();
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
+/**
+ * __smp_call_function_single(): Run a function on another CPU
+ * @cpu: The CPU to run on.
+ * @data: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
+ * data structure. Useful for embedding @data inside other structures, for
+ * instance.
+ *
+ */
+void __smp_call_function_single(int cpu, struct call_single_data *data)
+{
+ generic_exec_single(cpu, data);
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+ int wait)
+{
+ struct call_function_data *data;
+ cpumask_t allbutself;
+ unsigned long flags;
+ int num_cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+ cpus_and(mask, mask, allbutself);
+ num_cpus = cpus_weight(mask);
+
+ if (!num_cpus)
+ return 0;
+
+ if (wait) {
+ struct call_function_data d;
+
+ data = &d;
+ data->csd.flags = CSD_FLAG_WAIT;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (data)
+ data->csd.flags = CSD_FLAG_ALLOC;
+ else {
+ while (test_and_set_bit_lock(0, &cfd_fallback_used))
+ cpu_relax();
+
+ data = &cfd_fallback;
+ data->csd.flags = CSD_FLAG_FALLBACK;
+ }
+ }
+
+ spin_lock_init(&data->lock);
+ data->csd.func = func;
+ data->csd.info = info;
+ data->refs = num_cpus;
+ data->cpumask = mask;
+
+ spin_lock_irqsave(&call_function_lock, flags);
+ list_add_tail_rcu(&data->csd.list, &call_function_queue);
+ spin_unlock_irqrestore(&call_function_lock, flags);
+
+ /* Send a message to all CPUs in the map */
+ arch_send_call_function_ipi(mask);
+
+ /* optionally wait for the CPUs to complete */
+ if (wait)
+ csd_flag_wait(&data->csd);
+
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @natomic: Unused
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+{
+ int ret;
+
+ preempt_disable();
+ ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function);
--
1.5.5.1.57.g5909c
--
Jens Axboe
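For illustration, a minimal caller-side sketch of the API this adds
(hypothetical code, not part of the patch; bump_counter, hits and
ipi_demo are invented names):

#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);

/* runs in IPI context on the target CPU: must be fast and non-blocking */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *) info);
}

static void ipi_demo(void)
{
	/* run on CPU 1 only, waiting for completion (retry is unused) */
	smp_call_function_single(1, bump_counter, &hits, 0, 1);

	/* fire-and-forget on all other online CPUs */
	smp_call_function(bump_counter, &hits, 0, 0);
}

Note that smp_call_function_single() runs the function locally (with
interrupts disabled) when the target CPU is the calling CPU, and sends
a single IPI otherwise.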
^ permalink raw reply related [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-22 18:23 ` Jens Axboe
@ 2008-04-22 18:23 ` Jens Axboe
[not found] ` <20080422182337.GQ12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
1 sibling, 0 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 18:23 UTC (permalink / raw)
To: Linus Torvalds; +Cc: linux-arch, linux-kernel, npiggin
On Tue, Apr 22 2008, Linus Torvalds wrote:
>
>
> On Tue, 22 Apr 2008, Jens Axboe wrote:
> >
> > But wait, falling back to wait cannot always be safe if we are called
> > with interrupts disabled. I don't see any way around using the global
> > fallback for that case.
>
> Hmm. Good point. It does look like you need three cases.
OK, so how about this then? It handles all three cases for both
smp_call_function_single() and smp_call_function_mask(). Allocated data
is always freed in the IPI interrupt handler, since the 'wait' case is
always allocated on the stack.
From 98cafbfd86760ccd3b0498b83b5b9ca044534b04 Mon Sep 17 00:00:00 2001
From: Jens Axboe <jens.axboe@oracle.com>
Date: Tue, 22 Apr 2008 20:14:09 +0200
Subject: [PATCH] Add generic helpers for arch IPI function calls
This adds kernel/smp.c which contains helpers for IPI function calls. In
addition to supporting the existing smp_call_function() in a more efficient
manner, it also adds a more scalable variant called smp_call_function_single()
for calling a given function on a single CPU only.
The core of this is based on the x86-64 patch from Nick Piggin, lots of
changes since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com> has
contributed lots of fixes and suggestions as well.
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
arch/Kconfig | 3 +
include/linux/smp.h | 27 ++++-
init/main.c | 3 +
kernel/Makefile | 1 +
kernel/smp.c | 366 +++++++++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 398 insertions(+), 2 deletions(-)
create mode 100644 kernel/smp.c
diff --git a/arch/Kconfig b/arch/Kconfig
index 694c9af..a5a0184 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -36,3 +36,6 @@ config HAVE_KPROBES
config HAVE_KRETPROBES
def_bool n
+
+config USE_GENERIC_SMP_HELPERS
+ def_bool n
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232cc..4a5418b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,19 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
extern void cpu_idle(void);
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ unsigned int flags;
+};
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -53,9 +63,23 @@ extern void smp_cpus_done(unsigned int max_cpus);
* Call a function on all other processors
*/
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int retry, int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi(cpumask_t mask);
+extern spinlock_t call_function_lock;
+#endif
/*
* Call a function on all processors
@@ -112,7 +136,6 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
-
#endif /* !SMP */
/*
diff --git a/init/main.c b/init/main.c
index 833a67d..0b7578c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -773,6 +773,9 @@ static void __init do_pre_smp_initcalls(void)
{
extern int spawn_ksoftirqd(void);
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+ init_call_single_data();
+#endif
migration_init();
spawn_ksoftirqd();
if (!nosoftlockup)
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c5f081..7e275d4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 0000000..a177a0d
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,366 @@
+/*
+ * Generic helpers for smp ipi calls
+ *
+ * (C) Jens Axboe <jens.axboe@oracle.com> 2008
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+
+static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
+static LIST_HEAD(call_function_queue);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+enum {
+ CSD_FLAG_WAIT = 0x01,
+ CSD_FLAG_ALLOC = 0x02,
+ CSD_FLAG_FALLBACK = 0x04,
+};
+
+struct call_function_data {
+ struct call_single_data csd;
+ spinlock_t lock;
+ unsigned int refs;
+ cpumask_t cpumask;
+ struct rcu_head rcu_head;
+};
+
+struct call_single_queue {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+static struct call_function_data cfd_fallback;
+static unsigned long cfd_fallback_used;
+
+void __cpuinit init_call_single_data(void)
+{
+ int i;
+
+ for_each_cpu_mask(i, cpu_possible_map) {
+ struct call_single_queue *q = &per_cpu(call_single_queue, i);
+
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->list);
+ }
+}
+
+static inline void csd_flag_wait(struct call_single_data *data)
+{
+ /* Wait for response */
+ do {
+ /*
+ * We need to see the flags store in the IPI handler
+ */
+ smp_mb();
+ if (!(data->flags & CSD_FLAG_WAIT))
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+/*
+ * Insert a previously allocated call_single_data element for execution
+ * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ */
+static void generic_exec_single(int cpu, struct call_single_data *data)
+{
+ struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
+ int wait = data->flags & CSD_FLAG_WAIT, ipi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dst->lock, flags);
+ ipi = list_empty(&dst->list);
+ list_add_tail(&data->list, &dst->list);
+ spin_unlock_irqrestore(&dst->lock, flags);
+
+ if (ipi)
+ arch_send_call_function_single_ipi(cpu);
+
+ if (wait)
+ csd_flag_wait(data);
+}
+
+static void rcu_free_call_data(struct rcu_head *head)
+{
+ struct call_function_data *cfd;
+
+ cfd = container_of(head, struct call_function_data, rcu_head);
+ kfree(cfd);
+}
+
+static void call_func_data_free(struct call_function_data *data)
+{
+ if (data->csd.flags & CSD_FLAG_ALLOC)
+ call_rcu(&data->rcu_head, rcu_free_call_data);
+ else
+ clear_bit_unlock(0, &cfd_fallback_used);
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function. Must be called with
+ * interrupts disabled.
+ */
+void generic_smp_call_function_interrupt(void)
+{
+ struct list_head *pos;
+ int cpu = get_cpu();
+
+ /*
+ * It's ok to use list_for_each_rcu() here even though we may delete
+ * 'pos', since list_del_rcu() doesn't clear ->next
+ */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &call_function_queue) {
+ struct call_function_data *data;
+ int refs;
+
+ data = list_entry(pos, struct call_function_data, csd.list);
+ if (!cpu_isset(cpu, data->cpumask))
+ continue;
+
+ data->csd.func(data->csd.info);
+
+ spin_lock(&data->lock);
+ cpu_clear(cpu, data->cpumask);
+ WARN_ON(data->refs == 0);
+ data->refs--;
+ refs = data->refs;
+ spin_unlock(&data->lock);
+
+ if (refs)
+ continue;
+
+ WARN_ON(cpus_weight(data->cpumask));
+ spin_lock(&call_function_lock);
+ list_del_rcu(&data->csd.list);
+ spin_unlock(&call_function_lock);
+
+ if (data->csd.flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->csd.flags &= ~CSD_FLAG_WAIT;
+ } else
+ call_func_data_free(data);
+ }
+ rcu_read_unlock();
+
+ put_cpu();
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function single. Must be called
+ * from the arch with interrupts disabled.
+ */
+void generic_smp_call_function_single_interrupt(void)
+{
+ struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+ LIST_HEAD(list);
+
+ smp_mb();
+ while (!list_empty(&q->list)) {
+ unsigned int data_flags;
+
+ spin_lock(&q->lock);
+ list_replace_init(&q->list, &list);
+ spin_unlock(&q->lock);
+
+ while (!list_empty(&list)) {
+ struct call_single_data *data;
+
+ data = list_entry(list.next, struct call_single_data,
+ list);
+ list_del(&data->list);
+
+ /*
+ * 'data' can be invalid after this call if
+ * flags == 0 (when called through
+ * generic_exec_single()), so save the flags
+ * before making the call.
+ */
+ data_flags = data->flags;
+
+ data->func(data->info);
+
+ if (data_flags & CSD_FLAG_WAIT) {
+ smp_wmb();
+ data->flags &= ~CSD_FLAG_WAIT;
+ } else if (data_flags & CSD_FLAG_ALLOC)
+ kfree(data);
+ else if (data_flags & CSD_FLAG_FALLBACK)
+ clear_bit_unlock(0, &cfd_fallback_used);
+ }
+ smp_mb();
+ }
+}
+
+/**
+ * smp_call_function_single - Run a function on a specific CPU
+ * @cpu: The CPU to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @retry: Unused
+ * @wait: If true, wait until the function has completed on the other CPU.
+ *
+ * Returns 0 on success, else a negative status code.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ unsigned long flags;
+ /* prevent preemption and reschedule on another processor */
+ int me = get_cpu();
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ if (cpu == me) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ } else {
+ struct call_single_data *data;
+
+ if (wait) {
+ struct call_single_data d;
+
+ data = &d;
+ data->flags = CSD_FLAG_WAIT;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (data)
+ data->flags = CSD_FLAG_ALLOC;
+ else {
+ while (test_and_set_bit_lock(0,
+ &cfd_fallback_used))
+ cpu_relax();
+
+ data = &cfd_fallback.csd;
+ data->flags = CSD_FLAG_FALLBACK;
+ }
+ }
+
+ data->func = func;
+ data->info = info;
+ generic_exec_single(cpu, data);
+ }
+
+ put_cpu();
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
+/**
+ * __smp_call_function_single(): Run a function on another CPU
+ * @cpu: The CPU to run on.
+ * @data: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
+ * data structure. Useful for embedding @data inside other structures, for
+ * instance.
+ *
+ */
+void __smp_call_function_single(int cpu, struct call_single_data *data)
+{
+ generic_exec_single(cpu, data);
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+ int wait)
+{
+ struct call_function_data *data;
+ cpumask_t allbutself;
+ unsigned long flags;
+ int num_cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(wait && irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+ cpus_and(mask, mask, allbutself);
+ num_cpus = cpus_weight(mask);
+
+ if (!num_cpus)
+ return 0;
+
+ if (wait) {
+ struct call_function_data d;
+
+ data = &d;
+ data->csd.flags = CSD_FLAG_WAIT;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (data)
+ data->csd.flags = CSD_FLAG_ALLOC;
+ else {
+ while (test_and_set_bit_lock(0, &cfd_fallback_used))
+ cpu_relax();
+
+ data = &cfd_fallback;
+ data->csd.flags = CSD_FLAG_FALLBACK;
+ }
+ }
+
+ spin_lock_init(&data->lock);
+ data->csd.func = func;
+ data->csd.info = info;
+ data->refs = num_cpus;
+ data->cpumask = mask;
+
+ spin_lock_irqsave(&call_function_lock, flags);
+ list_add_tail_rcu(&data->csd.list, &call_function_queue);
+ spin_unlock_irqrestore(&call_function_lock, flags);
+
+ /* Send a message to all CPUs in the map */
+ arch_send_call_function_ipi(mask);
+
+ /* optionally wait for the CPUs to complete */
+ if (wait)
+ csd_flag_wait(&data->csd);
+
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @natomic: Unused
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+{
+ int ret;
+
+ preempt_disable();
+ ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function);
--
1.5.5.1.57.g5909c
--
Jens Axboe
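As an aside, a sketch of the embedded-data use case that
__smp_call_function_single() is meant for (assumed example; struct
poke_request and the poke_* names are invented):

struct poke_request {
	struct call_single_data csd;
	int payload;
};

/* runs in IPI context on the target CPU */
static void poke_handler(void *info)
{
	struct poke_request *req = info;

	/* consume req->payload here */
}

static void poke_cpu(int cpu, struct poke_request *req)
{
	req->csd.func = poke_handler;
	req->csd.info = req;
	req->csd.flags = 0;	/* caller owns the memory, nothing is freed */
	__smp_call_function_single(cpu, &req->csd);
}

With flags == 0 the IPI handler neither waits on nor frees the request,
so the caller must keep req alive until poke_handler() has run.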
^ permalink raw reply related [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <alpine.LFD.1.10.0804220735350.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
2008-04-22 14:51 ` Jens Axboe
@ 2008-04-22 14:58 ` Linus Torvalds
2008-04-22 14:58 ` Linus Torvalds
[not found] ` <alpine.LFD.1.10.0804220749450.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
1 sibling, 2 replies; 139+ messages in thread
From: Linus Torvalds @ 2008-04-22 14:58 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, 22 Apr 2008, Linus Torvalds wrote:
>
> You forgot to free the "data" here? The waiter must also free the object,
> since now the callee does not.
Ahh. For the single case, the caller will have it on the stack.
But the smp_call_function_mask() case seems to leak these things.
How about just always doing the "wait" case on the stack? Also, I'd
suggest you get rid of the static allocation, and just turn it into a wait
case, so that you don't need *three* different allocation cases, just two.
Or am I missing something?
Linus
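A minimal sketch of the two-case scheme suggested above, as it would
read inside smp_call_function_single() using the CSD_FLAG_* values from
the patch (an illustration of the idea, not code posted in this
thread):

	struct call_single_data d, *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (data)
		data->flags = CSD_FLAG_ALLOC;
	else {
		/* allocation failed: degrade to the on-stack wait case */
		data = &d;
		data->flags = CSD_FLAG_WAIT;
		wait = 1;
	}

The cost is that a caller that asked not to wait may be forced to spin
until the target CPU has run the function, which is what the concern
about interrupts-disabled callers elsewhere in the thread is about.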
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <alpine.LFD.1.10.0804220749450.2779-5CScLwifNT1QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
@ 2008-04-22 15:07 ` Jens Axboe
2008-04-22 15:07 ` Jens Axboe
0 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 15:07 UTC (permalink / raw)
To: Linus Torvalds
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM
On Tue, Apr 22 2008, Linus Torvalds wrote:
>
>
> On Tue, 22 Apr 2008, Linus Torvalds wrote:
> >
> > You forgot to free the "data" here? The waiter must also free the object,
> > since now the callee does not.
>
> Ahh. For the single case, the caller will have it on the stack.
Yes, if wait is set, it's on the stack.
> But the smp_call_function_mask() case seems to leak these things.
Hmm yes, double-checking it, it does seem to leak for wait &&
kmalloc'ed data. I think I originally had the wait case always on the
stack, which would explain this leak.
> How about just always doing the "wait" case on the stack? Also, I'd
> suggest you get rid of the static allocation, and just turn it into a wait
> case, so that you don't need *three* different allocation cases, just two.
Alright, I'll make 'wait' always alloc on the stack and I'll make
allocation failure turn into 'wait' as well. That'll leave just
kmalloc() and stack allocation, killing the static data fallback.
> Or am I missing something?
No, I think that should work ok. I'll post a new series with the changes
so we can see if we agree.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <1208851058-8500-2-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 9:16 ` Avi Kivity
2008-04-22 14:43 ` Linus Torvalds
@ 2008-04-22 23:12 ` Mark Lord
2008-04-22 23:12 ` Mark Lord
[not found] ` <480E70ED.3030701-gsilrlXbHYg@public.gmane.org>
2 siblings, 2 replies; 139+ messages in thread
From: Mark Lord @ 2008-04-22 23:12 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
Jens,
While you're in there, :)
Could you perhaps fix this bug (below) if it still exists?
> Date: Thu, 15 Nov 2007 12:07:48 -0500
> From: Mark Lord <lkml-gsilrlXbHYg@public.gmane.org>
> To: Greg KH <gregkh-l3A5Bk7waGM@public.gmane.org>
> Cc: Yasunori Goto <y-goto-+CUm20s59erQFUHtdCDX3A@public.gmane.org>,
> Andrew Morton <akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>,
> Alexey Dobriyan <adobriyan-3ImXcnM4P+0@public.gmane.org>, linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> Subject: Re: EIP is at device_shutdown+0x32/0x60
> Content-Type: text/plain; charset=ISO-8859-1; format=flowed
> Content-Transfer-Encoding: 7bit
> Sender: linux-kernel-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
>
> ... < snip > ...
>
> Greg, I don't know if this is relevant or not,
> but x86 has bugs in the halt/reboot code for SMP.
>
> Specifically, in native_smp_send_stop() the code now uses
> spin_trylock() to "lock" the shared call buffers,
> but then ignores the result.
>
> This means that multiple CPUs can/will clobber each other
> in that code.
>
> The second bug, is that this code does not wait for the
> target CPUs to actually stop before it continues.
>
> This was the real cause of the failure-to-poweroff problems
> I was having with 2.6.23, which we fixed by using CPU hotplug
> to disable_nonboot_cpus() before the above code ever got run.
>
> Maybe it's related, maybe not.
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <480E70ED.3030701-gsilrlXbHYg@public.gmane.org>
@ 2008-04-23 7:24 ` Jens Axboe
2008-04-23 7:24 ` Jens Axboe
[not found] ` <20080423072432.GX12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-23 7:24 UTC (permalink / raw)
To: Mark Lord
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
On Tue, Apr 22 2008, Mark Lord wrote:
> Jens,
>
> While you're in there, :)
>
> Could you perhaps fix this bug (below) if it still exists?
I don't understand the bug - what are the shared call buffers you are
talking of?
With the changes, there's not even a spin_trylock() in there anymore.
But I don't see the original bug either, so...
>
> >Date: Thu, 15 Nov 2007 12:07:48 -0500
> >From: Mark Lord <lkml-gsilrlXbHYg@public.gmane.org>
> >To: Greg KH <gregkh-l3A5Bk7waGM@public.gmane.org>
> >Cc: Yasunori Goto <y-goto-+CUm20s59erQFUHtdCDX3A@public.gmane.org>,
> > Andrew Morton <akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>,
> > Alexey Dobriyan <adobriyan-3ImXcnM4P+0@public.gmane.org>, linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> >Subject: Re: EIP is at device_shutdown+0x32/0x60
> >Content-Type: text/plain; charset=ISO-8859-1; format=flowed
> >Content-Transfer-Encoding: 7bit
> >Sender: linux-kernel-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> >
> >... < snip > ...
> >
> >Greg, I don't know if this is relevant or not,
> >but x86 has bugs in the halt/reboot code for SMP.
> >
> >Specifically, in native_smp_send_stop() the code now uses
> >spin_trylock() to "lock" the shared call buffers,
> >but then ignores the result.
> >
> >This means that multiple CPUs can/will clobber each other
> >in that code.
> >
> >The second bug, is that this code does not wait for the
> >target CPUs to actually stop before it continues.
> >
> >This was the real cause of the failure-to-poweroff problems
> >I was having with 2.6.23, which we fixed by using CPU hotplug
> >to disable_nonboot_cpus() before the above code ever got run.
> >
> >Maybe it's related, maybe not.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <20080423072432.GX12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-23 13:42 ` Mark Lord
2008-04-23 13:42 ` Mark Lord
[not found] ` <480F3CBC.60305-gsilrlXbHYg@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Mark Lord @ 2008-04-23 13:42 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
>>> Date: Thu, 15 Nov 2007 12:07:48 -0500
>>> From: Mark Lord <lkml-gsilrlXbHYg@public.gmane.org>
>>> To: Greg KH <gregkh-l3A5Bk7waGM@public.gmane.org>
>>> Cc: Yasunori Goto <y-goto-+CUm20s59erQFUHtdCDX3A@public.gmane.org>,
>>> Andrew Morton <akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>,
>>> Alexey Dobriyan <adobriyan-3ImXcnM4P+0@public.gmane.org>, linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
>>> Subject: Re: EIP is at device_shutdown+0x32/0x60
>>> Content-Type: text/plain; charset=ISO-8859-1; format=flowed
>>> Content-Transfer-Encoding: 7bit
>>> Sender: linux-kernel-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
>>>
>>> ... < snip > ...
>>>
>>> Greg, I don't know if this is relevant or not,
>>> but x86 has bugs in the halt/reboot code for SMP.
>>>
>>> Specifically, in native_smp_send_stop() the code now uses
>>> spin_trylock() to "lock" the shared call buffers,
>>> but then ignores the result.
>>>
>>> This means that multiple CPUs can/will clobber each other
>>> in that code.
>>>
>>> The second bug, is that this code does not wait for the
>>> target CPUs to actually stop before it continues.
>>>
>>> This was the real cause of the failure-to-poweroff problems
>>> I was having with 2.6.23, which we fixed by using CPU hotplug
>>> to disable_nonboot_cpus() before the above code ever got run.
Jens Axboe wrote:
> On Tue, Apr 22 2008, Mark Lord wrote:
>> Jens,
>>
>> While you're in there, :)
>>
>> Could you perhaps fix this bug (above) if it still exists?
>
> I don't understand the bug - what are the shared call buffers you are
> talking of?
>
> With the changes, there's not even a spin_trylock() in there anymore.
> But I don't see the original bug either, so...
..
arch/x86/kernel/smp.c:
static void native_smp_send_stop(void)
{
int nolock;
unsigned long flags;
if (reboot_force)
return;
/* Don't deadlock on the call lock in panic */
nolock = !spin_trylock(&call_lock); <<<<<<<<<< buggy
local_irq_save(flags);
__smp_call_function(stop_this_cpu, NULL, 0, 0);
if (!nolock)
spin_unlock(&call_lock);
disable_local_APIC();
local_irq_restore(flags);
}
The spinlock is trying to protect access to the global variable
"call_data" (higher up in the same file), which is used
in __smp_call_function() and friends.
But since the spinlock is ignored in this case,
the global "call_data" will get clobbered if it was already in use.
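For readers without the file at hand, the mechanism being described is
roughly this (a simplified, from-memory sketch of the 2.6.24-era x86
code, not a verbatim quote; publish_call stands in for the relevant
part of __smp_call_function()):

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;	/* single global slot */

static void publish_call(struct call_data_struct *data)
{
	/*
	 * If call_lock is not really held, this store can overwrite a
	 * request that another CPU is still processing.
	 */
	call_data = data;
	mb();
	/* the IPI is sent here; handlers then read the global call_data */
}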
The second bug, is that for the halt case at least,
nobody waits for the other CPU to actually halt
before continuing.. so we sometimes enter the shutdown
code while other CPUs are still active.
This causes some machines to hang at shutdown,
unless CPU_HOTPLUG is configured and takes them offline
before we get here.
Cheers
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <480F3CBC.60305-gsilrlXbHYg@public.gmane.org>
@ 2008-04-23 13:51 ` Jens Axboe
2008-04-23 13:51 ` Jens Axboe
2008-04-23 14:46 ` Mark Lord
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-23 13:51 UTC (permalink / raw)
To: Mark Lord
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
On Wed, Apr 23 2008, Mark Lord wrote:
> >>>Date: Thu, 15 Nov 2007 12:07:48 -0500
> >>>From: Mark Lord <lkml-gsilrlXbHYg@public.gmane.org>
> >>>To: Greg KH <gregkh-l3A5Bk7waGM@public.gmane.org>
> >>>Cc: Yasunori Goto <y-goto-+CUm20s59erQFUHtdCDX3A@public.gmane.org>,
> >>> Andrew Morton <akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>,
> >>> Alexey Dobriyan <adobriyan-3ImXcnM4P+0@public.gmane.org>, linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> >>>Subject: Re: EIP is at device_shutdown+0x32/0x60
> >>>Content-Type: text/plain; charset=ISO-8859-1; format=flowed
> >>>Content-Transfer-Encoding: 7bit
> >>>Sender: linux-kernel-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> >>>
> >>>... < snip > ...
> >>>
> >>>Greg, I don't know if this is relevant or not,
> >>>but x86 has bugs in the halt/reboot code for SMP.
> >>>
> >>>Specifically, in native_smp_send_stop() the code now uses
> >>>spin_trylock() to "lock" the shared call buffers,
> >>>but then ignores the result.
> >>>
> >>>This means that multiple CPUs can/will clobber each other
> >>>in that code.
> >>>
> >>>The second bug, is that this code does not wait for the
> >>>target CPUs to actually stop before it continues.
> >>>
> >>>This was the real cause of the failure-to-poweroff problems
> >>>I was having with 2.6.23, which we fixed by using CPU hotplug
> >>>to disable_nonboot_cpus() before the above code ever got run.
>
> Jens Axboe wrote:
> >On Tue, Apr 22 2008, Mark Lord wrote:
> >>Jens,
> >>
> >>While you're in there, :)
> >>
> >>Could you perhaps fix this bug (above) if it still exists?
> >
> >I don't understand the bug - what are the shared call buffers you are
> >talking of?
> >
> >With the changes, there's not even a spin_trylock() in there anymore.
> >But I don't see the original bug either, so...
> ..
>
> arch/x86/kernel/smp.c:
>
> static void native_smp_send_stop(void)
> {
> int nolock;
> unsigned long flags;
>
> if (reboot_force)
> return;
>
> /* Don't deadlock on the call lock in panic */
> nolock = !spin_trylock(&call_lock); <<<<<<<<<< buggy
> local_irq_save(flags);
> __smp_call_function(stop_this_cpu, NULL, 0, 0);
> if (!nolock)
> spin_unlock(&call_lock);
> disable_local_APIC();
> local_irq_restore(flags);
> }
>
> The spinlock is trying to protect access to the global variable
> "call_data" (higher up in the same file), which is used
> in __smp_call_function() and friends.
>
> But since the spinlock is ignored in this case,
> the global "call_data" will get clobbered if it was already in use.
Ah I see, that bug doesn't exist with the converted code.
> The second bug, is that for the halt case at least,
> nobody waits for the other CPU to actually halt
> before continuing.. so we sometimes enter the shutdown
> code while other CPUs are still active.
>
> This causes some machines to hang at shutdown,
> unless CPU_HOTPLUG is configured and takes them offline
> before we get here.
I'm guessing there's a reason it doesn't pass '1' as the last argument,
because that would fix that issue?
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-23 13:51 ` Jens Axboe
2008-04-23 13:51 ` Jens Axboe
@ 2008-04-23 14:46 ` Mark Lord
2008-04-23 14:46 ` Mark Lord
[not found] ` <480F4BD9.8090003-gsilrlXbHYg@public.gmane.org>
1 sibling, 2 replies; 139+ messages in thread
From: Mark Lord @ 2008-04-23 14:46 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-arch, linux-kernel, npiggin, torvalds
Jens Axboe wrote:
> On Wed, Apr 23 2008, Mark Lord wrote:
>..
>> The second bug, is that for the halt case at least,
>> nobody waits for the other CPU to actually halt
>> before continuing.. so we sometimes enter the shutdown
>> code while other CPUs are still active.
>>
>> This causes some machines to hang at shutdown,
>> unless CPU_HOTPLUG is configured and takes them offline
>> before we get here.
>
> I'm guessing there's a reason it doesn't pass '1' as the last argument,
> because that would fix that issue?
..
Undoubtedly -- perhaps the called CPU halts, and therefore cannot reply. :)
But some kind of pre-halt ack, perhaps plus a short delay by the caller
after receipt of the ack, would probably suffice to kill that bug.
But I really haven't studied this code enough to know,
other than that it historically has been a sticky area
to poke around in.
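A sketch of that ack idea, using the 2008-era cpumask API (hypothetical;
stop_ack and wait_for_acks are invented names, and this is not the fix
that was eventually settled on):

static cpumask_t stop_ack = CPU_MASK_NONE;

static void stop_this_cpu(void *dummy)
{
	disable_local_APIC();
	cpu_set(smp_processor_id(), stop_ack);	/* pre-halt ack */
	for (;;)
		halt();
}

/* caller side, after firing the stop IPI at 'targets' */
static void wait_for_acks(cpumask_t targets)
{
	while (!cpus_subset(targets, stop_ack))
		cpu_relax();
	udelay(100);	/* short grace period after the last ack */
}

The patch further down the thread achieves much the same thing by
clearing each CPU out of cpu_online_map just before halting and having
the caller spin on cpus_weight(cpu_online_map).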
Cheers
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <480F4BD9.8090003-gsilrlXbHYg@public.gmane.org>
@ 2008-04-24 10:59 ` Jens Axboe
2008-04-24 10:59 ` Jens Axboe
[not found] ` <20080424105908.GW12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-24 10:59 UTC (permalink / raw)
To: Mark Lord
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
On Wed, Apr 23 2008, Mark Lord wrote:
> Jens Axboe wrote:
> >On Wed, Apr 23 2008, Mark Lord wrote:
> >..
> >>The second bug, is that for the halt case at least,
> >>nobody waits for the other CPU to actually halt
> >>before continuing.. so we sometimes enter the shutdown
> >>code while other CPUs are still active.
> >>
> >>This causes some machines to hang at shutdown,
> >>unless CPU_HOTPLUG is configured and takes them offline
> >>before we get here.
> >
> >I'm guessing there's a reason it doesn't pass '1' as the last argument,
> >because that would fix that issue?
> ..
>
> Undoubtedly -- perhaps the called CPU halts, and therefore cannot reply. :)
Uhm yes, I guess stop_this_cpu() does exactly what the name implies :-)
> But some kind of pre-halt ack, perhaps plus a short delay by the caller
> after receipt of the ack, would probably suffice to kill that bug.
>
> But I really haven't studied this code enough to know,
> other than that it historically has been a sticky area
> to poke around in.
Something like this will close the window right up until the point
where the other CPUs have 'almost' called halt().
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 5398385..94ec9bf 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -155,8 +155,9 @@ static void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
- cpu_clear(smp_processor_id(), cpu_online_map);
disable_local_APIC();
+ cpu_clear(smp_processor_id(), cpu_online_map);
+ smp_wmb();
if (hlt_works(smp_processor_id()))
for (;;) halt();
for (;;);
@@ -175,6 +176,12 @@ static void native_smp_send_stop(void)
local_irq_save(flags);
smp_call_function(stop_this_cpu, NULL, 0, 0);
+
+ while (cpus_weight(cpu_online_map) > 1) {
+ cpu_relax();
+ smp_rmb();
+ }
+
disable_local_APIC();
local_irq_restore(flags);
}
--
Jens Axboe
^ permalink raw reply related [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <20080424105908.GW12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-24 12:44 ` Mark Lord
2008-04-24 12:44 ` Mark Lord
[not found] ` <481080A0.9050804-gsilrlXbHYg@public.gmane.org>
2008-04-26 8:04 ` Pavel Machek
1 sibling, 2 replies; 139+ messages in thread
From: Mark Lord @ 2008-04-24 12:44 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, pavel-+ZI9xUNit7I,
Rafael J. Wysocki
Jens Axboe wrote:
> On Wed, Apr 23 2008, Mark Lord wrote:
>> Jens Axboe wrote:
>>> On Wed, Apr 23 2008, Mark Lord wrote:
>>> ..
>>>> The second bug, is that for the halt case at least,
>>>> nobody waits for the other CPU to actually halt
>>>> before continuing.. so we sometimes enter the shutdown
>>>> code while other CPUs are still active.
>>>>
>>>> This causes some machines to hang at shutdown,
>>>> unless CPU_HOTPLUG is configured and takes them offline
>>>> before we get here.
>>> I'm guessing there's a reason it doesn't pass '1' as the last argument,
>>> because that would fix that issue?
>> ..
>>
>> Undoubtedly -- perhaps the called CPU halts, and therefore cannot reply. :)
>
> Uhm yes, I guess stop_this_cpu() does exactly what the name implies :-)
>
>> But some kind of pre-halt ack, perhaps plus a short delay by the caller
>> after receipt of the ack, would probably suffice to kill that bug.
>>
>> But I really haven't studied this code enough to know,
>> other than that it historically has been a sticky area
>> to poke around in.
>
> Something like this will close the window right up until the point
> where the other CPUs have 'almost' called halt().
>
> diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
> index 5398385..94ec9bf 100644
> --- a/arch/x86/kernel/smp.c
> +++ b/arch/x86/kernel/smp.c
> @@ -155,8 +155,9 @@ static void stop_this_cpu(void *dummy)
> /*
> * Remove this CPU:
> */
> - cpu_clear(smp_processor_id(), cpu_online_map);
> disable_local_APIC();
> + cpu_clear(smp_processor_id(), cpu_online_map);
> + smp_wmb();
> if (hlt_works(smp_processor_id()))
> for (;;) halt();
> for (;;);
> @@ -175,6 +176,12 @@ static void native_smp_send_stop(void)
>
> local_irq_save(flags);
> smp_call_function(stop_this_cpu, NULL, 0, 0);
> +
> + while (cpus_weight(cpu_online_map) > 1) {
> + cpu_relax();
> + smp_rmb();
> + }
> +
> disable_local_APIC();
> local_irq_restore(flags);
> }
..
Yup, that looks like it oughta work consistently.
Now we just need to hear from some of the folks who
have danced around this code in the past.
(added Pavel & Rafael to Cc:).
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <481080A0.9050804-gsilrlXbHYg@public.gmane.org>
@ 2008-04-24 21:30 ` Rafael J. Wysocki
2008-04-24 21:30 ` Rafael J. Wysocki
2008-04-25 11:08 ` Pavel Machek
1 sibling, 1 reply; 139+ messages in thread
From: Rafael J. Wysocki @ 2008-04-24 21:30 UTC (permalink / raw)
To: Mark Lord
Cc: Jens Axboe, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, pavel-+ZI9xUNit7I
On Thursday, 24 of April 2008, Mark Lord wrote:
> Jens Axboe wrote:
> > On Wed, Apr 23 2008, Mark Lord wrote:
> >> Jens Axboe wrote:
> >>> On Wed, Apr 23 2008, Mark Lord wrote:
> >>> ..
> >>>> The second bug is that, for the halt case at least,
> >>>> nobody waits for the other CPU to actually halt
> >>>> before continuing.. so we sometimes enter the shutdown
> >>>> code while other CPUs are still active.
> >>>>
> >>>> This causes some machines to hang at shutdown,
> >>>> unless CPU_HOTPLUG is configured and takes them offline
> >>>> before we get here.
> >>> I'm guessing there's a reason it doesn't pass '1' as the last argument,
> >>> because that would fix that issue?
> >> ..
> >>
> >> Undoubtedly -- perhaps the called CPU halts, and therefore cannot reply. :)
> >
> > Uhm yes, I guess stop_this_cpu() does exactly what the name implies :-)
> >
> >> But some kind of pre-halt ack, perhaps plus a short delay by the caller
> >> after receipt of the ack, would probably suffice to kill that bug.
> >>
> >> But I really haven't studied this code enough to know,
> >> other than that it historically has been a sticky area
> >> to poke around in.
> >
> > Something like this will close the window right up until the point
> > where the other CPUs have 'almost' called halt().
> >
> > diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
> > index 5398385..94ec9bf 100644
> > --- a/arch/x86/kernel/smp.c
> > +++ b/arch/x86/kernel/smp.c
> > @@ -155,8 +155,9 @@ static void stop_this_cpu(void *dummy)
> > /*
> > * Remove this CPU:
> > */
> > - cpu_clear(smp_processor_id(), cpu_online_map);
> > disable_local_APIC();
> > + cpu_clear(smp_processor_id(), cpu_online_map);
> > + smp_wmb();
> > if (hlt_works(smp_processor_id()))
> > for (;;) halt();
> > for (;;);
> > @@ -175,6 +176,12 @@ static void native_smp_send_stop(void)
> >
> > local_irq_save(flags);
> > smp_call_function(stop_this_cpu, NULL, 0, 0);
> > +
> > + while (cpus_weight(cpu_online_map) > 1) {
> > + cpu_relax();
> > + smp_rmb();
> > + }
> > +
> > disable_local_APIC();
> > local_irq_restore(flags);
> > }
> ..
>
> Yup, that looks like it oughta work consistently.
> Now we just need to hear from some of the folks who
> have danced around this code in the past.
>
> (added Pavel & Rafael to Cc:).
Well, it looks sane to me, but I'm not really an expert here.
Thanks,
Rafael
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <481080A0.9050804-gsilrlXbHYg@public.gmane.org>
2008-04-24 21:30 ` Rafael J. Wysocki
@ 2008-04-25 11:08 ` Pavel Machek
2008-04-25 11:08 ` Pavel Machek
1 sibling, 1 reply; 139+ messages in thread
From: Pavel Machek @ 2008-04-25 11:08 UTC (permalink / raw)
To: Mark Lord
Cc: Jens Axboe, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Rafael J. Wysocki
Hi!
>> Something like this will close the window right up until the point
>> where the other CPUs have 'almost' called halt().
>>
>> diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
>> index 5398385..94ec9bf 100644
>> --- a/arch/x86/kernel/smp.c
>> +++ b/arch/x86/kernel/smp.c
>> @@ -155,8 +155,9 @@ static void stop_this_cpu(void *dummy)
>> /*
>> * Remove this CPU:
>> */
>> - cpu_clear(smp_processor_id(), cpu_online_map);
>> disable_local_APIC();
>> + cpu_clear(smp_processor_id(), cpu_online_map);
>> + smp_wmb();
>> if (hlt_works(smp_processor_id()))
>> for (;;) halt();
>> for (;;);
>> @@ -175,6 +176,12 @@ static void native_smp_send_stop(void)
>> local_irq_save(flags);
>> smp_call_function(stop_this_cpu, NULL, 0, 0);
>> +
>> + while (cpus_weight(cpu_online_map) > 1) {
>> + cpu_relax();
>> + smp_rmb();
>> + }
>> +
>> disable_local_APIC();
>> local_irq_restore(flags);
>> }
> ..
>
> Yup, that looks like it oughta work consistently.
> Now we just need to hear from some of the folks who
> have danced around this code in the past.
>
> (added Pavel & Rafael to Cc:).
I can't see anything wrong with this one, but I'm not an smp expert...
Pavel
--
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
[not found] ` <20080424105908.GW12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2008-04-24 12:44 ` Mark Lord
@ 2008-04-26 8:04 ` Pavel Machek
2008-04-26 8:04 ` Pavel Machek
2008-04-28 15:13 ` Mark Lord
1 sibling, 2 replies; 139+ messages in thread
From: Pavel Machek @ 2008-04-26 8:04 UTC (permalink / raw)
To: Jens Axboe
Cc: Mark Lord, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
Hi!
> > >>The second bug is that, for the halt case at least,
> > >>nobody waits for the other CPU to actually halt
> > >>before continuing.. so we sometimes enter the shutdown
> > >>code while other CPUs are still active.
> > >>
> > >>This causes some machines to hang at shutdown,
> > >>unless CPU_HOTPLUG is configured and takes them offline
> > >>before we get here.
> > >
> > >I'm guessing there's a reason it doesn't pass '1' as the last argument,
> > >because that would fix that issue?
> >
> > Undoubtedly -- perhaps the called CPU halts, and therefore cannot reply. :)
>
> Uhm yes, I guess stop_this_cpu() does exactly what the name implies :-)
>
> > But some kind of pre-halt ack, perhaps plus a short delay by the caller
> > after receipt of the ack, would probably suffice to kill that bug.
> >
> > But I really haven't studied this code enough to know,
> > other than that it historically has been a sticky area
> > to poke around in.
>
> Something like this will close the window right up until the point
> where the other CPUs have 'almost' called halt().
Now I took a look at the context... why not simply use the same trick swsusp
uses, and hot unplug all cpus at the end of shutdown?
>
> diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
> index 5398385..94ec9bf 100644
> --- a/arch/x86/kernel/smp.c
> +++ b/arch/x86/kernel/smp.c
> @@ -155,8 +155,9 @@ static void stop_this_cpu(void *dummy)
> /*
> * Remove this CPU:
> */
> - cpu_clear(smp_processor_id(), cpu_online_map);
> disable_local_APIC();
> + cpu_clear(smp_processor_id(), cpu_online_map);
> + smp_wmb();
> if (hlt_works(smp_processor_id()))
> for (;;) halt();
> for (;;);
> @@ -175,6 +176,12 @@ static void native_smp_send_stop(void)
>
> local_irq_save(flags);
> smp_call_function(stop_this_cpu, NULL, 0, 0);
> +
> + while (cpus_weight(cpu_online_map) > 1) {
> + cpu_relax();
> + smp_rmb();
> + }
> +
> disable_local_APIC();
> local_irq_restore(flags);
> }
--
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-26 8:04 ` Pavel Machek
2008-04-26 8:04 ` Pavel Machek
@ 2008-04-28 15:13 ` Mark Lord
2008-04-28 15:13 ` Mark Lord
2008-05-01 16:23 ` Pavel Machek
1 sibling, 2 replies; 139+ messages in thread
From: Mark Lord @ 2008-04-28 15:13 UTC (permalink / raw)
To: Pavel Machek; +Cc: Jens Axboe, linux-arch, linux-kernel, npiggin, torvalds
Pavel Machek wrote:
> Hi!
>
>>>>> The second bug is that, for the halt case at least,
>>>>> nobody waits for the other CPU to actually halt
>>>>> before continuing.. so we sometimes enter the shutdown
>>>>> code while other CPUs are still active.
>>>>>
>>>>> This causes some machines to hang at shutdown,
>>>>> unless CPU_HOTPLUG is configured and takes them offline
>>>>> before we get here.
>>>> I'm guessing there's a reason it doesn't pass '1' as the last argument,
>>>> because that would fix that issue?
>>> Undoubtedly -- perhaps the called CPU halts, and therefore cannot reply. :)
>> Uhm yes, I guess stop_this_cpu() does exactly what the name implies :-)
>>
>>> But some kind of pre-halt ack, perhaps plus a short delay by the caller
>>> after receipt of the ack, would probably suffice to kill that bug.
>>>
>>> But I really haven't studied this code enough to know,
>>> other than that it historically has been a sticky area
>>> to poke around in.
>> Something like this will close the window right up until the point
>> where the other CPUs have 'almost' called halt().
>
> Now I took a look at the context... why not simply use the same trick swsusp
> uses, and hot unplug all cpus at the end of shutdown?
..
That's the existing workaround for this bug,
but not everybody has cpu hotplug in their config,
and this bug should still get fixed.
Cheers
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 1/11] Add generic helpers for arch IPI function calls
2008-04-28 15:13 ` Mark Lord
2008-04-28 15:13 ` Mark Lord
@ 2008-05-01 16:23 ` Pavel Machek
1 sibling, 0 replies; 139+ messages in thread
From: Pavel Machek @ 2008-05-01 16:23 UTC (permalink / raw)
To: Mark Lord; +Cc: Jens Axboe, linux-arch, linux-kernel, npiggin, torvalds
>>> Something like this will close the window right up until the point
>>> where the other CPUs have 'almost' called halt().
>>
>> Now I took a look at the context... why not simply use the same trick swsusp
>> uses, and hot unplug all cpus at the end of shutdown?
> ..
>
> That's the existing workaround for this bug,
> but not everybody has cpu hotplug in their config,
> and this bug should still get fixed.
Ok... an alternative solution is to keep cpu hotplug always
enabled. Suspend & hibernation seem to need it anyway, and
duplicating code between cpu hotplug and shutdown does not look _that_
attractive.
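(For reference, that approach amounts to something like the sketch
below. disable_nonboot_cpus() is the helper the suspend path already
uses; the call site named here is purely illustrative, not an actual
patch.)

#include <linux/cpu.h>

/* Hypothetical shutdown hook: take every CPU but the boot CPU through
 * the full hotplug offline path before halting, as swsusp does.
 * Needs CONFIG_HOTPLUG_CPU -- which is exactly Mark's objection. */
static void shutdown_offline_cpus_sketch(void)
{
        disable_nonboot_cpus();
}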
Pavel
--
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html
^ permalink raw reply [flat|nested] 139+ messages in thread
* [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 7:57 ` [PATCH 1/11] Add generic helpers for arch IPI function calls Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
[not found] ` <1208851058-8500-3-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 7:57 ` [PATCH 3/11] powerpc: " Jens Axboe
` (10 subsequent siblings)
12 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Ingo Molnar
This converts x86 and x86-64 to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().
Cc: Ingo Molnar <mingo-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
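(Usage sketch, not part of the patch: with the converted helpers, a
caller runs a fast, non-blocking function on one other CPU and can wait
for its completion. The function names below are made up for
illustration.)

#include <linux/smp.h>
#include <asm/atomic.h>

static void bump(void *info)
{
        atomic_inc(info);       /* runs on the target CPU, in IPI context */
}

static int poke_cpu(int cpu)
{
        atomic_t hits = ATOMIC_INIT(0);

        /* retry == 0, wait == 1: return only after bump() has run on
         * 'cpu'; waiting also makes it safe to pass stack data here */
        return smp_call_function_single(cpu, bump, &hits, 0, 1);
}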
arch/x86/Kconfig | 5 +
arch/x86/kernel/apic_32.c | 4 +
arch/x86/kernel/entry_64.S | 3 +
arch/x86/kernel/i8259_64.c | 4 +
arch/x86/kernel/smp.c | 148 ++++------------------------
arch/x86/kernel/smpcommon.c | 56 -----------
arch/x86/mach-voyager/voyager_smp.c | 91 +++--------------
arch/x86/xen/enlighten.c | 1 -
arch/x86/xen/mmu.c | 2 +-
arch/x86/xen/smp.c | 108 ++++++---------------
include/asm-x86/hw_irq_32.h | 1 +
include/asm-x86/hw_irq_64.h | 2 +
include/asm-x86/mach-default/entry_arch.h | 1 +
include/asm-x86/mach-default/irq_vectors.h | 1 +
include/asm-x86/mach-voyager/entry_arch.h | 2 +-
include/asm-x86/mach-voyager/irq_vectors.h | 4 +-
include/asm-x86/smp.h | 10 --
17 files changed, 88 insertions(+), 355 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 87a693c..aab97e3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -156,6 +156,11 @@ config GENERIC_PENDING_IRQ
depends on GENERIC_HARDIRQS && SMP
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config X86_SMP
bool
depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 6872081..750a555 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1357,6 +1357,10 @@ void __init smp_intr_init(void)
/* IPI for generic function call */
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+ /* IPI for single call function */
+ set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+ call_function_single_interrupt);
}
#endif
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 556a8df..6d1fe27 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -711,6 +711,9 @@ END(invalidate_interrupt\num)
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+ apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index fa57a15..00d2ccd 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -494,6 +494,10 @@ void __init native_init_IRQ(void)
/* IPI for generic function call */
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+ /* IPI for generic single function call */
+ set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+ call_function_single_interrupt);
+
/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
#endif
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 8f75893..5398385 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,131 +121,32 @@ static void native_smp_send_reschedule(int cpu)
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-};
-
void lock_ipi_call_lock(void)
{
- spin_lock_irq(&call_lock);
+ spin_lock_irq(&call_function_lock);
}
void unlock_ipi_call_lock(void)
{
- spin_unlock_irq(&call_lock);
+ spin_unlock_irq(&call_function_lock);
}
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
- int nonatomic, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- struct call_data_struct data;
- int cpus = num_online_cpus() - 1;
-
- if (!cpus)
- return;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
cpumask_t allbutself;
- int cpus;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
- cpus_and(mask, mask, allbutself);
- cpus = cpus_weight(mask);
-
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
-
- /* Send a message to other CPUs */
if (cpus_equal(mask, allbutself))
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
else
send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- spin_unlock(&call_lock);
-
- return 0;
}
static void stop_this_cpu(void *dummy)
@@ -267,18 +168,13 @@ static void stop_this_cpu(void *dummy)
static void native_smp_send_stop(void)
{
- int nolock;
unsigned long flags;
if (reboot_force)
return;
- /* Don't deadlock on the call lock in panic */
- nolock = !spin_trylock(&call_lock);
local_irq_save(flags);
- __smp_call_function(stop_this_cpu, NULL, 0, 0);
- if (!nolock)
- spin_unlock(&call_lock);
+ smp_call_function(stop_this_cpu, NULL, 0, 0);
disable_local_APIC();
local_irq_restore(flags);
}
@@ -300,33 +196,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
void smp_call_function_interrupt(struct pt_regs *regs)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
ack_APIC_irq();
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
__get_cpu_var(irq_stat).irq_call_count++;
#else
add_pda(irq_call_count, 1);
#endif
irq_exit();
+}
- if (wait) {
- mb();
- atomic_inc(&call_data->finished);
- }
+void smp_call_function_single_interrupt(void)
+{
+ ack_APIC_irq();
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+ __get_cpu_var(irq_stat).irq_call_count++;
+#else
+ add_pda(irq_call_count, 1);
+#endif
+ irq_exit();
}
struct smp_ops smp_ops = {
@@ -337,7 +228,6 @@ struct smp_ops smp_ops = {
.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,
- .smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 3449064..99941b3 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
per_cpu(cpu_number, cpu) = cpu;
}
#endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
- if (cpu == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 96f60c7..f7fe43d 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -972,94 +972,24 @@ static void smp_stop_cpu_function(void *dummy)
halt();
}
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- volatile unsigned long started;
- volatile unsigned long finished;
- int wait;
-};
-
-static struct call_data_struct *call_data;
-
/* execute a thread on a new CPU. The function to be called must be
* previously set up. This is used to schedule a function for
* execution on all CPUs - set up the function then broadcast a
* function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- /* must take copy of wait because call_data may be replaced
- * unless the function is waiting for us to finish */
- int wait = call_data->wait;
- __u8 cpu = smp_processor_id();
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- if (!test_and_clear_bit(cpu, &call_data->started)) {
- /* If the bit wasn't set, this could be a replay */
- printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
- " with no call pending\n", cpu);
- return;
- }
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func) (info);
+ generic_smp_call_function_interrupt();
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
- if (wait) {
- mb();
- clear_bit(cpu, &call_data->finished);
- }
}
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
- void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
{
- struct call_data_struct data;
- u32 mask = cpus_addr(cpumask)[0];
-
- mask &= ~(1 << smp_processor_id());
-
- if (!mask)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- data.started = mask;
- data.wait = wait;
- if (wait)
- data.finished = mask;
-
- spin_lock(&call_lock);
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
- /* Wait for response */
- while (data.started)
- barrier();
-
- if (wait)
- while (data.finished)
- barrier();
-
- spin_unlock(&call_lock);
-
- return 0;
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ __get_cpu_var(irq_stat).irq_call_count++;
+ irq_exit();
}
/* Sorry about the name. In an APIC based system, the APICs
@@ -1116,6 +1046,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
smp_call_function_interrupt();
}
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+ ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+ smp_call_function_single_interrupt();
+}
+
void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1136,6 +1072,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
smp_enable_irq_interrupt();
if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
smp_call_function_interrupt();
+ if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+ smp_call_function_single_interrupt();
set_irq_regs(old_regs);
}
@@ -1879,5 +1817,4 @@ struct smp_ops smp_ops = {
.smp_send_stop = voyager_smp_send_stop,
.smp_send_reschedule = voyager_smp_send_reschedule,
- .smp_call_function_mask = voyager_smp_call_function_mask,
};
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c038822..1762e0d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1109,7 +1109,6 @@ static const struct smp_ops xen_smp_ops __initdata = {
.smp_send_stop = xen_smp_send_stop,
.smp_send_reschedule = xen_smp_send_reschedule,
- .smp_call_function_mask = xen_smp_call_function_mask,
};
#endif /* CONFIG_SMP */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2a054ef..aade134 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -600,7 +600,7 @@ static void drop_mm_ref(struct mm_struct *mm)
}
if (!cpus_empty(mask))
- xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+ smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index e340ff9..43e1027 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -38,20 +38,7 @@
static cpumask_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-};
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
@@ -114,6 +101,17 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(callfunc_irq, cpu) = rc;
+ callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ cpu,
+ xen_call_function_single_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ callfunc_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(callfuncsingle_irq, cpu) = rc;
+
return 0;
fail:
@@ -121,6 +119,9 @@ static int xen_smp_intr_init(unsigned int cpu)
unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
if (per_cpu(callfunc_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+ if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
return rc;
}
@@ -341,81 +342,30 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
- if (wait) {
- mb(); /* commit everything before setting finished */
- atomic_inc(&call_data->finished);
- }
-
return IRQ_HANDLED;
}
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
- struct call_data_struct data;
- int cpus, cpu;
- bool yield;
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
-
- cpu_clear(smp_processor_id(), mask);
-
- cpus = cpus_weight(mask);
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ __get_cpu_var(irq_stat).irq_call_count++;
+ irq_exit();
- call_data = &data;
- mb(); /* write everything before IPI */
+ return IRQ_HANDLED;
+}
- /* Send a message to other CPUs and wait for them to respond */
+void arch_send_call_function_ipi(cpumask_t mask)
+{
xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+}
- /* Make sure other vcpus get a chance to run if they need to. */
- yield = false;
- for_each_cpu_mask(cpu, mask)
- if (xen_vcpu_stolen(cpu))
- yield = true;
-
- if (yield)
- HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus ||
- (wait && atomic_read(&data.finished) != cpus))
- cpu_relax();
-
- spin_unlock(&call_lock);
-
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
index ea88054..a87b132 100644
--- a/include/asm-x86/hw_irq_32.h
+++ b/include/asm-x86/hw_irq_32.h
@@ -32,6 +32,7 @@ extern void (*const interrupt[NR_IRQS])(void);
void reschedule_interrupt(void);
void invalidate_interrupt(void);
void call_function_interrupt(void);
+void call_function_single_interrupt(void);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 0062ef3..fe65781 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -68,6 +68,7 @@
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
/* fb free - please don't readd KDB here because it's useless
(hint - think what a NMI bit does to a vector) */
#define THERMAL_APIC_VECTOR 0xfa
@@ -102,6 +103,7 @@ void spurious_interrupt(void);
void error_interrupt(void);
void reschedule_interrupt(void);
void call_function_interrupt(void);
+void call_function_single_interrupt(void);
void irq_move_cleanup_interrupt(void);
void invalidate_interrupt0(void);
void invalidate_interrupt1(void);
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc86146..9283b60 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
#endif
/*
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h
index 881c63c..ed7d495 100644
--- a/include/asm-x86/mach-default/irq_vectors.h
+++ b/include/asm-x86/mach-default/irq_vectors.h
@@ -48,6 +48,7 @@
#define INVALIDATE_TLB_VECTOR 0xfd
#define RESCHEDULE_VECTOR 0xfc
#define CALL_FUNCTION_VECTOR 0xfb
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfa
#define THERMAL_APIC_VECTOR 0xf0
/*
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8..ae52624 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h
index 165421f..64e47f6 100644
--- a/include/asm-x86/mach-voyager/irq_vectors.h
+++ b/include/asm-x86/mach-voyager/irq_vectors.h
@@ -33,6 +33,7 @@
#define VIC_RESCHEDULE_CPI 4
#define VIC_ENABLE_IRQ_CPI 5
#define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
/* Now the QIC CPIs: Since we don't need the two initial levels,
* these are 2 less than the VIC CPIs */
@@ -42,9 +43,10 @@
#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
#define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
/* this is the SYS_INT CPI. */
#define VIC_SYS_INT 8
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 62ebdec..f46a275 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -59,9 +59,6 @@ struct smp_ops {
void (*smp_send_stop)(void);
void (*smp_send_reschedule)(int cpu);
- int (*smp_call_function_mask)(cpumask_t mask,
- void (*func)(void *info), void *info,
- int wait);
};
/* Globals due to paravirt */
@@ -103,13 +100,6 @@ static inline void smp_send_reschedule(int cpu)
smp_ops.smp_send_reschedule(cpu);
}
-static inline int smp_call_function_mask(cpumask_t mask,
- void (*func) (void *info), void *info,
- int wait)
-{
- return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
-
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
--
1.5.5.49.gf43e2
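(For ports following this conversion, the arch glue reduces to the
sketch below; the my_arch_* names are placeholders for whatever IPI
primitives a given port already has, not real kernel functions.)

/* Senders: invoked by kernel/smp.c once call data has been queued. */
void arch_send_call_function_ipi(cpumask_t mask)
{
        my_arch_send_ipi(mask, CALL_FUNCTION_VECTOR);   /* placeholder primitive */
}

void arch_send_call_function_single_ipi(int cpu)
{
        my_arch_send_ipi(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}

/* Receivers: the IPI handlers just ack and dispatch into the generic code. */
void my_arch_call_function_interrupt(void)
{
        my_arch_ack_ipi();      /* placeholder ack */
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

void my_arch_call_function_single_interrupt(void)
{
        my_arch_ack_ipi();
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}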
^ permalink raw reply related [flat|nested] 139+ messages in thread
* [PATCH 2/11] x86: convert to generic helpers for IPI function calls
2008-04-22 7:57 ` [PATCH 2/11] x86: convert to generic helpers for " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
[not found] ` <1208851058-8500-3-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
1 sibling, 0 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch, linux-kernel; +Cc: npiggin, torvalds, Jens Axboe, Ingo Molnar
This converts x86 and x86-64 to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
arch/x86/Kconfig | 5 +
arch/x86/kernel/apic_32.c | 4 +
arch/x86/kernel/entry_64.S | 3 +
arch/x86/kernel/i8259_64.c | 4 +
arch/x86/kernel/smp.c | 148 ++++------------------------
arch/x86/kernel/smpcommon.c | 56 -----------
arch/x86/mach-voyager/voyager_smp.c | 91 +++--------------
arch/x86/xen/enlighten.c | 1 -
arch/x86/xen/mmu.c | 2 +-
arch/x86/xen/smp.c | 108 ++++++---------------
include/asm-x86/hw_irq_32.h | 1 +
include/asm-x86/hw_irq_64.h | 2 +
include/asm-x86/mach-default/entry_arch.h | 1 +
include/asm-x86/mach-default/irq_vectors.h | 1 +
include/asm-x86/mach-voyager/entry_arch.h | 2 +-
include/asm-x86/mach-voyager/irq_vectors.h | 4 +-
include/asm-x86/smp.h | 10 --
17 files changed, 88 insertions(+), 355 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 87a693c..aab97e3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -156,6 +156,11 @@ config GENERIC_PENDING_IRQ
depends on GENERIC_HARDIRQS && SMP
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config X86_SMP
bool
depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 6872081..750a555 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1357,6 +1357,10 @@ void __init smp_intr_init(void)
/* IPI for generic function call */
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+ /* IPI for single call function */
+ set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+ call_function_single_interrupt);
}
#endif
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 556a8df..6d1fe27 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -711,6 +711,9 @@ END(invalidate_interrupt\num)
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+ apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index fa57a15..00d2ccd 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -494,6 +494,10 @@ void __init native_init_IRQ(void)
/* IPI for generic function call */
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+ /* IPI for generic single function call */
+ set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+ call_function_single_interrupt);
+
/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
#endif
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 8f75893..5398385 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,131 +121,32 @@ static void native_smp_send_reschedule(int cpu)
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-};
-
void lock_ipi_call_lock(void)
{
- spin_lock_irq(&call_lock);
+ spin_lock_irq(&call_function_lock);
}
void unlock_ipi_call_lock(void)
{
- spin_unlock_irq(&call_lock);
+ spin_unlock_irq(&call_function_lock);
}
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
- int nonatomic, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- struct call_data_struct data;
- int cpus = num_online_cpus() - 1;
-
- if (!cpus)
- return;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
cpumask_t allbutself;
- int cpus;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
- cpus_and(mask, mask, allbutself);
- cpus = cpus_weight(mask);
-
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
-
- /* Send a message to other CPUs */
if (cpus_equal(mask, allbutself))
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
else
send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- spin_unlock(&call_lock);
-
- return 0;
}
static void stop_this_cpu(void *dummy)
@@ -267,18 +168,13 @@ static void stop_this_cpu(void *dummy)
static void native_smp_send_stop(void)
{
- int nolock;
unsigned long flags;
if (reboot_force)
return;
- /* Don't deadlock on the call lock in panic */
- nolock = !spin_trylock(&call_lock);
local_irq_save(flags);
- __smp_call_function(stop_this_cpu, NULL, 0, 0);
- if (!nolock)
- spin_unlock(&call_lock);
+ smp_call_function(stop_this_cpu, NULL, 0, 0);
disable_local_APIC();
local_irq_restore(flags);
}
@@ -300,33 +196,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
void smp_call_function_interrupt(struct pt_regs *regs)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
ack_APIC_irq();
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
__get_cpu_var(irq_stat).irq_call_count++;
#else
add_pda(irq_call_count, 1);
#endif
irq_exit();
+}
- if (wait) {
- mb();
- atomic_inc(&call_data->finished);
- }
+void smp_call_function_single_interrupt(void)
+{
+ ack_APIC_irq();
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+ __get_cpu_var(irq_stat).irq_call_count++;
+#else
+ add_pda(irq_call_count, 1);
+#endif
+ irq_exit();
}
struct smp_ops smp_ops = {
@@ -337,7 +228,6 @@ struct smp_ops smp_ops = {
.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,
- .smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 3449064..99941b3 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
per_cpu(cpu_number, cpu) = cpu;
}
#endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
- if (cpu == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 96f60c7..f7fe43d 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -972,94 +972,24 @@ static void smp_stop_cpu_function(void *dummy)
halt();
}
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- volatile unsigned long started;
- volatile unsigned long finished;
- int wait;
-};
-
-static struct call_data_struct *call_data;
-
/* execute a thread on a new CPU. The function to be called must be
* previously set up. This is used to schedule a function for
* execution on all CPUs - set up the function then broadcast a
* function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- /* must take copy of wait because call_data may be replaced
- * unless the function is waiting for us to finish */
- int wait = call_data->wait;
- __u8 cpu = smp_processor_id();
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- if (!test_and_clear_bit(cpu, &call_data->started)) {
- /* If the bit wasn't set, this could be a replay */
- printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
- " with no call pending\n", cpu);
- return;
- }
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func) (info);
+ generic_smp_call_function_interrupt();
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
- if (wait) {
- mb();
- clear_bit(cpu, &call_data->finished);
- }
}
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
- void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
{
- struct call_data_struct data;
- u32 mask = cpus_addr(cpumask)[0];
-
- mask &= ~(1 << smp_processor_id());
-
- if (!mask)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- data.started = mask;
- data.wait = wait;
- if (wait)
- data.finished = mask;
-
- spin_lock(&call_lock);
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
- /* Wait for response */
- while (data.started)
- barrier();
-
- if (wait)
- while (data.finished)
- barrier();
-
- spin_unlock(&call_lock);
-
- return 0;
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ __get_cpu_var(irq_stat).irq_call_count++;
+ irq_exit();
}
/* Sorry about the name. In an APIC based system, the APICs
@@ -1116,6 +1046,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
smp_call_function_interrupt();
}
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+ ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+ smp_call_function_single_interrupt();
+}
+
void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1136,6 +1072,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
smp_enable_irq_interrupt();
if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
smp_call_function_interrupt();
+ if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+ smp_call_function_single_interrupt();
set_irq_regs(old_regs);
}
@@ -1879,5 +1817,4 @@ struct smp_ops smp_ops = {
.smp_send_stop = voyager_smp_send_stop,
.smp_send_reschedule = voyager_smp_send_reschedule,
- .smp_call_function_mask = voyager_smp_call_function_mask,
};
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c038822..1762e0d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1109,7 +1109,6 @@ static const struct smp_ops xen_smp_ops __initdata = {
.smp_send_stop = xen_smp_send_stop,
.smp_send_reschedule = xen_smp_send_reschedule,
- .smp_call_function_mask = xen_smp_call_function_mask,
};
#endif /* CONFIG_SMP */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2a054ef..aade134 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -600,7 +600,7 @@ static void drop_mm_ref(struct mm_struct *mm)
}
if (!cpus_empty(mask))
- xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+ smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index e340ff9..43e1027 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -38,20 +38,7 @@
static cpumask_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-};
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
@@ -114,6 +101,17 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(callfunc_irq, cpu) = rc;
+ callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ cpu,
+ xen_call_function_single_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ callfunc_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(callfuncsingle_irq, cpu) = rc;
+
return 0;
fail:
@@ -121,6 +119,9 @@ static int xen_smp_intr_init(unsigned int cpu)
unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
if (per_cpu(callfunc_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+ if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
return rc;
}
@@ -341,81 +342,30 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
- if (wait) {
- mb(); /* commit everything before setting finished */
- atomic_inc(&call_data->finished);
- }
-
return IRQ_HANDLED;
}
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
- struct call_data_struct data;
- int cpus, cpu;
- bool yield;
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
-
- cpu_clear(smp_processor_id(), mask);
-
- cpus = cpus_weight(mask);
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ __get_cpu_var(irq_stat).irq_call_count++;
+ irq_exit();
- call_data = &data;
- mb(); /* write everything before IPI */
+ return IRQ_HANDLED;
+}
- /* Send a message to other CPUs and wait for them to respond */
+void arch_send_call_function_ipi(cpumask_t mask)
+{
xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+}
- /* Make sure other vcpus get a chance to run if they need to. */
- yield = false;
- for_each_cpu_mask(cpu, mask)
- if (xen_vcpu_stolen(cpu))
- yield = true;
-
- if (yield)
- HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus ||
- (wait && atomic_read(&data.finished) != cpus))
- cpu_relax();
-
- spin_unlock(&call_lock);
-
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
index ea88054..a87b132 100644
--- a/include/asm-x86/hw_irq_32.h
+++ b/include/asm-x86/hw_irq_32.h
@@ -32,6 +32,7 @@ extern void (*const interrupt[NR_IRQS])(void);
void reschedule_interrupt(void);
void invalidate_interrupt(void);
void call_function_interrupt(void);
+void call_function_single_interrupt(void);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 0062ef3..fe65781 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -68,6 +68,7 @@
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
/* fb free - please don't readd KDB here because it's useless
(hint - think what a NMI bit does to a vector) */
#define THERMAL_APIC_VECTOR 0xfa
@@ -102,6 +103,7 @@ void spurious_interrupt(void);
void error_interrupt(void);
void reschedule_interrupt(void);
void call_function_interrupt(void);
+void call_function_single_interrupt(void);
void irq_move_cleanup_interrupt(void);
void invalidate_interrupt0(void);
void invalidate_interrupt1(void);
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc86146..9283b60 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
#endif
/*
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h
index 881c63c..ed7d495 100644
--- a/include/asm-x86/mach-default/irq_vectors.h
+++ b/include/asm-x86/mach-default/irq_vectors.h
@@ -48,6 +48,7 @@
#define INVALIDATE_TLB_VECTOR 0xfd
#define RESCHEDULE_VECTOR 0xfc
#define CALL_FUNCTION_VECTOR 0xfb
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfa
#define THERMAL_APIC_VECTOR 0xf0
/*
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8..ae52624 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h
index 165421f..64e47f6 100644
--- a/include/asm-x86/mach-voyager/irq_vectors.h
+++ b/include/asm-x86/mach-voyager/irq_vectors.h
@@ -33,6 +33,7 @@
#define VIC_RESCHEDULE_CPI 4
#define VIC_ENABLE_IRQ_CPI 5
#define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
/* Now the QIC CPIs: Since we don't need the two initial levels,
* these are 2 less than the VIC CPIs */
@@ -42,9 +43,10 @@
#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
#define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
/* this is the SYS_INT CPI. */
#define VIC_SYS_INT 8
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 62ebdec..f46a275 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -59,9 +59,6 @@ struct smp_ops {
void (*smp_send_stop)(void);
void (*smp_send_reschedule)(int cpu);
- int (*smp_call_function_mask)(cpumask_t mask,
- void (*func)(void *info), void *info,
- int wait);
};
/* Globals due to paravirt */
@@ -103,13 +100,6 @@ static inline void smp_send_reschedule(int cpu)
smp_ops.smp_send_reschedule(cpu);
}
-static inline int smp_call_function_mask(cpumask_t mask,
- void (*func) (void *info), void *info,
- int wait)
-{
- return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
-
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
--
1.5.5.49.gf43e2
^ permalink raw reply related [flat|nested] 139+ messages in thread
[parent not found: <1208851058-8500-3-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-3-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
@ 2008-04-22 8:38 ` Sam Ravnborg
2008-04-22 8:38 ` Sam Ravnborg
[not found] ` <20080422083810.GA23540-QabhHTsIXMSnlFQ6Q1D1Y0B+6BGkLq7r@public.gmane.org>
2008-04-22 8:47 ` Ingo Molnar
1 sibling, 2 replies; 139+ messages in thread
From: Sam Ravnborg @ 2008-04-22 8:38 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar
On Tue, Apr 22, 2008 at 09:57:28AM +0200, Jens Axboe wrote:
> This converts x86 and x86-64 to use the new helpers for
> smp_call_function() and friends, and adds support for
> smp_call_function_single().
>
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 87a693c..aab97e3 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -156,6 +156,11 @@ config GENERIC_PENDING_IRQ
> depends on GENERIC_HARDIRQS && SMP
> default y
>
> +config GENERIC_SMP_HELPERS
> + bool
> + depends on SMP
> + default y
> +
Hi Jens.
[Not x86 specific - it was the first patch
touching a Kconfig file]
Could we define a single config variable
in for example kernel/Kconfig(*)
like this:
config HAVE_GENERIC_SMP_HELPERS
def_bool n
And then the archs that use the generic SMP helpers just
do a simple:
config X86
+ select HAVE_GENERIC_SMP_HELPERS
This is the recommended method today and is documented
in Documentation/kbuild/kconfig-language.txt if
you need a bit more intro.
(*) I know we do not have kernel/Kconfig today.
But maybe this is a good time to add it and
source it from arch/Kconfig
Sam
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422083810.GA23540-QabhHTsIXMSnlFQ6Q1D1Y0B+6BGkLq7r@public.gmane.org>]
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <20080422083810.GA23540-QabhHTsIXMSnlFQ6Q1D1Y0B+6BGkLq7r@public.gmane.org>
@ 2008-04-22 8:43 ` Jens Axboe
2008-04-22 8:43 ` Jens Axboe
[not found] ` <20080422084315.GT12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 8:43 UTC (permalink / raw)
To: Sam Ravnborg
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar
On Tue, Apr 22 2008, Sam Ravnborg wrote:
> On Tue, Apr 22, 2008 at 09:57:28AM +0200, Jens Axboe wrote:
> > This converts x86 and x86-64 to use the new helpers for
> > smp_call_function() and friends, and adds support for
> > smp_call_function_single().
> >
> > diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> > index 87a693c..aab97e3 100644
> > --- a/arch/x86/Kconfig
> > +++ b/arch/x86/Kconfig
> > @@ -156,6 +156,11 @@ config GENERIC_PENDING_IRQ
> > depends on GENERIC_HARDIRQS && SMP
> > default y
> >
> > +config GENERIC_SMP_HELPERS
> > + bool
> > + depends on SMP
> > + default y
> > +
>
> Hi Jens.
>
> [Not x86 specific - it was the first patch
> touching a Kconfig file]
>
> Could we define a single config variable
> in for example kernel/Kconfig(*)
> like this:
>
> config HAVE_GENERIC_SMP_HELPERS
> def_bool n
>
> And then the archs that use the generic SMP helpers just
> do a simple:
>
> config X86
> + select HAVE_GENERIC_SMP_HELPERS
>
>
> This is the recommended method today and is documented
> in Documentation/kbuild/kconfig-language.txt if
> you need a bit more intro.
>
> (*) I know we do not have kernel/Kconfig today.
> But maybe this is a good time to add it and
> source it from arch/Kconfig
Ah yes, that looks like a better approach. I'll adapt the patches to
this method instead of selecting smp.o, thanks.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422084315.GT12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>]
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <20080422084315.GT12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 11:15 ` Jens Axboe
2008-04-22 11:15 ` Jens Axboe
0 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 11:15 UTC (permalink / raw)
To: Sam Ravnborg
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar
On Tue, Apr 22 2008, Jens Axboe wrote:
> On Tue, Apr 22 2008, Sam Ravnborg wrote:
> > On Tue, Apr 22, 2008 at 09:57:28AM +0200, Jens Axboe wrote:
> > > This converts x86 and x86-64 to use the new helpers for
> > > smp_call_function() and friends, and adds support for
> > > smp_call_function_single().
> > >
> > > diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> > > index 87a693c..aab97e3 100644
> > > --- a/arch/x86/Kconfig
> > > +++ b/arch/x86/Kconfig
> > > @@ -156,6 +156,11 @@ config GENERIC_PENDING_IRQ
> > > depends on GENERIC_HARDIRQS && SMP
> > > default y
> > >
> > > +config GENERIC_SMP_HELPERS
> > > + bool
> > > + depends on SMP
> > > + default y
> > > +
> >
> > Hi Jens.
> >
> > [Not x86 specific - it was the first patch
> > touching a Kconfig file]
> >
> > Could we define a single config variable
> > in for example kernel/Kconfig(*)
> > like this:
> >
> > config HAVE_GENERIC_SMP_HELPERS
> > def_bool n
> >
> > And then the archs that use the generic SMP helpers just
> > do a simple:
> >
> > config X86
> > + select HAVE_GENERIC_SMP_HELPERS
> >
> >
> > This is the recommended method today and is documented
> > in Documentation/kbuild/kconfig-language.txt if
> > you need a bit more intro.
> >
> > (*) I know we do not have kernel/Kconfig today.
> > But maybe this is a good time to add it and
> > source it from arch/Kconfig
>
> Ah yes, that looks like a better approach. I'll adapt the patches to
> this method instead of selecting smp.o, thanks.
I added
config USE_GENERIC_SMP_HELPERS
def_bool n
in arch/Kconfig and added a 'select USE_GENERIC_SMP_HELPERS' in the
arch-private Kconfig files instead. It also reduces the line count further,
thanks for the suggestion! It's cleaner this way.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-3-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 8:38 ` Sam Ravnborg
@ 2008-04-22 8:47 ` Ingo Molnar
2008-04-22 8:47 ` Ingo Molnar
[not found] ` <20080422084738.GB2388-X9Un+BFzKDI@public.gmane.org>
1 sibling, 2 replies; 139+ messages in thread
From: Ingo Molnar @ 2008-04-22 8:47 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar,
Peter Zijlstra
* Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
> This converts x86 and x86-64 to use the new helpers for
> smp_call_function() and friends, and adds support for
> smp_call_function_single().
nice stuff. The x86 and generic bits:
Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
this has been brewing for months, what is the testing/confidence status
of it? Could we still merge it into v2.6.26? (i think we should)
Ingo
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422084738.GB2388-X9Un+BFzKDI@public.gmane.org>]
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <20080422084738.GB2388-X9Un+BFzKDI@public.gmane.org>
@ 2008-04-22 8:48 ` Jacek Luczak
2008-04-22 8:48 ` Jacek Luczak
[not found] ` <480DA670.4060707-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2008-04-22 8:52 ` Jens Axboe
2008-04-26 8:59 ` Jeremy Fitzhardinge
2 siblings, 2 replies; 139+ messages in thread
From: Jacek Luczak @ 2008-04-22 8:48 UTC (permalink / raw)
To: Jens Axboe
Cc: Ingo Molnar, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar,
Peter Zijlstra
Ingo Molnar writes:
> * Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
>
>> This converts x86 and x86-64 to use the new helpers for
>> smp_call_function() and friends, and adds support for
>> smp_call_function_single().
>
> nice stuff. The x86 and generic bits:
>
> Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
>
> this has been brewing for months, what is the testing/confidence status
Apropos testing: Jens, did you place all the patches (latest versions) somewhere
on the net, so I can grab and test them?
> of it? Could we still merge it into v2.6.26? (i think we should)
-Jacek
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <480DA670.4060707-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>]
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <480DA670.4060707-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2008-04-22 8:56 ` Jens Axboe
2008-04-22 8:56 ` Jens Axboe
[not found] ` <20080422085629.GV12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 8:56 UTC (permalink / raw)
To: Jacek Luczak
Cc: Ingo Molnar, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar,
Peter Zijlstra
On Tue, Apr 22 2008, Jacek Luczak wrote:
> Ingo Molnar writes:
> > * Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
> >
> >> This converts x86 and x86-64 to use the new helpers for
> >> smp_call_function() and friends, and adds support for
> >> smp_call_function_single().
> >
> > nice stuff. The x86 and generic bits:
> >
> > Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
> >
> > this has been brewing for months, what is the testing/confidence status
>
> Apropos testing: Jens, did you place all the patches (latest versions)
> somewhere on the net, so I can grab and test them?
It's in git as well, generic-ipi branch. So you can pull it into current
linus by doing:
$ git pull git://git.kernel.dk/linux-2.6-block.git generic-ipi
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422085629.GV12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>]
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <20080422085629.GV12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 9:04 ` Jacek Luczak
2008-04-22 9:04 ` Jacek Luczak
0 siblings, 1 reply; 139+ messages in thread
From: Jacek Luczak @ 2008-04-22 9:04 UTC (permalink / raw)
To: Jens Axboe
Cc: Ingo Molnar, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar,
Peter Zijlstra
Jens Axboe writes:
> On Tue, Apr 22 2008, Jacek Luczak wrote:
>> Ingo Molnar writes:
>>> * Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
>>>
>>>> This converts x86 and x86-64 to use the new helpers for
>>>> smp_call_function() and friends, and adds support for
>>>> smp_call_function_single().
>>> nice stuff. The x86 and generic bits:
>>>
>>> Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
>>>
>>> this has been brewing for months, what is the testing/confidence status
>> Apropos testing: Jens, did you place all the patches (latest versions)
>> somewhere on the net, so I can grab and test them?
>
> It's in git as well, generic-ipi branch. So you can pull it into current
> linus by doing:
>
> $ git pull git://git.kernel.dk/linux-2.6-block.git generic-ipi
OK, thanks. I've got a number of unused x86 boxes, so I will give it a spin.
Jacek
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <20080422084738.GB2388-X9Un+BFzKDI@public.gmane.org>
2008-04-22 8:48 ` Jacek Luczak
@ 2008-04-22 8:52 ` Jens Axboe
2008-04-22 8:52 ` Jens Axboe
2008-04-26 8:59 ` Jeremy Fitzhardinge
2 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 8:52 UTC (permalink / raw)
To: Ingo Molnar
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar,
Peter Zijlstra
On Tue, Apr 22 2008, Ingo Molnar wrote:
>
> * Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
>
> > This converts x86 and x86-64 to use the new helpers for
> > smp_call_function() and friends, and adds support for
> > smp_call_function_single().
>
> nice stuff. The x86 and generic bits:
>
> Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
Thanks, adding...
> this has been brewing for months, what is the testing/confidence status
> of it? Could we still merge it into v2.6.26? (i think we should)
I'm not aware of any problems. I'd be very grateful if you could give it
a spin on some of your test boxes, just to raise the confidence interval
a bit :-)
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 2/11] x86: convert to generic helpers for IPI function calls
[not found] ` <20080422084738.GB2388-X9Un+BFzKDI@public.gmane.org>
2008-04-22 8:48 ` Jacek Luczak
2008-04-22 8:52 ` Jens Axboe
@ 2008-04-26 8:59 ` Jeremy Fitzhardinge
2008-04-26 8:59 ` Jeremy Fitzhardinge
2 siblings, 1 reply; 139+ messages in thread
From: Jeremy Fitzhardinge @ 2008-04-26 8:59 UTC (permalink / raw)
To: Ingo Molnar
Cc: Jens Axboe, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Ingo Molnar,
Peter Zijlstra
Ingo Molnar wrote:
> * Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
>
>
>> This converts x86 and x86-64 to use the new helpers for
>> smp_call_function() and friends, and adds support for
>> smp_call_function_single().
>>
>
> nice stuff. The x86 and generic bits:
>
> Acked-by: Ingo Molnar <mingo-X9Un+BFzKDI@public.gmane.org>
>
> this has been brewing for months, what is the testing/confidence status
> of it? Could we still merge it into v2.6.26? (i think we should)
>
I commented on an earlier version of this patch, but it looks like it
will break Xen. If you enable CONFIG_XEN, it will try to define both
native and Xen versions of arch_send_call_function[_single]_ipi(). It
needs to vector them via smp_ops.
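Roughly like the sketch below, i.e. the two arch hooks become thin
wrappers around new smp_ops members that the native and Xen backends
each fill in. The member names here are only illustrative, not a
proposal for the final API:

#include <linux/cpumask.h>

struct smp_ops {
	/* ...existing members elided... */
	void (*send_call_func_ipi)(cpumask_t mask);	/* IPI a mask of CPUs */
	void (*send_call_func_single_ipi)(int cpu);	/* IPI a single CPU */
};

extern struct smp_ops smp_ops;

/* The generic helpers call these two hooks; with the indirection
 * below, the paravirt backend picks the actual sender at runtime. */
void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(mask);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

native_smp_ops would then point the new members at the APIC senders and
xen_smp_ops at xen_send_IPI_mask(), so both can be built into one image.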
J
^ permalink raw reply [flat|nested] 139+ messages in thread
* [PATCH 3/11] powerpc: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 7:57 ` [PATCH 1/11] Add generic helpers for arch IPI function calls Jens Axboe
2008-04-22 7:57 ` [PATCH 2/11] x86: convert to generic helpers for " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
[not found] ` <1208851058-8500-4-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 7:57 ` [PATCH 4/11] ia64: " Jens Axboe
` (9 subsequent siblings)
12 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Paul Mackerras
This converts ppc to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().
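As a usage sketch (hypothetical caller, not part of this patch; the
function names are made up and the retry/wait ints follow the prototype
used throughout this series):

#include <linux/smp.h>

/* Runs on the target CPU from IPI context: must be fast and non-blocking. */
static void do_flush(void *info)
{
}

static void flush_on(int cpu)
{
	/* retry = 0, wait = 1: return only after do_flush() has run on cpu */
	smp_call_function_single(cpu, do_flush, NULL, 0, 1);
}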
Cc: Paul Mackerras <paulus-eUNUBHrolfbYtjvyW6yDsg@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/powerpc/Kconfig | 5 +
arch/powerpc/kernel/smp.c | 220 ++-----------------------------
arch/powerpc/platforms/cell/interrupt.c | 1 +
arch/powerpc/platforms/ps3/smp.c | 7 +-
arch/powerpc/platforms/pseries/xics.c | 6 +-
arch/powerpc/sysdev/mpic.c | 2 +-
include/asm-powerpc/smp.h | 5 +-
7 files changed, 27 insertions(+), 219 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bb2e93..ad394d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -42,6 +42,11 @@ config GENERIC_HARDIRQS
bool
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index be35ffa..facd49d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
static volatile unsigned int cpu_callin_map[NR_CPUS];
-void smp_call_function_interrupt(void);
-
int smt_enabled_at_boot = 1;
-static int ipi_fail_ok;
-
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
{
switch(msg) {
case PPC_MSG_CALL_FUNCTION:
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
/* XXX Do we have to do this? */
set_need_resched();
break;
+ case PPC_MSG_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
case PPC_MSG_DEBUGGER_BREAK:
if (crash_ipi_function_ptr) {
crash_ipi_function_ptr(get_irq_regs());
@@ -154,215 +153,22 @@ static void stop_this_cpu(void *dummy)
;
}
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- * Stolen from the i386 version.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
-static struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-} *call_data;
-
-/* delay of at least 8 seconds */
-#define SMP_CALL_TIMEOUT 8
-
-/*
- * These functions send a 'generic call function' IPI to other online
- * CPUS in the system.
- *
- * [SUMMARY] Run a function on other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- * <map> is a cpu map of the cpus to send IPI to.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int __smp_call_function_map(void (*func) (void *info), void *info,
- int nonatomic, int wait, cpumask_t map)
-{
- struct call_data_struct data;
- int ret = -1, num_cpus;
- int cpu;
- u64 timeout;
-
- if (unlikely(smp_ops == NULL))
- return ret;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- /* remove 'self' from the map */
- if (cpu_isset(smp_processor_id(), map))
- cpu_clear(smp_processor_id(), map);
-
- /* sanity check the map, remove any non-online processors. */
- cpus_and(map, map, cpu_online_map);
-
- num_cpus = cpus_weight(map);
- if (!num_cpus)
- goto done;
-
- call_data = &data;
- smp_wmb();
- /* Send a message to all CPUs in the map */
- for_each_cpu_mask(cpu, map)
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
-
- timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
-
- /* Wait for indication that they have received the message */
- while (atomic_read(&data.started) != num_cpus) {
- HMT_low();
- if (get_tb() >= timeout) {
- printk("smp_call_function on cpu %d: other cpus not "
- "responding (%d)\n", smp_processor_id(),
- atomic_read(&data.started));
- if (!ipi_fail_ok)
- debugger(NULL);
- goto out;
- }
- }
-
- /* optionally wait for the CPUs to complete */
- if (wait) {
- while (atomic_read(&data.finished) != num_cpus) {
- HMT_low();
- if (get_tb() >= timeout) {
- printk("smp_call_function on cpu %d: other "
- "cpus not finishing (%d/%d)\n",
- smp_processor_id(),
- atomic_read(&data.finished),
- atomic_read(&data.started));
- debugger(NULL);
- goto out;
- }
- }
- }
-
- done:
- ret = 0;
-
- out:
- call_data = NULL;
- HMT_medium();
- return ret;
-}
-
-static int __smp_call_function(void (*func)(void *info), void *info,
- int nonatomic, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- int ret;
- spin_lock(&call_lock);
- ret =__smp_call_function_map(func, info, nonatomic, wait,
- cpu_online_map);
- spin_unlock(&call_lock);
- return ret;
-}
-
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- return __smp_call_function(func, info, nonatomic, wait);
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL(smp_call_function);
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- cpumask_t map = CPU_MASK_NONE;
- int ret = 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- if (!cpu_online(cpu))
- return -EINVAL;
+ unsigned int cpu;
- cpu_set(cpu, map);
- if (cpu != get_cpu()) {
- spin_lock(&call_lock);
- ret = __smp_call_function_map(func, info, nonatomic, wait, map);
- spin_unlock(&call_lock);
- } else {
- local_irq_disable();
- func(info);
- local_irq_enable();
- }
- put_cpu();
- return ret;
+ for_each_cpu_mask(cpu, mask)
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
-EXPORT_SYMBOL(smp_call_function_single);
void smp_send_stop(void)
{
- int nolock;
-
- /* It's OK to fail sending the IPI, since the alternative is to
- * be stuck forever waiting on the other CPU to take the interrupt.
- *
- * It's better to at least continue and go through reboot, since this
- * function is usually called at panic or reboot time in the first
- * place.
- */
- ipi_fail_ok = 1;
-
- /* Don't deadlock in case we got called through panic */
- nolock = !spin_trylock(&call_lock);
- __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
- if (!nolock)
- spin_unlock(&call_lock);
-}
-
-void smp_call_function_interrupt(void)
-{
- void (*func) (void *info);
- void *info;
- int wait;
-
- /* call_data will be NULL if the sender timed out while
- * waiting on us to receive the call.
- */
- if (!call_data)
- return;
-
- func = call_data->func;
- info = call_data->info;
- wait = call_data->wait;
-
- if (!wait)
- smp_mb__before_atomic_inc();
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
- (*func)(info);
- if (wait) {
- smp_mb__before_atomic_inc();
- atomic_inc(&call_data->finished);
- }
+ smp_call_function(stop_this_cpu, NULL, 0, 0);
}
extern struct gettimeofday_struct do_gtod;
@@ -594,9 +400,9 @@ int __devinit start_secondary(void *unused)
secondary_cpu_time_init();
- spin_lock(&call_lock);
+ spin_lock(&call_function_lock);
cpu_set(cpu, cpu_online_map);
- spin_unlock(&call_lock);
+ spin_unlock(&call_function_lock);
local_irq_enable();
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 04f74f9..bc91ec5 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -217,6 +217,7 @@ void iic_request_IPIs(void)
{
iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
+ iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f0b12f2..a0927a3 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu)
* to index needs to be setup.
*/
- BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
- BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
- BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
+ BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
+ BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
for (i = 0; i < MSG_COUNT; i++) {
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 43df53c..b343245 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -384,13 +384,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu)
mb();
smp_message_recv(PPC_MSG_RESCHEDULE);
}
-#if 0
- if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
+ if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_MIGRATE_TASK);
+ smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
}
-#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
&xics_ipi_message[cpu].value)) {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 8619f2a..bdc7b13 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1508,7 +1508,7 @@ void mpic_request_ipis(void)
static char *ipi_names[] = {
"IPI0 (call function)",
"IPI1 (reschedule)",
- "IPI2 (unused)",
+ "IPI2 (call function single)",
"IPI3 (debugger break)",
};
BUG_ON(mpic == NULL);
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 505f35b..78382f6 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
* in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
-/* This is unused now */
-#if 0
-#define PPC_MSG_MIGRATE_TASK 2
-#endif
+#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
void smp_init_iSeries(void);
--
1.5.5.49.gf43e2
^ permalink raw reply related [flat|nested] 139+ messages in thread* [PATCH 3/11] powerpc: convert to generic helpers for IPI function calls
2008-04-22 7:57 ` [PATCH 3/11] powerpc: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
[not found] ` <1208851058-8500-4-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
1 sibling, 0 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch, linux-kernel; +Cc: npiggin, torvalds, Jens Axboe, Paul Mackerras
This converts ppc to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
arch/powerpc/Kconfig | 5 +
arch/powerpc/kernel/smp.c | 220 ++-----------------------------
arch/powerpc/platforms/cell/interrupt.c | 1 +
arch/powerpc/platforms/ps3/smp.c | 7 +-
arch/powerpc/platforms/pseries/xics.c | 6 +-
arch/powerpc/sysdev/mpic.c | 2 +-
include/asm-powerpc/smp.h | 5 +-
7 files changed, 27 insertions(+), 219 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bb2e93..ad394d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -42,6 +42,11 @@ config GENERIC_HARDIRQS
bool
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index be35ffa..facd49d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
static volatile unsigned int cpu_callin_map[NR_CPUS];
-void smp_call_function_interrupt(void);
-
int smt_enabled_at_boot = 1;
-static int ipi_fail_ok;
-
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
{
switch(msg) {
case PPC_MSG_CALL_FUNCTION:
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
/* XXX Do we have to do this? */
set_need_resched();
break;
+ case PPC_MSG_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
case PPC_MSG_DEBUGGER_BREAK:
if (crash_ipi_function_ptr) {
crash_ipi_function_ptr(get_irq_regs());
@@ -154,215 +153,22 @@ static void stop_this_cpu(void *dummy)
;
}
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- * Stolen from the i386 version.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
-static struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-} *call_data;
-
-/* delay of at least 8 seconds */
-#define SMP_CALL_TIMEOUT 8
-
-/*
- * These functions send a 'generic call function' IPI to other online
- * CPUS in the system.
- *
- * [SUMMARY] Run a function on other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- * <map> is a cpu map of the cpus to send IPI to.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int __smp_call_function_map(void (*func) (void *info), void *info,
- int nonatomic, int wait, cpumask_t map)
-{
- struct call_data_struct data;
- int ret = -1, num_cpus;
- int cpu;
- u64 timeout;
-
- if (unlikely(smp_ops == NULL))
- return ret;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- /* remove 'self' from the map */
- if (cpu_isset(smp_processor_id(), map))
- cpu_clear(smp_processor_id(), map);
-
- /* sanity check the map, remove any non-online processors. */
- cpus_and(map, map, cpu_online_map);
-
- num_cpus = cpus_weight(map);
- if (!num_cpus)
- goto done;
-
- call_data = &data;
- smp_wmb();
- /* Send a message to all CPUs in the map */
- for_each_cpu_mask(cpu, map)
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
-
- timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
-
- /* Wait for indication that they have received the message */
- while (atomic_read(&data.started) != num_cpus) {
- HMT_low();
- if (get_tb() >= timeout) {
- printk("smp_call_function on cpu %d: other cpus not "
- "responding (%d)\n", smp_processor_id(),
- atomic_read(&data.started));
- if (!ipi_fail_ok)
- debugger(NULL);
- goto out;
- }
- }
-
- /* optionally wait for the CPUs to complete */
- if (wait) {
- while (atomic_read(&data.finished) != num_cpus) {
- HMT_low();
- if (get_tb() >= timeout) {
- printk("smp_call_function on cpu %d: other "
- "cpus not finishing (%d/%d)\n",
- smp_processor_id(),
- atomic_read(&data.finished),
- atomic_read(&data.started));
- debugger(NULL);
- goto out;
- }
- }
- }
-
- done:
- ret = 0;
-
- out:
- call_data = NULL;
- HMT_medium();
- return ret;
-}
-
-static int __smp_call_function(void (*func)(void *info), void *info,
- int nonatomic, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- int ret;
- spin_lock(&call_lock);
- ret =__smp_call_function_map(func, info, nonatomic, wait,
- cpu_online_map);
- spin_unlock(&call_lock);
- return ret;
-}
-
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- return __smp_call_function(func, info, nonatomic, wait);
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL(smp_call_function);
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- cpumask_t map = CPU_MASK_NONE;
- int ret = 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- if (!cpu_online(cpu))
- return -EINVAL;
+ unsigned int cpu;
- cpu_set(cpu, map);
- if (cpu != get_cpu()) {
- spin_lock(&call_lock);
- ret = __smp_call_function_map(func, info, nonatomic, wait, map);
- spin_unlock(&call_lock);
- } else {
- local_irq_disable();
- func(info);
- local_irq_enable();
- }
- put_cpu();
- return ret;
+ for_each_cpu_mask(cpu, mask)
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
-EXPORT_SYMBOL(smp_call_function_single);
void smp_send_stop(void)
{
- int nolock;
-
- /* It's OK to fail sending the IPI, since the alternative is to
- * be stuck forever waiting on the other CPU to take the interrupt.
- *
- * It's better to at least continue and go through reboot, since this
- * function is usually called at panic or reboot time in the first
- * place.
- */
- ipi_fail_ok = 1;
-
- /* Don't deadlock in case we got called through panic */
- nolock = !spin_trylock(&call_lock);
- __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
- if (!nolock)
- spin_unlock(&call_lock);
-}
-
-void smp_call_function_interrupt(void)
-{
- void (*func) (void *info);
- void *info;
- int wait;
-
- /* call_data will be NULL if the sender timed out while
- * waiting on us to receive the call.
- */
- if (!call_data)
- return;
-
- func = call_data->func;
- info = call_data->info;
- wait = call_data->wait;
-
- if (!wait)
- smp_mb__before_atomic_inc();
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
- (*func)(info);
- if (wait) {
- smp_mb__before_atomic_inc();
- atomic_inc(&call_data->finished);
- }
+ smp_call_function(stop_this_cpu, NULL, 0, 0);
}
extern struct gettimeofday_struct do_gtod;
@@ -594,9 +400,9 @@ int __devinit start_secondary(void *unused)
secondary_cpu_time_init();
- spin_lock(&call_lock);
+ spin_lock(&call_function_lock);
cpu_set(cpu, cpu_online_map);
- spin_unlock(&call_lock);
+ spin_unlock(&call_function_lock);
local_irq_enable();
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 04f74f9..bc91ec5 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -217,6 +217,7 @@ void iic_request_IPIs(void)
{
iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
+ iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f0b12f2..a0927a3 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu)
* to index needs to be setup.
*/
- BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
- BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
- BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
+ BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
+ BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
for (i = 0; i < MSG_COUNT; i++) {
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 43df53c..b343245 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -384,13 +384,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu)
mb();
smp_message_recv(PPC_MSG_RESCHEDULE);
}
-#if 0
- if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
+ if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_MIGRATE_TASK);
+ smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
}
-#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
&xics_ipi_message[cpu].value)) {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 8619f2a..bdc7b13 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1508,7 +1508,7 @@ void mpic_request_ipis(void)
static char *ipi_names[] = {
"IPI0 (call function)",
"IPI1 (reschedule)",
- "IPI2 (unused)",
+ "IPI2 (call function single)",
"IPI3 (debugger break)",
};
BUG_ON(mpic == NULL);
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 505f35b..78382f6 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
* in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
-/* This is unused now */
-#if 0
-#define PPC_MSG_MIGRATE_TASK 2
-#endif
+#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
void smp_init_iSeries(void);
--
1.5.5.49.gf43e2
* Re: [PATCH 3/11] powerpc: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-4-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
@ 2008-04-22 12:03 ` Paul Mackerras
2008-04-22 12:03 ` Paul Mackerras
[not found] ` <18445.54284.194023.553595-UYQwCShxghk5kJ7NmlRacFaTQe2KTcn/@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Paul Mackerras @ 2008-04-22 12:03 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
anton-eUNUBHrolfbYtjvyW6yDsg
Jens Axboe writes:
> This converts ppc to use the new helpers for smp_call_function() and
> friends, and adds support for smp_call_function_single().
Looks OK, and runs on a dual G5 powermac here. I'll try on some
bigger IBM boxes tomorrow, or maybe Anton can do that.
I notice that you have changed the semantics slightly: previously,
the powerpc implementation would wait for the other cpus to take the
interrupt, and print a message if they didn't all do so (this was
distinct from the `wait' parameter, which controlled whether to wait
for all cpus to finish executing the specified function).
Now the generic smp_call_function_mask doesn't wait for the other cpus
to take the interrupt. I don't think that's likely to be a problem,
but I do think that's worth mentioning in the patch description.
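To make the semantic change concrete, here is the ack phase that
disappears, condensed from the powerpc code removed in the patch (the
struct type is illustrative; the field and helper names follow the
removed code -- a sketch, not the verbatim source):

struct call_data_struct {
	atomic_t started;	/* bumped by each target on IPI receipt */
	atomic_t finished;	/* bumped by each target after func() */
};

static int old_wait_for_ack(struct call_data_struct *data, int num_cpus,
			    u64 timeout)
{
	/* The old sender spun here whatever the 'wait' argument was. */
	while (atomic_read(&data->started) != num_cpus) {
		HMT_low();
		if (get_tb() >= timeout)
			return -ETIMEDOUT;	/* old code printk'd and
						   called the debugger */
	}
	return 0;
}

In the generic code only the optional wait for completion remains.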
It's probably also worth mentioning that smp_send_stop no longer has
the fallback of doing its work even when someone else is holding the
lock, but that is OK (presumably) because the lock (call_function_lock
in the new code) is now held for much shorter periods, so there doesn't
appear to be much danger of other cpus getting stuck or panicking
while holding it.
You can put Acked-by: Paul Mackerras <paulus-eUNUBHrolfbYtjvyW6yDsg@public.gmane.org> on the ppc
patch, but please extend the description a bit as outlined above.
Thanks for doing this, BTW.
Regards,
Paul.
* Re: [PATCH 3/11] powerpc: convert to generic helpers for IPI function calls
[not found] ` <18445.54284.194023.553595-UYQwCShxghk5kJ7NmlRacFaTQe2KTcn/@public.gmane.org>
@ 2008-04-22 12:13 ` Jens Axboe
2008-04-22 12:13 ` Jens Axboe
[not found] ` <20080422121315.GE12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 12:13 UTC (permalink / raw)
To: Paul Mackerras
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
anton-eUNUBHrolfbYtjvyW6yDsg
On Tue, Apr 22 2008, Paul Mackerras wrote:
> Jens Axboe writes:
>
> > This converts ppc to use the new helpers for smp_call_function() and
> > friends, and adds support for smp_call_function_single().
>
> Looks OK, and runs on a dual G5 powermac here. I'll try on some
> bigger IBM boxes tomorrow, or maybe Anton can do that.
Thanks a lot, Paul!
> I notice that you have changed the semantics slightly: previously,
> the powerpc implementation would wait for the other cpus to take the
> interrupt, and print a message if they didn't all do so (this was
> distinct from the `wait' parameter, which controlled whether to wait
> for all cpus to finish executing the specified function).
>
> Now the generic smp_call_function_mask doesn't wait for the other cpus
> to take the interrupt. I don't think that's likely to be a problem,
> but I do think that's worth mentioning in the patch description.
>
> It's probably also worth mentioning that smp_send_stop no longer has
> the fallback of doing its work even when someone else is holding the
> lock, but that is OK (presumably) because the lock (call_function_lock
> in the new code) is now held for much shorter periods, so there doesn't
> appear to be much danger of other cpus getting stuck or panicking
> while holding it.
I touched on both of these points in the initial mail, but perhaps you
missed it as I didn't CC everyone for the 0/11 part. I'll add the full
explanation to the 1/11 patch as well.
> You can put Acked-by: Paul Mackerras <paulus-eUNUBHrolfbYtjvyW6yDsg@public.gmane.org> on the ppc
> patch, but please extend the description a bit as outlined above.
I'll add the ack; let me know if you still think I should do more on
the changelog side.
--
Jens Axboe
* Re: [PATCH 3/11] powerpc: convert to generic helpers for IPI function calls
[not found] ` <20080422121315.GE12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 12:42 ` Paul Mackerras
2008-04-22 12:42 ` Paul Mackerras
[not found] ` <18445.56653.957832.720681-UYQwCShxghk5kJ7NmlRacFaTQe2KTcn/@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Paul Mackerras @ 2008-04-22 12:42 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
anton-eUNUBHrolfbYtjvyW6yDsg
Jens Axboe writes:
> I touched on both of these points in the initial mail, but perhaps you
> missed it as I didn't CC everyone for the 0/11 part. I'll add the full
> explanation to the 1/11 patch as well.
OK, that would be fine. I didn't read right to the end of the 0/11
mail in detail, I admit. :)
Now that I have, I don't see anything about smp_send_stop trying to
get the interrupt sent off in a panic situation when some other cpu
might be stuck holding the lock (unless that's the "ipi_lock is a
little muddy" part, but that only talks about cpu onlining). That bit
looks to be powerpc-specific, added in commit e057d985 in December
2007, so perhaps the description for 3/11 should mention that it has
gone away.
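For reference, the fallback under discussion is the hunk that 3/11
removes; condensed here (the symbol names are the old powerpc ones,
and this is a sketch rather than the verbatim source):

/* Old behaviour: don't deadlock if another cpu panicked while
 * holding the lock -- press on so the reboot can happen. */
void old_smp_send_stop(void)
{
	int nolock = !spin_trylock(&call_lock);

	__smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
	if (!nolock)
		spin_unlock(&call_lock);
}

/* New behaviour: a plain call into the generic helpers, which take
 * call_function_lock unconditionally. */
void new_smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0, 0);
}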
Regards,
Paul.
* Re: [PATCH 3/11] powerpc: convert to generic helpers for IPI function calls
[not found] ` <18445.56653.957832.720681-UYQwCShxghk5kJ7NmlRacFaTQe2KTcn/@public.gmane.org>
@ 2008-04-22 18:51 ` Jens Axboe
2008-04-22 18:51 ` Jens Axboe
0 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 18:51 UTC (permalink / raw)
To: Paul Mackerras
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
anton-eUNUBHrolfbYtjvyW6yDsg
On Tue, Apr 22 2008, Paul Mackerras wrote:
> Jens Axboe writes:
>
> > I touched on both of these points in the initial mail, but perhaps you
> > missed it as I didn't CC everyone for the 0/11 part. I'll add the full
> > explanation to the 1/11 patch as well.
>
> OK, that would be fine. I didn't read right to the end of the 0/11
> mail in detail, I admit. :)
I don't blame you :-)
> Now that I have, I don't see anything about smp_send_stop trying to
> get the interrupt sent off in a panic situation when some other cpu
> might be stuck holding the lock (unless that's the "ipi_lock is a
> little muddy" part, but that only talks about cpu onlining). That bit
> looks to be powerpc-specific, added in commit e057d985 in December
> 2007, so perhaps the description for 3/11 should mention that it has
> gone away.
Oh that, yes I'll add a description. I didn't really think there was
much need for it anymore, but it should at least be documented. The
ipi_lock stuff was really more about stopping IPIs completely, not the
smp_send_stop() panic() you described.
--
Jens Axboe
* [PATCH 4/11] ia64: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (2 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 3/11] powerpc: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` [PATCH 5/11] alpha: " Jens Axboe
` (8 subsequent siblings)
12 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Tony Luck
This converts ia64 to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().
Cc: Tony Luck <tony.luck-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/ia64/Kconfig | 5 +
arch/ia64/kernel/smp.c | 239 +++---------------------------------------------
include/asm-ia64/smp.h | 3 -
3 files changed, 20 insertions(+), 227 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed21737..e09ab05 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -51,6 +51,11 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config RWSEM_XCHGADD_ALGORITHM
bool
default y
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9a9d4c4..c5dcd03 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t started;
- atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
+#define IPI_CALL_FUNC_SINGLE 2
#define IPI_KDUMP_CPU_STOP 3
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
@@ -89,13 +73,13 @@ extern void cpu_halt (void);
void
lock_ipi_calllock(void)
{
- spin_lock_irq(&call_lock);
+ spin_lock_irq(&call_function_lock);
}
void
unlock_ipi_calllock(void)
{
- spin_unlock_irq(&call_lock);
+ spin_unlock_irq(&call_function_lock);
}
static void
@@ -139,32 +123,12 @@ handle_IPI (int irq, void *dev_id)
switch (which) {
case IPI_CALL_FUNC:
- {
- struct call_data_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- /* release the 'pointer lock' */
- data = (struct call_data_struct *) call_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_inc(&data->started);
- /*
- * At this point the structure may be gone unless
- * wait is true.
- */
- (*func)(info);
-
- /* Notify the sending CPU that the task is done. */
- mb();
- if (wait)
- atomic_inc(&data->finished);
- }
- break;
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
case IPI_CPU_STOP:
stop_this_cpu();
@@ -185,6 +149,8 @@ handle_IPI (int irq, void *dev_id)
return IRQ_HANDLED;
}
+
+
/*
* Called with preemption disabled.
*/
@@ -358,190 +324,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
-/*
- * Run a function on a specific CPU
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> Currently unused.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- struct call_data_struct data;
- int cpus = 1;
- int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
- if (cpuid == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock_bh(&call_lock);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_single(cpuid, IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock_bh(&call_lock);
- put_cpu();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * <mask> The set of cpus to run on. Must not include the current cpu.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <wait> If true, wait (atomically) until function
- * has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- struct call_data_struct data;
- cpumask_t allbutself;
- int cpus;
-
- spin_lock(&call_lock);
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
-
- cpus_and(mask, mask, allbutself);
- cpus = cpus_weight(mask);
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
- /* Send a message to other CPUs */
- if (cpus_equal(mask, allbutself))
- send_IPI_allbutself(IPI_CALL_FUNC);
- else
- send_IPI_mask(mask, IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock(&call_lock);
- return 0;
-
+ send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL(smp_call_function_mask);
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
- int cpus;
-
- spin_lock(&call_lock);
- cpus = num_online_cpus() - 1;
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock(&call_lock);
- return 0;
+ send_IPI_mask(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function);
/*
* this function calls the 'stop' function on all other CPUs in the system.
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index ec5f355..4fa733d 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,9 +38,6 @@ ia64_get_lid (void)
return lid.f.id << 8 | lid.f.eid;
}
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait);
-
#define hard_smp_processor_id() ia64_get_lid()
#ifdef CONFIG_SMP
--
1.5.5.49.gf43e2
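Every arch conversion in this series has the same three-part shape;
here is a minimal sketch using the ia64 names from the hunks above
(the send_IPI_* primitives are arch-specific, and the dispatch would
normally sit in the arch's existing IPI handler rather than a separate
function):

#define IPI_CALL_FUNC		0
#define IPI_CALL_FUNC_SINGLE	2

/* 1) Route the two messages to the generic handlers. */
static void dispatch_call_ipi(int which)
{
	switch (which) {
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	}
}

/* 2) The hooks the generic code calls to raise those IPIs. */
void arch_send_call_function_ipi(cpumask_t mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

The third part is selecting CONFIG_GENERIC_SMP_HELPERS in the arch
Kconfig, which each patch does.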
* [PATCH 5/11] alpha: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (3 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 4/11] ia64: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` [PATCH 6/11] arm: " Jens Axboe
` (7 subsequent siblings)
12 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe
This converts alpha to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/alpha/Kconfig | 5 +
arch/alpha/kernel/core_marvel.c | 6 +-
arch/alpha/kernel/smp.c | 170 +++------------------------------------
include/asm-alpha/smp.h | 2 -
4 files changed, 18 insertions(+), 165 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 729cdbd..194fcb0 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -63,6 +63,11 @@ config GENERIC_IRQ_PROBE
bool
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config AUTO_IRQ_AFFINITY
bool
depends on SMP
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index f10d2ed..45cc4f2 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
#ifdef CONFIG_SMP
if (smp_processor_id() != boot_cpuid)
- smp_call_function_on_cpu(__marvel_access_rtc,
- &rtc_access, 1, 1,
- cpumask_of_cpu(boot_cpuid));
+ smp_call_function_single(boot_cpuid,
+ __marvel_access_rtc,
+ &rtc_access, 1, 1);
else
__marvel_access_rtc(&rtc_access);
#else
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 63c2073..95c905b 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -62,6 +62,7 @@ static struct {
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
};
@@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
wripir(i);
}
-/* Structure and data for smp_call_function. This is designed to
- minimize static memory requirements. Plus it looks cleaner. */
-
-struct smp_call_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t unstarted_count;
- atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer. The pointer is free if
- it is initially locked. If retry, spin until free. */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
- void *old, *tmp;
-
- mb();
- again:
- /* Compare and swap with zero. */
- asm volatile (
- "1: ldq_l %0,%1\n"
- " mov %3,%2\n"
- " bne %0,2f\n"
- " stq_c %2,%1\n"
- " beq %2,1b\n"
- "2:"
- : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
- : "r"(data)
- : "memory");
-
- if (old == 0)
- return 0;
- if (! retry)
- return -EBUSY;
-
- while (*(void **)lock)
- barrier();
- goto again;
-}
-
void
handle_ipi(struct pt_regs *regs)
{
@@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- {
- struct smp_call_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- data = smp_call_function_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- /* Notify the sending CPU that the data has been
- received, and execution is about to begin. */
- mb();
- atomic_dec (&data->unstarted_count);
-
- /* At this point the structure may be gone unless
- wait is true. */
- (*func)(info);
-
- /* Notify the sending CPU that the task is done. */
- mb();
- if (wait) atomic_dec (&data->unfinished_count);
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
break;
- }
case IPI_CPU_STOP:
halt();
@@ -700,102 +637,15 @@ smp_send_stop(void)
send_ipi_message(to_whom, IPI_CPU_STOP);
}
-/*
- * Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
- int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- int num_cpus_to_call;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- data.wait = wait;
-
- cpu_clear(smp_processor_id(), to_whom);
- num_cpus_to_call = cpus_weight(to_whom);
-
- atomic_set(&data.unstarted_count, num_cpus_to_call);
- atomic_set(&data.unfinished_count, num_cpus_to_call);
-
- /* Acquire the smp_call_function_data mutex. */
- if (pointer_lock(&smp_call_function_data, &data, retry))
- return -EBUSY;
-
- /* Send a message to the requested CPUs. */
- send_ipi_message(to_whom, IPI_CALL_FUNC);
-
- /* Wait for a minimal response. */
- timeout = jiffies + HZ;
- while (atomic_read (&data.unstarted_count) > 0
- && time_before (jiffies, timeout))
- barrier();
-
- /* If there's no response yet, log a message but allow a longer
- * timeout period -- if we get a response this time, log
- * a message saying when we got it..
- */
- if (atomic_read(&data.unstarted_count) > 0) {
- long start_time = jiffies;
- printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
- __FUNCTION__);
- timeout = jiffies + 30 * HZ;
- while (atomic_read(&data.unstarted_count) > 0
- && time_before(jiffies, timeout))
- barrier();
- if (atomic_read(&data.unstarted_count) <= 0) {
- long delta = jiffies - start_time;
- printk(KERN_ERR
- "%s: response %ld.%ld seconds into long wait\n",
- __FUNCTION__, delta / HZ,
- (100 * (delta - ((delta / HZ) * HZ))) / HZ);
- }
- }
-
- /* We either got one or timed out -- clear the lock. */
- mb();
- smp_call_function_data = NULL;
-
- /*
- * If after both the initial and long timeout periods we still don't
- * have a response, something is very wrong...
- */
- BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
- /* Wait for a complete response, if needed. */
- if (wait) {
- while (atomic_read (&data.unfinished_count) > 0)
- barrier();
- }
-
- return 0;
+ send_ipi_message(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function_on_cpu);
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- return smp_call_function_on_cpu (func, info, retry, wait,
- cpu_online_map);
+ send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL(smp_call_function);
static void
ipi_imb(void *ignored)
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 286e1d8..a9090b6 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -47,8 +47,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
extern int smp_num_cpus;
#define cpu_possible_map cpu_present_map
-int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
-
#else /* CONFIG_SMP */
#define hard_smp_processor_id() 0
--
1.5.5.49.gf43e2
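The ldq_l/stq_c asm removed above is a compare-and-swap-with-zero
loop; written against the kernel's cmpxchg() it would look roughly
like this (an illustrative sketch, not a proposed replacement):

/* Atomically install 'data' into the shared pointer iff it is
 * currently NULL; with 'retry' set, spin until it comes free. */
static int pointer_lock(void **lock, void *data, int retry)
{
	for (;;) {
		if (cmpxchg(lock, NULL, data) == NULL)
			return 0;
		if (!retry)
			return -EBUSY;
		while (*(void * volatile *)lock)
			barrier();
	}
}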
* [PATCH 6/11] arm: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (4 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 5/11] alpha: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 15:00 ` Catalin Marinas
2008-04-22 7:57 ` [PATCH 7/11] m32r: " Jens Axboe
` (6 subsequent siblings)
12 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Russell King
This converts arm to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested
at all, not even compiled.
Cc: Russell King <rmk-lFZ/pmaqli7XmaaqVzeoHQ@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/arm/Kconfig | 5 ++
arch/arm/kernel/smp.c | 148 ++++---------------------------------------------
2 files changed, 16 insertions(+), 137 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d8d2532..b34cedb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -79,6 +79,11 @@ config GENERIC_HARDIRQS
bool
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config STACKTRACE_SUPPORT
bool
default y
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eefae1d..9c85329 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -68,20 +68,10 @@ enum ipi_msg_type {
IPI_TIMER,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
};
-struct smp_call_struct {
- void (*func)(void *info);
- void *info;
- int wait;
- cpumask_t pending;
- cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
local_irq_restore(flags);
}
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
- int retry, int wait, cpumask_t callmap)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- int ret = 0;
-
- data.func = func;
- data.info = info;
- data.wait = wait;
-
- cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- goto out;
-
- data.pending = callmap;
- if (wait)
- data.unfinished = callmap;
-
- /*
- * try to get the mutex on smp_call_function_data
- */
- spin_lock(&smp_call_function_lock);
- smp_call_function_data = &data;
-
- send_ipi_message(callmap, IPI_CALL_FUNC);
-
- timeout = jiffies + HZ;
- while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
- barrier();
-
- /*
- * did we time out?
- */
- if (!cpus_empty(data.pending)) {
- /*
- * this may be causing our panic - report it
- */
- printk(KERN_CRIT
- "CPU%u: smp_call_function timeout for %p(%p)\n"
- " callmap %lx pending %lx, %swait\n",
- smp_processor_id(), func, info, *cpus_addr(callmap),
- *cpus_addr(data.pending), wait ? "" : "no ");
-
- /*
- * TRACE
- */
- timeout = jiffies + (5 * HZ);
- while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
- barrier();
-
- if (cpus_empty(data.pending))
- printk(KERN_CRIT " RESOLVED\n");
- else
- printk(KERN_CRIT " STILL STUCK\n");
- }
-
- /*
- * whatever happened, we're done with the data, so release it
- */
- smp_call_function_data = NULL;
- spin_unlock(&smp_call_function_lock);
-
- if (!cpus_empty(data.pending)) {
- ret = -ETIMEDOUT;
- goto out;
- }
-
- if (wait)
- while (!cpus_empty(data.unfinished))
- barrier();
- out:
-
- return 0;
+ send_ipi_message(mask, IPI_CALL_FUNC);
}
-int smp_call_function(void (*func)(void *info), void *info, int retry,
- int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- return smp_call_function_on_cpu(func, info, retry, wait,
- cpu_online_map);
+ send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL_GPL(smp_call_function);
-
-int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
- int retry, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int current_cpu = get_cpu();
- int ret = 0;
-
- if (cpu == current_cpu) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- } else
- ret = smp_call_function_on_cpu(func, info, retry, wait,
- cpumask_of_cpu(cpu));
-
- put_cpu();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(smp_call_function_single);
void show_ipi_list(struct seq_file *p)
{
@@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
}
#endif
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
-{
- struct smp_call_struct *data = smp_call_function_data;
- void (*func)(void *info) = data->func;
- void *info = data->info;
- int wait = data->wait;
-
- cpu_clear(cpu, data->pending);
-
- func(info);
-
- if (wait)
- cpu_clear(cpu, data->unfinished);
-}
-
static DEFINE_SPINLOCK(stop_lock);
/*
@@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- ipi_call_function(cpu);
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_STOP:
@@ -669,7 +543,7 @@ on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
preempt_disable();
- ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+ ret = smp_call_function_mask(mask, func, info, retry, wait);
if (cpu_isset(smp_processor_id(), mask))
func(info);
--
1.5.5.49.gf43e2
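As a minimal usage sketch, assuming the generic API declared in patch 1 (with
its retry and wait arguments): once an arch provides
arch_send_call_function_single_ipi(), callers can use the generic
smp_call_function_single() directly. cpu_local_op() and run_on() below are
hypothetical names standing in for any fast, non-blocking per-CPU operation.

#include <linux/smp.h>

/* Hypothetical per-CPU work item; must be fast and non-blocking. */
static void cpu_local_op(void *info)
{
	unsigned long *result = info;

	*result = smp_processor_id();	/* stand-in for any CPU-local read */
}

static unsigned long run_on(int cpu)
{
	unsigned long result = 0;

	/* retry == 0, wait == 1: returns after cpu_local_op() ran on cpu */
	smp_call_function_single(cpu, cpu_local_op, &result, 0, 1);
	return result;
}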
^ permalink raw reply related [flat|nested] 139+ messages in thread
* Re: [PATCH 6/11] arm: convert to generic helpers for IPI function calls
2008-04-22 7:57 ` [PATCH 6/11] arm: " Jens Axboe
2008-04-22 7:57 ` Jens Axboe
@ 2008-04-22 15:00 ` Catalin Marinas
2008-04-22 15:00 ` Catalin Marinas
[not found] ` <1208876447.31997.30.camel-hhZApKj8DF/YkXV2EHHjLW3o5bpOHsLO@public.gmane.org>
1 sibling, 2 replies; 139+ messages in thread
From: Catalin Marinas @ 2008-04-22 15:00 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-arch, linux-kernel, npiggin, torvalds, Russell King
On Tue, 2008-04-22 at 09:57 +0200, Jens Axboe wrote:
> This converts arm to use the new helpers for smp_call_function() and
> friends, and adds support for smp_call_function_single(). Not tested
> at all, not even compiled.
With the minor fixes below, it compiles and seems to run OK on RealView
EB + ARM11MPCore (4 CPUs):
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 9c85329..6344466 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -484,7 +484,7 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
generic_smp_call_function_interrupt();
break;
- case IPI_CALL_FUNC:
+ case IPI_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
break;
@@ -536,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier)
}
static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
- cpumask_t mask)
+on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
{
int ret = 0;
preempt_disable();
- ret = smp_call_function_mask(mask, func, info, retry, wait);
+ ret = smp_call_function_mask(mask, func, info, wait);
if (cpu_isset(smp_processor_id(), mask))
func(info);
@@ -612,7 +611,7 @@ void flush_tlb_mm(struct mm_struct *mm)
{
cpumask_t mask = mm->cpu_vm_mask;
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -623,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
ta.ta_vma = vma;
ta.ta_start = uaddr;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
}
void flush_tlb_kernel_page(unsigned long kaddr)
@@ -645,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
ta.ta_start = start;
ta.ta_end = end;
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
--
Catalin
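The prototype fixes follow from the generic API in patch 1:
smp_call_function_mask() takes the mask as its first argument and has no
retry argument. A sketch of the call-site change (not a literal hunk):

	/* old arch-private helper (removed by the patch): */
	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);

	/* new generic helper: mask comes first, retry is gone: */
	ret = smp_call_function_mask(mask, func, info, wait);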
^ permalink raw reply related [flat|nested] 139+ messages in thread
[parent not found: <1208876447.31997.30.camel-hhZApKj8DF/YkXV2EHHjLW3o5bpOHsLO@public.gmane.org>]
* Re: [PATCH 6/11] arm: convert to generic helpers for IPI function calls
[not found] ` <1208876447.31997.30.camel-hhZApKj8DF/YkXV2EHHjLW3o5bpOHsLO@public.gmane.org>
@ 2008-04-22 18:43 ` Jens Axboe
2008-04-22 18:43 ` Jens Axboe
0 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 18:43 UTC (permalink / raw)
To: Catalin Marinas
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Russell King
On Tue, Apr 22 2008, Catalin Marinas wrote:
> On Tue, 2008-04-22 at 09:57 +0200, Jens Axboe wrote:
> > This converts arm to use the new helpers for smp_call_function() and
> > friends, and adds support for smp_call_function_single(). Not tested
> > at all, not even compiled.
>
> With the minor fixes below, it compiles and seems to run OK on RealView
> EB + ARM11MPCore (4 CPUs):
Pretty silly errors, I was missing the cross compiler... Thanks a lot for
checking and testing, I've added your changes!
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* [PATCH 7/11] m32r: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (5 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 6/11] arm: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` [PATCH 8/11] mips: " Jens Axboe
` (5 subsequent siblings)
12 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Hirokazu Takata
This converts m32r to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
not even compiled.
Cc: Hirokazu Takata <takata-TMSiXQfHlQjsTix1lMzHGQ@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/m32r/Kconfig | 5 ++
arch/m32r/kernel/smp.c | 128 +++++-----------------------------------------
arch/m32r/kernel/traps.c | 3 +-
include/asm-m32r/smp.h | 1 +
4 files changed, 21 insertions(+), 116 deletions(-)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index de153de..14d56f9 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -30,6 +30,11 @@ config GENERIC_IRQ_PROBE
bool
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config NO_IOPORT
def_bool y
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c837bc1..74eb7bc 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -35,22 +35,6 @@
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-static struct call_data_struct *call_data;
-
-/*
* For flush_cache_all()
*/
static DEFINE_SPINLOCK(flushcache_lock);
@@ -96,9 +80,6 @@ void smp_invalidate_interrupt(void);
void smp_send_stop(void);
static void stop_this_cpu(void *);
-int smp_call_function(void (*) (void *), void *, int, int);
-void smp_call_function_interrupt(void);
-
void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);
@@ -565,86 +546,14 @@ static void stop_this_cpu(void *dummy)
for ( ; ; );
}
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Call function Routines */
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-
-/*==========================================================================*
- * Name: smp_call_function
- *
- * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
- * in the system.
- *
- * Born on Date: 2002.02.05
- *
- * Arguments: *func - The function to run. This must be fast and
- * non-blocking.
- * *info - An arbitrary pointer to pass to the function.
- * nonatomic - currently unused.
- * wait - If true, wait (atomically) until function has
- * completed on other CPUs.
- *
- * Returns: 0 on success, else a negative status code. Does not return
- * until remote CPUs are nearly ready to execute <<func>> or
- * are or have executed.
- *
- * Cautions: You must not call this function with disabled interrupts or
- * from a hardware interrupt handler, you may call it from a
- * bottom half handler.
- *
- * Modification log:
- * Date Who Description
- * ---------- --- --------------------------------------------------------
- *
- *==========================================================================*/
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
- int cpus;
-
-#ifdef DEBUG_SMP
- unsigned long flags;
- __save_flags(flags);
- if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
- BUG();
-#endif /* DEBUG_SMP */
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
- cpus = num_online_cpus() - 1;
-
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- spin_unlock(&call_lock);
+ send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
+}
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
/*==========================================================================*
@@ -666,27 +575,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
*==========================================================================*/
void smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
irq_exit();
+}
- if (wait) {
- mb();
- atomic_inc(&call_data->finished);
- }
+void smp_call_function_single_interrupt(void)
+{
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ irq_exit();
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 89ba4a0..46159a4 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -40,6 +40,7 @@ extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
+extern void smp_call_function_single_interrupt(void);
/*
* for Boot AP function
@@ -103,7 +104,7 @@ void set_eit_vector_entries(void)
eit_vector[186] = (unsigned long)smp_call_function_interrupt;
eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
- eit_vector[189] = 0;
+ eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
eit_vector[190] = 0;
eit_vector[191] = 0;
#endif
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 078e1a5..6a7f3af 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -104,6 +104,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
#define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
#define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
#define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+#define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0)
#define IPI_SHIFT (0)
#define NR_IPIS (8)
--
1.5.5.49.gf43e2
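The m32r wrappers above just bracket the generic handlers with irq_enter()
and irq_exit(). For readers wondering what they hand off to, here is a rough
sketch of the generic single-call handler's job, simplified from the
kernel/smp.c added in patch 1: the queue and lock names are illustrative, the
wait-flag handshake is elided, and only struct call_single_data (list, func,
info) is taken from the real API.

#include <linux/list.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(single_queue_lock);	/* illustrative name */
static LIST_HEAD(single_queue);			/* illustrative per-CPU queue */

void generic_single_interrupt_sketch(void)
{
	struct call_single_data *data, *next;
	LIST_HEAD(local);

	/* detach everything queued for this CPU in one critical section */
	spin_lock(&single_queue_lock);
	list_replace_init(&single_queue, &local);
	spin_unlock(&single_queue_lock);

	/* run each queued function, then drop its descriptor */
	list_for_each_entry_safe(data, next, &local, list) {
		list_del(&data->list);
		data->func(data->info);
	}
}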
^ permalink raw reply related [flat|nested] 139+ messages in thread
* [PATCH 8/11] mips: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (6 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 7/11] m32r: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` [PATCH 9/11] parisc: " Jens Axboe
` (4 subsequent siblings)
12 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Ralf Baechle
This converts mips to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
but it compiles.
Cc: Ralf Baechle <ralf-6z/3iImG2C8G8FEW9MqTrA@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/mips/Kconfig | 5 ++
arch/mips/kernel/smp-mt.c | 27 ++++++++-
arch/mips/kernel/smp.c | 133 +++-------------------------------------
arch/mips/kernel/smtc.c | 7 ++
arch/mips/sibyte/bcm1480/smp.c | 3 +
arch/mips/sibyte/sb1250/smp.c | 2 +
include/asm-mips/smp.h | 12 +---
7 files changed, 53 insertions(+), 136 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8724ed3..6fdf0f1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1669,6 +1669,11 @@ config GENERIC_IRQ_PROBE
bool
default y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config IRQ_PER_CPU
bool
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 89e6f6a..31049fc 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -38,8 +38,9 @@
#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1
+#define MIPS_CPU_IPI_CALL_SINGLE_IRQ 2
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
+static int cpu_ipi_resched_irq, cpu_ipi_call_irq, cpu_ipi_call_single_irq;
#if 0
static void dump_mtregisters(int vpe, int tc)
@@ -115,6 +116,11 @@ static void ipi_call_dispatch(void)
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
+static void ipi_call_single_dispatch(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_SINGLE_IRQ);
+}
+
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
return IRQ_HANDLED;
@@ -127,6 +133,13 @@ static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t ipi_call_single_interrupt(int irq, void *dev_id)
+{
+ smp_call_function_single_interrupt();
+
+ return IRQ_HANDLED;
+}
+
static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
.flags = IRQF_DISABLED|IRQF_PERCPU,
@@ -139,6 +152,12 @@ static struct irqaction irq_call = {
.name = "IPI_call"
};
+static struct irqaction irq_call_single = {
+ .handler = ipi_call_single_interrupt,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
+ .name = "IPI_call_single"
+};
+
static void __init smp_copy_vpe_config(void)
{
write_vpe_c0_status(
@@ -376,16 +395,22 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus)
if (cpu_has_vint) {
set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+ set_vi_handler(MIPS_CPU_IPI_CALL_SINGLE_IRQ,
+ ipi_call_single_dispatch);
}
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+ cpu_ipi_call_single_irq = MIPS_CPU_IRQ_BASE +
+ MIPS_CPU_IPI_CALL_SINGLE_IRQ;
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);
+ setup_irq(cpu_ipi_call_single_irq, &irq_call_single);
set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+ set_irq_handler(cpu_ipi_call_single_irq, handle_percpu_irq);
}
struct plat_smp_ops vsmp_smp_ops = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab..931c0d9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -128,145 +128,28 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- * <mask> cpuset_t of all processors to run the function on.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A CPU B
- * Disable interrupts
- * smp_call_function()
- * Take call_lock
- * Send IPIs
- * Wait for all cpus to acknowledge IPI
- * CPU A has not responded, spin waiting
- * for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
- void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
- int cpu = smp_processor_id();
- int cpus;
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- BUG_ON(!cpu_online(cpu));
-
- cpu_clear(cpu, mask);
- cpus = cpus_weight(mask);
- if (!cpus)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- smp_mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- call_data = NULL;
- spin_unlock(&smp_call_lock);
-
- return 0;
}
-int smp_call_function(void (*func) (void *info), void *info, int retry,
- int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+ mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION_SINGLE);
}
void smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function.
- */
- smp_mb();
- atomic_inc(&call_data->started);
-
- /*
- * At this point the info structure may be out of scope unless wait==1.
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
irq_exit();
-
- if (wait) {
- smp_mb();
- atomic_inc(&call_data->finished);
- }
}
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int retry, int wait)
+void smp_call_function_single_interrupt(void)
{
- int ret, me;
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- if (!cpu_online(cpu))
- return 0;
-
- me = get_cpu();
- BUG_ON(!cpu_online(me));
-
- if (cpu == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
- wait);
-
- put_cpu();
- return 0;
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ irq_exit();
}
static void stop_this_cpu(void *dummy)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index b42e71c..434605f 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -884,6 +884,10 @@ static void ipi_resched_interrupt(void)
/* Return from interrupt should be enough to cause scheduler check */
}
+static void ipi_call_single_interrupt(void)
+{
+ smp_call_function_single_interrupt();
+}
static void ipi_call_interrupt(void)
{
@@ -924,6 +928,9 @@ void ipi_decode(struct smtc_ipi *pipi)
case SMP_CALL_FUNCTION:
ipi_call_interrupt();
break;
+ case SMP_CALL_FUNCTION_SINGLE:
+ ipi_call_single_interrupt();
+ break;
default:
printk("Impossible SMTC IPI Argument 0x%x\n",
(int)arg_copy);
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index bd9eeb4..76d36ff 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,6 +29,7 @@
#include <asm/sibyte/bcm1480_int.h>
extern void smp_call_function_interrupt(void);
+extern void smp_call_function_single_interrupt(void);
/*
* These are routines for dealing with the bcm1480 smp capabilities
@@ -194,4 +195,6 @@ void bcm1480_mailbox_interrupt(void)
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
+ else if (action & SMP_CALL_FUNCTION_SINGLE)
+ smp_call_function_single_interrupt();
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 0734b93..8742127 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -182,4 +182,6 @@ void sb1250_mailbox_interrupt(void)
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
+ else if (action & SMP_CALL_FUNCTION_SINGLE)
+ smp_call_function_single_interrupt();
}
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 84fef1a..d35c0ad 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -35,18 +35,9 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-struct call_data_struct {
- void (*func)(void *);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-};
-
-extern struct call_data_struct *call_data;
-
#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
+#define SMP_CALL_FUNCTION_SINGLE 0x4
extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map phys_cpu_present_map
@@ -66,5 +57,6 @@ static inline void smp_send_reschedule(int cpu)
}
extern asmlinkage void smp_call_function_interrupt(void);
+extern asmlinkage void smp_call_function_single_interrupt(void);
#endif /* __ASM_SMP_H */
--
1.5.5.49.gf43e2
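On sibyte, IPI types travel as action bits in a single mailbox word
(SMP_RESCHEDULE_YOURSELF is 0x1, SMP_CALL_FUNCTION 0x2, and the new
SMP_CALL_FUNCTION_SINGLE 0x4, per the asm-mips/smp.h hunk above). A hedged
sketch of a decoder that checks each bit independently; the patch's mailbox
handlers use if/else if instead:

/* Illustrative decoder: each mailbox action bit is an independent IPI. */
static void decode_mailbox_actions(unsigned int action)
{
	/* SMP_RESCHEDULE_YOURSELF (0x1): the IRQ return path handles it */
	if (action & SMP_CALL_FUNCTION)			/* 0x2 */
		smp_call_function_interrupt();
	if (action & SMP_CALL_FUNCTION_SINGLE)		/* 0x4 */
		smp_call_function_single_interrupt();
}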
^ permalink raw reply related [flat|nested] 139+ messages in thread
* [PATCH 9/11] parisc: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (7 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 8/11] mips: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
[not found] ` <1208851058-8500-10-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 7:57 ` [PATCH 10/11] sh: " Jens Axboe
` (3 subsequent siblings)
12 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Kyle McMartin, Matthew Wilcox, Grant Grundler
This converts parisc to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
not even compiled.
Cc: Kyle McMartin <kyle-6jwH94ZQLHl74goWV3ctuw@public.gmane.org>
Cc: Matthew Wilcox <matthew-Ztpu424NOJ8@public.gmane.org>
Cc: Grant Grundler <grundler-6jwH94ZQLHl74goWV3ctuw@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/parisc/Kconfig | 5 ++
arch/parisc/kernel/smp.c | 134 +++++++--------------------------------------
2 files changed, 26 insertions(+), 113 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bc7a19d..73ab474 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -72,6 +72,11 @@ config GENERIC_HARDIRQS
config GENERIC_IRQ_PROBE
def_bool y
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config IRQ_PER_CPU
bool
default y
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 85fc775..126105c 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map);
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
-struct smp_call_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t unstarted_count;
- atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
-
enum ipi_message_type {
IPI_NOP=0,
IPI_RESCHEDULE=1,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_START,
IPI_CPU_STOP,
IPI_CPU_TEST
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id)
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
- {
- volatile struct smp_call_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- data = smp_call_function_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_dec ((atomic_t *)&data->unstarted_count);
-
- /* At this point, *data can't
- * be relied upon.
- */
-
- (*func)(info);
-
- /* Notify the sending CPU that the
- * task is done.
- */
- mb();
- if (wait)
- atomic_dec ((atomic_t *)&data->unfinished_count);
- }
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+ generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_START:
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op)
spin_unlock_irqrestore(lock, flags);
}
+static void
+send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ ipi_send(cpu, op);
+}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
@@ -295,86 +274,15 @@ smp_send_all_nop(void)
send_IPI_allbutself(IPI_NOP);
}
-
-/**
- * Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- static DEFINE_SPINLOCK(lock);
- int retries = 0;
-
- if (num_online_cpus() < 2)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* can also deadlock if IPIs are disabled */
- WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-
- data.func = func;
- data.info = info;
- data.wait = wait;
- atomic_set(&data.unstarted_count, num_online_cpus() - 1);
- atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
- if (retry) {
- spin_lock (&lock);
- while (smp_call_function_data != 0)
- barrier();
- }
- else {
- spin_lock (&lock);
- if (smp_call_function_data) {
- spin_unlock (&lock);
- return -EBUSY;
- }
- }
-
- smp_call_function_data = &data;
- spin_unlock (&lock);
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
- /* Wait for response */
- timeout = jiffies + HZ;
- while ( (atomic_read (&data.unstarted_count) > 0) &&
- time_before (jiffies, timeout) )
- barrier ();
-
- if (atomic_read (&data.unstarted_count) > 0) {
- printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
- smp_processor_id(), ++retries);
- goto retry;
- }
- /* We either got one or timed out. Release the lock */
-
- mb();
- smp_call_function_data = NULL;
-
- while (wait && atomic_read (&data.unfinished_count) > 0)
- barrier ();
-
- return 0;
+ send_IPI_mask(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
/*
* Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
--
1.5.5.49.gf43e2
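With the fan-out send_IPI_mask() in place, parisc picks up the full generic
API. A closing usage sketch of the all-CPUs variant, again assuming the
patch-1 signature (retry, then wait); sync_local_state() is a hypothetical
function name:

#include <linux/smp.h>

/* Hypothetical function to run everywhere; fast and non-blocking. */
static void sync_local_state(void *unused)
{
	/* per-CPU work would go here */
}

static void sync_all_cpus(void)
{
	/* retry == 0, wait == 1: all other CPUs have run it on return */
	smp_call_function(sync_local_state, NULL, 0, 1);

	/* smp_call_function() targets the other CPUs only, so run it here
	 * as well; on_each_cpu() wraps this pattern with preemption
	 * disabled. */
	sync_local_state(NULL);
}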
^ permalink raw reply related [flat|nested] 139+ messages in thread
[parent not found: <1208851058-8500-10-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH 9/11] parisc: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-10-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
@ 2008-04-22 14:09 ` Kyle McMartin
2008-04-22 14:09 ` Kyle McMartin
[not found] ` <20080422140914.GD19802-EK4dZfYtfFRW/gs8oUvUg/d9D2ou9A/h@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Kyle McMartin @ 2008-04-22 14:09 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Kyle McMartin,
Matthew Wilcox, Grant Grundler
On Tue, Apr 22, 2008 at 09:57:35AM +0200, Jens Axboe wrote:
> This converts mips to use the new helpers for smp_call_function() and
> friends, and adds support for smp_call_function_single(). Not tested,
> not even compiled.
>
At first glance, this looks alright. +1 from me.
cheers, Kyle
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422140914.GD19802-EK4dZfYtfFRW/gs8oUvUg/d9D2ou9A/h@public.gmane.org>]
* Re: [PATCH 9/11] parisc: convert to generic helpers for IPI function calls
[not found] ` <20080422140914.GD19802-EK4dZfYtfFRW/gs8oUvUg/d9D2ou9A/h@public.gmane.org>
@ 2008-04-22 14:27 ` Jens Axboe
2008-04-22 14:27 ` Jens Axboe
[not found] ` <20080422142738.GH12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 14:27 UTC (permalink / raw)
To: Kyle McMartin
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Kyle McMartin,
Matthew Wilcox, Grant Grundler
On Tue, Apr 22 2008, Kyle McMartin wrote:
> On Tue, Apr 22, 2008 at 09:57:35AM +0200, Jens Axboe wrote:
> > This converts mips to use the new helpers for smp_call_function() and
> > friends, and adds support for smp_call_function_single(). Not tested,
> > not even compiled.
> >
>
> At first glance, this looks alright. +1 from me.
Is that an official parisc ack? :-)
And I'll make sure the patch comment matches... I wish that Andrew's xb
compiler collection had all the supported archs covered.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422142738.GH12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>]
* Re: [PATCH 9/11] parisc: convert to generic helpers for IPI function calls
[not found] ` <20080422142738.GH12774-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
@ 2008-04-22 14:30 ` Kyle McMartin
2008-04-22 14:30 ` Kyle McMartin
[not found] ` <20080422143016.GE19802-EK4dZfYtfFRW/gs8oUvUg/d9D2ou9A/h@public.gmane.org>
0 siblings, 2 replies; 139+ messages in thread
From: Kyle McMartin @ 2008-04-22 14:30 UTC (permalink / raw)
To: Jens Axboe
Cc: Kyle McMartin, linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Kyle McMartin,
Matthew Wilcox, Grant Grundler
On Tue, Apr 22, 2008 at 04:27:38PM +0200, Jens Axboe wrote:
> On Tue, Apr 22 2008, Kyle McMartin wrote:
> > On Tue, Apr 22, 2008 at 09:57:35AM +0200, Jens Axboe wrote:
> > > This converts mips to use the new helpers for smp_call_function() and
> > > friends, and adds support for smp_call_function_single(). Not tested,
> > > not even compiled.
> > >
> >
> > At first glance, this looks alright. +1 from me.
>
> Is that an official parisc ack? :-)
>
> And I'll make sure the patch comment matches... I wish that Andrew's xb
> compiler collection had all the supported archs covered.
>
I'm not sure if these are still good, but:
http://kyle.mcmartin.ca/hppa-hp-linux-gcc-3.4.tar
http://kyle.mcmartin.ca/hppa64-hp-linux-gcc-3.4.tar
cheers, Kyle
^ permalink raw reply [flat|nested] 139+ messages in thread
[parent not found: <20080422143016.GE19802-EK4dZfYtfFRW/gs8oUvUg/d9D2ou9A/h@public.gmane.org>]
* Re: [PATCH 9/11] parisc: convert to generic helpers for IPI function calls
[not found] ` <20080422143016.GE19802-EK4dZfYtfFRW/gs8oUvUg/d9D2ou9A/h@public.gmane.org>
@ 2008-04-22 14:49 ` Jens Axboe
2008-04-22 14:49 ` Jens Axboe
0 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 14:49 UTC (permalink / raw)
To: Kyle McMartin
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Kyle McMartin,
Matthew Wilcox, Grant Grundler
On Tue, Apr 22 2008, Kyle McMartin wrote:
> On Tue, Apr 22, 2008 at 04:27:38PM +0200, Jens Axboe wrote:
> > On Tue, Apr 22 2008, Kyle McMartin wrote:
> > > On Tue, Apr 22, 2008 at 09:57:35AM +0200, Jens Axboe wrote:
> > > > This converts mips to use the new helpers for smp_call_function() and
> > > > friends, and adds support for smp_call_function_single(). Not tested,
> > > > not even compiled.
> > > >
> > >
> > > At first glance, this looks alright. +1 from me.
> >
> > Is that an official parisc ack? :-)
> >
> > And I'll make sure the patch comment matches... I wish that Andrew's xb
> > compiler collection had all the supported archs covered.
> >
>
> I'm not sure if these are still good, but:
> http://kyle.mcmartin.ca/hppa-hp-linux-gcc-3.4.tar
> http://kyle.mcmartin.ca/hppa64-hp-linux-gcc-3.4.tar
If you could polish those off and ship to Andrew, lots more people could
add parisc to their compile coverage. Pretty please :-)
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* [PATCH 10/11] sh: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (8 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 9/11] parisc: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` [PATCH 11/11] s390: " Jens Axboe
` (2 subsequent siblings)
12 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Paul Mundt
This converts sh to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
but it compiles.
Cc: Paul Mundt <lethal-M7jkjyW5wf5g9hUCZPvPmw@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/sh/Kconfig | 5 +++++
arch/sh/kernel/smp.c | 48 ++++++++----------------------------------------
include/asm-sh/smp.h | 12 ++----------
3 files changed, 15 insertions(+), 50 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6a679c3..ba25a01 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -56,6 +56,11 @@ config GENERIC_TIME
config GENERIC_CLOCKEVENTS
def_bool n
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
config SYS_SUPPORTS_PM
bool
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5d039d1..2ed8dce 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
-static atomic_t cpus_booted = ATOMIC_INIT(0);
-
-/*
- * Run specified function on a particular processor.
- */
-void __smp_call_function(unsigned int cpu);
-
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
@@ -178,42 +171,17 @@ void smp_send_stop(void)
smp_call_function(stop_this_cpu, 0, 1, 0);
}
-struct smp_fn_call_struct smp_fn_call = {
- .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
- .finished = ATOMIC_INIT(0),
-};
-
-/*
- * The caller of this wants the passed function to run on every cpu. If wait
- * is set, wait until all cpus have finished the function before returning.
- * The lock is here to protect the call structure.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- unsigned int nr_cpus = atomic_read(&cpus_booted);
- int i;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- spin_lock(&smp_fn_call.lock);
-
- atomic_set(&smp_fn_call.finished, 0);
- smp_fn_call.fn = func;
- smp_fn_call.data = info;
-
- for (i = 0; i < nr_cpus; i++)
- if (i != smp_processor_id())
- plat_send_ipi(i, SMP_MSG_FUNCTION);
-
- if (wait)
- while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
+ int cpu;
- spin_unlock(&smp_fn_call.lock);
+ for_each_cpu_mask(cpu, mask)
+ plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+}
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
/* Not really SMP stuff ... */
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h
index 9c8d34b..7982516 100644
--- a/include/asm-sh/smp.h
+++ b/include/asm-sh/smp.h
@@ -26,18 +26,10 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-struct smp_fn_call_struct {
- spinlock_t lock;
- atomic_t finished;
- void (*fn)(void *);
- void *data;
-};
-
-extern struct smp_fn_call_struct smp_fn_call;
-
#define SMP_MSG_FUNCTION 0
#define SMP_MSG_RESCHEDULE 1
-#define SMP_MSG_NR 2
+#define SMP_MSG_FUNCTION_SINGLE 2
+#define SMP_MSG_NR 3
void plat_smp_setup(void);
void plat_prepare_cpus(unsigned int max_cpus);
--
1.5.5.49.gf43e2
^ permalink raw reply related [flat|nested] 139+ messages in thread
* [PATCH 11/11] s390: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (9 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 10/11] sh: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
[not found] ` <1208851058-8500-12-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2008-04-22 7:57 ` Jens Axboe
2008-04-22 8:48 ` [PATCH 0/11] Generic smp_call_function() and friends Peter Zijlstra
12 siblings, 2 replies; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b,
Jens Axboe, Martin Schwidefsky, Heiko Carstens
This converts s390 to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
but it compiles.
Cc: Martin Schwidefsky <schwidefsky-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Cc: Heiko Carstens <heiko.carstens-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Signed-off-by: Jens Axboe <jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
---
arch/s390/Kconfig | 5 +
arch/s390/kernel/smp.c | 160 ++---------------------------------
arch/sparc64/kernel/sparc64_ksyms.c | 1 +
include/asm-s390/sigp.h | 1 +
include/asm-s390/smp.h | 2 -
5 files changed, 15 insertions(+), 154 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f6a68e1..ebaf50f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,11 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
+config GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
mainmenu "Linux Kernel Configuration"
config S390
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0dfa988..2ee3484 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -77,164 +77,18 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig);
-/*
- * Structure and data for __smp_call_function_map(). This is designed to
- * minimise static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- cpumask_t started;
- cpumask_t finished;
- int wait;
-};
-
-static struct call_data_struct *call_data;
-
-/*
- * 'Call function' interrupt callback
- */
-static void do_call_function(void)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- cpu_set(smp_processor_id(), call_data->started);
- (*func)(info);
- if (wait)
- cpu_set(smp_processor_id(), call_data->finished);;
-}
-
-static void __smp_call_function_map(void (*func) (void *info), void *info,
- int nonatomic, int wait, cpumask_t map)
-{
- struct call_data_struct data;
- int cpu, local = 0;
-
- /*
- * Can deadlock when interrupts are disabled or if in wrong context.
- */
- WARN_ON(irqs_disabled() || in_irq());
-
- /*
- * Check for local function call. We have to have the same call order
- * as in on_each_cpu() because of machine_restart_smp().
- */
- if (cpu_isset(smp_processor_id(), map)) {
- local = 1;
- cpu_clear(smp_processor_id(), map);
- }
-
- cpus_and(map, map, cpu_online_map);
- if (cpus_empty(map))
- goto out;
-
- data.func = func;
- data.info = info;
- data.started = CPU_MASK_NONE;
- data.wait = wait;
- if (wait)
- data.finished = CPU_MASK_NONE;
-
- spin_lock(&call_lock);
- call_data = &data;
+ int cpu;
- for_each_cpu_mask(cpu, map)
+ for_each_cpu_mask(cpu, mask)
smp_ext_bitcall(cpu, ec_call_function);
-
- /* Wait for response */
- while (!cpus_equal(map, data.started))
- cpu_relax();
- if (wait)
- while (!cpus_equal(map, data.finished))
- cpu_relax();
- spin_unlock(&call_lock);
-out:
- if (local) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- }
}
-/*
- * smp_call_function:
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on all other CPUs.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- cpumask_t map;
-
- preempt_disable();
- map = cpu_online_map;
- cpu_clear(smp_processor_id(), map);
- __smp_call_function_map(func, info, nonatomic, wait, map);
- preempt_enable();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single:
- * @cpu: the CPU where func should run
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on one processor.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- preempt_disable();
- __smp_call_function_map(func, info, nonatomic, wait,
- cpumask_of_cpu(cpu));
- preempt_enable();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
- int wait)
-{
- preempt_disable();
- cpu_clear(smp_processor_id(), mask);
- __smp_call_function_map(func, info, 0, wait, mask);
- preempt_enable();
- return 0;
+ smp_ext_bitcall(cpu, ec_call_function_single);
}
-EXPORT_SYMBOL(smp_call_function_mask);
void smp_send_stop(void)
{
@@ -277,7 +131,9 @@ static void do_ext_call_interrupt(__u16 code)
bits = xchg(&S390_lowcore.ext_call_fast, 0);
if (test_bit(ec_call_function, &bits))
- do_call_function();
+ generic_smp_call_function_interrupt();
+ else if (test_bit(ec_call_function_single, &bits))
+ generic_smp_call_function_single_interrupt();
}
/*
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 3873646..17b16b1 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -112,6 +112,7 @@ EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_call_function_single);
#endif /* CONFIG_SMP */
#if defined(CONFIG_MCOUNT)
diff --git a/include/asm-s390/sigp.h b/include/asm-s390/sigp.h
index e16d56f..ec403d4 100644
--- a/include/asm-s390/sigp.h
+++ b/include/asm-s390/sigp.h
@@ -61,6 +61,7 @@ typedef enum
{
ec_schedule=0,
ec_call_function,
+ ec_call_function_single,
ec_bit_last
} ec_bit_sig;
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 6f3821a..ae0d0b5 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -93,8 +93,6 @@ extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait);
#endif
#ifndef CONFIG_SMP
--
1.5.5.49.gf43e2
^ permalink raw reply related [flat|nested] 139+ messages in thread
[parent not found: <1208851058-8500-12-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH 11/11] s390: convert to generic helpers for IPI function calls
[not found] ` <1208851058-8500-12-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
@ 2008-04-22 8:04 ` Martin Schwidefsky
2008-04-22 8:04 ` Martin Schwidefsky
2008-04-22 8:07 ` Jens Axboe
0 siblings, 2 replies; 139+ messages in thread
From: Martin Schwidefsky @ 2008-04-22 8:04 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Heiko Carstens
On Tue, 2008-04-22 at 09:57 +0200, Jens Axboe wrote:
> diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
> index 3873646..17b16b1 100644
> --- a/arch/sparc64/kernel/sparc64_ksyms.c
> +++ b/arch/sparc64/kernel/sparc64_ksyms.c
> @@ -112,6 +112,7 @@ EXPORT_SYMBOL(__write_unlock);
> EXPORT_SYMBOL(__write_trylock);
>
> EXPORT_SYMBOL(smp_call_function);
> +EXPORT_SYMBOL(smp_call_function_single);
> #endif /* CONFIG_SMP */
>
> #if defined(CONFIG_MCOUNT)
This does not belong in the s390 patch ;-)
--
blue skies,
Martin.
"Reality continues to ruin my life." - Calvin.
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 11/11] s390: convert to generic helpers for IPI function calls
2008-04-22 8:04 ` Martin Schwidefsky
2008-04-22 8:04 ` Martin Schwidefsky
@ 2008-04-22 8:07 ` Jens Axboe
2008-04-22 8:07 ` Jens Axboe
1 sibling, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 8:07 UTC (permalink / raw)
To: Martin Schwidefsky
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b, Heiko Carstens
On Tue, Apr 22 2008, Martin Schwidefsky wrote:
> On Tue, 2008-04-22 at 09:57 +0200, Jens Axboe wrote:
> > diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
> > index 3873646..17b16b1 100644
> > --- a/arch/sparc64/kernel/sparc64_ksyms.c
> > +++ b/arch/sparc64/kernel/sparc64_ksyms.c
> > @@ -112,6 +112,7 @@ EXPORT_SYMBOL(__write_unlock);
> > EXPORT_SYMBOL(__write_trylock);
> >
> > EXPORT_SYMBOL(smp_call_function);
> > +EXPORT_SYMBOL(smp_call_function_single);
> > #endif /* CONFIG_SMP */
> >
> > #if defined(CONFIG_MCOUNT)
>
> This does not belong in the s390 patch ;-)
Woops, I wonder how that got in there! Thanks, will kill it.
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* [PATCH 0/11] Generic smp_call_function() and friends
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (10 preceding siblings ...)
2008-04-22 7:57 ` [PATCH 11/11] s390: " Jens Axboe
@ 2008-04-22 7:57 ` Jens Axboe
2008-04-22 7:57 ` Jens Axboe
2008-04-22 8:48 ` [PATCH 0/11] Generic smp_call_function() and friends Peter Zijlstra
12 siblings, 1 reply; 139+ messages in thread
From: Jens Axboe @ 2008-04-22 7:57 UTC (permalink / raw)
To: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA
Cc: npiggin-l3A5Bk7waGM, torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
While working on the more scalable and faster
smp_call_function_single(), the amount of arch duplicated code in there
bothered me. So I started adding a generic kernel/smp.c helper that
the archs could use instead.
Arch code exports two helper functions:
arch_send_call_function_single_ipi(cpu)
Send call function single ipi to given cpu
arch_send_call_function_ipi(cpumask)
Send call function ipi to cpus in cpumask
and then use the generic ipi interrupt helpers to execute the code. I
converted most of the archs capable of SMP, I think only sparc and
sparc64 are still missing. Archs set CONFIG_GENERIC_SMP_HELPERS if they
wish to use the generic helpers for this.
The end result is that we have unified code for handling ipi for
function calls. Even with adding special handling for the single cpu
function call, the diffstat is pretty nice (see below).
There are a couple of changes that affect all archs, and changes that
affect some archs:
- smp_call_function() and friends used to wait for all other CPUs to see
the individual structure before returning, even if 'wait' wasn't set.
Not sure what the reasoning behind this was; the new code does not
wait for other CPUs to start up before returning. If 'wait' is 1,
it'll wait for completion as before, of course. This affects all archs.
It's easy enough to add back if there's a good reason; it'll slow things
down a little and add an atomic_t to the call_single_data structure.
- A few archs had timeout code, most did not. I removed the timeout
code, we can add it back if requested.
- The ipi_lock is a little muddy, which may or may not break cpu
onlining.
In general the new code should be easier to maintain, and it is much
faster than before. The code works fine on x86, x86-64, ia64, and
powerpc. Other archs have been compiled whenever possible, some have
not. Each arch patch should contain a note saying what the status of it
is.
arch/alpha/Kconfig | 5
arch/alpha/kernel/core_marvel.c | 6
arch/alpha/kernel/smp.c | 170 ---------
arch/arm/Kconfig | 5
arch/arm/kernel/smp.c | 148 --------
arch/ia64/Kconfig | 5
arch/ia64/kernel/smp.c | 239 -------------
arch/m32r/Kconfig | 5
arch/m32r/kernel/smp.c | 128 -------
arch/m32r/kernel/traps.c | 3
arch/mips/Kconfig | 5
arch/mips/kernel/smp-mt.c | 27 +
arch/mips/kernel/smp.c | 133 -------
arch/mips/kernel/smtc.c | 7
arch/mips/sibyte/bcm1480/smp.c | 3
arch/mips/sibyte/sb1250/smp.c | 2
arch/parisc/Kconfig | 5
arch/parisc/kernel/smp.c | 134 +------
arch/powerpc/Kconfig | 5
arch/powerpc/kernel/smp.c | 220 ------------
arch/powerpc/platforms/cell/interrupt.c | 1
arch/powerpc/platforms/ps3/smp.c | 7
arch/powerpc/platforms/pseries/xics.c | 6
arch/powerpc/sysdev/mpic.c | 2
arch/s390/Kconfig | 5
arch/s390/kernel/smp.c | 160 ---------
arch/sh/Kconfig | 5
arch/sh/kernel/smp.c | 48 --
arch/sparc64/kernel/sparc64_ksyms.c | 1
arch/x86/Kconfig | 5
arch/x86/kernel/apic_32.c | 4
arch/x86/kernel/entry_64.S | 3
arch/x86/kernel/i8259_64.c | 4
arch/x86/kernel/smp.c | 148 +-------
arch/x86/kernel/smpcommon.c | 56 ---
arch/x86/mach-voyager/voyager_smp.c | 91 -----
arch/x86/xen/enlighten.c | 1
arch/x86/xen/mmu.c | 2
arch/x86/xen/smp.c | 108 +-----
include/asm-alpha/smp.h | 2
include/asm-ia64/smp.h | 3
include/asm-m32r/smp.h | 1
include/asm-mips/smp.h | 12
include/asm-powerpc/smp.h | 5
include/asm-s390/sigp.h | 1
include/asm-s390/smp.h | 2
include/asm-sh/smp.h | 12
include/asm-x86/hw_irq_32.h | 1
include/asm-x86/hw_irq_64.h | 2
include/asm-x86/mach-default/entry_arch.h | 1
include/asm-x86/mach-default/irq_vectors.h | 1
include/asm-x86/mach-voyager/entry_arch.h | 2
include/asm-x86/mach-voyager/irq_vectors.h | 4
include/asm-x86/smp.h | 10
include/linux/smp.h | 27 +
init/main.c | 3
kernel/Makefile | 1
kernel/smp.c | 366 +++++++++++++++++++++
58 files changed, 694 insertions(+), 1674 deletions(-)
--
Jens Axboe
^ permalink raw reply [flat|nested] 139+ messages in thread
* Re: [PATCH 0/11] Generic smp_call_function() and friends
[not found] ` <1208851058-8500-1-git-send-email-jens.axboe-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
` (11 preceding siblings ...)
2008-04-22 7:57 ` Jens Axboe
@ 2008-04-22 8:48 ` Peter Zijlstra
2008-04-22 8:48 ` Peter Zijlstra
12 siblings, 1 reply; 139+ messages in thread
From: Peter Zijlstra @ 2008-04-22 8:48 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-arch-u79uwXL29TY76Z2rM5mHXA,
linux-kernel-u79uwXL29TY76Z2rM5mHXA, npiggin-l3A5Bk7waGM,
torvalds-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b
On Tue, 2008-04-22 at 09:57 +0200, Jens Axboe wrote:
> Hi,
>
> While working on the more scalable and faster
> smp_call_function_single(), the amount of arch duplicated code in there
> bothered me. So I started adding a generic kernel/smp.c helper that
> the archs could use instead.
>
> Arch code exports two helper functions:
>
> arch_send_call_function_single_ipi(cpu)
> Send call function single ipi to given cpu
> arch_send_call_function_ipi(cpumask)
> Send call function ipi to cpus in cpumask
>
> and then use the generic ipi interrupt helpers to execute the code. I
> converted most of the archs capable of SMP, I think only sparc and
> sparc64 are still missing. Archs set CONFIG_GENERIC_SMP_HELPERS if they
> wish to use the generic helpers for this.
>
> The end result is that we have unified code for handling ipi for
> function calls. Even with adding special handling for the single cpu
> function call, the diffstat is pretty nice (see below).
Real nice work Jens!
^ permalink raw reply [flat|nested] 139+ messages in thread