From: Mike Travis <travis@sgi.com>
To: Ingo Molnar <mingo@elte.hu>, Rusty Russell <rusty@rustcorp.com.au>
Cc: davej@codemonkey.org.uk, Jeremy Fitzhardinge <jeremy@goop.org>,
Jes Sorensen <jes@sgi.com>, IA64 <linux-ia64@vger.kernel.org>,
S390 <linux-s390@vger.kernel.org>,
peterz@infradead.org, Jack Steiner <steiner@sgi.com>,
linux-kernel@vger.kernel.org, Eric Dumazet <dada1@cosmosbay.com>,
PowerPC <linuxppc-dev@ozlabs.org>,
Andi Kleen <andi@firstfloor.org>,
Thomas Gleixner <tglx@linutronix.de>,
Yinghai Lu <yhlu.kernel@gmail.com>,
"H. Peter Anvin" <hpa@zytor.com>,
SPARC <sparclinux@vger.kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
David Miller <davem@davemloft.net>
Subject: [PATCH 35/35] cpumask: smp_call_function_many()
Date: Mon, 20 Oct 2008 10:03:54 -0700
Message-ID: <20081020170324.412681000@polaris-admin.engr.sgi.com>
In-Reply-To: <20081020170319.539427000@polaris-admin.engr.sgi.com>
Transition from the cpumask_t-taking smp_call_function_mask() to a new
smp_call_function_many(), which takes a struct cpumask * instead.
(The name is inspired by smp_call_function_single().)

Note that the new function returns void: the old one could not fail
either, except on a logic bug.
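
For callers, the conversion is mechanical. A sketch (do_flush is a
made-up callback, not part of this patch):

        /* Old: the whole cpumask_t is copied by value. */
        smp_call_function_mask(cpu_online_map, do_flush, NULL, 1);

        /* New: the mask is passed by const pointer, so no
         * NR_CPUS-sized copy lands on the stack. */
        smp_call_function_many(cpu_online_mask, do_flush, NULL, true);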
From: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
arch/s390/include/asm/smp.h | 3 -
arch/s390/kernel/smp.c | 31 +++++++++++---------
include/linux/smp.h | 13 +++++++-
kernel/smp.c | 68 +++++++++++++++++++++++---------------------
4 files changed, 64 insertions(+), 51 deletions(-)
--- test-compile.orig/arch/s390/include/asm/smp.h
+++ test-compile/arch/s390/include/asm/smp.h
@@ -90,9 +90,6 @@ extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
-
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait);
#endif
#ifndef CONFIG_SMP
--- test-compile.orig/arch/s390/kernel/smp.c
+++ test-compile/arch/s390/kernel/smp.c
@@ -103,7 +103,7 @@ static void do_call_function(void)
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
- int wait, cpumask_t map)
+ int wait, struct cpumask *map)
{
struct call_data_struct data;
int cpu, local = 0;
@@ -163,14 +163,16 @@ out:
* You must not call this function with disabled interrupts, from a
* hardware interrupt handler or from a bottom half.
*/
+
+/* protected by call_lock */
+static DECLARE_BITMAP(smp_call_map, CONFIG_NR_CPUS);
+
int smp_call_function(void (*func) (void *info), void *info, int wait)
{
- cpumask_t map;
-
spin_lock(&call_lock);
- map = cpu_online_map;
- cpu_clear(smp_processor_id(), map);
- __smp_call_function_map(func, info, wait, map);
+ cpumask_copy(to_cpumask(smp_call_map), cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), to_cpumask(smp_call_map));
+ __smp_call_function_map(func, info, wait, to_cpumask(smp_call_map));
spin_unlock(&call_lock);
return 0;
}
@@ -192,14 +194,15 @@ int smp_call_function_single(int cpu, vo
int wait)
{
spin_lock(&call_lock);
- __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
+ cpumask_copy(to_cpumask(smp_call_map), cpumask_of(cpu));
+ __smp_call_function_map(func, info, wait, to_cpumask(smp_call_map));
spin_unlock(&call_lock);
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * smp_call_function_many(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on. Must not include the current cpu.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
@@ -213,16 +216,16 @@ EXPORT_SYMBOL(smp_call_function_single);
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
- int wait)
+void smp_call_function_many(const struct cpumask *mask,
+ void (*func)(void *), void *info, bool wait)
{
spin_lock(&call_lock);
- cpu_clear(smp_processor_id(), mask);
- __smp_call_function_map(func, info, wait, mask);
+ cpumask_copy(to_cpumask(smp_call_map), mask);
+ cpumask_clear_cpu(smp_processor_id(), to_cpumask(smp_call_map));
+ __smp_call_function_map(func, info, wait, to_cpumask(smp_call_map));
spin_unlock(&call_lock);
- return 0;
}
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL(smp_call_function_many);
void smp_send_stop(void)
{
--- test-compile.orig/include/linux/smp.h
+++ test-compile/include/linux/smp.h
@@ -62,12 +62,21 @@ extern void smp_cpus_done(unsigned int m
* Call a function on all other processors
*/
int smp_call_function(void(*func)(void *info), void *info, int wait);
-int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
- int wait);
+void smp_call_function_many(const struct cpumask *mask,
+ void(*func)(void *info), void *info, bool wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data);
+/* Use smp_call_function_many, which takes a pointer to the mask. */
+static inline int __deprecated
+smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait)
+{
+ smp_call_function_many(&mask, func, info, wait);
+ return 0;
+}
+
/*
* Generic and arch helpers
*/
--- test-compile.orig/kernel/smp.c
+++ test-compile/kernel/smp.c
@@ -24,7 +24,7 @@ struct call_function_data {
struct call_single_data csd;
spinlock_t lock;
unsigned int refs;
- cpumask_t cpumask;
+ struct cpumask *cpumask;
struct rcu_head rcu_head;
};
@@ -109,13 +109,13 @@ void generic_smp_call_function_interrupt
list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
int refs;
- if (!cpu_isset(cpu, data->cpumask))
+ if (!cpumask_test_cpu(cpu, data->cpumask))
continue;
data->csd.func(data->csd.info);
spin_lock(&data->lock);
- cpu_clear(cpu, data->cpumask);
+ cpumask_clear_cpu(cpu, data->cpumask);
WARN_ON(data->refs == 0);
data->refs--;
refs = data->refs;
@@ -273,7 +273,7 @@ static void quiesce_dummy(void *unused)
/*
* Ensure stack based data used in call function mask is safe to free.
*
- * This is needed by smp_call_function_mask when using on-stack data, because
+ * This is needed by smp_call_function_many when using on-stack data, because
* a single call function queue is shared by all CPUs, and any CPU may pick up
* the data item on the queue at any time before it is deleted. So we need to
* ensure that all CPUs have transitioned through a quiescent state after
@@ -287,7 +287,7 @@ static void quiesce_dummy(void *unused)
* If a faster scheme can be made, we could go back to preferring stack based
* data -- the data allocation/free is non-zero cost.
*/
-static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+static void smp_call_function_mask_quiesce_stack(const struct cpumask *mask)
{
struct call_single_data data;
int cpu;
@@ -295,21 +295,19 @@ static void smp_call_function_mask_quies
data.func = quiesce_dummy;
data.info = NULL;
- for_each_cpu_mask(cpu, mask) {
+ for_each_cpu(cpu, mask) {
data.flags = CSD_FLAG_WAIT;
generic_exec_single(cpu, &data);
}
}
/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code.
- *
* If @wait is true, then returns once @func has returned. Note that @wait
* will be implicitly turned on in case of allocation failures, since
* we fall back to on-stack allocation.
@@ -318,12 +316,13 @@ static void smp_call_function_mask_quies
* hardware interrupt handler or from a bottom half handler. Preemption
* must be disabled when calling this function.
*/
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
- int wait)
+void smp_call_function_many(const struct cpumask *mask,
+ void (*func)(void *), void *info,
+ bool wait)
{
struct call_function_data d;
struct call_function_data *data = NULL;
- cpumask_t allbutself;
+ cpumask_var_t allbutself;
unsigned long flags;
int cpu, num_cpus;
int slowpath = 0;
@@ -331,21 +330,28 @@ int smp_call_function_mask(cpumask_t mas
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
- cpu = smp_processor_id();
- allbutself = cpu_online_map;
- cpu_clear(cpu, allbutself);
- cpus_and(mask, mask, allbutself);
- num_cpus = cpus_weight(mask);
+ if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+ /* Slow path. */
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpumask_test_cpu(cpu, mask))
+ smp_call_function_single(cpu, func, info, wait);
+ }
+ return;
+ }
+ cpumask_and(allbutself, cpu_online_mask, mask);
+ cpumask_clear_cpu(smp_processor_id(), allbutself);
+ num_cpus = cpumask_weight(allbutself);
/*
* If zero CPUs, return. If just a single CPU, turn this request
* into a targetted single call instead since it's faster.
*/
if (!num_cpus)
- return 0;
+ goto out;
else if (num_cpus == 1) {
- cpu = first_cpu(mask);
- return smp_call_function_single(cpu, func, info, wait);
+ cpu = cpumask_first(allbutself);
+ smp_call_function_single(cpu, func, info, wait);
+ goto out;
}
data = kmalloc(sizeof(*data), GFP_ATOMIC);
@@ -364,25 +370,25 @@ int smp_call_function_mask(cpumask_t mas
data->csd.func = func;
data->csd.info = info;
data->refs = num_cpus;
- data->cpumask = mask;
+ data->cpumask = allbutself;
spin_lock_irqsave(&call_function_lock, flags);
list_add_tail_rcu(&data->csd.list, &call_function_queue);
spin_unlock_irqrestore(&call_function_lock, flags);
/* Send a message to all CPUs in the map */
- arch_send_call_function_ipi(mask);
+ arch_send_call_function_ipi(*allbutself);
/* optionally wait for the CPUs to complete */
if (wait) {
csd_flag_wait(&data->csd);
if (unlikely(slowpath))
- smp_call_function_mask_quiesce_stack(mask);
+ smp_call_function_mask_quiesce_stack(allbutself);
}
-
- return 0;
+out:
+ free_cpumask_var(allbutself);
}
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL(smp_call_function_many);
/**
* smp_call_function(): Run a function on all other CPUs.
@@ -390,7 +396,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code.
+ * Returns 0.
*
* If @wait is true, then returns once @func has returned; otherwise
* it returns just before the target cpu calls @func. In case of allocation
@@ -401,12 +407,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
*/
int smp_call_function(void (*func)(void *), void *info, int wait)
{
- int ret;
-
preempt_disable();
- ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+ smp_call_function_many(cpu_online_mask, func, info, wait);
preempt_enable();
- return ret;
+ return 0;
}
EXPORT_SYMBOL(smp_call_function);
--