* [PATCH] Remove stop_machine during module load v2
@ 2008-08-30 8:09 Andi Kleen
2008-08-30 14:02 ` Paul E. McKenney
2008-09-01 7:08 ` Rusty Russell
0 siblings, 2 replies; 3+ messages in thread
From: Andi Kleen @ 2008-08-30 8:09 UTC (permalink / raw)
To: linux-kernel, akpm, rusty, paulmck
Remove stop_machine during module load v2
module loading currently does a stop_machine on each module load to insert
the module into the global module lists. Especially on larger systems this
can be quite expensive.
It does that to handle concurrent lockless module list readers
like kallsyms.
I don't think stop_machine() is actually needed to insert something
into a list though. There are no concurrent writers because the
module mutex is taken. And the RCU list functions know how to insert
a node into a list with the right memory ordering so that concurrent
readers don't go off into the woods.
So remove the stop_machine for the module list insert and just
do a list_add_rcu() instead.
Module removal will still do a stop_machine of course, it needs
that for other reasons.
v2: Revised readers based on Paul's comments. All readers that only
rely on disabled preemption need to be changed to list_for_each_entry_rcu().
Done that. The others are ok because they have the modules mutex.
Also added a possible missing preempt disable for print_modules().
[cc Paul McKenney for review. It's not RCU, but quite similar.]
Cc: rusty@rustcorp.com.au
Cc: paulmck@us.ibm.com
Index: linux-2.6.27-rc4-misc/kernel/module.c
===================================================================
--- linux-2.6.27-rc4-misc.orig/kernel/module.c
+++ linux-2.6.27-rc4-misc/kernel/module.c
@@ -42,6 +42,7 @@
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/unwind.h>
+#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/license.h>
@@ -61,7 +62,7 @@
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
/* List of modules, protected by module_mutex or preempt_disable
- * (add/delete uses stop_machine). */
+ * (delete uses stop_machine/add uses RCU list operations). */
static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules);
@@ -216,7 +217,7 @@ static bool each_symbol(bool (*fn)(const
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
return true;
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
struct symsearch arr[] = {
{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
NOT_GPL_ONLY, false },
@@ -1391,17 +1392,6 @@ static void mod_kobject_remove(struct mo
}
/*
- * link the module with the whole machine is stopped with interrupts off
- * - this defends against kallsyms not taking locks
- */
-static int __link_module(void *_mod)
-{
- struct module *mod = _mod;
- list_add(&mod->list, &modules);
- return 0;
-}
-
-/*
* unlink the module with the whole machine is stopped with interrupts off
* - this defends against kallsyms not taking locks
*/
@@ -2196,8 +2186,12 @@ static struct module *load_module(void _
/* Now sew it into the lists so we can get lockdep and oops
* info during argument parsing. Noone should access us, since
- * strong_try_module_get() will fail. */
- stop_machine(__link_module, mod, NULL);
+ * strong_try_module_get() will fail.
+ * lockdep/oops can run asynchronous, so use the RCU list insertion
+ * function to insert in a way safe to concurrent readers.
+ * The mutex protects against concurrent writers.
+ */
+ list_add_rcu(&mod->list, &modules);
/* Size of section 0 is 0, so this works well if no params */
err = parse_args(mod->name, mod->args,
@@ -2401,7 +2395,7 @@ const char *module_address_lookup(unsign
const char *ret = NULL;
preempt_disable();
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size)
|| within(addr, mod->module_core, mod->core_size)) {
if (modname)
@@ -2424,7 +2418,7 @@ int lookup_module_symbol_name(unsigned l
struct module *mod;
preempt_disable();
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
const char *sym;
@@ -2448,7 +2442,7 @@ int lookup_module_symbol_attrs(unsigned
struct module *mod;
preempt_disable();
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
const char *sym;
@@ -2475,7 +2469,7 @@ int module_get_kallsym(unsigned int symn
struct module *mod;
preempt_disable();
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
if (symnum < mod->num_symtab) {
*value = mod->symtab[symnum].st_value;
*type = mod->symtab[symnum].st_info;
@@ -2518,7 +2512,7 @@ unsigned long module_kallsyms_lookup_nam
ret = mod_find_symname(mod, colon+1);
*colon = ':';
} else {
- list_for_each_entry(mod, &modules, list)
+ list_for_each_entry_rcu(mod, &modules, list)
if ((ret = mod_find_symname(mod, name)) != 0)
break;
}
@@ -2619,7 +2613,7 @@ const struct exception_table_entry *sear
struct module *mod;
preempt_disable();
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
if (mod->num_exentries == 0)
continue;
@@ -2645,7 +2639,7 @@ int is_module_address(unsigned long addr
preempt_disable();
- list_for_each_entry(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_core, mod->core_size)) {
preempt_enable();
return 1;
@@ -2666,7 +2660,7 @@ struct module *__module_text_address(uns
if (addr < module_addr_min || addr > module_addr_max)
return NULL;
- list_for_each_entry(mod, &modules, list)
+ list_for_each_entry_rcu(mod, &modules, list)
if (within(addr, mod->module_init, mod->init_text_size)
|| within(addr, mod->module_core, mod->core_text_size))
return mod;
@@ -2691,8 +2685,11 @@ void print_modules(void)
char buf[8];
printk("Modules linked in:");
- list_for_each_entry(mod, &modules, list)
+ /* Most callers should already have preempt disabled, but make sure */
+ preempt_disable();
+ list_for_each_entry_rcu(mod, &modules, list)
printk(" %s%s", mod->name, module_flags(mod, buf));
+ preempt_enable();
if (last_unloaded_module[0])
printk(" [last unloaded: %s]", last_unloaded_module);
printk("\n");
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] Remove stop_machine during module load v2
2008-08-30 8:09 [PATCH] Remove stop_machine during module load v2 Andi Kleen
@ 2008-08-30 14:02 ` Paul E. McKenney
2008-09-01 7:08 ` Rusty Russell
1 sibling, 0 replies; 3+ messages in thread
From: Paul E. McKenney @ 2008-08-30 14:02 UTC (permalink / raw)
To: Andi Kleen; +Cc: linux-kernel, akpm, rusty
On Sat, Aug 30, 2008 at 10:09:00AM +0200, Andi Kleen wrote:
> Remove stop_machine during module load v2
>
> module loading currently does a stop_machine on each module load to insert
> the module into the global module lists. Especially on larger systems this
> can be quite expensive.
>
> It does that to handle concurrent lockless module list readers
> like kallsyms.
>
> I don't think stop_machine() is actually needed to insert something
> into a list though. There are no concurrent writers because the
> module mutex is taken. And the RCU list functions know how to insert
> a node into a list with the right memory ordering so that concurrent
> readers don't go off into the wood.
>
> So remove the stop_machine for the module list insert and just
> do a list_add_rcu() instead.
>
> Module removal will still do a stop_machine of course, it needs
> that for other reasons.
>
> v2: Revised readers based on Paul's comments. All readers that only
> rely on disabled preemption need to be changed to list_for_each_entry_rcu().
> Done that. The others are ok because they have the modules mutex.
> Also added a possible missing preempt disable for print_modules().
>
> [cc Paul McKenney for review. It's not RCU, but quite similar.]
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
> Cc: rusty@rustcorp.com.au
> Cc: paulmck@us.ibm.com
>
>
> Index: linux-2.6.27-rc4-misc/kernel/module.c
> ===================================================================
> --- linux-2.6.27-rc4-misc.orig/kernel/module.c
> +++ linux-2.6.27-rc4-misc/kernel/module.c
> @@ -42,6 +42,7 @@
> #include <linux/string.h>
> #include <linux/mutex.h>
> #include <linux/unwind.h>
> +#include <linux/rculist.h>
> #include <asm/uaccess.h>
> #include <asm/cacheflush.h>
> #include <linux/license.h>
> @@ -61,7 +62,7 @@
> #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
>
> /* List of modules, protected by module_mutex or preempt_disable
> - * (add/delete uses stop_machine). */
> + * (delete uses stop_machine/add uses RCU list operations). */
> static DEFINE_MUTEX(module_mutex);
> static LIST_HEAD(modules);
>
> @@ -216,7 +217,7 @@ static bool each_symbol(bool (*fn)(const
> if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
> return true;
>
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> struct symsearch arr[] = {
> { mod->syms, mod->syms + mod->num_syms, mod->crcs,
> NOT_GPL_ONLY, false },
> @@ -1391,17 +1392,6 @@ static void mod_kobject_remove(struct mo
> }
>
> /*
> - * link the module with the whole machine is stopped with interrupts off
> - * - this defends against kallsyms not taking locks
> - */
> -static int __link_module(void *_mod)
> -{
> - struct module *mod = _mod;
> - list_add(&mod->list, &modules);
> - return 0;
> -}
> -
> -/*
> * unlink the module with the whole machine is stopped with interrupts off
> * - this defends against kallsyms not taking locks
> */
> @@ -2196,8 +2186,12 @@ static struct module *load_module(void _
>
> /* Now sew it into the lists so we can get lockdep and oops
> * info during argument parsing. Noone should access us, since
> - * strong_try_module_get() will fail. */
> - stop_machine(__link_module, mod, NULL);
> + * strong_try_module_get() will fail.
> + * lockdep/oops can run asynchronous, so use the RCU list insertion
> + * function to insert in a way safe to concurrent readers.
> + * The mutex protects against concurrent writers.
> + */
> + list_add_rcu(&mod->list, &modules);
>
> /* Size of section 0 is 0, so this works well if no params */
> err = parse_args(mod->name, mod->args,
> @@ -2401,7 +2395,7 @@ const char *module_address_lookup(unsign
> const char *ret = NULL;
>
> preempt_disable();
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> if (within(addr, mod->module_init, mod->init_size)
> || within(addr, mod->module_core, mod->core_size)) {
> if (modname)
> @@ -2424,7 +2418,7 @@ int lookup_module_symbol_name(unsigned l
> struct module *mod;
>
> preempt_disable();
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> if (within(addr, mod->module_init, mod->init_size) ||
> within(addr, mod->module_core, mod->core_size)) {
> const char *sym;
> @@ -2448,7 +2442,7 @@ int lookup_module_symbol_attrs(unsigned
> struct module *mod;
>
> preempt_disable();
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> if (within(addr, mod->module_init, mod->init_size) ||
> within(addr, mod->module_core, mod->core_size)) {
> const char *sym;
> @@ -2475,7 +2469,7 @@ int module_get_kallsym(unsigned int symn
> struct module *mod;
>
> preempt_disable();
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> if (symnum < mod->num_symtab) {
> *value = mod->symtab[symnum].st_value;
> *type = mod->symtab[symnum].st_info;
> @@ -2518,7 +2512,7 @@ unsigned long module_kallsyms_lookup_nam
> ret = mod_find_symname(mod, colon+1);
> *colon = ':';
> } else {
> - list_for_each_entry(mod, &modules, list)
> + list_for_each_entry_rcu(mod, &modules, list)
> if ((ret = mod_find_symname(mod, name)) != 0)
> break;
> }
> @@ -2619,7 +2613,7 @@ const struct exception_table_entry *sear
> struct module *mod;
>
> preempt_disable();
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> if (mod->num_exentries == 0)
> continue;
>
> @@ -2645,7 +2639,7 @@ int is_module_address(unsigned long addr
>
> preempt_disable();
>
> - list_for_each_entry(mod, &modules, list) {
> + list_for_each_entry_rcu(mod, &modules, list) {
> if (within(addr, mod->module_core, mod->core_size)) {
> preempt_enable();
> return 1;
> @@ -2666,7 +2660,7 @@ struct module *__module_text_address(uns
> if (addr < module_addr_min || addr > module_addr_max)
> return NULL;
>
> - list_for_each_entry(mod, &modules, list)
> + list_for_each_entry_rcu(mod, &modules, list)
> if (within(addr, mod->module_init, mod->init_text_size)
> || within(addr, mod->module_core, mod->core_text_size))
> return mod;
> @@ -2691,8 +2685,11 @@ void print_modules(void)
> char buf[8];
>
> printk("Modules linked in:");
> - list_for_each_entry(mod, &modules, list)
> + /* Most callers should already have preempt disabled, but make sure */
> + preempt_disable();
> + list_for_each_entry_rcu(mod, &modules, list)
> printk(" %s%s", mod->name, module_flags(mod, buf));
> + preempt_enable();
> if (last_unloaded_module[0])
> printk(" [last unloaded: %s]", last_unloaded_module);
> printk("\n");
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] Remove stop_machine during module load v2
2008-08-30 8:09 [PATCH] Remove stop_machine during module load v2 Andi Kleen
2008-08-30 14:02 ` Paul E. McKenney
@ 2008-09-01 7:08 ` Rusty Russell
1 sibling, 0 replies; 3+ messages in thread
From: Rusty Russell @ 2008-09-01 7:08 UTC (permalink / raw)
To: Andi Kleen; +Cc: linux-kernel, akpm, paulmck
On Saturday 30 August 2008 18:09:00 Andi Kleen wrote:
> Remove stop_machine during module load v2
>
> module loading currently does a stop_machine on each module load to insert
> the module into the global module lists. Especially on larger systems this
> can be quite expensive.
>
> It does that to handle concurrent lockless module list readers
> like kallsyms.
Thanks, Andi!
Applied,
Rusty.
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2008-09-01 7:09 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-08-30 8:09 [PATCH] Remove stop_machine during module load v2 Andi Kleen
2008-08-30 14:02 ` Paul E. McKenney
2008-09-01 7:08 ` Rusty Russell
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox