* [PATCH 45/51] md, raid5: Fix CPU hotplug callback registration
[not found] <20140205220251.19080.92336.stgit@srivatsabhat.in.ibm.com>
@ 2014-02-05 22:12 ` Srivatsa S. Bhat
2014-02-06 1:11 ` NeilBrown
0 siblings, 1 reply; 4+ messages in thread
From: Srivatsa S. Bhat @ 2014-02-05 22:12 UTC (permalink / raw)
To: paulus, oleg, rusty, peterz, tglx, akpm
Cc: mingo, paulmck, tj, walken, ego, linux, linux-kernel,
srivatsa.bhat, Neil Brown, linux-raid, stable
From: Oleg Nesterov <oleg@redhat.com>
Subsystems that want to register CPU hotplug callbacks, as well as perform
initialization for the CPUs that are already online, often do it as shown
below:
get_online_cpus();
for_each_online_cpu(cpu)
init_cpu(cpu);
register_cpu_notifier(&foobar_cpu_notifier);
put_online_cpus();
This is wrong, since it is prone to ABBA deadlocks involving the
cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently
with CPU hotplug operations).
Interestingly, the raid5 code can actually prevent double initialization and
hence can use the following simplified form of callback registration:
register_cpu_notifier(&foobar_cpu_notifier);
get_online_cpus();
for_each_online_cpu(cpu)
init_cpu(cpu);
put_online_cpus();
A hotplug operation that occurs between registering the notifier and calling
get_online_cpus(), won't disrupt anything, because the code takes care to
perform the memory allocations only once.
So reorganize the code in raid5 this way to fix the deadlock with callback
registration.
Cc: Neil Brown <neilb@suse.de>
Cc: linux-raid@vger.kernel.org
Cc: stable@vger.kernel.org
[Srivatsa: Fixed the unregister_cpu_notifier() deadlock, added the
free_scratch_buffer() helper to condense code further and wrote the changelog.]
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---
drivers/md/raid5.c | 90 +++++++++++++++++++++++++---------------------------
1 file changed, 44 insertions(+), 46 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feade..16f5c21 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH 45/51] md, raid5: Fix CPU hotplug callback registration
2014-02-05 22:12 ` [PATCH 45/51] md, raid5: Fix CPU hotplug callback registration Srivatsa S. Bhat
@ 2014-02-06 1:11 ` NeilBrown
2014-02-06 10:05 ` Srivatsa S. Bhat
0 siblings, 1 reply; 4+ messages in thread
From: NeilBrown @ 2014-02-06 1:11 UTC (permalink / raw)
To: Srivatsa S. Bhat
Cc: paulus, oleg, rusty, peterz, tglx, akpm, mingo, paulmck, tj,
walken, ego, linux, linux-kernel, linux-raid, stable
[-- Attachment #1: Type: text/plain, Size: 6308 bytes --]
On Thu, 06 Feb 2014 03:42:45 +0530 "Srivatsa S. Bhat"
<srivatsa.bhat@linux.vnet.ibm.com> wrote:
> From: Oleg Nesterov <oleg@redhat.com>
>
> Subsystems that want to register CPU hotplug callbacks, as well as perform
> initialization for the CPUs that are already online, often do it as shown
> below:
>
> get_online_cpus();
>
> for_each_online_cpu(cpu)
> init_cpu(cpu);
>
> register_cpu_notifier(&foobar_cpu_notifier);
>
> put_online_cpus();
>
> This is wrong, since it is prone to ABBA deadlocks involving the
> cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently
> with CPU hotplug operations).
>
> Interestingly, the raid5 code can actually prevent double initialization and
> hence can use the following simplified form of callback registration:
>
> register_cpu_notifier(&foobar_cpu_notifier);
>
> get_online_cpus();
>
> for_each_online_cpu(cpu)
> init_cpu(cpu);
>
> put_online_cpus();
>
> A hotplug operation that occurs between registering the notifier and calling
> get_online_cpus(), won't disrupt anything, because the code takes care to
> perform the memory allocations only once.
>
> So reorganize the code in raid5 this way to fix the deadlock with callback
> registration.
>
> Cc: Neil Brown <neilb@suse.de>
> Cc: linux-raid@vger.kernel.org
> Cc: stable@vger.kernel.org
> [Srivatsa: Fixed the unregister_cpu_notifier() deadlock, added the
> free_scratch_buffer() helper to condense code further and wrote the changelog.]
> Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
> ---
>
> drivers/md/raid5.c | 90 +++++++++++++++++++++++++---------------------------
> 1 file changed, 44 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index f1feade..16f5c21 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
> return sectors * (raid_disks - conf->max_degraded);
> }
>
> +static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
> +{
> + safe_put_page(percpu->spare_page);
> + kfree(percpu->scribble);
> + percpu->spare_page = NULL;
> + percpu->scribble = NULL;
> +}
> +
> +static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
> +{
> + if (conf->level == 6 && !percpu->spare_page)
> + percpu->spare_page = alloc_page(GFP_KERNEL);
> + if (!percpu->scribble)
> + percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
> +
> + if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
> + free_scratch_buffer(conf, percpu);
> + return -ENOMEM;
> + }
> +
> + return 0;
> +}
> +
> static void raid5_free_percpu(struct r5conf *conf)
> {
> - struct raid5_percpu *percpu;
> unsigned long cpu;
>
> if (!conf->percpu)
> return;
>
> - get_online_cpus();
> - for_each_possible_cpu(cpu) {
> - percpu = per_cpu_ptr(conf->percpu, cpu);
> - safe_put_page(percpu->spare_page);
> - kfree(percpu->scribble);
> - }
> #ifdef CONFIG_HOTPLUG_CPU
> unregister_cpu_notifier(&conf->cpu_notify);
> #endif
> +
> + get_online_cpus();
> + for_each_possible_cpu(cpu)
> + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
> put_online_cpus();
>
> free_percpu(conf->percpu);
> @@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
> switch (action) {
> case CPU_UP_PREPARE:
> case CPU_UP_PREPARE_FROZEN:
> - if (conf->level == 6 && !percpu->spare_page)
> - percpu->spare_page = alloc_page(GFP_KERNEL);
> - if (!percpu->scribble)
> - percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
> -
> - if (!percpu->scribble ||
> - (conf->level == 6 && !percpu->spare_page)) {
> - safe_put_page(percpu->spare_page);
> - kfree(percpu->scribble);
> + if (alloc_scratch_buffer(conf, percpu)) {
> pr_err("%s: failed memory allocation for cpu%ld\n",
> __func__, cpu);
> return notifier_from_errno(-ENOMEM);
> @@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
> break;
> case CPU_DEAD:
> case CPU_DEAD_FROZEN:
> - safe_put_page(percpu->spare_page);
> - kfree(percpu->scribble);
> - percpu->spare_page = NULL;
> - percpu->scribble = NULL;
> + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
> break;
> default:
> break;
> @@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
> static int raid5_alloc_percpu(struct r5conf *conf)
> {
> unsigned long cpu;
> - struct page *spare_page;
> - struct raid5_percpu __percpu *allcpus;
> - void *scribble;
> - int err;
> + int err = 0;
>
> - allcpus = alloc_percpu(struct raid5_percpu);
> - if (!allcpus)
> + conf->percpu = alloc_percpu(struct raid5_percpu);
> + if (!conf->percpu)
> return -ENOMEM;
> - conf->percpu = allcpus;
> +
> +#ifdef CONFIG_HOTPLUG_CPU
> + conf->cpu_notify.notifier_call = raid456_cpu_notify;
> + conf->cpu_notify.priority = 0;
> + err = register_cpu_notifier(&conf->cpu_notify);
> + if (err)
> + return err;
> +#endif
>
> get_online_cpus();
> - err = 0;
> for_each_present_cpu(cpu) {
> - if (conf->level == 6) {
> - spare_page = alloc_page(GFP_KERNEL);
> - if (!spare_page) {
> - err = -ENOMEM;
> - break;
> - }
> - per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
> - }
> - scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
> - if (!scribble) {
> - err = -ENOMEM;
> + err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
> + if (err) {
> + pr_err("%s: failed memory allocation for cpu%ld\n",
> + __func__, cpu);
> break;
> }
> - per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
> }
> -#ifdef CONFIG_HOTPLUG_CPU
> - conf->cpu_notify.notifier_call = raid456_cpu_notify;
> - conf->cpu_notify.priority = 0;
> - if (err == 0)
> - err = register_cpu_notifier(&conf->cpu_notify);
> -#endif
> put_online_cpus();
>
> return err;
Looks good, thanks.
Shall I wait for a signed-off-by from Oleg, then queue it through my md tree?
NeilBrown
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 828 bytes --]
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 45/51] md, raid5: Fix CPU hotplug callback registration
2014-02-06 1:11 ` NeilBrown
@ 2014-02-06 10:05 ` Srivatsa S. Bhat
2014-02-06 18:43 ` Oleg Nesterov
0 siblings, 1 reply; 4+ messages in thread
From: Srivatsa S. Bhat @ 2014-02-06 10:05 UTC (permalink / raw)
To: NeilBrown, oleg
Cc: paulus, rusty, peterz, tglx, akpm, mingo, paulmck, tj, walken,
ego, linux, linux-kernel, linux-raid, stable
On 02/06/2014 06:41 AM, NeilBrown wrote:
> On Thu, 06 Feb 2014 03:42:45 +0530 "Srivatsa S. Bhat"
> <srivatsa.bhat@linux.vnet.ibm.com> wrote:
>
>> From: Oleg Nesterov <oleg@redhat.com>
>>
>> Subsystems that want to register CPU hotplug callbacks, as well as perform
>> initialization for the CPUs that are already online, often do it as shown
>> below:
>>
>> get_online_cpus();
>>
>> for_each_online_cpu(cpu)
>> init_cpu(cpu);
>>
>> register_cpu_notifier(&foobar_cpu_notifier);
>>
>> put_online_cpus();
>>
>> This is wrong, since it is prone to ABBA deadlocks involving the
>> cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently
>> with CPU hotplug operations).
>>
>> Interestingly, the raid5 code can actually prevent double initialization and
>> hence can use the following simplified form of callback registration:
>>
>> register_cpu_notifier(&foobar_cpu_notifier);
>>
>> get_online_cpus();
>>
>> for_each_online_cpu(cpu)
>> init_cpu(cpu);
>>
>> put_online_cpus();
>>
>> A hotplug operation that occurs between registering the notifier and calling
>> get_online_cpus(), won't disrupt anything, because the code takes care to
>> perform the memory allocations only once.
>>
>> So reorganize the code in raid5 this way to fix the deadlock with callback
>> registration.
>>
>> Cc: Neil Brown <neilb@suse.de>
>> Cc: linux-raid@vger.kernel.org
>> Cc: stable@vger.kernel.org
>> [Srivatsa: Fixed the unregister_cpu_notifier() deadlock, added the
>> free_scratch_buffer() helper to condense code further and wrote the changelog.]
>> Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
>> ---
[...]
>
>
> Looks good, thanks.
> Shall I wait for a signed-off-by from Oleg, then queue it through my md tree?
>
Sure, that sounds great, since this patch doesn't have any dependency.
Thanks a lot!
Oleg, it would be great if you could kindly add your S-O-B to this patch.
Thanks!
Regards,
Srivatsa S. Bhat
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 45/51] md, raid5: Fix CPU hotplug callback registration
2014-02-06 10:05 ` Srivatsa S. Bhat
@ 2014-02-06 18:43 ` Oleg Nesterov
0 siblings, 0 replies; 4+ messages in thread
From: Oleg Nesterov @ 2014-02-06 18:43 UTC (permalink / raw)
To: Srivatsa S. Bhat
Cc: NeilBrown, paulus, rusty, peterz, tglx, akpm, mingo, paulmck, tj,
walken, ego, linux, linux-kernel, linux-raid, stable
On 02/06, Srivatsa S. Bhat wrote:
>
> On 02/06/2014 06:41 AM, NeilBrown wrote:
> > Shall I wait for a signed-of-by from Oleg, then queue it through my md tree?
> >
>
> Sure, that sounds great, since this patch doesn't have any dependency.
> Thanks a lot!
>
> Oleg, it would be great if you could kindly add your S-O-B to this patch.
> Thanks!
Thanks Neil and Srivatsa,
Sure, feel free to add
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2014-02-06 18:43 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <20140205220251.19080.92336.stgit@srivatsabhat.in.ibm.com>
2014-02-05 22:12 ` [PATCH 45/51] md, raid5: Fix CPU hotplug callback registration Srivatsa S. Bhat
2014-02-06 1:11 ` NeilBrown
2014-02-06 10:05 ` Srivatsa S. Bhat
2014-02-06 18:43 ` Oleg Nesterov
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).