* [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
@ 2010-03-15  9:10 Oleg Nesterov
  2010-03-25  3:00 ` Miao Xie
  2010-04-02 19:11 ` [tip:sched/core] sched: Kill " tip-bot for Oleg Nesterov
  0 siblings, 2 replies; 6+ messages in thread
From: Oleg Nesterov @ 2010-03-15  9:10 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar
  Cc: Ben Blum, Jiri Slaby, Lai Jiangshan, Li Zefan, Miao Xie,
	Paul Menage, Rafael J. Wysocki, Tejun Heo, linux-kernel

This patch just states the fact that the cpusets/cpuhotplug interaction is
broken and removes the deadlockable code which only pretends to work.

- cpuset_lock() doesn't really work. It is needed for
  cpuset_cpus_allowed_locked() but we can't take this lock in
  try_to_wake_up()->select_fallback_rq() path.

- cpuset_lock() is deadlockable. Suppose that a task T bound to CPU takes
  callback_mutex. If cpu_down(CPU) happens before T drops callback_mutex,
  stop_machine() preempts T, then migration_call(CPU_DEAD) tries to take
  cpuset_lock() and hangs forever because CPU is already dead and thus
  T can't be scheduled.

- cpuset_cpus_allowed_locked() is deadlockable too. It takes task_lock()
  which is not irq-safe, but try_to_wake_up() can be called from irq.
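
To illustrate the last point, a rough interleaving (a hand-drawn sketch, not
actual output; the call chain is the one removed below):

	CPU 0, process context            CPU 0, hard irq
	----------------------            -----------------------------
	task_lock(p);  /* plain spin_lock,
	                  irqs stay enabled */
	     <irq arrives>
	                                  try_to_wake_up(p)
	                                    select_fallback_rq()
	                                      cpuset_cpus_allowed_locked(p)
	                                        task_lock(p); /* already held
	                                           on this CPU => spins forever */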

Kill them, and change select_fallback_rq() to use cpu_possible_mask, like
we currently do without CONFIG_CPUSETS.

Also, with or without this patch, with or without CONFIG_CPUSETS, the
callers of select_fallback_rq() can race with each other or with
set_cpus_allowed() paths.

The subsequent patches try to fix these problems.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---

 include/linux/cpuset.h |   13 -------------
 kernel/cpuset.c        |   27 +--------------------------
 kernel/sched.c         |   10 +++-------
 3 files changed, 4 insertions(+), 46 deletions(-)

--- 34-rc1/include/linux/cpuset.h~1_KILL_CPUSET_LOCK	2010-03-15 09:38:51.000000000 +0100
+++ 34-rc1/include/linux/cpuset.h	2010-03-15 09:40:16.000000000 +0100
@@ -21,8 +21,6 @@ extern int number_of_cpusets;	/* How man
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +67,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,11 +100,6 @@ static inline void cpuset_cpus_allowed(s
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
-{
-	cpumask_copy(mask, cpu_possible_mask);
-}
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 {
@@ -157,9 +147,6 @@ static inline void cpuset_task_status_al
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
--- 34-rc1/kernel/cpuset.c~1_KILL_CPUSET_LOCK	2010-03-15 09:38:51.000000000 +0100
+++ 34-rc1/kernel/cpuset.c	2010-03-15 09:40:16.000000000 +0100
@@ -2140,19 +2140,10 @@ void __init cpuset_init_smp(void)
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	mutex_lock(&callback_mutex);
-	cpuset_cpus_allowed_locked(tsk, pmask);
-	mutex_unlock(&callback_mutex);
-}
-
-/**
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
- **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
-{
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), pmask);
 	task_unlock(tsk);
+	mutex_unlock(&callback_mutex);
 }
 
 void cpuset_init_current_mems_allowed(void)
@@ -2341,22 +2332,6 @@ int __cpuset_node_allowed_hardwall(int n
 }
 
 /**
- * cpuset_lock - lock out any changes to cpuset structures
- *
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset.  Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list.  The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
- */
-
-void cpuset_lock(void)
-{
-	mutex_lock(&callback_mutex);
-}
-
-/**
  * cpuset_unlock - release lock on cpuset changes
  *
  * Undo the lock taken in a previous cpuset_lock() call.
--- 34-rc1/kernel/sched.c~1_KILL_CPUSET_LOCK	2010-03-15 09:38:51.000000000 +0100
+++ 34-rc1/kernel/sched.c	2010-03-15 09:40:16.000000000 +0100
@@ -2288,11 +2288,9 @@ static int select_fallback_rq(int cpu, s
 		return dest_cpu;
 
 	/* No more Mr. Nice Guy. */
-	if (dest_cpu >= nr_cpu_ids) {
-		rcu_read_lock();
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		rcu_read_unlock();
-		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+	if (unlikely(dest_cpu >= nr_cpu_ids)) {
+		cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+		dest_cpu = cpumask_any(cpu_active_mask);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -5929,7 +5927,6 @@ migration_call(struct notifier_block *nf
 
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
@@ -5943,7 +5940,6 @@ migration_call(struct notifier_block *nf
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
 		raw_spin_unlock_irq(&rq->lock);
-		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 		calc_global_load_remove(rq);



* Re: [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
  2010-03-15  9:10 [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code Oleg Nesterov
@ 2010-03-25  3:00 ` Miao Xie
  2010-03-25 10:14   ` Oleg Nesterov
  2010-04-02 19:11 ` [tip:sched/core] sched: Kill " tip-bot for Oleg Nesterov
  1 sibling, 1 reply; 6+ messages in thread
From: Miao Xie @ 2010-03-25  3:00 UTC (permalink / raw)
  To: Oleg Nesterov
  Cc: Peter Zijlstra, Ingo Molnar, Ben Blum, Jiri Slaby, Lai Jiangshan,
	Li Zefan, Paul Menage, Rafael J. Wysocki, Tejun Heo, linux-kernel

on 2010-3-15 17:10, Oleg Nesterov wrote:
> This patch just states the fact that the cpusets/cpuhotplug interaction is
> broken and removes the deadlockable code which only pretends to work.
> 
> - cpuset_lock() doesn't really work. It is needed for
>   cpuset_cpus_allowed_locked() but we can't take this lock in
>   try_to_wake_up()->select_fallback_rq() path.
> 
> - cpuset_lock() is deadlockable. Suppose that a task T bound to CPU takes
>   callback_mutex. If cpu_down(CPU) happens before T drops callback_mutex,
>   stop_machine() preempts T, then migration_call(CPU_DEAD) tries to take
>   cpuset_lock() and hangs forever because CPU is already dead and thus
>   T can't be scheduled.

The problem you describe doesn't exist, because the kernel has already moved T
to an active cpu when preparing to turn off a CPU.

Thanks!
Miao



* Re: [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
  2010-03-25  3:00 ` Miao Xie
@ 2010-03-25 10:14   ` Oleg Nesterov
  2010-03-25 12:27     ` Miao Xie
  0 siblings, 1 reply; 6+ messages in thread
From: Oleg Nesterov @ 2010-03-25 10:14 UTC (permalink / raw)
  To: Miao Xie
  Cc: Peter Zijlstra, Ingo Molnar, Ben Blum, Jiri Slaby, Lai Jiangshan,
	Li Zefan, Paul Menage, Rafael J. Wysocki, Tejun Heo, linux-kernel

On 03/25, Miao Xie wrote:
>
> on 2010-3-15 17:10, Oleg Nesterov wrote:
> > This patch just states the fact that the cpusets/cpuhotplug interaction is
> > broken and removes the deadlockable code which only pretends to work.
> >
> > - cpuset_lock() doesn't really work. It is needed for
> >   cpuset_cpus_allowed_locked() but we can't take this lock in
> >   try_to_wake_up()->select_fallback_rq() path.
> >
> > - cpuset_lock() is deadlockable. Suppose that a task T bound to CPU takes
> >   callback_mutex. If cpu_down(CPU) happens before T drops callback_mutex,
> >   stop_machine() preempts T, then migration_call(CPU_DEAD) tries to take
> >   cpuset_lock() and hangs forever because CPU is already dead and thus
> >   T can't be scheduled.
>
> The problem you describe doesn't exist, because the kernel has already moved T
> to an active cpu when preparing to turn off a CPU.

We need cpuset_lock() to move T. Please look at _cpu_down().

OK.

	A task T holds callback_mutex, and it is bound to CPU 1.

	_cpu_down(cpu => 1) is called by the task X.

	_cpu_down()->stop_machine() spawns rt-threads for each cpu;
	the thread running on CPU 1 preempts T and calls take_cpu_down(),
	which removes CPU 1 from the online/active masks.

	X continues and does raw_notifier_call_chain(CPU_DEAD); this
	calls migration_call(CPU_DEAD), and _this_ is what moves the
	tasks off the dead CPU.

	migration_call(CPU_DEAD) calls cpuset_lock() and deadlocks.
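
Or, condensed into a side-by-side timeline (the same scenario, nothing new):

	T (bound to CPU 1)              X: _cpu_down(1)
	------------------              --------------------------------
	mutex_lock(&callback_mutex);
	                                stop_machine() -> take_cpu_down()
	                                  /* CPU 1 leaves the online/active
	                                     masks, T is preempted and can
	                                     no longer run anywhere */
	                                raw_notifier_call_chain(CPU_DEAD)
	                                  migration_call(CPU_DEAD)
	                                    cpuset_lock();
	                                      /* waits for callback_mutex,
	                                         held by T, which can only
	                                         run on the dead CPU 1 */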

See?

Oleg.



* Re: [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
  2010-03-25 10:14   ` Oleg Nesterov
@ 2010-03-25 12:27     ` Miao Xie
  2010-03-25 12:59       ` Oleg Nesterov
  0 siblings, 1 reply; 6+ messages in thread
From: Miao Xie @ 2010-03-25 12:27 UTC (permalink / raw)
  To: Oleg Nesterov
  Cc: Peter Zijlstra, Ingo Molnar, Ben Blum, Jiri Slaby, Lai Jiangshan,
	Li Zefan, Paul Menage, Rafael J. Wysocki, Tejun Heo, linux-kernel

on 2010-3-25 18:14, Oleg Nesterov wrote:
> On 03/25, Miao Xie wrote:
>>
>> on 2010-3-15 17:10, Oleg Nesterov wrote:
>>> This patch just states the fact that the cpusets/cpuhotplug interaction is
>>> broken and removes the deadlockable code which only pretends to work.
>>>
>>> - cpuset_lock() doesn't really work. It is needed for
>>>   cpuset_cpus_allowed_locked() but we can't take this lock in
>>>   try_to_wake_up()->select_fallback_rq() path.
>>>
>>> - cpuset_lock() is deadlockable. Suppose that a task T bound to CPU takes
>>>   callback_mutex. If cpu_down(CPU) happens before T drops callback_mutex,
>>>   stop_machine() preempts T, then migration_call(CPU_DEAD) tries to take
>>>   cpuset_lock() and hangs forever because CPU is already dead and thus
>>>   T can't be scheduled.
>>
>> The problem you describe doesn't exist, because the kernel has already moved T
>> to an active cpu when preparing to turn off a CPU.
> 
> We need cpuset_lock() to move T. Please look at _cpu_down().
> 
> OK.
> 
> 	A task T holds callback_mutex, and it is bound to CPU 1.
> 
> 	_cpu_down(cpu => 1) is called by the task X.
> 
> 	_cpu_down()->stop_machine() spawns rt-threads for each cpu;
> 	the thread running on CPU 1 preempts T and calls take_cpu_down(),
> 	which removes CPU 1 from the online/active masks.
> 
> 	X continues and does raw_notifier_call_chain(CPU_DEAD); this
> 	calls migration_call(CPU_DEAD), and _this_ is what moves the
> 	tasks off the dead CPU.
> 
> 	migration_call(CPU_DEAD) calls cpuset_lock() and deadlocks.
> 
> See?

But when the kernel wants to offline a cpu, it does
	raw_notifier_call_chain(CPU_DOWN_PREPARE)
first. This calls cpuset_track_online_cpus() to update the cpuset's cpus
and task->cpus_allowed, and then moves the tasks running on the dying cpu
to other online cpus. At that time, the rt-threads for each cpu have not
been created.

And when the kernel does migration_call(CPU_DEAD), the rt-threads have
already exited. The task that holds callback_mutex can run as normal.

Miao

> 
> Oleg.
> 
> 
> 
> 




* Re: [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
  2010-03-25 12:27     ` Miao Xie
@ 2010-03-25 12:59       ` Oleg Nesterov
  0 siblings, 0 replies; 6+ messages in thread
From: Oleg Nesterov @ 2010-03-25 12:59 UTC (permalink / raw)
  To: Miao Xie
  Cc: Peter Zijlstra, Ingo Molnar, Ben Blum, Jiri Slaby, Lai Jiangshan,
	Li Zefan, Paul Menage, Rafael J. Wysocki, Tejun Heo, linux-kernel

On 03/25, Miao Xie wrote:
>
> on 2010-3-25 18:14, Oleg Nesterov wrote:
> > On 03/25, Miao Xie wrote:
> >>
> >> The problem you describe doesn't exist, because the kernel has already moved T
> >> to an active cpu when preparing to turn off a CPU.
> >
> > We need cpuset_lock() to move T. Please look at _cpu_down().
> >
> > OK.
> >
> > 	A task T holds callback_mutex, and it is bound to CPU 1.
> >
> > 	_cpu_down(cpu => 1) is called by the task X.
> >
> > 	_cpu_down()->stop_machine() spawns rt-threads for each cpu;
> > 	the thread running on CPU 1 preempts T and calls take_cpu_down(),
> > 	which removes CPU 1 from the online/active masks.
> >
> > 	X continues and does raw_notifier_call_chain(CPU_DEAD); this
> > 	calls migration_call(CPU_DEAD), and _this_ is what moves the
> > 	tasks off the dead CPU.
> >
> > 	migration_call(CPU_DEAD) calls cpuset_lock() and deadlocks.
> >
> > See?
>
> But when the kernel wants to offline a cpu, it does
> 	raw_notifier_call_chain(CPU_DOWN_PREPARE)
> first. This calls cpuset_track_online_cpus() to update the cpuset's cpus

First of all, let me note that it is wrong to call scan_for_empty_cpusets()
at the CPU_DOWN_PREPARE stage. _cpu_down() can fail after that, but we can't
revert the result of remove_tasks_in_empty_cpuset().

But this doesn't matter,

> and task->cpus_allowed, and then moves the tasks running on the dying cpu
> to other online cpus.

No, it doesn't track task->cpus_allowed afaics. It only checks
cpumask_empty(cp->cpus_allowed) and does nothing otherwise.

And it is quite possible that a task belongs to some cpuset cs and is itself
bound to a single cpu, while cs->cpus_allowed is "wide" and includes other
online cpus.
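
Roughly, the relevant check is just (a simplified sketch from memory of the
code, not a verbatim quote):

	if (cpumask_empty(cp->cpus_allowed))
		remove_tasks_in_empty_cpuset(cp);
	/* else: nothing; a task bound only to the dying cpu but sitting
	   in a "wide" cpuset is left where it is */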

> At that time, the rt-threads for each cpu have not
> been created.

(doesn't matter, but they are already created and sleeping)

> And when the kernel does migration_call(CPU_DEAD), the rt-threads have
> already exited.

No, they are sleeping, but this doesn't matter again.

> The task that holds callback_mutex can run as normal.

It can't, afaics; please see above.


That said, let me remind you: I read this code only once, long ago, during my
first attempt to fix these problems (all my attempts were ignored until
I rerouted my concerns to Peter). It is possible that I missed something,
forgot something, or both. But when I did the second version I bothered to
actually test my theory, and the kernel hung; see the changelog in
http://marc.info/?t=124910242400002

You were cc'ed too ;)

Oleg.



* [tip:sched/core] sched: Kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
  2010-03-15  9:10 [PATCH 1/6] kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code Oleg Nesterov
  2010-03-25  3:00 ` Miao Xie
@ 2010-04-02 19:11 ` tip-bot for Oleg Nesterov
  1 sibling, 0 replies; 6+ messages in thread
From: tip-bot for Oleg Nesterov @ 2010-04-02 19:11 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, oleg, tglx, mingo

Commit-ID:  897f0b3c3ff40b443c84e271bef19bd6ae885195
Gitweb:     http://git.kernel.org/tip/897f0b3c3ff40b443c84e271bef19bd6ae885195
Author:     Oleg Nesterov <oleg@redhat.com>
AuthorDate: Mon, 15 Mar 2010 10:10:03 +0100
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 2 Apr 2010 20:12:01 +0200

sched: Kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code

This patch just states the fact that the cpusets/cpuhotplug interaction is
broken and removes the deadlockable code which only pretends to work.

- cpuset_lock() doesn't really work. It is needed for
  cpuset_cpus_allowed_locked() but we can't take this lock in
  try_to_wake_up()->select_fallback_rq() path.

- cpuset_lock() is deadlockable. Suppose that a task T bound to CPU takes
  callback_mutex. If cpu_down(CPU) happens before T drops callback_mutex,
  stop_machine() preempts T, then migration_call(CPU_DEAD) tries to take
  cpuset_lock() and hangs forever because CPU is already dead and thus
  T can't be scheduled.

- cpuset_cpus_allowed_locked() is deadlockable too. It takes task_lock()
  which is not irq-safe, but try_to_wake_up() can be called from irq.

Kill them, and change select_fallback_rq() to use cpu_possible_mask, like
we currently do without CONFIG_CPUSETS.

Also, with or without this patch, with or without CONFIG_CPUSETS, the
callers of select_fallback_rq() can race with each other or with
set_cpus_allowed() paths.

The subsequent patches try to fix these problems.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100315091003.GA9123@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 include/linux/cpuset.h |   13 -------------
 kernel/cpuset.c        |   27 +--------------------------
 kernel/sched.c         |   10 +++-------
 3 files changed, 4 insertions(+), 46 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc..eeaaee7 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,6 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +67,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,11 +100,6 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
-{
-	cpumask_copy(mask, cpu_possible_mask);
-}
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 {
@@ -157,9 +147,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d109467..9a747f5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2182,19 +2182,10 @@ void __init cpuset_init_smp(void)
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	mutex_lock(&callback_mutex);
-	cpuset_cpus_allowed_locked(tsk, pmask);
-	mutex_unlock(&callback_mutex);
-}
-
-/**
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
- **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
-{
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), pmask);
 	task_unlock(tsk);
+	mutex_unlock(&callback_mutex);
 }
 
 void cpuset_init_current_mems_allowed(void)
@@ -2383,22 +2374,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 }
 
 /**
- * cpuset_lock - lock out any changes to cpuset structures
- *
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset.  Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list.  The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
- */
-
-void cpuset_lock(void)
-{
-	mutex_lock(&callback_mutex);
-}
-
-/**
  * cpuset_unlock - release lock on cpuset changes
  *
  * Undo the lock taken in a previous cpuset_lock() call.
diff --git a/kernel/sched.c b/kernel/sched.c
index 52b7efd..c0b3ebc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2296,11 +2296,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		return dest_cpu;
 
 	/* No more Mr. Nice Guy. */
-	if (dest_cpu >= nr_cpu_ids) {
-		rcu_read_lock();
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		rcu_read_unlock();
-		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+	if (unlikely(dest_cpu >= nr_cpu_ids)) {
+		cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+		dest_cpu = cpumask_any(cpu_active_mask);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -5866,7 +5864,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
@@ -5879,7 +5876,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
 		raw_spin_unlock_irq(&rq->lock);
-		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 		calc_global_load_remove(rq);

