public inbox for linux-kernel@vger.kernel.org
* [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h
@ 2015-02-17 10:46 Kirill Tkhai
  2015-02-17 11:11 ` Peter Zijlstra
  0 siblings, 1 reply; 8+ messages in thread
From: Kirill Tkhai @ 2015-02-17 10:46 UTC (permalink / raw)
  To: linux-kernel; +Cc: Peter Zijlstra, Ingo Molnar, Josh Poimboeuf


Place them in sched.h, because dl_task_timer() needs them.
Also remove the lockdep check, which does not fit this
function.

Similar to Josh Poimboeuf's idea for task_rq_{,un}lock():
https://lkml.org/lkml/2015/2/9/476

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
CC: Josh Poimboeuf <jpoimboe@redhat.com>
---
 kernel/sched/core.c     |   28 ----------------------------
 kernel/sched/deadline.c |   12 +++---------
 kernel/sched/sched.h    |   26 ++++++++++++++++++++++++++
 3 files changed, 29 insertions(+), 37 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0466e61..fc12a1d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,28 +307,6 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -351,12 +329,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
 static inline void
 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
 	__releases(rq->lock)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 29ae704..e008b51 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -512,14 +512,8 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
 	struct rq *rq;
-again:
-	rq = task_rq(p);
-	raw_spin_lock(&rq->lock);
-	if (rq != task_rq(p) || task_on_rq_migrating(p)) {
-		/* Task was move{d,ing}, retry */
-		raw_spin_unlock(&rq->lock);
-		goto again;
-	}
+
+	rq = __task_rq_lock(p);
 
 	/*
 	 * We need to take care of several possible races here:
@@ -574,7 +568,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		push_dl_task(rq);
 #endif
 unlock:
-	raw_spin_unlock(&rq->lock);
+	__task_rq_unlock(rq);
 
 	return HRTIMER_NORESTART;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0870db2..f65f57c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1020,6 +1020,32 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 	return p->on_rq == TASK_ON_RQ_MIGRATING;
 }
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif





* Re: [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h
  2015-02-17 10:46 [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h Kirill Tkhai
@ 2015-02-17 11:11 ` Peter Zijlstra
  2015-02-17 11:20   ` Kirill Tkhai
  2015-02-17 11:26   ` Peter Zijlstra
  0 siblings, 2 replies; 8+ messages in thread
From: Peter Zijlstra @ 2015-02-17 11:11 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: linux-kernel, Ingo Molnar, Josh Poimboeuf

On Tue, Feb 17, 2015 at 01:46:51PM +0300, Kirill Tkhai wrote:
> 
> Place them in sched.h, because dl_task_timer() needs them.
> Also remove the lockdep check, which does not fit this
> function.

No, that lockdep check is valid for all current sites.
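
Every current caller already holds ->pi_lock when it calls
__task_rq_lock(); a rough sketch of the convention the assert enforces
(illustrative only, not quoted from the tree):

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	rq = __task_rq_lock(p);	/* lockdep splats here if pi_lock isn't held */

	/* ... operate on @p with both pi_lock and rq->lock held ... */

	__task_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);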


* Re: [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h
  2015-02-17 11:11 ` Peter Zijlstra
@ 2015-02-17 11:20   ` Kirill Tkhai
  2015-02-17 11:26   ` Peter Zijlstra
  1 sibling, 0 replies; 8+ messages in thread
From: Kirill Tkhai @ 2015-02-17 11:20 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, Ingo Molnar, Josh Poimboeuf

On Tue, 17/02/2015 at 12:11 +0100, Peter Zijlstra wrote:
> On Tue, Feb 17, 2015 at 01:46:51PM +0300, Kirill Tkhai wrote:
> > 
> > Place them in sched.h, because dl_task_timer() needs them.
> > Also remove the lockdep check, which does not fit this
> > function.
> 
> No, that lockdep check is valid for all current sites.

Sure, but dl_task_timer() does not take pi_lock. I wanted to delete the
"again:" loop in it to simplify the second patch.

Alternatively, we can leave __task_rq_lock() untouched and add an
smp_rmb() to the "again:" loop in the second patch instead.
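
Roughly like this (a sketch only; the smp_rmb() placement is my
assumption, not code from either patch):

again:
	rq = task_rq(p);
	raw_spin_lock(&rq->lock);
	if (rq != task_rq(p) || task_on_rq_migrating(p)) {
		/* Task was moved or is migrating, retry */
		raw_spin_unlock(&rq->lock);
		/* order the re-read of task_rq(p) against the migration stores */
		smp_rmb();
		goto again;
	}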




* Re: [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h
  2015-02-17 11:11 ` Peter Zijlstra
  2015-02-17 11:20   ` Kirill Tkhai
@ 2015-02-17 11:26   ` Peter Zijlstra
  2015-02-17 11:33     ` Kirill Tkhai
  1 sibling, 1 reply; 8+ messages in thread
From: Peter Zijlstra @ 2015-02-17 11:26 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: linux-kernel, Ingo Molnar, Josh Poimboeuf

On Tue, Feb 17, 2015 at 12:11:16PM +0100, Peter Zijlstra wrote:
> On Tue, Feb 17, 2015 at 01:46:51PM +0300, Kirill Tkhai wrote:
> > 
> > Place them in sched.h, because dl_task_timer() needs them.
> > Also remove the lockdep check, which does not fit this
> > function.
> 
> No, that lockdep check is valid for all current sites.

Also, note that just the other day you proved wrong the reason we
didn't have pi_lock there.

As per 0f397f2c90ce ("sched/dl: Fix race in dl_task_timer()"):

       "The only reason we don't strictly need ->pi_lock now is because
        we're guaranteed to have p->state == TASK_RUNNING here and are
        thus free of ttwu races".

And therefore we should use the full task_rq_lock() here.
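
Concretely, dl_task_timer() would then take ->pi_lock and rq->lock
together; a minimal sketch of the shape (the actual patch follows later
in the thread):

	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);	/* p->pi_lock, then rq->lock */
	/* ... handle the throttling timer with both locks held ... */
	task_rq_unlock(rq, p, &flags);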




* Re: [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h
  2015-02-17 11:26   ` Peter Zijlstra
@ 2015-02-17 11:33     ` Kirill Tkhai
  2015-02-17 12:31       ` [PATCH] sched: Make dl_task_timer() use task_rq_lock() Peter Zijlstra
  0 siblings, 1 reply; 8+ messages in thread
From: Kirill Tkhai @ 2015-02-17 11:33 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, Ingo Molnar, Josh Poimboeuf

On Tue, 17/02/2015 at 12:26 +0100, Peter Zijlstra wrote:
> On Tue, Feb 17, 2015 at 12:11:16PM +0100, Peter Zijlstra wrote:
> > On Tue, Feb 17, 2015 at 01:46:51PM +0300, Kirill Tkhai wrote:
> > > 
> > > Place them in sched.h, because dl_task_timer() needs them.
> > > Also remove the lockdep check, which does not fit this
> > > function.
> > 
> > No, that lockdep check is valid for all current sites.
> 
> Also, note that just the other day you proved wrong the reason we
> didn't have pi_lock there.
> 
> As per 0f397f2c90ce ("sched/dl: Fix race in dl_task_timer()"):
> 
>        "The only reason we don't strictly need ->pi_lock now is because
>         we're guaranteed to have p->state == TASK_RUNNING here and are
>         thus free of ttwu races".
> 
> And therefore we should use the full task_rq_lock() here.
> 

So, we move task_rq_lock() to sched.h, and dl_task_timer() uses it?




* [PATCH] sched: Make dl_task_timer() use task_rq_lock()
  2015-02-17 11:33     ` Kirill Tkhai
@ 2015-02-17 12:31       ` Peter Zijlstra
  2015-02-17 13:32         ` Peter Zijlstra
  2015-02-18 17:06         ` [tip:sched/core] " tip-bot for Peter Zijlstra
  0 siblings, 2 replies; 8+ messages in thread
From: Peter Zijlstra @ 2015-02-17 12:31 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: linux-kernel, Ingo Molnar, Josh Poimboeuf

On Tue, Feb 17, 2015 at 02:33:58PM +0300, Kirill Tkhai wrote:

> So, we move task_rq_lock() to sched.h, and dl_task_timer() uses it?

Yep, like this. I've also reverted your earlier change to
dl_task_timer(), restoring its original form.

---
Subject: sched: Make dl_task_timer() use task_rq_lock()
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue Feb 17 13:22:25 CET 2015

Kirill reported that a dl task can be throttled and dequeued at the
same time. This happens when the task becomes throttled in schedule(),
which it calls on its way to sleep:

current->state = TASK_INTERRUPTIBLE;
schedule()
    deactivate_task()
        dequeue_task_dl()
            update_curr_dl()
                start_dl_timer()
            __dequeue_task_dl()
    prev->on_rq = 0;

This invalidates the assumption from commit 0f397f2c90ce ("sched/dl:
Fix race in dl_task_timer()"):

  "The only reason we don't strictly need ->pi_lock now is because
   we're guaranteed to have p->state == TASK_RUNNING here and are
   thus free of ttwu races".

And therefore we have to use the full task_rq_lock() here.

This also makes up for the fact that we forgot to update the rq-lock
retry loop for TASK_ON_RQ_MIGRATING, introduced by commit cca26e8009d1
("sched: Teach scheduler to understand TASK_ON_RQ_MIGRATING state").

Reported-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c     |   76 ------------------------------------------------
 kernel/sched/deadline.c |   12 +------
 kernel/sched/sched.h    |   76 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 79 insertions(+), 85 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,82 +307,6 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		/*
-		 *	move_queued_task()		task_rq_lock()
-		 *
-		 *	ACQUIRE (rq->lock)
-		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-		 *	[S] ->cpu = new_cpu		[L] task_rq()
-		 *					[L] ->on_rq
-		 *	RELEASE (rq->lock)
-		 *
-		 * If we observe the old cpu in task_rq_lock, the acquire of
-		 * the old rq->lock will fully serialize against the stores.
-		 *
-		 * If we observe the new cpu in task_rq_lock, the acquire will
-		 * pair with the WMB to ensure we must then also see migrating.
-		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-/*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
 static struct rq *this_rq_lock(void)
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_time
 						     struct sched_dl_entity,
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
+	unsigned long flags;
 	struct rq *rq;
-again:
-	rq = task_rq(p);
-	raw_spin_lock(&rq->lock);
 
-	if (rq != task_rq(p)) {
-		/* Task was moved, retrying. */
-		raw_spin_unlock(&rq->lock);
-		goto again;
-	}
+	rq = task_rq_lock(p, &flags);
 
 	/*
 	 * We need to take care of several possible races here:
@@ -555,7 +549,7 @@ static enum hrtimer_restart dl_task_time
 		push_dl_task(rq);
 #endif
 unlock:
-	raw_spin_unlock(&rq->lock);
+	task_rq_unlock(rq, p, &flags);
 
 	return HRTIMER_NORESTART;
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1484,6 +1484,82 @@ static inline void double_raw_lock(raw_s
 }
 
 /*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		/*
+		 *	move_queued_task()		task_rq_lock()
+		 *
+		 *	ACQUIRE (rq->lock)
+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
+		 *	[S] ->cpu = new_cpu		[L] task_rq()
+		 *					[L] ->on_rq
+		 *	RELEASE (rq->lock)
+		 *
+		 * If we observe the old cpu in task_rq_lock, the acquire of
+		 * the old rq->lock will fully serialize against the stores.
+		 *
+		 * If we observe the new cpu in task_rq_lock, the acquire will
+		 * pair with the WMB to ensure we must then also see migrating.
+		 */
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
+/*
  * double_rq_lock - safely lock two runqueues
  *
  * Note this does not disable interrupts like task_rq_lock,


* Re: [PATCH] sched: Make dl_task_timer() use task_rq_lock()
  2015-02-17 12:31       ` [PATCH] sched: Make dl_task_timer() use task_rq_lock() Peter Zijlstra
@ 2015-02-17 13:32         ` Peter Zijlstra
  2015-02-18 17:06         ` [tip:sched/core] " tip-bot for Peter Zijlstra
  1 sibling, 0 replies; 8+ messages in thread
From: Peter Zijlstra @ 2015-02-17 13:32 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: linux-kernel, Ingo Molnar, Josh Poimboeuf

On Tue, Feb 17, 2015 at 01:31:39PM +0100, Peter Zijlstra wrote:
> On Tue, Feb 17, 2015 at 02:33:58PM +0300, Kirill Tkhai wrote:
> 
> > So, we move task_rq_lock() to sched.h, and dl_task_timer() uses it?
> 
> Yep, like this. I've also reverted your earlier change to
> dl_task_timer(), restoring its original form.
> 
> ---
> Subject: sched: Make dl_task_timer() use task_rq_lock()
> From: Peter Zijlstra <peterz@infradead.org>
> Date: Tue Feb 17 13:22:25 CET 2015
> 
> Kirill reported that a dl task can be throttled and dequeued at the
> same time. This happens when the task becomes throttled in schedule(),
> which it calls on its way to sleep:
> 
> current->state = TASK_INTERRUPTIBLE;
> schedule()
>     deactivate_task()
>         dequeue_task_dl()
>             update_curr_dl()
>                 start_dl_timer()
>             __dequeue_task_dl()
>     prev->on_rq = 0;
> 
> This invalidates the assumption from commit 0f397f2c90ce ("sched/dl:
> Fix race in dl_task_timer()"):
> 
>   "The only reason we don't strictly need ->pi_lock now is because
>    we're guaranteed to have p->state == TASK_RUNNING here and are
>    thus free of ttwu races".
> 
> And therefore we have to use the full task_rq_lock() here.
> 
> This also makes up for the fact that we forgot to update the rq-lock
> retry loop for TASK_ON_RQ_MIGRATING, introduced by commit cca26e8009d1
> ("sched: Teach scheduler to understand TASK_ON_RQ_MIGRATING state").
> 
> Reported-by: Kirill Tkhai <ktkhai@parallels.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  kernel/sched/core.c     |   76 ------------------------------------------------
>  kernel/sched/deadline.c |   12 +------
>  kernel/sched/sched.h    |   76 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 79 insertions(+), 85 deletions(-)

Duh, I put it under CONFIG_SMP, /me changes patch to not do this.
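
That is, the helpers are needed by !SMP builds too, so they must sit
above the #ifdef CONFIG_SMP section of sched.h rather than inside it.
A placement sketch only (the final hunk below ends up doing this):

	/* built unconditionally, visible to !SMP builds as well */
	static inline void __task_rq_unlock(struct rq *rq)
		__releases(rq->lock)
	{
		raw_spin_unlock(&rq->lock);
	}

	#ifdef CONFIG_SMP
	/* SMP-only definitions; the lock helpers must not land in here */
	#endif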


* [tip:sched/core] sched: Make dl_task_timer() use task_rq_lock()
  2015-02-17 12:31       ` [PATCH] sched: Make dl_task_timer() use task_rq_lock() Peter Zijlstra
  2015-02-17 13:32         ` Peter Zijlstra
@ 2015-02-18 17:06         ` tip-bot for Peter Zijlstra
  1 sibling, 0 replies; 8+ messages in thread
From: tip-bot for Peter Zijlstra @ 2015-02-18 17:06 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: juri.lelli, tglx, mingo, hpa, linux-kernel, ktkhai, peterz

Commit-ID:  3960c8c0c7891dfc0f7be687cbdabb0d6916d614
Gitweb:     http://git.kernel.org/tip/3960c8c0c7891dfc0f7be687cbdabb0d6916d614
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 17 Feb 2015 13:22:25 +0100
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 18 Feb 2015 14:27:30 +0100

sched: Make dl_task_timer() use task_rq_lock()

Kirill reported that a dl task can be throttled and dequeued at the
same time. This happens when the task becomes throttled in schedule(),
which it calls on its way to sleep:

current->state = TASK_INTERRUPTIBLE;
schedule()
    deactivate_task()
        dequeue_task_dl()
            update_curr_dl()
                start_dl_timer()
            __dequeue_task_dl()
    prev->on_rq = 0;

This invalidates the assumption from commit 0f397f2c90ce ("sched/dl:
Fix race in dl_task_timer()"):

  "The only reason we don't strictly need ->pi_lock now is because
   we're guaranteed to have p->state == TASK_RUNNING here and are
   thus free of ttwu races".

And therefore we have to use the full task_rq_lock() here.

This also makes up for the fact that we forgot to update the rq-lock
retry loop for TASK_ON_RQ_MIGRATING, introduced by commit cca26e8009d1
("sched: Teach scheduler to understand TASK_ON_RQ_MIGRATING state").

Reported-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Link: http://lkml.kernel.org/r/20150217123139.GN5029@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/sched/core.c     | 76 -------------------------------------------------
 kernel/sched/deadline.c | 12 ++------
 kernel/sched/sched.h    | 76 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 79 insertions(+), 85 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fd0a6ed..f77acd9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,82 +307,6 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		/*
-		 *	move_queued_task()		task_rq_lock()
-		 *
-		 *	ACQUIRE (rq->lock)
-		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-		 *	[S] ->cpu = new_cpu		[L] task_rq()
-		 *					[L] ->on_rq
-		 *	RELEASE (rq->lock)
-		 *
-		 * If we observe the old cpu in task_rq_lock, the acquire of
-		 * the old rq->lock will fully serialize against the stores.
-		 *
-		 * If we observe the new cpu in task_rq_lock, the acquire will
-		 * pair with the WMB to ensure we must then also see migrating.
-		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-/*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
 static struct rq *this_rq_lock(void)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a027799..e88847d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 						     struct sched_dl_entity,
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
+	unsigned long flags;
 	struct rq *rq;
-again:
-	rq = task_rq(p);
-	raw_spin_lock(&rq->lock);
 
-	if (rq != task_rq(p)) {
-		/* Task was moved, retrying. */
-		raw_spin_unlock(&rq->lock);
-		goto again;
-	}
+	rq = task_rq_lock(p, &flags);
 
 	/*
 	 * We need to take care of several possible races here:
@@ -555,7 +549,7 @@ again:
 		push_dl_task(rq);
 #endif
 unlock:
-	raw_spin_unlock(&rq->lock);
+	task_rq_unlock(rq, p, &flags);
 
 	return HRTIMER_NORESTART;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0870db2..dc0f435 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		/*
+		 *	move_queued_task()		task_rq_lock()
+		 *
+		 *	ACQUIRE (rq->lock)
+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
+		 *	[S] ->cpu = new_cpu		[L] task_rq()
+		 *					[L] ->on_rq
+		 *	RELEASE (rq->lock)
+		 *
+		 * If we observe the old cpu in task_rq_lock, the acquire of
+		 * the old rq->lock will fully serialize against the stores.
+		 *
+		 * If we observe the new cpu in task_rq_lock, the acquire will
+		 * pair with the WMB to ensure we must then also see migrating.
+		 */
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 


end of thread [newest: 2015-02-18 17:06 UTC]

Thread overview: 8+ messages
2015-02-17 10:46 [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h Kirill Tkhai
2015-02-17 11:11 ` Peter Zijlstra
2015-02-17 11:20   ` Kirill Tkhai
2015-02-17 11:26   ` Peter Zijlstra
2015-02-17 11:33     ` Kirill Tkhai
2015-02-17 12:31       ` [PATCH] sched: Make dl_task_timer() use task_rq_lock() Peter Zijlstra
2015-02-17 13:32         ` Peter Zijlstra
2015-02-18 17:06         ` [tip:sched/core] " tip-bot for Peter Zijlstra
