From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Chris Mason <chris.mason@oracle.com>,
Frank Rowand <frank.rowand@am.sony.com>,
Ingo Molnar <mingo@elte.hu>, Thomas Gleixner <tglx@linutronix.de>,
Mike Galbraith <efault@gmx.de>, Oleg Nesterov <oleg@redhat.com>,
Paul Turner <pjt@google.com>, Jens Axboe <axboe@kernel.dk>,
Yong Zhang <yong.zhang0@gmail.com>
Cc: linux-kernel@vger.kernel.org, Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH 08/22] sched: Drop the rq argument to sched_class::select_task_rq()
Date: Wed, 02 Mar 2011 18:38:39 +0100
Message-ID: <20110302174120.829029362@chello.nl>
In-Reply-To: <20110302173831.295031866@chello.nl>
In preparation for calling select_task_rq() without rq->lock held, drop
the dependency on the rq argument.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
---
include/linux/sched.h | 3 +--
kernel/sched.c | 20 +++++++++++---------
kernel/sched_fair.c | 2 +-
kernel/sched_idletask.c | 2 +-
kernel/sched_rt.c | 38 ++++++++++++++++++++++++++------------
kernel/sched_stoptask.c | 3 +--
6 files changed, 41 insertions(+), 27 deletions(-)
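(Not part of the patch: a minimal sketch, with a hypothetical helper
name, of the calling convention the new signature permits. With the rq
argument gone, a caller can choose a target cpu while holding only
p->pi_lock, which is enough to stabilize ->cpus_allowed; the later
patches in this series rely on exactly this.)

/*
 * Sketch only -- hypothetical caller, not from this patch.
 */
static int pick_wake_cpu(struct task_struct *p, int wake_flags)
{
	unsigned long flags;
	int cpu;

	/* p->pi_lock, not rq->lock, keeps ->cpus_allowed stable */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return cpu;
}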
Index: linux-2.6/include/linux/sched.h
===================================================================
--- linux-2.6.orig/include/linux/sched.h
+++ linux-2.6/include/linux/sched.h
@@ -1063,8 +1063,7 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct rq *rq, struct task_struct *p,
- int sd_flag, int flags);
+ int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -2138,13 +2138,15 @@ static int migration_cpu_stop(void *data
* The task's runqueue lock must be held.
* Returns true if you have to wait for the migration thread.
*/
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
{
/*
* If the task is not on a runqueue (and not running), then
* the next wake-up will properly place the task.
*/
- return p->on_rq || task_running(rq, p);
+ bool running = p->on_rq || p->on_cpu;
+ smp_rmb(); /* finish_lock_switch() */
+ return running;
}
/*
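(A note on the barrier above, as a sketch with hypothetical helper
names rather than the real code paths: the smp_rmb() in
need_migrate_task() is assumed to pair with the smp_wmb() that
finish_lock_switch() issues before clearing ->on_cpu, per the earlier
patches in this series. A reader may only treat the task as fully
switched out once both ->on_rq and ->on_cpu read as clear.)

/*
 * Sketch only -- simplified pairing, not the actual scheduler code.
 */

/* writer side, schedule() tail: */
static inline void publish_task_switched_out(struct task_struct *prev)
{
	/* all of prev's state is saved before this point */
	smp_wmb();
	prev->on_cpu = 0;
}

/* reader side, as in need_migrate_task(): */
static inline bool task_still_running(struct task_struct *p)
{
	bool running = p->on_rq || p->on_cpu;

	smp_rmb();	/* pairs with the smp_wmb() above */
	return running;
}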
@@ -2337,9 +2339,9 @@ static int select_fallback_rq(int cpu, s
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
*/
static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
- int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+ int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
/*
* In order not to call set_task_cpu() on a blocking task we need
@@ -2484,7 +2486,7 @@ static int try_to_wake_up(struct task_st
en_flags |= ENQUEUE_WAKING;
}
- cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+ cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
if (cpu != orig_cpu)
set_task_cpu(p, cpu);
__task_rq_unlock(rq);
@@ -2694,7 +2696,7 @@ void wake_up_new_task(struct task_struct
* We set TASK_WAKING so that select_task_rq() can drop rq->lock
* without people poking at ->cpus_allowed.
*/
- cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+ cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
set_task_cpu(p, cpu);
p->state = TASK_RUNNING;
@@ -3420,7 +3422,7 @@ void sched_exec(void)
int dest_cpu;
rq = task_rq_lock(p, &flags);
- dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+ dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
if (dest_cpu == smp_processor_id())
goto unlock;
@@ -3428,7 +3430,7 @@ void sched_exec(void)
* select_task_rq() can race against ->cpus_allowed
*/
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
- likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+ likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
struct migration_arg arg = { p, dest_cpu };
task_rq_unlock(rq, &flags);
@@ -5681,7 +5683,7 @@ int set_cpus_allowed_ptr(struct task_str
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
- if (migrate_task(p, rq)) {
+ if (need_migrate_task(p)) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
__task_rq_unlock(rq);
Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -1623,7 +1623,7 @@ static int select_idle_sibling(struct ta
* preempt must be disabled.
*/
static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
int cpu = smp_processor_id();
Index: linux-2.6/kernel/sched_idletask.c
===================================================================
--- linux-2.6.orig/kernel/sched_idletask.c
+++ linux-2.6/kernel/sched_idletask.c
@@ -7,7 +7,7 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks never migrate */
}
Index: linux-2.6/kernel/sched_rt.c
===================================================================
--- linux-2.6.orig/kernel/sched_rt.c
+++ linux-2.6/kernel/sched_rt.c
@@ -973,13 +973,23 @@ static void yield_task_rt(struct rq *rq)
static int find_lowest_rq(struct task_struct *task);
static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
+ struct task_struct *curr;
+ struct rq *rq;
+ int cpu;
+
if (sd_flag != SD_BALANCE_WAKE)
return smp_processor_id();
+ cpu = task_cpu(p);
+ rq = cpu_rq(cpu);
+
+ rcu_read_lock();
+ curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
/*
- * If the current task is an RT task, then
+ * If the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
@@ -993,21 +1003,25 @@ select_task_rq_rt(struct rq *rq, struct
* lock?
*
* For equal prio tasks, we just let the scheduler sort it out.
+ *
+ * Otherwise, just let it ride on the affined RQ and the
+ * post-schedule router will push the preempted task away
+ *
+ * This test is optimistic, if we get it wrong the load-balancer
+ * will have to sort it out.
*/
- if (unlikely(rt_task(rq->curr)) &&
- (rq->curr->rt.nr_cpus_allowed < 2 ||
- rq->curr->prio < p->prio) &&
+ if (curr && unlikely(rt_task(curr)) &&
+ (curr->rt.nr_cpus_allowed < 2 ||
+ curr->prio < p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
- int cpu = find_lowest_rq(p);
+ int target = find_lowest_rq(p);
- return (cpu == -1) ? task_cpu(p) : cpu;
+ if (target != -1)
+ cpu = target;
}
+ rcu_read_unlock();
- /*
- * Otherwise, just let it ride on the affined RQ and the
- * post-schedule router will push the preempted task away
- */
- return task_cpu(p);
+ return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
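(The unlocked ->curr access above follows a pattern worth spelling out;
a sketch with a hypothetical helper, not from this patch:
rcu_read_lock() keeps the task_struct of a remote runqueue's current
task from being freed while we peek at it, and ACCESS_ONCE() forces a
single load of rq->curr. The value may be stale by the time it is used;
the test is optimistic and the load-balancer cleans up after wrong
guesses.)

/*
 * Sketch only -- hypothetical helper illustrating the unlocked
 * rq->curr read used by select_task_rq_rt().
 */
static int peek_remote_curr_prio(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr;
	int prio;

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr);	/* unlocked; may change under us */
	prio = curr ? curr->prio : MAX_PRIO;
	rcu_read_unlock();

	return prio;
}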
Index: linux-2.6/kernel/sched_stoptask.c
===================================================================
--- linux-2.6.orig/kernel/sched_stoptask.c
+++ linux-2.6/kernel/sched_stoptask.c
@@ -9,8 +9,7 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_stop(struct rq *rq, struct task_struct *p,
- int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
{
return task_cpu(p); /* stop tasks never migrate */
}