public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: "Gregory Haskins" <ghaskins@novell.com>
To: "Steven Rostedt" <rostedt@goodmis.org>,
	"LKML" <linux-kernel@vger.kernel.org>
Cc: "Peter Zijlstra" <a.p.zijlstra@chello.nl>,
	"Ingo Molnar" <mingo@elte.hu>,
	"Steven Rostedt" <srostedt@redhat.com>,
	"Christoph Lameter" <clameter@sgi.com>
Subject: Re: [PATCH v3 16/17] Fix schedstat handling
Date: Sat, 17 Nov 2007 12:40:49 -0500	[thread overview]
Message-ID: <473EE151.BA47.005A.0@novell.com> (raw)
In-Reply-To: <20071117062405.634159013@goodmis.org>

>>> On Sat, Nov 17, 2007 at  1:21 AM, in message
<20071117062405.634159013@goodmis.org>, Steven Rostedt <rostedt@goodmis.org>
wrote: 
> Gregory Haskins RT balancing broke sched domains.

Doh! (though you mean s/domains/stats ;)

> This is a fix to allow it to still work.
> 
> Signed-off-by: Steven Rostedt <srostedt@redhat.com>
> 
> ---
>  include/linux/sched.h   |    3 ++-
>  kernel/sched.c          |   17 ++++++++++++++---
>  kernel/sched_fair.c     |   19 ++++++++++++++-----
>  kernel/sched_idletask.c |    3 ++-
>  kernel/sched_rt.c       |    3 ++-
>  5 files changed, 34 insertions(+), 11 deletions(-)
> 
> Index: linux-compile.git/kernel/sched.c
> ===================================================================
> --- linux-compile.git.orig/kernel/sched.c	2007-11-17 00:15:57.000000000 -0500
> +++ linux-compile.git/kernel/sched.c	2007-11-17 00:15:57.000000000 -0500
> @@ -1453,6 +1453,7 @@ static int try_to_wake_up(struct task_st
>  	unsigned long flags;
>  	long old_state;
>  	struct rq *rq;
> +	struct sched_domain *this_sd = NULL;
>  #ifdef CONFIG_SMP
>  	int new_cpu;
>  #endif
> @@ -1476,10 +1477,20 @@ static int try_to_wake_up(struct task_st
>  	schedstat_inc(rq, ttwu_count);
>  	if (cpu == this_cpu)
>  		schedstat_inc(rq, ttwu_local);
> -	else
> -		schedstat_inc(rq->sd, ttwu_wake_remote);
> +	else {
> +#ifdef CONFIG_SCHEDSTATS
> +		struct sched_domain *sd;
> +		for_each_domain(this_cpu, sd) {
> +			if (cpu_isset(cpu, sd->span)) {
> +				schedstat_inc(sd, ttwu_wake_remote);
> +				this_sd = sd;
> +				break;
> +			}
> +		}
> +#endif /* CONFIG_SCHEDSTATS */
> +	}
>  
> -	new_cpu = p->sched_class->select_task_rq(p, sync);
> +	new_cpu = p->sched_class->select_task_rq(p, this_sd, sync);

I like this optimization, but I am thinking that the location of the stat update is now no longer relevant.  It should potentially go *after* the select_task_rq() so that we pick the sched_domain of the actual wake target, not the historical affinity.  If that is accurate, I'm sure you can finagle this optimization to work in that scenario too, but it will take a little re-work.


>  
>  	if (new_cpu != cpu) {
>  		set_task_cpu(p, new_cpu);
> Index: linux-compile.git/include/linux/sched.h
> ===================================================================
> --- linux-compile.git.orig/include/linux/sched.h	2007-11-17 00:15:57.000000000 -0500
> +++ linux-compile.git/include/linux/sched.h	2007-11-17 00:15:57.000000000 -0500
> @@ -823,7 +823,8 @@ struct sched_class {
>  	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
>  	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
>  	void (*yield_task) (struct rq *rq);
> -	int  (*select_task_rq)(struct task_struct *p, int sync);
> +	int  (*select_task_rq)(struct task_struct *p,
> +			       struct sched_domain *sd, int sync);
>  
>  	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
>  
> Index: linux-compile.git/kernel/sched_fair.c
> ===================================================================
> --- linux-compile.git.orig/kernel/sched_fair.c	2007-11-17 00:15:57.000000000 -0500
> +++ linux-compile.git/kernel/sched_fair.c	2007-11-17 00:43:44.000000000 -0500
> @@ -611,11 +611,12 @@ static inline int wake_idle(int cpu, str
>  #endif
>  
>  #ifdef CONFIG_SMP
> -static int select_task_rq_fair(struct task_struct *p, int sync)
> +static int select_task_rq_fair(struct task_struct *p,
> +			       struct sched_domain *this_sd, int sync)
>  {
>  	int cpu, this_cpu;
>  	struct rq *rq;
> -	struct sched_domain *sd, *this_sd = NULL;
> +	struct sched_domain *sd;
>  	int new_cpu;
>  
>  	cpu      = task_cpu(p);
> @@ -623,15 +624,23 @@ static int select_task_rq_fair(struct ta
>  	this_cpu = smp_processor_id();
>  	new_cpu  = cpu;
>  
> +	if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
> +		goto out_set_cpu;
> +
> +#ifndef CONFIG_SCHEDSTATS
> +	/*
> +	 * If SCHEDSTATS is configured, then this_sd would
> +	 * have already been determined.
> +	 */
>  	for_each_domain(this_cpu, sd) {
>  		if (cpu_isset(cpu, sd->span)) {
>  			this_sd = sd;
>  			break;
>  		}
>  	}
> -
> -	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
> -		goto out_set_cpu;
> +#else
> +	(void)sd; /* unused */
> +#endif /* CONFIG_SCHEDSTATS */
>  
>  	/*
>  	 * Check for affine wakeup and passive balancing possibilities.
> Index: linux-compile.git/kernel/sched_idletask.c
> ===================================================================
> --- linux-compile.git.orig/kernel/sched_idletask.c	2007-11-17 00:15:57.000000000 
> -0500
> +++ linux-compile.git/kernel/sched_idletask.c	2007-11-17 00:15:57.000000000 -0500
> @@ -6,7 +6,8 @@
>   */
>  
>  #ifdef CONFIG_SMP
> -static int select_task_rq_idle(struct task_struct *p, int sync)
> +static int select_task_rq_idle(struct task_struct *p,
> +			       struct sched_domain *sd, int sync)
>  {
>  	return task_cpu(p); /* IDLE tasks are never migrated */
>  }
> Index: linux-compile.git/kernel/sched_rt.c
> ===================================================================
> --- linux-compile.git.orig/kernel/sched_rt.c	2007-11-17 00:15:57.000000000 -0500
> +++ linux-compile.git/kernel/sched_rt.c	2007-11-17 00:44:31.000000000 -0500
> @@ -46,7 +46,8 @@ static void update_rt_migration(struct r
>  
>  static int find_lowest_rq(struct task_struct *task);
>  
> -static int select_task_rq_rt(struct task_struct *p, int sync)
> +static int select_task_rq_rt(struct task_struct *p,
> +			     struct sched_domain *sd, int sync)
>  {
>  	struct rq *rq = task_rq(p);
>  
> 
> -- 



  reply	other threads:[~2007-11-17 17:45 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-11-17  6:21 [PATCH v3 00/17] New RT Task Balancing -v3 Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 01/17] Add rt_nr_running accounting Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 02/17] track highest prio queued on runqueue Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 03/17] push RT tasks Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 04/17] RT overloaded runqueues accounting Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 05/17] pull RT tasks Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 06/17] wake up balance RT Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 07/17] disable CFS RT load balancing Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 08/17] Cache cpus_allowed weight for optimizing migration Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 09/17] Consistency cleanup for this_rq usage Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 10/17] Remove some CFS specific code from the wakeup path of RT tasks Steven Rostedt
2007-11-17 17:35   ` Gregory Haskins
2007-11-17 18:51     ` Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 11/17] RT: Break out the search function Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 12/17] Allow current_cpu to be included in search Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 13/17] RT: Pre-route RT tasks on wakeup Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 14/17] Optimize our cpu selection based on topology Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 15/17] RT: Optimize rebalancing Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 16/17] Fix schedstat handling Steven Rostedt
2007-11-17 17:40   ` Gregory Haskins [this message]
2007-11-17 18:52     ` Steven Rostedt
2007-11-17  6:21 ` [PATCH v3 17/17] --- kernel/sched_rt.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) Steven Rostedt
2007-11-17  6:33   ` [PATCH v3 17/17] (Avoid overload) Steven Rostedt
2007-11-17 17:42     ` Gregory Haskins
2007-11-17 18:55       ` Steven Rostedt
2007-11-17 17:46     ` Gregory Haskins
2007-11-19 16:34       ` [PATCH] RT: restore the migratable conditional Gregory Haskins
2007-11-17  8:14 ` [PATCH v3 00/17] New RT Task Balancing -v3 Jon Masters

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=473EE151.BA47.005A.0@novell.com \
    --to=ghaskins@novell.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=clameter@sgi.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=rostedt@goodmis.org \
    --cc=srostedt@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox