From: morten.rasmussen@arm.com
To: paulmck@linux.vnet.ibm.com, pjt@google.com, peterz@infradead.org,
	suresh.b.siddha@intel.com
Cc: morten.rasmussen@arm.com, linaro-sched-sig@lists.linaro.org,
	linaro-dev@lists.linaro.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 10/10] sched: SCHED_HMP multi-domain task migration control
Date: Fri, 21 Sep 2012 19:32:25 +0100
Message-ID: <1348252345-5642-11-git-send-email-morten.rasmussen@arm.com>
In-Reply-To: <1348252345-5642-1-git-send-email-morten.rasmussen@arm.com>

From: Morten Rasmussen <morten.rasmussen@arm.com>

We need a way to prevent a task that is migrating up or down between
hmp_domains from migrating straight on through before its load metric
has adapted to the compute capacity of the CPU in the new hmp_domain.
This patch adds per-task up/down migration timestamps and a delay that
prevents the task from doing another migration in the same direction
until the delay has expired (hmp_next_up_threshold and
hmp_next_down_threshold, both 4096 by default, i.e. roughly 4 ms of
task time).
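
For illustration only (this sketch is not part of the patch): the
settle-time check added to hmp_up_migration() below compares a task
clock delta, shifted down by 10 bits to convert nanoseconds to roughly
microseconds, against the threshold. The standalone wrapper and the
main() driver here are hypothetical; only the arithmetic mirrors the
patch.

#include <stdio.h>

typedef unsigned long long u64;

static unsigned int hmp_next_up_threshold = 4096;	/* ~1us units: ~4ms */

/* Return 1 if the delay since the last up migration has expired.
 * Both arguments are task clock values in nanoseconds. */
static int hmp_up_delay_expired(u64 now, u64 last_up_migration)
{
	/* ns >> 10 is approximately us */
	return ((now - last_up_migration) >> 10) >= hmp_next_up_threshold;
}

int main(void)
{
	/* 3ms after an up migration at time 0: load still settling */
	printf("%d\n", hmp_up_delay_expired(3000000ULL, 0));	/* 0 */
	/* 5ms after: delay expired, another up migration is allowed */
	printf("%d\n", hmp_up_delay_expired(5000000ULL, 0));	/* 1 */
	return 0;
}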

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 include/linux/sched.h |    4 ++++
 kernel/sched/core.c   |    4 ++++
 kernel/sched/fair.c   |   38 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index df971a3..ca3890a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1158,6 +1158,10 @@ struct sched_avg {
 	s64 decay_count;
 	unsigned long load_avg_contrib;
 	unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+	u64 hmp_last_up_migration;
+	u64 hmp_last_down_migration;
+#endif
 	u32 usage_avg_sum;
 };
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 652b86b..a3b1ff6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1723,6 +1723,10 @@ static void __sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+	p->se.avg.hmp_last_up_migration = 0;
+	p->se.avg.hmp_last_down_migration = 0;
+#endif
 #endif
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 811b2b9..56cbda1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3138,10 +3138,14 @@ static int __init hmp_cpu_mask_setup(void)
  * tweaking suit particular needs.
  *
  * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
+ * hmp_next_up_threshold: Delay before next up migration (1024 ~= 1 ms)
+ * hmp_next_down_threshold: Delay before next down migration (1024 ~= 1 ms)
  */
 unsigned int hmp_up_threshold = 512;
 unsigned int hmp_down_threshold = 256;
 unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
+unsigned int hmp_next_up_threshold = 4096;
+unsigned int hmp_next_down_threshold = 4096;
 
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
@@ -3204,6 +3208,21 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
 				tsk_cpus_allowed(tsk));
 }
 
+static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
+{
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+	se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
+	se->avg.hmp_last_down_migration = 0;
+}
+
+static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
+{
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+	se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
+	se->avg.hmp_last_up_migration = 0;
+}
 #endif /* CONFIG_SCHED_HMP */
 
 /*
@@ -3335,11 +3354,13 @@ unlock:
 #ifdef CONFIG_SCHED_HMP
 	if (hmp_up_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+		hmp_next_up_delay(&p->se, new_cpu);
 		trace_sched_hmp_migrate(p, new_cpu, 0);
 		return new_cpu;
 	}
 	if (hmp_down_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+		hmp_next_down_delay(&p->se, new_cpu);
 		trace_sched_hmp_migrate(p, new_cpu, 0);
 		return new_cpu;
 	}
@@ -5503,6 +5524,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+	u64 now;
 
 	if (hmp_cpu_is_fastest(cpu))
 		return 0;
@@ -5513,6 +5536,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 		return 0;
 #endif
 
+	/* Let the task load settle before doing another up migration */
+	now = cfs_rq_clock_task(cfs_rq);
+	if (((now - se->avg.hmp_last_up_migration) >> 10)
+					< hmp_next_up_threshold)
+		return 0;
+
 	if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
 					tsk_cpus_allowed(p))
 		&& se->avg.load_avg_ratio > hmp_up_threshold) {
@@ -5525,6 +5554,8 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+	u64 now;
 
 	if (hmp_cpu_is_slowest(cpu))
 		return 0;
@@ -5535,6 +5566,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 		return 1;
 #endif
 
+	/* Let the task load settle before doing another down migration */
+	now = cfs_rq_clock_task(cfs_rq);
+	if (((now - se->avg.hmp_last_down_migration) >> 10)
+					< hmp_next_down_threshold)
+		return 0;
+
 	if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
 					tsk_cpus_allowed(p))
 		&& se->avg.load_avg_ratio < hmp_down_threshold) {
@@ -5725,6 +5762,7 @@ static void hmp_force_up_migration(int this_cpu)
 				target->migrate_task = p;
 				force = 1;
 				trace_sched_hmp_migrate(p, target->push_cpu, 1);
+				hmp_next_up_delay(&p->se, target->push_cpu);
 			}
 		}
 		raw_spin_unlock_irqrestore(&target->lock, flags);
-- 
1.7.9.5
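
An editorial note on the helpers above (an observation, not patch
text): hmp_next_up_delay() records the up-migration time and zeroes
the down-migration timestamp, and hmp_next_down_delay() does the
opposite, so only repeat migrations in the same direction are delayed;
a freshly up-migrated task may still be down-migrated immediately. A
hypothetical standalone model of that behaviour, with made-up names:

#include <stdio.h>

typedef unsigned long long u64;

static unsigned int next_up_threshold = 4096;	/* ~1us units */
static unsigned int next_down_threshold = 4096;

struct hmp_delays { u64 last_up, last_down; };

static void note_up_migration(struct hmp_delays *d, u64 now)
{
	d->last_up = now;
	d->last_down = 0;	/* immediate down migration stays possible */
}

static int up_allowed(const struct hmp_delays *d, u64 now)
{
	return ((now - d->last_up) >> 10) >= next_up_threshold;
}

static int down_allowed(const struct hmp_delays *d, u64 now)
{
	return ((now - d->last_down) >> 10) >= next_down_threshold;
}

int main(void)
{
	struct hmp_delays d = { 0, 0 };
	u64 now = 100000000ULL;		/* 100ms of task time */

	note_up_migration(&d, now);
	now += 1000000;			/* 1ms later */
	/* up: 0 (same direction delayed), down: 1 (opposite is free) */
	printf("up: %d down: %d\n", up_allowed(&d, now), down_allowed(&d, now));
	return 0;
}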



Thread overview: 27+ messages
2012-09-21 18:32 [RFC PATCH 00/10] sched: Task placement for heterogeneous MP systems morten.rasmussen
2012-09-21 18:32 ` [RFC PATCH 01/10] sched: entity load-tracking load_avg_ratio morten.rasmussen
2012-09-21 18:32 ` [RFC PATCH 02/10] sched: Task placement for heterogeneous systems based on task load-tracking morten.rasmussen
2012-10-04  6:02   ` Viresh Kumar
2012-10-04  6:54     ` Amit Kucheria
2012-10-09 15:56     ` Morten Rasmussen
2012-10-09 16:58       ` Viresh Kumar
2012-09-21 18:32 ` [RFC PATCH 03/10] sched: Forced task migration on heterogeneous systems morten.rasmussen
2012-10-04  6:18   ` Viresh Kumar
2012-09-21 18:32 ` [RFC PATCH 04/10] sched: Introduce priority-based task migration filter morten.rasmussen
2012-10-04  4:37   ` Viresh Kumar
2012-10-04  6:27   ` Viresh Kumar
2012-10-09 16:40     ` Morten Rasmussen
2012-10-24  2:32       ` li guang
2012-09-21 18:32 ` [RFC PATCH 05/10] ARM: Add HMP scheduling support for ARM architecture morten.rasmussen
2012-09-21 18:32 ` [RFC PATCH 06/10] ARM: sched: Use device-tree to provide fast/slow CPU list for HMP morten.rasmussen
2012-10-04  6:49   ` Viresh Kumar
2012-10-10 10:17     ` Morten Rasmussen
2012-10-10 10:33       ` Viresh Kumar
2012-10-10 11:04   ` Morten Rasmussen
2012-10-10 11:29     ` Jon Medhurst (Tixy)
2012-09-21 18:32 ` [RFC PATCH 07/10] ARM: sched: Setup SCHED_HMP domains morten.rasmussen
2012-10-04  6:58   ` Viresh Kumar
2012-10-10 13:29     ` Morten Rasmussen
2012-09-21 18:32 ` [RFC PATCH 08/10] sched: Add ftrace events for entity load-tracking morten.rasmussen
2012-09-21 18:32 ` [RFC PATCH 09/10] sched: Add HMP task migration ftrace event morten.rasmussen
2012-09-21 18:32 ` morten.rasmussen [this message]
