public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [patch] sched,rt: non-isolated cores lift isolcpus throttle for CONFIG_RT_GROUP_SCHED
@ 2012-04-03  9:08 Mike Galbraith
  2012-04-03  9:19 ` [patch] sched,rt: let the user see rt queues in /proc/sched_debug Mike Galbraith
  0 siblings, 1 reply; 14+ messages in thread
From: Mike Galbraith @ 2012-04-03  9:08 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: LKML

s/patch/hack.  Better ideas?

When CONFIG_RT_GROUP_SCHED is enabled, isolcpus have no replenishment timer
running, and unlike !CONFIG_RT_GROUP_SCHED, are not in the rd->span of the cpu
running replenishment.  If you trigger the throttle, you're rewarded with a
dead box.  Should the user reassign cpus to a domain, they become sane again,
and replenishment starts/stops as usual.

Signed-off-by: Mike Galbraith <efault@gmx.de>
---
 kernel/sched/core.c  |   15 ++++++++++++++-
 kernel/sched/rt.c    |   16 ++++++++++++++--
 kernel/sched/sched.h |    5 +++++
 3 files changed, 33 insertions(+), 3 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5877,6 +5877,14 @@ cpu_attach_domain(struct sched_domain *s
 
 	sched_domain_debug(sd, cpu);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* If the cpu was an isolcpu, it no longer is. */
+	if (sd) {
+		cpumask_clear_cpu(cpu, cpu_isolated_map);
+		nr_isolated_cpus = cpumask_weight(cpu_isolated_map);
+	}
+#endif
+
 	rq_attach_root(rq, rd);
 	tmp = rq->sd;
 	rcu_assign_pointer(rq->sd, sd);
@@ -5886,13 +5894,18 @@ cpu_attach_domain(struct sched_domain *s
 }
 
 /* cpus with isolated domains */
-static cpumask_var_t cpu_isolated_map;
+cpumask_var_t cpu_isolated_map;
+
+__read_mostly int nr_isolated_cpus;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
 	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 	cpulist_parse(str, cpu_isolated_map);
+#ifdef CONFIG_RT_GROUP_SCHED
+	nr_isolated_cpus = cpumask_weight(cpu_isolated_map);
+#endif
 	return 1;
 }
 
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -778,10 +778,11 @@ static inline int balance_runtime(struct
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1, throttled = 0;
+	int i, idle = 1, throttled = 0, isol_cpus = nr_isolated_cpus;
 	const struct cpumask *span;
 
 	span = sched_rt_period_mask();
+do_isolcpus:
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
@@ -792,7 +793,7 @@ static int do_sched_rt_period_timer(stru
 			u64 runtime;
 
 			raw_spin_lock(&rt_rq->rt_runtime_lock);
-			if (rt_rq->rt_throttled)
+			if (rt_rq->rt_throttled && span != cpu_isolated_map)
 				balance_runtime(rt_rq);
 			runtime = rt_rq->rt_runtime;
 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
@@ -823,6 +824,17 @@ static int do_sched_rt_period_timer(stru
 		raw_spin_unlock(&rq->lock);
 	}
 
+	/*
+	 * Hack: unthrottle isolcpus for RT_GROUP_SCHED.  No replenishment
+	 * timer is running on isolcpus, and unlike !RT_GROUP_SCHED, they're
+	 * not in the rd->span of the cpu running the timer.
+	 */
+	if (isol_cpus) {
+		span = cpu_isolated_map;
+		isol_cpus = 0;
+		goto do_isolcpus;
+	}
+
 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 		return 1;
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -572,6 +572,11 @@ static inline void set_task_rq(struct ta
 #endif
 }
 
+/* cpus with isolated domains */
+extern  cpumask_var_t cpu_isolated_map;
+
+extern __read_mostly int nr_isolated_cpus;
+
 #else /* CONFIG_CGROUP_SCHED */
 
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }



^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2012-04-19  6:34 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-04-03  9:08 [patch] sched,rt: non-isolated cores lift isolcpus throttle for CONFIG_RT_GROUP_SCHED Mike Galbraith
2012-04-03  9:19 ` [patch] sched,rt: let the user see rt queues in /proc/sched_debug Mike Galbraith
2012-04-07  8:58   ` [patch] sched,cgroup_sched: fix up task_groups list buglet Mike Galbraith
2012-04-07  9:54     ` RFC [patch] sched,cgroup_sched: convince RT_GROUP_SCHED throttle to work Mike Galbraith
2012-04-10  9:08       ` Mike Galbraith
2012-04-14 11:10         ` Peter Zijlstra
2012-04-15  3:37           ` Mike Galbraith
2012-04-15  3:44             ` Mike Galbraith
2012-04-15  4:51               ` Mike Galbraith
2012-04-18  5:20                 ` Yong Zhang
2012-04-18  6:27                   ` Mike Galbraith
2012-04-18  7:48                     ` Yong Zhang
2012-04-18  8:38                       ` Mike Galbraith
2012-04-19  6:34                         ` Yong Zhang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox