* [PATCH 0/3][RT] rt: patches to postpone migrate disable
From: Steven Rostedt @ 2011-09-26 15:08 UTC
To: linux-kernel; +Cc: Thomas Gleixner, Peter Zijlstra, Clark Williams
Thomas,
Here are some patches that help speed up the RT kernel by delaying
migrate disable until scheduling.
-- Steve
* [PATCH 1/3][RT] sched: Postpone actual migration disable to schedule
From: Steven Rostedt @ 2011-09-26 15:08 UTC
To: linux-kernel; +Cc: Thomas Gleixner, Peter Zijlstra, Clark Williams
migrate_disable() adds a fair bit of overhead to the RT kernel, as
changing the affinity at every lock encountered is expensive. Since a
running task cannot migrate anyway, the actual disabling of migration
does not need to happen until the task is about to schedule out.
In most cases a task that disables migration will enable it again
before it schedules out, so this change improves performance
tremendously.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
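
To illustrate the common case this optimizes (a minimal sketch, not
part of the patch; the critical section is hypothetical):

	migrate_disable();	/* just bumps p->migrate_disable and pins the cpu */
	/*
	 * Short critical section that never calls schedule(): the
	 * expensive cpumask update in the patch below never runs.
	 */
	migrate_enable();	/* drops the count and unpins the cpu */

Only when a migrate-disabled task actually blocks does schedule() call
update_migrate_disable(), which pushes tsk_cpus_allowed() down into the
scheduler classes and sets MIGRATE_DISABLE_SET_AFFIN (bit 30, so the
count in the low bits can never go negative) to tell migrate_enable()
to restore the mask on the way out.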
Index: linux-rt.git/kernel/sched.c
===================================================================
--- linux-rt.git.orig/kernel/sched.c
+++ linux-rt.git/kernel/sched.c
@@ -4207,6 +4207,130 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
+ const struct cpumask *mask;
+
+ if (likely(!p->migrate_disable))
+ return;
+
+ /* Did we already update affinity? */
+ if (unlikely(migrate_disabled_updated(p)))
+ return;
+
+ /*
+ * Since this is always current we can get away with only locking
+ * rq->lock, the ->cpus_allowed value can normally only be changed
+ * while holding both p->pi_lock and rq->lock, but seeing that this
+ * is current, we cannot actually be waking up, so all code that
+ * relies on serialization against p->pi_lock is out of scope.
+ *
+ * Having rq->lock serializes us against things like
+ * set_cpus_allowed_ptr() that can still happen concurrently.
+ */
+ mask = tsk_cpus_allowed(p);
+
+ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+ if (!cpumask_equal(&p->cpus_allowed, mask)) {
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+
+ /* Let migrate_enable know to fix things back up */
+ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+ }
+}
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+ preempt_disable();
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+ preempt_enable();
+ return;
+ }
+
+ pin_current_cpu();
+ p->migrate_disable = 1;
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+ const struct cpumask *mask;
+ unsigned long flags;
+ struct rq *rq;
+
+ if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+
+ preempt_disable();
+ if (migrate_disable_count(p) > 1) {
+ p->migrate_disable--;
+ preempt_enable();
+ return;
+ }
+
+ if (unlikely(migrate_disabled_updated(p))) {
+ /*
+ * See comment in update_migrate_disable() about locking.
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ mask = tsk_cpus_allowed(p);
+
+ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+ if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
+ /* Get the mask now that migration is enabled */
+ mask = tsk_cpus_allowed(p);
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ p->migrate_disable = 0;
+ unpin_current_cpu();
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_enable);
+#else
+static inline void update_migrate_disable(struct task_struct *p) { }
+#define migrate_disabled_updated(p) 0
+#endif
+
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
@@ -4266,6 +4390,8 @@ need_resched:
raw_spin_lock_irq(&rq->lock);
+ update_migrate_disable(prev);
+
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -6058,7 +6184,7 @@ static inline void sched_init_granularit
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
- if (!__migrate_disabled(p)) {
+ if (!migrate_disabled_updated(p)) {
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
@@ -6133,124 +6259,6 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
-#ifdef CONFIG_PREEMPT_RT_FULL
-void migrate_disable(void)
-{
- struct task_struct *p = current;
- const struct cpumask *mask;
- unsigned long flags;
- struct rq *rq;
-
- if (in_atomic()) {
-#ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic++;
-#endif
- return;
- }
-
-#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
-#endif
-
- preempt_disable();
- if (p->migrate_disable) {
- p->migrate_disable++;
- preempt_enable();
- return;
- }
-
- pin_current_cpu();
- if (unlikely(!scheduler_running)) {
- p->migrate_disable = 1;
- preempt_enable();
- return;
- }
-
- /*
- * Since this is always current we can get away with only locking
- * rq->lock, the ->cpus_allowed value can normally only be changed
- * while holding both p->pi_lock and rq->lock, but seeing that this
- * it current, we cannot actually be waking up, so all code that
- * relies on serialization against p->pi_lock is out of scope.
- *
- * Taking rq->lock serializes us against things like
- * set_cpus_allowed_ptr() that can still happen concurrently.
- */
- rq = this_rq();
- raw_spin_lock_irqsave(&rq->lock, flags);
- p->migrate_disable = 1;
- mask = tsk_cpus_allowed(p);
-
- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-
- if (!cpumask_equal(&p->cpus_allowed, mask)) {
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, mask);
- p->rt.nr_cpus_allowed = cpumask_weight(mask);
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(migrate_disable);
-
-void migrate_enable(void)
-{
- struct task_struct *p = current;
- const struct cpumask *mask;
- unsigned long flags;
- struct rq *rq;
-
- if (in_atomic()) {
-#ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic--;
-#endif
- return;
- }
-
-#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
-#endif
- WARN_ON_ONCE(p->migrate_disable <= 0);
-
- preempt_disable();
- if (p->migrate_disable > 1) {
- p->migrate_disable--;
- preempt_enable();
- return;
- }
-
- if (unlikely(!scheduler_running)) {
- p->migrate_disable = 0;
- unpin_current_cpu();
- preempt_enable();
- return;
- }
-
- /*
- * See comment in migrate_disable().
- */
- rq = this_rq();
- raw_spin_lock_irqsave(&rq->lock, flags);
- mask = tsk_cpus_allowed(p);
- p->migrate_disable = 0;
-
- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-
- if (!cpumask_equal(&p->cpus_allowed, mask)) {
- /* Get the mask now that migration is enabled */
- mask = tsk_cpus_allowed(p);
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, mask);
- p->rt.nr_cpus_allowed = cpumask_weight(mask);
- }
-
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- unpin_current_cpu();
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(migrate_enable);
-#endif /* CONFIG_PREEMPT_RT_FULL */
-
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* [PATCH 2/3][RT] sched: Do not compare cpu masks in scheduler
From: Steven Rostedt @ 2011-09-26 15:08 UTC
To: linux-kernel
Cc: Thomas Gleixner, Peter Zijlstra, Clark Williams
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
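
For reference, a sketch of update_migrate_disable() as it reads with
this patch applied (reconstructed from the diff on top of patch 1; the
long locking comment is elided). The cpumask_equal() comparison is
dropped from the schedule() fast path, so the affinity is pushed down
and MIGRATE_DISABLE_SET_AFFIN set unconditionally whenever a
migrate-disabled task schedules out:

	static inline void update_migrate_disable(struct task_struct *p)
	{
		const struct cpumask *mask;

		if (likely(!p->migrate_disable))
			return;

		/* Did we already update affinity? */
		if (unlikely(migrate_disabled_updated(p)))
			return;

		mask = tsk_cpus_allowed(p);

		if (p->sched_class->set_cpus_allowed)
			p->sched_class->set_cpus_allowed(p, mask);
		p->rt.nr_cpus_allowed = cpumask_weight(mask);

		/* Let migrate_enable know to fix things back up */
		p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
	}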
Index: linux-rt.git/kernel/sched.c
===================================================================
--- linux-rt.git.orig/kernel/sched.c
+++ linux-rt.git/kernel/sched.c
@@ -4235,16 +4235,12 @@ static inline void update_migrate_disabl
*/
mask = tsk_cpus_allowed(p);
- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
- if (!cpumask_equal(&p->cpus_allowed, mask)) {
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, mask);
- p->rt.nr_cpus_allowed = cpumask_weight(mask);
-
- /* Let migrate_enable know to fix things back up */
- p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
- }
+ /* Let migrate_enable know to fix things back up */
+ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
}
void migrate_disable(void)
* [PATCH 3/3][RT] sched: Have migrate_disable ignore bound threads
From: Steven Rostedt @ 2011-09-26 15:08 UTC
To: linux-kernel
Cc: Thomas Gleixner, Peter Zijlstra, Clark Williams
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
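
The idea, as a short sketch of the new early-out taken from the diff
below: a thread running with PF_THREAD_BOUND is already pinned to a
single CPU (via kthread_bind()) and can never migrate, so
migrate_disable()/migrate_enable() can treat it exactly like atomic
context and skip the counting and CPU pinning altogether:

	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
#ifdef CONFIG_SCHED_DEBUG
		p->migrate_disable_atomic++;	/* debug-only balance check */
#endif
		return;
	}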
Index: linux-rt.git/kernel/sched.c
===================================================================
--- linux-rt.git.orig/kernel/sched.c
+++ linux-rt.git/kernel/sched.c
@@ -4247,7 +4247,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
- if (in_atomic()) {
+ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
@@ -4278,7 +4278,7 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
- if (in_atomic()) {
+ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif
@@ -4299,21 +4299,16 @@ void migrate_enable(void)
if (unlikely(migrate_disabled_updated(p))) {
/*
- * See comment in update_migrate_disable() about locking.
+ * Undo whatever update_migrate_disable() did, also see there
+ * about locking.
*/
rq = this_rq();
raw_spin_lock_irqsave(&rq->lock, flags);
- mask = tsk_cpus_allowed(p);
-
- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
- if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
- /* Get the mask now that migration is enabled */
- mask = tsk_cpus_allowed(p);
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, mask);
- p->rt.nr_cpus_allowed = cpumask_weight(mask);
- }
+ mask = tsk_cpus_allowed(p);
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 2/3][RT] sched: Do not compare cpu masks in scheduler
From: Steven Rostedt @ 2011-09-26 15:26 UTC
To: linux-kernel
Cc: Thomas Gleixner, Peter Zijlstra, Clark Williams
On Mon, 2011-09-26 at 11:08 -0400, Steven Rostedt wrote:
Grumble. Quilt seems to have ignored the "From: Peter ..." part.
The last two patches are Peter's, not mine. I took his changes from a
patch he gave me on top of my patch.
-- Steve
> plain text document attachment
> (peterz-migrate-disable-no-cpu-compare.patch)
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
>
> Index: linux-rt.git/kernel/sched.c
> ===================================================================
> --- linux-rt.git.orig/kernel/sched.c
> +++ linux-rt.git/kernel/sched.c
> @@ -4235,16 +4235,12 @@ static inline void update_migrate_disabl
> */
> mask = tsk_cpus_allowed(p);
>
> - WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
> + if (p->sched_class->set_cpus_allowed)
> + p->sched_class->set_cpus_allowed(p, mask);
> + p->rt.nr_cpus_allowed = cpumask_weight(mask);
>
> - if (!cpumask_equal(&p->cpus_allowed, mask)) {
> - if (p->sched_class->set_cpus_allowed)
> - p->sched_class->set_cpus_allowed(p, mask);
> - p->rt.nr_cpus_allowed = cpumask_weight(mask);
> -
> - /* Let migrate_enable know to fix things back up */
> - p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
> - }
> + /* Let migrate_enable know to fix things back up */
> + p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
> }
>
> void migrate_disable(void)