public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] sched: consider migration thread with smp nice
@ 2005-06-26  8:25 Con Kolivas
  2005-06-28  0:43 ` Peter Williams
  0 siblings, 1 reply; 5+ messages in thread
From: Con Kolivas @ 2005-06-26  8:25 UTC (permalink / raw)
  To: linux kernel mailing list; +Cc: Andrew Morton, Ingo Molnar, Martin J. Bligh


[-- Attachment #1.1: Type: text/plain, Size: 207 bytes --]

This patch improves throughput with the smp nice balancing code. Many thanks 
to Martin Bligh for the usage of his regression testing bed to confirm the 
effectiveness of various patches.

Con
---



[-- Attachment #1.2: sched-consider_migration_thread_smp_nice.patch --]
[-- Type: text/x-diff, Size: 1999 bytes --]

The intermittent scheduling of the migration thread at ultra high priority
makes the smp nice handling see that runqueue as being heavily loaded. The
migration thread itself actually handles the balancing so its influence on
priority balancing should be ignored.

Signed-off-by: Con Kolivas <kernel@kolivas.org>

Index: linux-2.6.12-mm1/kernel/sched.c
===================================================================
--- linux-2.6.12-mm1.orig/kernel/sched.c	2005-06-26 17:59:10.000000000 +1000
+++ linux-2.6.12-mm1/kernel/sched.c	2005-06-26 18:02:01.000000000 +1000
@@ -669,6 +669,31 @@ static inline void dec_prio_bias(runqueu
 {
 	rq->prio_bias -= MAX_PRIO - prio;
 }
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			/*
+			 * The migration thread does the actual balancing. Do
+			 * not bias by its priority as the ultra high priority
+			 * will skew balancing adversely.
+			 */
+			inc_prio_bias(rq, p->prio);
+	} else
+		inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			dec_prio_bias(rq, p->prio);
+	} else
+		dec_prio_bias(rq, p->static_prio);
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -677,25 +702,17 @@ static inline void inc_prio_bias(runqueu
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running++;
-	if (rt_task(p))
-		inc_prio_bias(rq, p->prio);
-	else
-		inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
-	if (rt_task(p))
-		dec_prio_bias(rq, p->prio);
-	else
-		dec_prio_bias(rq, p->static_prio);
 }
+#endif
 
 /*
  * __activate_task - move a task to the runqueue.

[-- Attachment #2: Type: application/pgp-signature, Size: 189 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] sched: consider migration thread with smp nice
  2005-06-26  8:25 [PATCH] sched: consider migration thread with smp nice Con Kolivas
@ 2005-06-28  0:43 ` Peter Williams
  2005-06-28  0:48   ` Con Kolivas
  0 siblings, 1 reply; 5+ messages in thread
From: Peter Williams @ 2005-06-28  0:43 UTC (permalink / raw)
  To: Con Kolivas
  Cc: linux kernel mailing list, Andrew Morton, Ingo Molnar,
	Martin J. Bligh

[-- Attachment #1: Type: text/plain, Size: 686 bytes --]

Con Kolivas wrote:
> This patch improves throughput with the smp nice balancing code. Many thanks 
> to Martin Bligh for the usage of his regression testing bed to confirm the 
> effectiveness of various patches.

Con,
	This doesn't build on non SMP systems due to the migration_thread field 
only being defined for SMP.  Attached is a copy of a slightly modified 
PlugSched version of the patch which I used to fix the problem in 
PlugSched.  Even though it's for a different file it should be easy to 
copy over.

Peter
-- 
Peter Williams                                   pwil3058@bigpond.net.au

"Learning, n. The kind of ignorance distinguishing the studious."
  -- Ambrose Bierce

[-- Attachment #2: migration_thread.diff --]
[-- Type: text/x-diff, Size: 1458 bytes --]

Index: MM-2.6.12/include/linux/sched_pvt.h
===================================================================
--- MM-2.6.12.orig/include/linux/sched_pvt.h	2005-06-28 10:11:47.000000000 +1000
+++ MM-2.6.12/include/linux/sched_pvt.h	2005-06-28 10:37:14.000000000 +1000
@@ -393,6 +393,11 @@
 {
 	rq->prio_bias -= MAX_STATIC_PRIO - prio;
 }
+
+static inline int is_migration_thread(const task_t *p, const runqueue_t *rq)
+{
+	return p == rq->migration_thread;
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -401,23 +406,35 @@
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
+
+static inline int is_migration_thread(const task_t *p, const runqueue_t *rq)
+{
+	return 0;
+}
 #endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running++;
-	if (rt_task(p))
-		inc_prio_bias(rq, p->prio);
-	else
+	if (rt_task(p)) {
+		if (!is_migration_thread(p, rq))
+			/*
+			 * The migration thread does the actual balancing. Do
+			 * not bias by its priority as the ultra high priority
+			 * will skew balancing adversely.
+			 */
+			inc_prio_bias(rq, p->prio);
+	} else
 		inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
-	if (rt_task(p))
-		dec_prio_bias(rq, p->prio);
-	else
+	if (rt_task(p)) {
+		if (!is_migration_thread(p, rq))
+			dec_prio_bias(rq, p->prio);
+	} else
 		dec_prio_bias(rq, p->static_prio);
 }
 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] sched: consider migration thread with smp nice
  2005-06-28  0:43 ` Peter Williams
@ 2005-06-28  0:48   ` Con Kolivas
  2005-06-28  2:30     ` Peter Williams
  0 siblings, 1 reply; 5+ messages in thread
From: Con Kolivas @ 2005-06-28  0:48 UTC (permalink / raw)
  To: Peter Williams
  Cc: linux kernel mailing list, Andrew Morton, Ingo Molnar,
	Martin J. Bligh

[-- Attachment #1: Type: text/plain, Size: 817 bytes --]

On Tue, 28 Jun 2005 10:43, Peter Williams wrote:
> Con Kolivas wrote:
> > This patch improves throughput with the smp nice balancing code. Many
> > thanks to Martin Bligh for the usage of his regression testing bed to
> > confirm the effectiveness of various patches.
>
> Con,
> 	This doesn't build on non SMP systems due to the migration_thread field
> only being defined for SMP.  Attached is a copy of a slightly modified
> PlugSched version of the patch which I used to fix the problem in
> PlugSched.  Even though it's for a different file it should be easy to
> copy over.

Peter

Look at the actual patch I sent out you'll see it moved the ifdefs up to 
compensate. I believe your port of my patch doesn't build and I suspect it's 
because you missed these ifdef movements ;)

Cheers,
Con

[-- Attachment #2: Type: application/pgp-signature, Size: 189 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] sched: consider migration thread with smp nice
  2005-06-28  0:48   ` Con Kolivas
@ 2005-06-28  2:30     ` Peter Williams
  0 siblings, 0 replies; 5+ messages in thread
From: Peter Williams @ 2005-06-28  2:30 UTC (permalink / raw)
  To: Con Kolivas
  Cc: linux kernel mailing list, Andrew Morton, Ingo Molnar,
	Martin J. Bligh

Con Kolivas wrote:
> On Tue, 28 Jun 2005 10:43, Peter Williams wrote:
> 
>>Con Kolivas wrote:
>>
>>>This patch improves throughput with the smp nice balancing code. Many
>>>thanks to Martin Bligh for the usage of his regression testing bed to
>>>confirm the effectiveness of various patches.
>>
>>Con,
>>	This doesn't build on non SMP systems due to the migration_thread field
>>only being defined for SMP.  Attached is a copy of a slightly modified
>>PlugSched version of the patch which I used to fix the problem in
>>PlugSched.  Even though it's for a different file it should be easy to
>>copy over.
> 
> 
> Peter
> 
> Look at the actual patch I sent out you'll see it moved the ifdefs up to 
> compensate. I believe your port of my patch doesn't build and I suspect it's 
> because you missed these ifdef movements ;)

Yes, I should have read the patch more carefully.

Sorry about that,
Peter
-- 
Peter Williams                                   pwil3058@bigpond.net.au

"Learning, n. The kind of ignorance distinguishing the studious."
  -- Ambrose Bierce

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH] sched: consider migration thread with smp nice
  2005-07-01 11:40 2.6.13-rc1-mm1 Andrew Morton
@ 2005-07-02  0:22 ` Con Kolivas
  0 siblings, 0 replies; 5+ messages in thread
From: Con Kolivas @ 2005-07-02  0:22 UTC (permalink / raw)
  To: linux-kernel; +Cc: Andrew Morton


[-- Attachment #1.1: Type: text/plain, Size: 278 bytes --]

On Fri, 1 Jul 2005 21:40, Andrew Morton wrote:
> ftp://ftp.kernel.org/pub/linux/kernel/people/akpm/patches/2.6/2.6.13-rc1/2.
>6.13-rc1-mm1/

Hi akpm

With your 4 bazillion patches I guess this got missed. It's an add-on 
to the smp-nice series.

Cheers,
Con
---



[-- Attachment #1.2: sched-consider_migration_thread_smp_nice.patch --]
[-- Type: text/x-diff, Size: 1999 bytes --]

The intermittent scheduling of the migration thread at ultra high priority
makes the smp nice handling see that runqueue as being heavily loaded. The
migration thread itself actually handles the balancing so its influence on
priority balancing should be ignored.

Signed-off-by: Con Kolivas <kernel@kolivas.org>

Index: linux-2.6.12-mm1/kernel/sched.c
===================================================================
--- linux-2.6.12-mm1.orig/kernel/sched.c	2005-06-26 17:59:10.000000000 +1000
+++ linux-2.6.12-mm1/kernel/sched.c	2005-06-26 18:02:01.000000000 +1000
@@ -669,6 +669,31 @@ static inline void dec_prio_bias(runqueu
 {
 	rq->prio_bias -= MAX_PRIO - prio;
 }
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			/*
+			 * The migration thread does the actual balancing. Do
+			 * not bias by its priority as the ultra high priority
+			 * will skew balancing adversely.
+			 */
+			inc_prio_bias(rq, p->prio);
+	} else
+		inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			dec_prio_bias(rq, p->prio);
+	} else
+		dec_prio_bias(rq, p->static_prio);
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -677,25 +702,17 @@ static inline void inc_prio_bias(runqueu
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running++;
-	if (rt_task(p))
-		inc_prio_bias(rq, p->prio);
-	else
-		inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
-	if (rt_task(p))
-		dec_prio_bias(rq, p->prio);
-	else
-		dec_prio_bias(rq, p->static_prio);
 }
+#endif
 
 /*
  * __activate_task - move a task to the runqueue.

[-- Attachment #2: Type: application/pgp-signature, Size: 189 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2005-07-02  0:22 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2005-06-26  8:25 [PATCH] sched: consider migration thread with smp nice Con Kolivas
2005-06-28  0:43 ` Peter Williams
2005-06-28  0:48   ` Con Kolivas
2005-06-28  2:30     ` Peter Williams
  -- strict thread matches above, loose matches on Subject: below --
2005-07-01 11:40 2.6.13-rc1-mm1 Andrew Morton
2005-07-02  0:22 ` [PATCH] sched: consider migration thread with smp nice Con Kolivas

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox