public inbox for linux-kernel@vger.kernel.org
* Re: [sched-devel, patch-rfc] rework #2 of "prioritize non-migratable tasks over migratable ones"
@ 2008-07-01 21:32 Dmitry Adamushko
  2008-07-14 14:50 ` Dmitry Adamushko
  2008-07-18 10:30 ` Ingo Molnar
  0 siblings, 2 replies; 9+ messages in thread
From: Dmitry Adamushko @ 2008-07-01 21:32 UTC (permalink / raw)
  To: Gregory Haskins
  Cc: Steven Rostedt, Ingo Molnar, Thomas Gleixner, linux-kernel


Finally, this new version compiles and boots (I applied a minor compilation fix).
The functionality has not been fully tested yet, though.

It's on top of today's tip tree.

Any objections to this approach?


---
From: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Subject: prioritize non-migratable tasks over migratable ones in a generic way

(1) handle, in a generic way, all cases where a newly woken-up task is
not migratable (not just the corner case where
"rt_se->nr_cpus_allowed == 1");

(2) if current is to be preempted, make sure "p" will be picked up by
pick_next_task_rt(), i.e. move the task's group to the head of its
list as well.

currently, this is not the case for group scheduling, as described here:
http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html

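For illustration, here is a minimal userspace sketch of the head-vs-tail
queue placement that the new 'head' flag of requeue_rt_entity() controls.
The kernel list primitives are re-implemented just for the demo; the task
names and the scenario are made up:

#include <stdio.h>
#include <stddef.h>

/* userspace mock of the kernel's circular doubly-linked list */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void __list_add(struct list_head *item, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = item;
	item->next = next;
	item->prev = prev;
	prev->next = item;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	__list_add(e, h->prev, h);
}

/* delete + re-add at the head: the entity becomes the next pick */
static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e);
	__list_add(e, h, h->next);
}

/* delete + re-add at the tail: plain round-robin */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	__list_add(e, h->prev, h);
}

struct task { const char *name; struct list_head run_list; };

static void dump(const char *when, struct list_head *q)
{
	struct list_head *pos;

	printf("%s:", when);
	for (pos = q->next; pos != q; pos = pos->next)
		printf(" %s", ((struct task *)((char *)pos -
			offsetof(struct task, run_list)))->name);
	printf("\n");
}

int main(void)
{
	struct list_head queue;
	struct task a = { .name = "A" }, b = { .name = "B" },
		    p = { .name = "P" };

	INIT_LIST_HEAD(&queue);

	/* __enqueue_rt_entity() now always enqueues at the tail,
	 * migratable or not: */
	list_add_tail(&a.run_list, &queue);
	list_add_tail(&b.run_list, &queue);
	list_add_tail(&p.run_list, &queue);
	dump("enqueue A, B, P", &queue);	/* A B P */

	/* requeue_task_rt(rq, p, 1), i.e. head placement, as done by
	 * check_preempt_equal_prio() when P must run on this cpu: */
	list_move(&p.run_list, &queue);
	dump("requeue P, head=1", &queue);	/* P A B */

	/* requeue_task_rt(rq, p, 0), as done by yield and the tick: */
	list_move_tail(&p.run_list, &queue);
	dump("requeue P, head=0", &queue);	/* A B P */

	return 0;
}

With head=1 the entity (and, under group scheduling, each enclosing group
entity, since requeue_task_rt() walks for_each_sched_rt_entity()) lands at
the head of its prio queue, so pick_next_task_rt() picks it next; with
head=0 it goes to the back, which keeps the usual round-robin behaviour of
yield and the timer tick.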

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>

---
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 7c96147..7bc73e8 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -599,11 +599,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (rt_se->nr_cpus_allowed == 1)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-
+	list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -689,31 +685,33 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * followed by enqueue.
  */
 static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-	struct rt_prio_array *array = &rt_rq->active;
-
 	if (on_rt_rq(rt_se)) {
-		list_del_init(&rt_se->run_list);
-		list_add_tail(&rt_se->run_list,
-			      array->queue + rt_se_prio(rt_se));
+		struct rt_prio_array *array = &rt_rq->active;
+		struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+		if (head)
+			list_move(&rt_se->run_list, queue);
+		else
+			list_move_tail(&rt_se->run_list, queue);
 	}
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
-		requeue_rt_entity(rt_rq, rt_se);
+		requeue_rt_entity(rt_rq, rt_se, head);
 	}
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, rq->curr);
+	requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -753,6 +751,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 	 */
 	return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+	cpumask_t mask;
+
+	if (rq->curr->rt.nr_cpus_allowed == 1)
+		return;
+
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+		return;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+		return;
+
+	/*
+	 * There appears to be other cpus that can accept
+	 * current and none to run 'p', so lets reschedule
+	 * to try and push current away:
+	 */
+	requeue_task_rt(rq, p, 1);
+	resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -778,18 +800,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if((p->prio == rq->curr->prio)
-	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1) {
-		cpumask_t mask;
-
-		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-			/*
-			 * There appears to be other cpus that can accept
-			 * current, so lets reschedule to try and push it away
-			 */
-			resched_task(rq->curr);
-	}
+	if (p->prio == rq->curr->prio && !need_resched())
+		check_preempt_equal_prio(rq, p);
 #endif
 }
 
@@ -1415,7 +1427,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	 * on the queue:
 	 */
 	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p);
+		requeue_task_rt(rq, p, 0);
 		set_tsk_need_resched(p);
 	}
 }

---

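To make the decision flow of check_preempt_equal_prio() explicit (it is
only reached when the woken task has the same prio as current and
need_resched() is not already set), here is a standalone sketch with
cpupri_find() stubbed out; everything except the logic itself is made up
for illustration:

#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	const char *name;
	int nr_cpus_allowed;
	bool other_cpu_available;	/* precomputed cpupri answer */
};

/* stub for cpupri_find(): the kernel consults the root domain's cpupri
 * data; here we just return a precomputed flag */
static bool stub_cpupri_find(const struct fake_task *t)
{
	return t->other_cpu_available;
}

/* mirrors check_preempt_equal_prio(): returns true when 'p' should be
 * requeued at the head and current rescheduled, so that a later push
 * can move current away */
static bool should_push_current_away(const struct fake_task *curr,
				     const struct fake_task *p)
{
	/* current is pinned to this cpu: nowhere to push it */
	if (curr->nr_cpus_allowed == 1)
		return false;

	/* p itself can run elsewhere: leave current alone */
	if (p->nr_cpus_allowed != 1 && stub_cpupri_find(p))
		return false;

	/* no other cpu would accept current: a push would fail */
	if (!stub_cpupri_find(curr))
		return false;

	/* other cpus can take current and none can run p */
	return true;
}

int main(void)
{
	struct fake_task curr   = { "current", 4, true };
	struct fake_task pinned = { "p", 1, false };

	printf("resched current for pinned p: %s\n",
	       should_push_current_away(&curr, &pinned) ? "yes" : "no");
	return 0;
}

When it returns true, the real code requeues "p" at the head of its prio
queue and calls resched_task(rq->curr), so that the push logic can then
move current to one of the cpus that would accept it.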

--Dmitry



* [sched-devel, patch-rfc] rework #2 of "prioritize non-migratable tasks over migratable ones"
@ 2008-07-01 13:32 Dmitry Adamushko
  2008-07-01 14:11 ` Steven Rostedt
  0 siblings, 1 reply; 9+ messages in thread
From: Dmitry Adamushko @ 2008-07-01 13:32 UTC (permalink / raw)
  To: Gregory Haskins
  Cc: Ingo Molnar, Steven Rostedt, Thomas Gleixner, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 4580 bytes --]

Hi,


this is a continuation of another thread:
http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html


The following patch is just an illustration; it has not been tested, not
even compilation-wise.


(1) handle, in a generic way, all cases where a newly woken-up task is
not migratable (not just the corner case where
"rt_se->nr_cpus_allowed == 1");

(2) if current is to be preempted, make sure "p" will be picked up by
pick_next_task_rt(), i.e. move the task's group to the head of its
list as well.

currently, this is not the case for group scheduling, as described here:
http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html


what do you think?


(a non-white-space-damaged version is attached)





-- 
Best regards,
Dmitry Adamushko

[-- Attachment #2: resched-eqial-prio.patch --]
[-- Type: text/x-patch; name=resched-eqial-prio.patch, Size: 3172 bytes --]

--- sched_rt-old.c	2008-07-01 11:42:30.000000000 +0200
+++ sched_rt.c	2008-07-01 15:00:55.000000000 +0200
@@ -599,11 +599,7 @@ static void __enqueue_rt_entity(struct s
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (rt_se->nr_cpus_allowed == 1)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-
+	list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -689,31 +685,33 @@ static void dequeue_task_rt(struct rq *r
  * followed by enqueue.
  */
 static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-	struct rt_prio_array *array = &rt_rq->active;
-
 	if (on_rt_rq(rt_se)) {
-		list_del_init(&rt_se->run_list);
-		list_add_tail(&rt_se->run_list,
-			      array->queue + rt_se_prio(rt_se));
+		struct rt_prio_array *array = &rt_rq->active;
+		struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+		if (head)
+			list_move(&rt_se->run_list, queue);
+		else
+			list_move_tail(&rt_se->run_list, queue);
 	}
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
-		requeue_rt_entity(rt_rq, rt_se);
+		requeue_rt_entity(rt_rq, rt_se, head);
 	}
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, rq->curr);
+	requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -753,6 +751,29 @@ static int select_task_rq_rt(struct task
 	 */
 	return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+	cpumask_t mask;
+
+	if (rq->curr->rt.nr_cpus_allowed == 1 || p->rt.nr_cpus_allowed == 1)
+		return;
+
+	if (cpupri_find(&rq->rd->cpupri, p, &mask))
+		return;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+		return;
+
+	/*
+	 * There appears to be other cpus that can accept
+	 * current and none to run 'p', so lets reschedule
+	 * to try and push current away:
+	 */
+	requeue_task_rt(rq, p, 1);
+	resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -778,18 +799,8 @@ static void check_preempt_curr_rt(struct
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if((p->prio == rq->curr->prio)
-	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1) {
-		cpumask_t mask;
-
-		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-			/*
-			 * There appears to be other cpus that can accept
-			 * current, so lets reschedule to try and push it away
-			 */
-			resched_task(rq->curr);
-	}
+	if (p->prio == rq->curr->prio && !need_resched())
+		check_preempt_equal_prio(rq, p);
 #endif
 }
 
@@ -1415,7 +1426,7 @@ static void task_tick_rt(struct rq *rq, 
 	 * on the queue:
 	 */
 	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p);
+		requeue_task_rt(rq, p, 0);
 		set_tsk_need_resched(p);
 	}
 }

^ permalink raw reply	[flat|nested] 9+ messages in thread


Thread overview: 9+ messages
2008-07-01 21:32 [sched-devel, patch-rfc] rework #2 of "prioritize non-migratable tasks over migratable ones" Dmitry Adamushko
2008-07-14 14:50 ` Dmitry Adamushko
2008-07-15  3:01   ` Steven Rostedt
2008-07-15 13:05   ` Gregory Haskins
2008-07-15 13:14     ` Dmitry Adamushko
2008-07-18 10:30 ` Ingo Molnar
  -- strict thread matches above, loose matches on Subject: below --
2008-07-01 13:32 Dmitry Adamushko
2008-07-01 14:11 ` Steven Rostedt
2008-07-01 14:19   ` Dmitry Adamushko
