public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/2] sched/fair: use reweight_entity to reweight tasks
@ 2017-08-03 15:13 josef
  2017-08-03 15:13 ` [PATCH 2/2] sched/fair: calculate runnable_weight slightly differently josef
  0 siblings, 1 reply; 3+ messages in thread
From: josef @ 2017-08-03 15:13 UTC (permalink / raw)
  To: riel, kernel-team, mingo, peterz, linux-kernel, tj; +Cc: Josef Bacik

From: Josef Bacik <jbacik@fb.com>

reweight_task only accounts for the load average change in the cfs_rq, but
doesn't account for the runnable_average change in the cfs_rq.  We need to do
everything reweight_entity does, and then we just set our inv_weight
appropriately.

Signed-off-by: Josef Bacik <jbacik@fb.com>
---
 kernel/sched/fair.c | 31 +++++++++++--------------------
 1 file changed, 11 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0cff1b6..c336534 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2809,26 +2809,6 @@ __sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
 }
 
-void reweight_task(struct task_struct *p, int prio)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	struct load_weight *load = &p->se.load;
-
-	u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
-	__sub_load_avg(cfs_rq, se);
-
-	load->weight = scale_load(sched_prio_to_weight[prio]);
-	load->inv_weight = sched_prio_to_wmult[prio];
-
-	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-	se->avg.runnable_load_avg =
-		div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
-
-	__add_load_avg(cfs_rq, se);
-}
-
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight, unsigned long runnable)
 {
@@ -2858,6 +2838,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	}
 }
 
+void reweight_task(struct task_struct *p, int prio)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct load_weight *load = &se->load;
+	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+	reweight_entity(cfs_rq, se, weight, weight);
+	load->inv_weight = sched_prio_to_wmult[prio];
+}
+
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
 /*
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2017-09-29 20:17 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-08-03 15:13 [PATCH 1/2] sched/fair: use reweight_entity to reweight tasks josef
2017-08-03 15:13 ` [PATCH 2/2] sched/fair: calculate runnable_weight slightly differently josef
2017-09-29 20:14   ` [tip:sched/core] sched/fair: Calculate " tip-bot for Josef Bacik

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox