public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] sched/fair: move definitions to fix !CONFIG_SMP
@ 2017-08-21 20:03 josef
  2017-08-21 20:06 ` Peter Zijlstra
  2017-08-21 20:20 ` Josef Bacik
  0 siblings, 2 replies; 4+ messages in thread
From: josef @ 2017-08-21 20:03 UTC (permalink / raw)
  To: peterz, linux-kernel, kernel-team; +Cc: Josef Bacik

From: Josef Bacik <jbacik@fb.com>

The series of patches adding runnable_avg and subsequent supporting
patches broke on !CONFIG_SMP.  Fix this by moving the definitions under
the appropriate checks, and moving the !CONFIG_SMP definitions higher
up.

Signed-off-by: Josef Bacik <jbacik@fb.com>
---
 kernel/sched/fair.c | 155 +++++++++++++++++++++++++++-------------------------
 1 file changed, 80 insertions(+), 75 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40a89f4..c53da64 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2796,7 +2796,81 @@ static long calc_group_shares(struct cfs_rq *cfs_rq)
 	 */
 	return clamp_t(long, shares, MIN_SHARES, tg_shares);
 }
-# endif /* CONFIG_SMP */
+
+/*
+ * The runnable shares of this group are calculated as such
+ *
+ *          max(cfs_rq->avg.runnable_load_avg, cfs_rq->runnable_weight)
+ * shares * ------------------------------------------------------------
+ *               max(cfs_rq->avg.load_avg, cfs_rq->load.weight)
+ *
+ * We do this to keep the shares in line with expected load on the cfs_rq.
+ * Consider a cfs_rq that has several tasks wake up on this cfs_rq for the first
+ * time, its runnable_load_avg is not going to be representative of the actual
+ * load this cfs_rq will now experience, which will bias us against this cfs_rq.
+ * The weight on the cfs_rq is the immediate effect of having new tasks
+ * enqueue'd onto it which should be used to calculate the new runnable shares.
+ * At the same time we need the actual load_avg to be the lower bounds for the
+ * calculation, to handle when our weight drops quickly from having entities
+ * dequeued.
+ */
+static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
+{
+	long load_avg = max(cfs_rq->avg.load_avg,
+			    scale_load_down(cfs_rq->load.weight));
+	long runnable = max(cfs_rq->avg.runnable_load_avg,
+			    scale_load_down(cfs_rq->runnable_weight));
+
+	runnable *= shares;
+	if (load_avg)
+		runnable /= load_avg;
+	return clamp_t(long, runnable, MIN_SHARES, shares);
+}
+
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->runnable_weight += se->runnable_weight;
+
+	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
+	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
+}
+
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->runnable_weight -= se->runnable_weight;
+
+	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
+	sub_positive(&cfs_rq->avg.runnable_load_sum,
+		     se_runnable(se) * se->avg.runnable_load_sum);
+}
+
+static inline void
+__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->avg.load_avg += se->avg.load_avg;
+	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+}
+
+static inline void
+__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+}
+
+#else /* CONFIG_SMP */
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+#endif /* CONFIG_SMP */
+
 
 /*
  * Signed add and clamp on underflow.
@@ -2848,44 +2922,9 @@ static inline long se_runnable(struct sched_entity *se)
 	return scale_load_down(se->runnable_weight);
 }
 
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->runnable_weight += se->runnable_weight;
-
-	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
-	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
-}
-
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->runnable_weight -= se->runnable_weight;
-
-	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
-	sub_positive(&cfs_rq->avg.runnable_load_sum,
-		     se_runnable(se) * se->avg.runnable_load_sum);
-}
-
-static inline void
-__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->avg.load_avg += se->avg.load_avg;
-	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
-}
-
-static inline void
-__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
-}
-
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight, unsigned long runnable)
 {
-	u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		if (cfs_rq->curr == se)
@@ -2899,9 +2938,9 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	se->runnable_weight = runnable;
 	update_load_set(&se->load, weight);
 
-	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-	se->avg.runnable_load_avg =
-		div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
+#ifdef CONFIG_SMP
+	___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+#endif
 
 	__add_load_avg(cfs_rq, se);
 	if (se->on_rq) {
@@ -2924,36 +2963,6 @@ void reweight_task(struct task_struct *p, int prio)
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
 /*
- * The runnable shares of this group are calculated as such
- *
- *          max(cfs_rq->avg.runnable_load_avg, cfs_rq->runnable_weight)
- * shares * ------------------------------------------------------------
- *               max(cfs_rq->avg.load_avg, cfs_rq->load.weight)
- *
- * We do this to keep the shares in line with expected load on the cfs_rq.
- * Consider a cfs_rq that has several tasks wake up on this cfs_rq for the first
- * time, it's runnable_load_avg is not going to be representative of the actual
- * load this cfs_rq will now experience, which will bias us agaisnt this cfs_rq.
- * The weight on the cfs_rq is the immediate effect of having new tasks
- * enqueue'd onto it which should be used to calculate the new runnable shares.
- * At the same time we need the actual load_avg to be the lower bounds for the
- * calculation, to handle when our weight drops quickly from having entities
- * dequeued.
- */
-static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
-{
-	long load_avg = max(cfs_rq->avg.load_avg,
-			    scale_load_down(cfs_rq->load.weight));
-	long runnable = max(cfs_rq->avg.runnable_load_avg,
-			    scale_load_down(cfs_rq->runnable_weight));
-
-	runnable *= shares;
-	if (load_avg)
-		runnable /= load_avg;
-	return clamp_t(long, runnable, MIN_SHARES, shares);
-}
-
-/*
  * Recomputes the group entity based on the current state of its group
  * runqueue.
  */
@@ -2969,14 +2978,14 @@ static void update_cfs_group(struct sched_entity *se)
 		return;
 
 #ifndef CONFIG_SMP
-	shares = READ_ONCE(gcfs_rq->tg->shares);
+	runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
 
 	if (likely(se->load.weight == shares))
 		return;
 #else
 	shares = calc_group_shares(gcfs_rq);
-#endif
 	runnable = calc_group_runnable(gcfs_rq, shares);
+#endif
 
 	reweight_entity(cfs_rq_of(se), se, shares, runnable);
 }
@@ -3819,10 +3828,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	cfs_rq_util_change(cfs_rq);
 }
 
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP
  2017-08-21 20:03 [PATCH] sched/fair: move definitions to fix !CONFIG_SMP josef
@ 2017-08-21 20:06 ` Peter Zijlstra
  2017-08-21 20:44   ` Josef Bacik
  2017-08-21 20:20 ` Josef Bacik
  1 sibling, 1 reply; 4+ messages in thread
From: Peter Zijlstra @ 2017-08-21 20:06 UTC (permalink / raw)
  To: josef; +Cc: linux-kernel, kernel-team, Josef Bacik

On Mon, Aug 21, 2017 at 04:03:05PM -0400, josef@toxicpanda.com wrote:
> From: Josef Bacik <jbacik@fb.com>
> 
> The series of patches adding runnable_avg and subsequent supporting
> patches broke on !CONFIG_SMP.  Fix this by moving the definitions under
> the appropriate checks, and moving the !CONFIG_SMP definitions higher
> up.

Is this on top or can I fold this in somewhere? I was meaning to rework
the series to avoid this superfluous build borkage.

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP
  2017-08-21 20:03 [PATCH] sched/fair: move definitions to fix !CONFIG_SMP josef
  2017-08-21 20:06 ` Peter Zijlstra
@ 2017-08-21 20:20 ` Josef Bacik
  1 sibling, 0 replies; 4+ messages in thread
From: Josef Bacik @ 2017-08-21 20:20 UTC (permalink / raw)
  To: josef; +Cc: peterz, linux-kernel, kernel-team, Josef Bacik

On Mon, Aug 21, 2017 at 04:03:05PM -0400, josef@toxicpanda.com wrote:
> From: Josef Bacik <jbacik@fb.com>
> 
> The series of patches adding runnable_avg and subsequent supporting
> patches broke on !CONFIG_SMP.  Fix this by moving the definitions under
> the appropriate checks, and moving the !CONFIG_SMP definitions higher
> up.
> 
> Signed-off-by: Josef Bacik <jbacik@fb.com>

Sorry, ignore this, it's still screwed up.  I'll send a new series; there are
multiple broken things here.  Thanks,

Josef

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP
  2017-08-21 20:06 ` Peter Zijlstra
@ 2017-08-21 20:44   ` Josef Bacik
  0 siblings, 0 replies; 4+ messages in thread
From: Josef Bacik @ 2017-08-21 20:44 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: josef, linux-kernel, kernel-team, Josef Bacik

On Mon, Aug 21, 2017 at 10:06:53PM +0200, Peter Zijlstra wrote:
> On Mon, Aug 21, 2017 at 04:03:05PM -0400, josef@toxicpanda.com wrote:
> > From: Josef Bacik <jbacik@fb.com>
> > 
> > The series of patches adding runnable_avg and subsequent supporting
> > patches broke on !CONFIG_SMP.  Fix this by moving the definitions under
> > the appropriate checks, and moving the !CONFIG_SMP definitions higher
> > up.
> 
> Is this on top or can I fold this in somewhere? I was meaning to rework
> the series to avoid this superfluous build borkage.

There are actually a lot of individual breakages.  I can go through and make
individual patches that can be folded in where they were introduced, or I can
do them all at once.  There's breakage around !CONFIG_FAIR_GROUP_SCHED as well,
basically everything you added needs to be moved around some.  I'll do whatever
is most convenient for you.  Thanks,

Josef

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2017-08-21 20:44 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-08-21 20:03 [PATCH] sched/fair: move definitions to fix !CONFIG_SMP josef
2017-08-21 20:06 ` Peter Zijlstra
2017-08-21 20:44   ` Josef Bacik
2017-08-21 20:20 ` Josef Bacik

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox