From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751712AbdJ1KBO (ORCPT ); Sat, 28 Oct 2017 06:01:14 -0400 Received: from mail-qt0-f195.google.com ([209.85.216.195]:56658 "EHLO mail-qt0-f195.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751001AbdJ1KAF (ORCPT ); Sat, 28 Oct 2017 06:00:05 -0400 X-Google-Smtp-Source: ABhQp+TURpYO1cncqCw91JmH+TaS7ejSppAptlK/QOlPEfeYVvSAEsDVB4AnJtftyrmnBzAG3pPmoQ== From: Joel Fernandes To: linux-kernel@vger.kernel.org Cc: Joel Fernandes , "Rafael J . Wysocki" , Viresh Kumar , Ingo Molnar , Peter Zijlstra , "Srinivas Pandruvada" , "Len Brown" , "Juri Lelli" , "Patrick Bellasi" , "Steve Muckle" , "Brendan Jackman" , "Chris Redpath" , "Atish Patra" , "Dietmar Eggemann" , "Vincent Guittot" , "Morten Rasmussen" , "Frederic Weisbecker" , "Thomas Gleixner" , "EAS Dev" , "Android Kernel" Subject: [PATCH RFC 1/5] Revert "sched/fair: Drop always true parameter of update_cfs_rq_load_avg()" Date: Sat, 28 Oct 2017 02:59:37 -0700 Message-Id: <20171028095941.4773-2-joelaf@google.com> X-Mailer: git-send-email 2.15.0.rc2.357.g7e34df9404-goog In-Reply-To: <20171028095941.4773-1-joelaf@google.com> References: <20171028095941.4773-1-joelaf@google.com> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52. This revert is needed so that a later patch in this series can control whether cpufreq is notified of a frequency update when the utilization is updated. Cc: Rafael J. 
Wysocki Cc: Viresh Kumar Cc: Ingo Molnar Cc: Peter Zijlstra Signed-off-by: Joel Fernandes --- kernel/sched/fair.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 56f343b8e749..f97693fe8b6e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -784,7 +784,7 @@ void post_init_entity_util_avg(struct sched_entity *se) /* * For !fair tasks do: * - update_cfs_rq_load_avg(now, cfs_rq); + update_cfs_rq_load_avg(now, cfs_rq, false); attach_entity_load_avg(cfs_rq, se); switched_from_fair(rq, p); * @@ -3596,6 +3596,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum * update_cfs_rq_load_avg - update the cfs_rq's load/util averages * @now: current time, as per cfs_rq_clock_task() * @cfs_rq: cfs_rq to update + * @update_freq: should we call cfs_rq_util_change() or will the call do so * * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) * avg. The immediate corollary is that all (fair) tasks must be attached, see @@ -3609,7 +3610,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum * call update_tg_load_avg() when this function returns true. 
*/ static inline int -update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) +update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) { unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0; struct sched_avg *sa = &cfs_rq->avg; @@ -3646,7 +3647,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif - if (decayed) + if (update_freq && decayed) cfs_rq_util_change(cfs_rq); return decayed; @@ -3740,7 +3741,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) __update_load_avg_se(now, cpu, cfs_rq, se); - decayed = update_cfs_rq_load_avg(now, cfs_rq); + decayed = update_cfs_rq_load_avg(now, cfs_rq, true); decayed |= propagate_entity_load_avg(se); if (!se->avg.last_update_time && (flags & DO_ATTACH)) { @@ -3830,7 +3831,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf); #else /* CONFIG_SMP */ static inline int -update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) +update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) { return 0; } @@ -7318,7 +7319,7 @@ static void update_blocked_averages(int cpu) if (throttled_hierarchy(cfs_rq)) continue; - if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) + if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true)) update_tg_load_avg(cfs_rq, 0); /* Propagate pending load changes to the parent, if any: */ @@ -7391,7 +7392,7 @@ static inline void update_blocked_averages(int cpu) rq_lock_irqsave(rq, &rf); update_rq_clock(rq); - update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); + update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true); rq_unlock_irqrestore(rq, &rf); } -- 2.15.0.rc2.357.g7e34df9404-goog