From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1753057AbcAMQCO (ORCPT );
	Wed, 13 Jan 2016 11:02:14 -0500
Received: from mail-wm0-f67.google.com ([74.125.82.67]:35099 "EHLO
	mail-wm0-f67.google.com" rhost-flags-OK-OK-OK-OK)
	by vger.kernel.org with ESMTP id S1752520AbcAMQBn (ORCPT );
	Wed, 13 Jan 2016 11:01:43 -0500
From: Frederic Weisbecker
To: Peter Zijlstra
Cc: LKML, Frederic Weisbecker, Byungchul Park, Chris Metcalf,
	Thomas Gleixner, Luiz Capitulino, Christoph Lameter,
	"Paul E . McKenney", Mike Galbraith, Rik van Riel
Subject: [RFC PATCH 3/4] sched: Move cpu load stats functions above fair queue callbacks
Date: Wed, 13 Jan 2016 17:01:30 +0100
Message-Id: <1452700891-21807-4-git-send-email-fweisbec@gmail.com>
X-Mailer: git-send-email 2.6.4
In-Reply-To: <1452700891-21807-1-git-send-email-fweisbec@gmail.com>
References: <1452700891-21807-1-git-send-email-fweisbec@gmail.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

We are going to update the nohz full CPU load from the fair queue update
functions. Let's make these cpu load functions visible to the fair queue
callbacks.

Cc: Byungchul Park
Cc: Mike Galbraith
Cc: Chris Metcalf
Cc: Christoph Lameter
Cc: Luiz Capitulino
Cc: Paul E . McKenney
Cc: Rik van Riel
Cc: Thomas Gleixner
Signed-off-by: Frederic Weisbecker
---
 kernel/sched/fair.c | 216 ++++++++++++++++++++++++++--------------------------
 1 file changed, 109 insertions(+), 107 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 161cee2..1e0cb6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4199,113 +4199,6 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
 
-/*
- * The enqueue_task method is called before nr_running is
- * increased. Here we update the fair scheduling stats and
- * then put the task into the rbtree:
- */
-static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-
-	for_each_sched_entity(se) {
-		if (se->on_rq)
-			break;
-		cfs_rq = cfs_rq_of(se);
-		enqueue_entity(cfs_rq, se, flags);
-
-		/*
-		 * end evaluation on encountering a throttled cfs_rq
-		 *
-		 * note: in the case of encountering a throttled cfs_rq we will
-		 * post the final h_nr_running increment below.
-		 */
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-		cfs_rq->h_nr_running++;
-
-		flags = ENQUEUE_WAKEUP;
-	}
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running++;
-
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-
-		update_load_avg(se, 1);
-		update_cfs_shares(cfs_rq);
-	}
-
-	if (!se)
-		add_nr_running(rq, 1);
-
-	hrtick_update(rq);
-}
-
-static void set_next_buddy(struct sched_entity *se);
-
-/*
- * The dequeue_task method is called before nr_running is
- * decreased. We remove the task from the rbtree and
- * update the fair scheduling stats:
- */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-	int task_sleep = flags & DEQUEUE_SLEEP;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		dequeue_entity(cfs_rq, se, flags);
-
-		/*
-		 * end evaluation on encountering a throttled cfs_rq
-		 *
-		 * note: in the case of encountering a throttled cfs_rq we will
-		 * post the final h_nr_running decrement below.
-		 */
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-		cfs_rq->h_nr_running--;
-
-		/* Don't dequeue parent if it has other entities besides us */
-		if (cfs_rq->load.weight) {
-			/*
-			 * Bias pick_next to pick a task from this cfs_rq, as
-			 * p is sleeping when it is within its sched_slice.
-			 */
-			if (task_sleep && parent_entity(se))
-				set_next_buddy(parent_entity(se));
-
-			/* avoid re-evaluating load for this entity */
-			se = parent_entity(se);
-			break;
-		}
-		flags |= DEQUEUE_SLEEP;
-	}
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running--;
-
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-
-		update_load_avg(se, 1);
-		update_cfs_shares(cfs_rq);
-	}
-
-	if (!se)
-		sub_nr_running(rq, 1);
-
-	hrtick_update(rq);
-}
-
 #ifdef CONFIG_SMP
 
 /*
@@ -4537,8 +4430,117 @@ void update_cpu_load_active(struct rq *this_rq)
 	this_rq->last_load_update_tick = jiffies;
 	__update_cpu_load(this_rq, load, 1, 1);
 }
+#endif /* CONFIG_SMP */
 
 /*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+ * then put the task into the rbtree:
+ */
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		if (se->on_rq)
+			break;
+		cfs_rq = cfs_rq_of(se);
+		enqueue_entity(cfs_rq, se, flags);
+
+		/*
+		 * end evaluation on encountering a throttled cfs_rq
+		 *
+		 * note: in the case of encountering a throttled cfs_rq we will
+		 * post the final h_nr_running increment below.
+		 */
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+		cfs_rq->h_nr_running++;
+
+		flags = ENQUEUE_WAKEUP;
+	}
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running++;
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
+		update_load_avg(se, 1);
+		update_cfs_shares(cfs_rq);
+	}
+
+	if (!se)
+		add_nr_running(rq, 1);
+
+	hrtick_update(rq);
+}
+
+static void set_next_buddy(struct sched_entity *se);
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	int task_sleep = flags & DEQUEUE_SLEEP;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		dequeue_entity(cfs_rq, se, flags);
+
+		/*
+		 * end evaluation on encountering a throttled cfs_rq
+		 *
+		 * note: in the case of encountering a throttled cfs_rq we will
+		 * post the final h_nr_running decrement below.
+		 */
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+		cfs_rq->h_nr_running--;
+
+		/* Don't dequeue parent if it has other entities besides us */
+		if (cfs_rq->load.weight) {
+			/*
+			 * Bias pick_next to pick a task from this cfs_rq, as
+			 * p is sleeping when it is within its sched_slice.
+			 */
+			if (task_sleep && parent_entity(se))
+				set_next_buddy(parent_entity(se));
+
+			/* avoid re-evaluating load for this entity */
+			se = parent_entity(se);
+			break;
+		}
+		flags |= DEQUEUE_SLEEP;
+	}
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running--;
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
+		update_load_avg(se, 1);
+		update_cfs_shares(cfs_rq);
+	}
+
+	if (!se)
+		sub_nr_running(rq, 1);
+
+	hrtick_update(rq);
+}
+
+#ifdef CONFIG_SMP
+/*
  * Return a low guess at the load of a migration-source cpu weighted
  * according to the scheduling class and "nice" value.
  *
-- 
2.6.4
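
The move matters because of a basic C rule: within a translation unit, a
static function must be defined (or at least forward-declared) above any
call site. By placing the cpu load stats functions above
enqueue_task_fair()/dequeue_task_fair(), the next patch in the series can
call them directly from those callbacks without adding forward
declarations. Below is a minimal standalone sketch of that ordering
constraint; the names are hypothetical and not taken from the kernel
sources.

/* order.c: toy example (hypothetical names, not kernel code) of why a
 * definition must appear above its callers when no forward declaration
 * exists.
 */
#include <stdio.h>

/* Defined first, like the cpu load stats functions after this patch. */
static void update_cpu_load_sketch(void)
{
	printf("cpu load updated\n");
}

/* Stand-in for enqueue_task_fair(): it may call the function above only
 * because that definition already appeared earlier in the file. With the
 * pre-patch ordering, this call would draw an "implicit declaration of
 * function" diagnostic from the compiler.
 */
static void enqueue_task_sketch(void)
{
	update_cpu_load_sketch();
}

int main(void)
{
	enqueue_task_sketch();
	return 0;
}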