From: Frederic Weisbecker <fweisbec@gmail.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Byungchul Park <byungchul.park@lge.com>,
	Chris Metcalf <cmetcalf@ezchip.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Luiz Capitulino <lcapitulino@redhat.com>,
	Christoph Lameter <cl@linux.com>,
	"Paul E . McKenney" <paulmck@linux.vnet.ibm.com>,
	Mike Galbraith <efault@gmx.de>, Rik van Riel <riel@redhat.com>
Subject: [RFC PATCH 3/4] sched: Move cpu load stats functions above fair queue callbacks
Date: Wed, 13 Jan 2016 17:01:30 +0100	[thread overview]
Message-ID: <1452700891-21807-4-git-send-email-fweisbec@gmail.com> (raw)
In-Reply-To: <1452700891-21807-1-git-send-email-fweisbec@gmail.com>

We are going to update the nohz full CPU load from the fair queue
update functions. Let's move the CPU load stats functions above
those callbacks so that they are visible from there.
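
For context, C requires a static function to be defined, or at least
forward-declared, before its first use in the translation unit, which
is why the definitions are moved up rather than made visible some
other way (the moved code itself carries a forward declaration of
set_next_buddy() for the same reason). A minimal standalone sketch
with hypothetical names, not the actual fair.c symbols, illustrating
the ordering constraint:

	/* Hypothetical example; it only illustrates the C ordering
	 * constraint that motivates this move, not fair.c itself.
	 */
	#include <stdio.h>

	/* This definition must appear before any caller below;
	 * otherwise the call is an implicit declaration and fails
	 * to build with -Werror=implicit-function-declaration.
	 */
	static void update_load_stats_example(int cpu)
	{
		printf("updating load stats for cpu %d\n", cpu);
	}

	static void enqueue_example(int cpu)
	{
		/* Visible here only because it is defined above. */
		update_load_stats_example(cpu);
	}

	int main(void)
	{
		enqueue_example(0);
		return 0;
	}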

Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
---
 kernel/sched/fair.c | 216 ++++++++++++++++++++++++++--------------------------
 1 file changed, 109 insertions(+), 107 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 161cee2..1e0cb6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4199,113 +4199,6 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
 
-/*
- * The enqueue_task method is called before nr_running is
- * increased. Here we update the fair scheduling stats and
- * then put the task into the rbtree:
- */
-static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-
-	for_each_sched_entity(se) {
-		if (se->on_rq)
-			break;
-		cfs_rq = cfs_rq_of(se);
-		enqueue_entity(cfs_rq, se, flags);
-
-		/*
-		 * end evaluation on encountering a throttled cfs_rq
-		 *
-		 * note: in the case of encountering a throttled cfs_rq we will
-		 * post the final h_nr_running increment below.
-		*/
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-		cfs_rq->h_nr_running++;
-
-		flags = ENQUEUE_WAKEUP;
-	}
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running++;
-
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-
-		update_load_avg(se, 1);
-		update_cfs_shares(cfs_rq);
-	}
-
-	if (!se)
-		add_nr_running(rq, 1);
-
-	hrtick_update(rq);
-}
-
-static void set_next_buddy(struct sched_entity *se);
-
-/*
- * The dequeue_task method is called before nr_running is
- * decreased. We remove the task from the rbtree and
- * update the fair scheduling stats:
- */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-	int task_sleep = flags & DEQUEUE_SLEEP;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		dequeue_entity(cfs_rq, se, flags);
-
-		/*
-		 * end evaluation on encountering a throttled cfs_rq
-		 *
-		 * note: in the case of encountering a throttled cfs_rq we will
-		 * post the final h_nr_running decrement below.
-		*/
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-		cfs_rq->h_nr_running--;
-
-		/* Don't dequeue parent if it has other entities besides us */
-		if (cfs_rq->load.weight) {
-			/*
-			 * Bias pick_next to pick a task from this cfs_rq, as
-			 * p is sleeping when it is within its sched_slice.
-			 */
-			if (task_sleep && parent_entity(se))
-				set_next_buddy(parent_entity(se));
-
-			/* avoid re-evaluating load for this entity */
-			se = parent_entity(se);
-			break;
-		}
-		flags |= DEQUEUE_SLEEP;
-	}
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running--;
-
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-
-		update_load_avg(se, 1);
-		update_cfs_shares(cfs_rq);
-	}
-
-	if (!se)
-		sub_nr_running(rq, 1);
-
-	hrtick_update(rq);
-}
-
 #ifdef CONFIG_SMP
 
 /*
@@ -4537,8 +4430,117 @@ void update_cpu_load_active(struct rq *this_rq)
 	this_rq->last_load_update_tick = jiffies;
 	__update_cpu_load(this_rq, load, 1, 1);
 }
+#endif /* CONFIG_SMP */
 
 /*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+ * then put the task into the rbtree:
+ */
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		if (se->on_rq)
+			break;
+		cfs_rq = cfs_rq_of(se);
+		enqueue_entity(cfs_rq, se, flags);
+
+		/*
+		 * end evaluation on encountering a throttled cfs_rq
+		 *
+		 * note: in the case of encountering a throttled cfs_rq we will
+		 * post the final h_nr_running increment below.
+		*/
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+		cfs_rq->h_nr_running++;
+
+		flags = ENQUEUE_WAKEUP;
+	}
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running++;
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
+		update_load_avg(se, 1);
+		update_cfs_shares(cfs_rq);
+	}
+
+	if (!se)
+		add_nr_running(rq, 1);
+
+	hrtick_update(rq);
+}
+
+static void set_next_buddy(struct sched_entity *se);
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	int task_sleep = flags & DEQUEUE_SLEEP;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		dequeue_entity(cfs_rq, se, flags);
+
+		/*
+		 * end evaluation on encountering a throttled cfs_rq
+		 *
+		 * note: in the case of encountering a throttled cfs_rq we will
+		 * post the final h_nr_running decrement below.
+		*/
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+		cfs_rq->h_nr_running--;
+
+		/* Don't dequeue parent if it has other entities besides us */
+		if (cfs_rq->load.weight) {
+			/*
+			 * Bias pick_next to pick a task from this cfs_rq, as
+			 * p is sleeping when it is within its sched_slice.
+			 */
+			if (task_sleep && parent_entity(se))
+				set_next_buddy(parent_entity(se));
+
+			/* avoid re-evaluating load for this entity */
+			se = parent_entity(se);
+			break;
+		}
+		flags |= DEQUEUE_SLEEP;
+	}
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running--;
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
+		update_load_avg(se, 1);
+		update_cfs_shares(cfs_rq);
+	}
+
+	if (!se)
+		sub_nr_running(rq, 1);
+
+	hrtick_update(rq);
+}
+
+#ifdef CONFIG_SMP
+/*
  * Return a low guess at the load of a migration-source cpu weighted
  * according to the scheduling class and "nice" value.
  *
-- 
2.6.4


Thread overview: 58+ messages
2016-01-13 16:01 [RFC PATCH 0/4] sched: Improve cpu load accounting with nohz Frederic Weisbecker
2016-01-13 16:01 ` [PATCH 1/4] sched: Don't account tickless CPU load on tick Frederic Weisbecker
2016-01-19 13:08   ` Peter Zijlstra
2016-01-19 16:22     ` Frederic Weisbecker
2016-01-19 18:56       ` Peter Zijlstra
2016-01-19 22:33         ` Frederic Weisbecker
2016-01-20  5:43           ` Byungchul Park
2016-01-20 10:26             ` Byungchul Park
2016-01-28 16:01               ` Frederic Weisbecker
2016-01-29  9:50                 ` Peter Zijlstra
2016-02-01 10:05                   ` Byungchul Park
2016-02-01 10:09                     ` [PATCH] sched: calculate sched_clock_cpu without tick handling during nohz Byungchul Park
2016-02-01 10:34                     ` [PATCH 1/4] sched: Don't account tickless CPU load on tick Peter Zijlstra
2016-02-01 23:51                       ` Byungchul Park
2016-02-02  0:50                       ` Byungchul Park
2016-02-01  6:33               ` Byungchul Park
2016-01-20  8:42     ` Thomas Gleixner
2016-01-20 17:36       ` Frederic Weisbecker
2016-01-22  8:40       ` Byungchul Park
2016-01-13 16:01 ` [PATCH 2/4] sched: Consolidate nohz CPU load update code Frederic Weisbecker
2016-01-14  2:30   ` Byungchul Park
2016-01-19 13:13     ` Peter Zijlstra
2016-01-20  0:51       ` Byungchul Park
2016-01-14  5:18   ` Byungchul Park
2016-01-19 13:13     ` Peter Zijlstra
2016-01-19 16:49     ` Frederic Weisbecker
2016-01-20  1:41       ` Byungchul Park
2016-02-29 11:14   ` [tip:sched/core] sched/fair: " tip-bot for Frederic Weisbecker
2016-01-13 16:01 ` Frederic Weisbecker [this message]
2016-01-13 16:01 ` [RFC PATCH 4/4] sched: Upload nohz full CPU load on task enqueue/dequeue Frederic Weisbecker
2016-01-19 13:17   ` Peter Zijlstra
2016-01-19 17:03     ` Frederic Weisbecker
2016-01-20  9:09       ` Peter Zijlstra
2016-01-20 14:54         ` Frederic Weisbecker
2016-01-20 15:11           ` Thomas Gleixner
2016-01-20 15:19             ` Christoph Lameter
2016-01-20 16:52               ` Frederic Weisbecker
2016-01-20 16:45             ` Frederic Weisbecker
2016-01-20 16:56           ` Peter Zijlstra
2016-01-20 17:21             ` Frederic Weisbecker
2016-01-20 18:25               ` Peter Zijlstra
2016-01-21 13:25                 ` Frederic Weisbecker
2016-01-20  9:03   ` Thomas Gleixner
2016-01-20 14:31     ` Frederic Weisbecker
2016-01-20 14:43       ` Thomas Gleixner
2016-01-20 16:40         ` Frederic Weisbecker
2016-01-20 16:42           ` Christoph Lameter
2016-01-20 16:47             ` Frederic Weisbecker
2016-01-14 21:19 ` [RFC PATCH 0/4] sched: Improve cpu load accounting with nohz Dietmar Eggemann
2016-01-14 21:27   ` Peter Zijlstra
2016-01-14 22:23     ` Dietmar Eggemann
2016-01-15  7:07       ` Byungchul Park
2016-01-15 16:56         ` Dietmar Eggemann
2016-01-18  0:23           ` Byungchul Park
2016-01-19 13:04           ` Peter Zijlstra
2016-01-20  0:48             ` Byungchul Park
2016-01-20 13:04               ` Dietmar Eggemann
2016-02-29 11:14         ` [tip:sched/core] sched/fair: Avoid using decay_load_missed() with a negative value tip-bot for Byungchul Park
