* [PATCH 01/10 v2] sched/eevdf: More PELT vs DELAYED_DEQUEUE
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 02/10 v2] sched/fair: Rename h_nr_running into h_nr_queued Vincent Guittot
` (9 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
From: Peter Zijlstra <peterz@infradead.org>
Vincent and Dietmar noted that while
commit fc1892becd56 ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE") fixes
the entity runnable stats, it does not adjust the cfs_rq runnable stats,
which are based on h_nr_running.
Track h_nr_delayed so that we can discount those delayed tasks and
adjust the signal.
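For illustration, a condensed sketch of the accounting (helper and field
names follow the hunks below; this is only a summary, not the complete
change): the delayed state is propagated up the entity hierarchy and then
subtracted from the runnable contribution fed to PELT:
	/* Mark an entity as delayed and account it up the hierarchy */
	static void set_delayed(struct sched_entity *se)
	{
		se->sched_delayed = 1;
		for_each_sched_entity(se) {
			struct cfs_rq *cfs_rq = cfs_rq_of(se);

			cfs_rq->h_nr_delayed++;
			if (cfs_rq_throttled(cfs_rq))
				break;
		}
	}

	/* PELT then discounts the delayed tasks from the runnable signal */
	___update_load_sum(now, &cfs_rq->avg,
			   scale_load_down(cfs_rq->load.weight),
			   cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
			   cfs_rq->curr != NULL);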
Fixes: fc1892becd56 ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE")
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Closes: https://lore.kernel.org/lkml/a9a45193-d0c6-4ba2-a822-464ad30b550e@arm.com/
Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Closes: https://lore.kernel.org/lkml/CAKfTPtCNUvWE_GX5LyvTF-WdxUT=ZgvZZv-4t=eWntg5uOFqiQ@mail.gmail.com/
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Fixes checkpatch warnings ]
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20240906104525.GG4928@noisy.programming.kicks-ass.net
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
kernel/sched/debug.c | 1 +
kernel/sched/fair.c | 51 +++++++++++++++++++++++++++++++++++++++-----
kernel/sched/pelt.c | 2 +-
kernel/sched/sched.h | 8 +++++--
4 files changed, 54 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a48b2a701ec2..a1be00a988bf 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -845,6 +845,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
cfs_rq->idle_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3356315d7e64..f4ed2ed070e4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5465,9 +5465,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+ se->sched_delayed = 1;
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ cfs_rq->h_nr_delayed++;
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+}
+
+static void clear_delayed(struct sched_entity *se)
{
se->sched_delayed = 0;
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ cfs_rq->h_nr_delayed--;
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+ clear_delayed(se);
if (sched_feat(DELAY_ZERO) && se->vlag > 0)
se->vlag = 0;
}
@@ -5497,7 +5521,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (cfs_rq->next == se)
cfs_rq->next = NULL;
update_load_avg(cfs_rq, se, 0);
- se->sched_delayed = 1;
+ set_delayed(se);
return false;
}
}
@@ -5910,7 +5934,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long task_delta, idle_task_delta, dequeue = 1;
+ long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
long rq_h_nr_running = rq->cfs.h_nr_running;
raw_spin_lock(&cfs_b->lock);
@@ -5943,6 +5967,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
+ delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
int flags;
@@ -5966,6 +5991,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_running -= task_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ qcfs_rq->h_nr_delayed -= delayed_delta;
if (qcfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
@@ -5988,6 +6014,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_running -= task_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ qcfs_rq->h_nr_delayed -= delayed_delta;
}
/* At this point se is NULL and we are at root level*/
@@ -6013,7 +6040,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long task_delta, idle_task_delta;
+ long task_delta, idle_task_delta, delayed_delta;
long rq_h_nr_running = rq->cfs.h_nr_running;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6049,6 +6076,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
+ delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
@@ -6066,6 +6094,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_running += task_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
+ qcfs_rq->h_nr_delayed += delayed_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
@@ -6083,6 +6112,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_running += task_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
+ qcfs_rq->h_nr_delayed += delayed_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
@@ -6936,7 +6966,7 @@ requeue_delayed_entity(struct sched_entity *se)
}
update_load_avg(cfs_rq, se, 0);
- se->sched_delayed = 0;
+ clear_delayed(se);
}
/*
@@ -6950,6 +6980,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p);
+ int h_nr_delayed = 0;
int task_new = !(flags & ENQUEUE_WAKEUP);
int rq_h_nr_running = rq->cfs.h_nr_running;
u64 slice = 0;
@@ -6976,6 +7007,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (p->in_iowait)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
+ if (task_new)
+ h_nr_delayed = !!se->sched_delayed;
+
for_each_sched_entity(se) {
if (se->on_rq) {
if (se->sched_delayed)
@@ -6998,6 +7032,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ cfs_rq->h_nr_delayed += h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
@@ -7021,6 +7056,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ cfs_rq->h_nr_delayed += h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
@@ -7083,6 +7119,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
struct task_struct *p = NULL;
int idle_h_nr_running = 0;
int h_nr_running = 0;
+ int h_nr_delayed = 0;
struct cfs_rq *cfs_rq;
u64 slice = 0;
@@ -7090,6 +7127,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
p = task_of(se);
h_nr_running = 1;
idle_h_nr_running = task_has_idle_policy(p);
+ if (!task_sleep && !task_delayed)
+ h_nr_delayed = !!se->sched_delayed;
} else {
cfs_rq = group_cfs_rq(se);
slice = cfs_rq_min_slice(cfs_rq);
@@ -7107,6 +7146,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
cfs_rq->h_nr_running -= h_nr_running;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ cfs_rq->h_nr_delayed -= h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = h_nr_running;
@@ -7145,6 +7185,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
cfs_rq->h_nr_running -= h_nr_running;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ cfs_rq->h_nr_delayed -= h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = h_nr_running;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fc07382361a8..fee75cc2c47b 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
- cfs_rq->h_nr_running,
+ cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
cfs_rq->curr != NULL)) {
___update_load_avg(&cfs_rq->avg, 1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 090dd4b38fa2..77a05ca0cce7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -649,6 +649,7 @@ struct cfs_rq {
unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
unsigned int idle_nr_running; /* SCHED_IDLE */
unsigned int idle_h_nr_running; /* SCHED_IDLE */
+ unsigned int h_nr_delayed;
s64 avg_vruntime;
u64 avg_load;
@@ -897,8 +898,11 @@ struct dl_rq {
static inline void se_update_runnable(struct sched_entity *se)
{
- if (!entity_is_task(se))
- se->runnable_weight = se->my_q->h_nr_running;
+ if (!entity_is_task(se)) {
+ struct cfs_rq *cfs_rq = se->my_q;
+
+ se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+ }
}
static inline long se_runnable(struct sched_entity *se)
--
2.43.0
* [PATCH 02/10 v2] sched/fair: Rename h_nr_running into h_nr_queued
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
2024-11-29 16:17 ` [PATCH 01/10 v2] sched/eevdf: More PELT vs DELAYED_DEQUEUE Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 03/10 v2] sched/fair: Add new cfs_rq.h_nr_runnable Vincent Guittot
` (8 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
With the delayed dequeue feature, a sleeping sched_entity remains queued
in the rq until its lag has elapsed, but it can't run.
Rename h_nr_running to h_nr_queued to reflect this new behavior.
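For illustration, a rough before/after sketch of the field (the rename is
mechanical and does not change the counter's semantics):
	/* Before: despite its name, this counts every hierarchically
	 * enqueued task, including delayed-dequeue ones. */
	unsigned int h_nr_running;	/* SCHED_{NORMAL,BATCH,IDLE} */

	/* After: same counter, renamed to say what it actually counts. */
	unsigned int h_nr_queued;	/* SCHED_{NORMAL,BATCH,IDLE} */

	/* Typical user, e.g. stopping the fair server in dequeue_entities()
	 * once no fair task remains queued on the rq (rq_h_nr_queued is the
	 * local snapshot taken at function entry): */
	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
		dl_server_stop(&rq->fair_server);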
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 6 +--
kernel/sched/fair.c | 88 ++++++++++++++++++++++----------------------
kernel/sched/pelt.c | 4 +-
kernel/sched/sched.h | 4 +-
5 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c47d70f4204..ba5e314eb99b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6015,7 +6015,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* opportunity to pull in more work from other CPUs.
*/
if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
- rq->nr_running == rq->cfs.h_nr_running)) {
+ rq->nr_running == rq->cfs.h_nr_queued)) {
p = pick_next_task_fair(rq, prev, rf);
if (unlikely(p == RETRY_TASK))
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a1be00a988bf..08d6c2b7caa3 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -379,7 +379,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
return -EINVAL;
}
- if (rq->cfs.h_nr_running) {
+ if (rq->cfs.h_nr_queued) {
update_rq_clock(rq);
dl_server_stop(&rq->fair_server);
}
@@ -392,7 +392,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
cpu_of(rq));
- if (rq->cfs.h_nr_running)
+ if (rq->cfs.h_nr_queued)
dl_server_start(&rq->fair_server);
}
@@ -844,7 +844,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
- SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
cfs_rq->idle_nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f4ed2ed070e4..c3cc9f784afe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2128,7 +2128,7 @@ static void update_numa_stats(struct task_numa_env *env,
ns->load += cpu_load(rq);
ns->runnable += cpu_runnable(rq);
ns->util += cpu_util_cfs(cpu);
- ns->nr_running += rq->cfs.h_nr_running;
+ ns->nr_running += rq->cfs.h_nr_queued;
ns->compute_capacity += capacity_of(cpu);
if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
@@ -5396,7 +5396,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* When enqueuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
- * h_nr_running of its group cfs_rq.
+ * h_nr_queued of its group cfs_rq.
* - For group_entity, update its weight to reflect the new share of
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
@@ -5534,7 +5534,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
- * h_nr_running of its group cfs_rq.
+ * h_nr_queued of its group cfs_rq.
* - Subtract its previous weight from cfs_rq->load.weight.
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
@@ -5934,8 +5934,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
- long rq_h_nr_running = rq->cfs.h_nr_running;
+ long queued_delta, idle_task_delta, delayed_delta, dequeue = 1;
+ long rq_h_nr_queued = rq->cfs.h_nr_queued;
raw_spin_lock(&cfs_b->lock);
/* This will start the period timer if necessary */
@@ -5965,7 +5965,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
rcu_read_unlock();
- task_delta = cfs_rq->h_nr_running;
+ queued_delta = cfs_rq->h_nr_queued;
idle_task_delta = cfs_rq->idle_h_nr_running;
delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
@@ -5987,9 +5987,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
dequeue_entity(qcfs_rq, se, flags);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
+ idle_task_delta = cfs_rq->h_nr_queued;
- qcfs_rq->h_nr_running -= task_delta;
+ qcfs_rq->h_nr_queued -= queued_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
qcfs_rq->h_nr_delayed -= delayed_delta;
@@ -6010,18 +6010,18 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
se_update_runnable(se);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
+ idle_task_delta = cfs_rq->h_nr_queued;
- qcfs_rq->h_nr_running -= task_delta;
+ qcfs_rq->h_nr_queued -= queued_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
qcfs_rq->h_nr_delayed -= delayed_delta;
}
/* At this point se is NULL and we are at root level*/
- sub_nr_running(rq, task_delta);
+ sub_nr_running(rq, queued_delta);
/* Stop the fair server if throttling resulted in no runnable tasks */
- if (rq_h_nr_running && !rq->cfs.h_nr_running)
+ if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
dl_server_stop(&rq->fair_server);
done:
/*
@@ -6040,8 +6040,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long task_delta, idle_task_delta, delayed_delta;
- long rq_h_nr_running = rq->cfs.h_nr_running;
+ long queued_delta, idle_task_delta, delayed_delta;
+ long rq_h_nr_queued = rq->cfs.h_nr_queued;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6074,7 +6074,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
goto unthrottle_throttle;
}
- task_delta = cfs_rq->h_nr_running;
+ queued_delta = cfs_rq->h_nr_queued;
idle_task_delta = cfs_rq->idle_h_nr_running;
delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
@@ -6090,9 +6090,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
+ idle_task_delta = cfs_rq->h_nr_queued;
- qcfs_rq->h_nr_running += task_delta;
+ qcfs_rq->h_nr_queued += queued_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
qcfs_rq->h_nr_delayed += delayed_delta;
@@ -6108,9 +6108,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
se_update_runnable(se);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
+ idle_task_delta = cfs_rq->h_nr_queued;
- qcfs_rq->h_nr_running += task_delta;
+ qcfs_rq->h_nr_queued += queued_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
qcfs_rq->h_nr_delayed += delayed_delta;
@@ -6120,11 +6120,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
}
/* Start the fair server if un-throttling resulted in new runnable tasks */
- if (!rq_h_nr_running && rq->cfs.h_nr_running)
+ if (!rq_h_nr_queued && rq->cfs.h_nr_queued)
dl_server_start(&rq->fair_server);
/* At this point se is NULL and we are at root level*/
- add_nr_running(rq, task_delta);
+ add_nr_running(rq, queued_delta);
unthrottle_throttle:
assert_list_leaf_cfs_rq(rq);
@@ -6839,7 +6839,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
SCHED_WARN_ON(task_rq(p) != rq);
- if (rq->cfs.h_nr_running > 1) {
+ if (rq->cfs.h_nr_queued > 1) {
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
u64 slice = se->slice;
s64 delta = slice - ran;
@@ -6982,7 +6982,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
int idle_h_nr_running = task_has_idle_policy(p);
int h_nr_delayed = 0;
int task_new = !(flags & ENQUEUE_WAKEUP);
- int rq_h_nr_running = rq->cfs.h_nr_running;
+ int rq_h_nr_queued = rq->cfs.h_nr_queued;
u64 slice = 0;
/*
@@ -7030,7 +7030,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
enqueue_entity(cfs_rq, se, flags);
slice = cfs_rq_min_slice(cfs_rq);
- cfs_rq->h_nr_running++;
+ cfs_rq->h_nr_queued++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7054,7 +7054,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se->slice = slice;
slice = cfs_rq_min_slice(cfs_rq);
- cfs_rq->h_nr_running++;
+ cfs_rq->h_nr_queued++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7066,7 +7066,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
goto enqueue_throttle;
}
- if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+ if (!rq_h_nr_queued && rq->cfs.h_nr_queued) {
/* Account for idle runtime */
if (!rq->nr_running)
dl_server_update_idle_time(rq, rq->curr);
@@ -7113,19 +7113,19 @@ static void set_next_buddy(struct sched_entity *se);
static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
{
bool was_sched_idle = sched_idle_rq(rq);
- int rq_h_nr_running = rq->cfs.h_nr_running;
+ int rq_h_nr_queued = rq->cfs.h_nr_queued;
bool task_sleep = flags & DEQUEUE_SLEEP;
bool task_delayed = flags & DEQUEUE_DELAYED;
struct task_struct *p = NULL;
int idle_h_nr_running = 0;
- int h_nr_running = 0;
+ int h_nr_queued = 0;
int h_nr_delayed = 0;
struct cfs_rq *cfs_rq;
u64 slice = 0;
if (entity_is_task(se)) {
p = task_of(se);
- h_nr_running = 1;
+ h_nr_queued = 1;
idle_h_nr_running = task_has_idle_policy(p);
if (!task_sleep && !task_delayed)
h_nr_delayed = !!se->sched_delayed;
@@ -7144,12 +7144,12 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
break;
}
- cfs_rq->h_nr_running -= h_nr_running;
+ cfs_rq->h_nr_queued -= h_nr_queued;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
cfs_rq->h_nr_delayed -= h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = h_nr_running;
+ idle_h_nr_running = h_nr_queued;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
@@ -7183,21 +7183,21 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
se->slice = slice;
slice = cfs_rq_min_slice(cfs_rq);
- cfs_rq->h_nr_running -= h_nr_running;
+ cfs_rq->h_nr_queued -= h_nr_queued;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
cfs_rq->h_nr_delayed -= h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = h_nr_running;
+ idle_h_nr_running = h_nr_queued;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
return 0;
}
- sub_nr_running(rq, h_nr_running);
+ sub_nr_running(rq, h_nr_queued);
- if (rq_h_nr_running && !rq->cfs.h_nr_running)
+ if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
dl_server_stop(&rq->fair_server);
/* balance early to pull high priority tasks */
@@ -10319,7 +10319,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
* When there is more than 1 task, the group_overloaded case already
* takes care of cpu with reduced capacity
*/
- if (rq->cfs.h_nr_running != 1)
+ if (rq->cfs.h_nr_queued != 1)
return false;
return check_cpu_capacity(rq, sd);
@@ -10354,7 +10354,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_load += load;
sgs->group_util += cpu_util_cfs(i);
sgs->group_runnable += cpu_runnable(rq);
- sgs->sum_h_nr_running += rq->cfs.h_nr_running;
+ sgs->sum_h_nr_running += rq->cfs.h_nr_queued;
nr_running = rq->nr_running;
sgs->sum_nr_running += nr_running;
@@ -10669,7 +10669,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
sgs->group_util += cpu_util_without(i, p);
sgs->group_runnable += cpu_runnable_without(rq, p);
local = task_running_on_cpu(i, p);
- sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
+ sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;
nr_running = rq->nr_running - local;
sgs->sum_nr_running += nr_running;
@@ -11451,7 +11451,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
if (rt > env->fbq_type)
continue;
- nr_running = rq->cfs.h_nr_running;
+ nr_running = rq->cfs.h_nr_queued;
if (!nr_running)
continue;
@@ -11610,7 +11610,7 @@ static int need_active_balance(struct lb_env *env)
* available on dst_cpu.
*/
if (env->idle &&
- (env->src_rq->cfs.h_nr_running == 1)) {
+ (env->src_rq->cfs.h_nr_queued == 1)) {
if ((check_cpu_capacity(env->src_rq, sd)) &&
(capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
return 1;
@@ -12353,7 +12353,7 @@ static void nohz_balancer_kick(struct rq *rq)
* If there's a runnable CFS task and the current CPU has reduced
* capacity, kick the ILB to see if there's a better CPU to run on:
*/
- if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
+ if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto unlock;
}
@@ -12851,11 +12851,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
* have been enqueued in the meantime. Since we're not going idle,
* pretend we pulled a task.
*/
- if (this_rq->cfs.h_nr_running && !pulled_task)
+ if (this_rq->cfs.h_nr_queued && !pulled_task)
pulled_task = 1;
/* Is there a task of a high priority class? */
- if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+ if (this_rq->nr_running != this_rq->cfs.h_nr_queued)
pulled_task = -1;
out:
@@ -13542,7 +13542,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
parent_cfs_rq->idle_nr_running--;
}
- idle_task_delta = grp_cfs_rq->h_nr_running -
+ idle_task_delta = grp_cfs_rq->h_nr_queued -
grp_cfs_rq->idle_h_nr_running;
if (!cfs_rq_is_idle(grp_cfs_rq))
idle_task_delta *= -1;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fee75cc2c47b..2bad0b508dfc 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
*
* group: [ see update_cfs_group() ]
* se_weight() = tg->weight * grq->load_avg / tg->load_avg
- * se_runnable() = grq->h_nr_running
+ * se_runnable() = grq->h_nr_queued
*
* runnable_sum = se_runnable() * runnable = grq->runnable_sum
* runnable_avg = runnable_sum
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
- cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
+ cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed,
cfs_rq->curr != NULL)) {
___update_load_avg(&cfs_rq->avg, 1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 77a05ca0cce7..4677e5412c40 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -646,7 +646,7 @@ struct balance_callback {
struct cfs_rq {
struct load_weight load;
unsigned int nr_running;
- unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */
unsigned int idle_nr_running; /* SCHED_IDLE */
unsigned int idle_h_nr_running; /* SCHED_IDLE */
unsigned int h_nr_delayed;
@@ -901,7 +901,7 @@ static inline void se_update_runnable(struct sched_entity *se)
if (!entity_is_task(se)) {
struct cfs_rq *cfs_rq = se->my_q;
- se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+ se->runnable_weight = cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed;
}
}
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* [PATCH 03/10 v2] sched/fair: Add new cfs_rq.h_nr_runnable
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
2024-11-29 16:17 ` [PATCH 01/10 v2] sched/eevdf: More PELT vs DELAYED_DEQUEUE Vincent Guittot
2024-11-29 16:17 ` [PATCH 02/10 v2] sched/fair: Rename h_nr_running into h_nr_queued Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-12-02 9:54 ` Peter Zijlstra
2024-11-29 16:17 ` [PATCH 04/10 v2] sched/fair: Remove unused cfs_rq.h_nr_delayed Vincent Guittot
` (7 subsequent siblings)
10 siblings, 1 reply; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
With the delayed dequeue feature, a sleeping sched_entity remains queued
in the rq until its lag has elapsed. As a result, it also stays visible
in the statistics that are used to balance the system, in particular in
the field cfs.h_nr_queued when the sched_entity is associated with a task.
Create a new h_nr_runnable counter that tracks the tasks that are queued
and actually runnable, and use it when balancing load on the system.
h_nr_runnable will be used in several places to make decisions on load
balancing:
- PELT runnable_avg
- deciding if a group is overloaded or has spare capacity
- numa stats
- reduced capacity management
- load balance
- nohz kick
Note that rq->nr_running still counts the delayed-dequeue tasks, as
delayed dequeue is a fair-class feature that is meaningless at the core
level.
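For illustration, a condensed sketch of the new accounting (taken from the
hunks below): h_nr_runnable is decremented when an entity becomes delayed
and incremented again when it is requeued, so the balancer paths can read
it directly instead of correcting h_nr_queued:
	static void set_delayed(struct sched_entity *se)
	{
		se->sched_delayed = 1;
		for_each_sched_entity(se) {
			struct cfs_rq *cfs_rq = cfs_rq_of(se);

			cfs_rq->h_nr_runnable--;
			cfs_rq->h_nr_delayed++;
			if (cfs_rq_throttled(cfs_rq))
				break;
		}
	}

	/* e.g. in sched_balance_find_src_rq(): */
	nr_running = rq->cfs.h_nr_runnable;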
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/debug.c | 1 +
kernel/sched/fair.c | 45 ++++++++++++++++++++++++++++++--------------
kernel/sched/pelt.c | 4 ++--
kernel/sched/sched.h | 10 ++++------
4 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 08d6c2b7caa3..fd711cc4d44c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -844,6 +844,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c3cc9f784afe..d5736bde3682 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2128,7 +2128,7 @@ static void update_numa_stats(struct task_numa_env *env,
ns->load += cpu_load(rq);
ns->runnable += cpu_runnable(rq);
ns->util += cpu_util_cfs(cpu);
- ns->nr_running += rq->cfs.h_nr_queued;
+ ns->nr_running += rq->cfs.h_nr_runnable;
ns->compute_capacity += capacity_of(cpu);
if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
@@ -5396,7 +5396,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* When enqueuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
- * h_nr_queued of its group cfs_rq.
+ * h_nr_runnable of its group cfs_rq.
* - For group_entity, update its weight to reflect the new share of
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
@@ -5471,18 +5471,21 @@ static void set_delayed(struct sched_entity *se)
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ cfs_rq->h_nr_runnable--;
cfs_rq->h_nr_delayed++;
if (cfs_rq_throttled(cfs_rq))
break;
}
}
-static void clear_delayed(struct sched_entity *se)
+static void clear_delayed(struct sched_entity *se, bool running)
{
se->sched_delayed = 0;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ if (running)
+ cfs_rq->h_nr_runnable++;
cfs_rq->h_nr_delayed--;
if (cfs_rq_throttled(cfs_rq))
break;
@@ -5491,7 +5494,7 @@ static void clear_delayed(struct sched_entity *se)
static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
{
- clear_delayed(se);
+ clear_delayed(se, false);
if (sched_feat(DELAY_ZERO) && se->vlag > 0)
se->vlag = 0;
}
@@ -5534,7 +5537,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
- * h_nr_queued of its group cfs_rq.
+ * h_nr_runnable of its group cfs_rq.
* - Subtract its previous weight from cfs_rq->load.weight.
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
@@ -5934,7 +5937,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long queued_delta, idle_task_delta, delayed_delta, dequeue = 1;
+ long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
long rq_h_nr_queued = rq->cfs.h_nr_queued;
raw_spin_lock(&cfs_b->lock);
@@ -5966,6 +5969,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
rcu_read_unlock();
queued_delta = cfs_rq->h_nr_queued;
+ runnable_delta = cfs_rq->h_nr_runnable;
idle_task_delta = cfs_rq->idle_h_nr_running;
delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
@@ -5990,6 +5994,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
idle_task_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued -= queued_delta;
+ qcfs_rq->h_nr_runnable -= runnable_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
qcfs_rq->h_nr_delayed -= delayed_delta;
@@ -6013,6 +6018,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
idle_task_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued -= queued_delta;
+ qcfs_rq->h_nr_runnable -= runnable_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
qcfs_rq->h_nr_delayed -= delayed_delta;
}
@@ -6040,7 +6046,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long queued_delta, idle_task_delta, delayed_delta;
+ long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
long rq_h_nr_queued = rq->cfs.h_nr_queued;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6075,6 +6081,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
}
queued_delta = cfs_rq->h_nr_queued;
+ runnable_delta = cfs_rq->h_nr_runnable;
idle_task_delta = cfs_rq->idle_h_nr_running;
delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
@@ -6093,6 +6100,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
idle_task_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued += queued_delta;
+ qcfs_rq->h_nr_runnable += runnable_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
qcfs_rq->h_nr_delayed += delayed_delta;
@@ -6111,6 +6119,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
idle_task_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued += queued_delta;
+ qcfs_rq->h_nr_runnable += runnable_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
qcfs_rq->h_nr_delayed += delayed_delta;
@@ -6966,7 +6975,7 @@ requeue_delayed_entity(struct sched_entity *se)
}
update_load_avg(cfs_rq, se, 0);
- clear_delayed(se);
+ clear_delayed(se, true);
}
/*
@@ -7030,6 +7039,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
enqueue_entity(cfs_rq, se, flags);
slice = cfs_rq_min_slice(cfs_rq);
+ if (!h_nr_delayed)
+ cfs_rq->h_nr_runnable++;
cfs_rq->h_nr_queued++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7054,6 +7065,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se->slice = slice;
slice = cfs_rq_min_slice(cfs_rq);
+ if (!h_nr_delayed)
+ cfs_rq->h_nr_runnable++;
cfs_rq->h_nr_queued++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7144,6 +7157,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
break;
}
+ if (!h_nr_delayed)
+ cfs_rq->h_nr_runnable -= h_nr_queued;
cfs_rq->h_nr_queued -= h_nr_queued;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
cfs_rq->h_nr_delayed -= h_nr_delayed;
@@ -7183,6 +7198,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
se->slice = slice;
slice = cfs_rq_min_slice(cfs_rq);
+ if (!h_nr_delayed)
+ cfs_rq->h_nr_runnable -= h_nr_queued;
cfs_rq->h_nr_queued -= h_nr_queued;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
cfs_rq->h_nr_delayed -= h_nr_delayed;
@@ -10319,7 +10336,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
* When there is more than 1 task, the group_overloaded case already
* takes care of cpu with reduced capacity
*/
- if (rq->cfs.h_nr_queued != 1)
+ if (rq->cfs.h_nr_runnable != 1)
return false;
return check_cpu_capacity(rq, sd);
@@ -10354,7 +10371,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_load += load;
sgs->group_util += cpu_util_cfs(i);
sgs->group_runnable += cpu_runnable(rq);
- sgs->sum_h_nr_running += rq->cfs.h_nr_queued;
+ sgs->sum_h_nr_running += rq->cfs.h_nr_runnable;
nr_running = rq->nr_running;
sgs->sum_nr_running += nr_running;
@@ -10669,7 +10686,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
sgs->group_util += cpu_util_without(i, p);
sgs->group_runnable += cpu_runnable_without(rq, p);
local = task_running_on_cpu(i, p);
- sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;
+ sgs->sum_h_nr_running += rq->cfs.h_nr_runnable - local;
nr_running = rq->nr_running - local;
sgs->sum_nr_running += nr_running;
@@ -11451,7 +11468,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
if (rt > env->fbq_type)
continue;
- nr_running = rq->cfs.h_nr_queued;
+ nr_running = rq->cfs.h_nr_runnable;
if (!nr_running)
continue;
@@ -11610,7 +11627,7 @@ static int need_active_balance(struct lb_env *env)
* available on dst_cpu.
*/
if (env->idle &&
- (env->src_rq->cfs.h_nr_queued == 1)) {
+ (env->src_rq->cfs.h_nr_runnable == 1)) {
if ((check_cpu_capacity(env->src_rq, sd)) &&
(capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
return 1;
@@ -12353,7 +12370,7 @@ static void nohz_balancer_kick(struct rq *rq)
* If there's a runnable CFS task and the current CPU has reduced
* capacity, kick the ILB to see if there's a better CPU to run on:
*/
- if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
+ if (rq->cfs.h_nr_runnable >= 1 && check_cpu_capacity(rq, sd)) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto unlock;
}
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 2bad0b508dfc..7a8534a2deff 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
*
* group: [ see update_cfs_group() ]
* se_weight() = tg->weight * grq->load_avg / tg->load_avg
- * se_runnable() = grq->h_nr_queued
+ * se_runnable() = grq->h_nr_runnable
*
* runnable_sum = se_runnable() * runnable = grq->runnable_sum
* runnable_avg = runnable_sum
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
- cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed,
+ cfs_rq->h_nr_runnable,
cfs_rq->curr != NULL)) {
___update_load_avg(&cfs_rq->avg, 1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4677e5412c40..e0b05ab43abd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -646,7 +646,8 @@ struct balance_callback {
struct cfs_rq {
struct load_weight load;
unsigned int nr_running;
- unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_queued;
unsigned int idle_nr_running; /* SCHED_IDLE */
unsigned int idle_h_nr_running; /* SCHED_IDLE */
unsigned int h_nr_delayed;
@@ -898,11 +899,8 @@ struct dl_rq {
static inline void se_update_runnable(struct sched_entity *se)
{
- if (!entity_is_task(se)) {
- struct cfs_rq *cfs_rq = se->my_q;
-
- se->runnable_weight = cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed;
- }
+ if (!entity_is_task(se))
+ se->runnable_weight = se->my_q->h_nr_runnable;
}
static inline long se_runnable(struct sched_entity *se)
--
2.43.0
* Re: [PATCH 03/10 v2] sched/fair: Add new cfs_rq.h_nr_runnable
2024-11-29 16:17 ` [PATCH 03/10 v2] sched/fair: Add new cfs_rq.h_nr_runnable Vincent Guittot
@ 2024-12-02 9:54 ` Peter Zijlstra
2024-12-02 9:57 ` Vincent Guittot
0 siblings, 1 reply; 26+ messages in thread
From: Peter Zijlstra @ 2024-12-02 9:54 UTC (permalink / raw)
To: Vincent Guittot
Cc: mingo, juri.lelli, dietmar.eggemann, rostedt, bsegall, mgorman,
vschneid, linux-kernel, kprateek.nayak, pauld, efault,
luis.machado
On Fri, Nov 29, 2024 at 05:17:49PM +0100, Vincent Guittot wrote:
> With delayed dequeued feature, a sleeping sched_entity remains queued in
> the rq until its lag has elapsed. As a result, it stays also visible
> in the statistics that are used to balance the system and in particular
> the field cfs.h_nr_queued when the sched_entity is associated to a task.
>
> Create a new h_nr_runnable that tracks all queued and runnable tasks and
> use it when balancing the load on the system.
>
> h_nr_runnable will be used in several places to make decision on load
> balance:
> - PELT runnable_avg
> - deciding if a group is overloaded or has spare capacity
> - numa stats
> - reduced capacity management
> - load balance
> - nohz kick
>
> It should be noticed that the rq->nr_running still counts the delayed
> dequeued tasks as delayed dequeue is a fair feature that is meaningless
> at core level.
Since you're doing a v3, could you please split this into 2 patches, one
adding the accounting, and then a separate patch making use of it?
* Re: [PATCH 03/10 v2] sched/fair: Add new cfs_rq.h_nr_runnable
2024-12-02 9:54 ` Peter Zijlstra
@ 2024-12-02 9:57 ` Vincent Guittot
0 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 9:57 UTC (permalink / raw)
To: Peter Zijlstra
Cc: mingo, juri.lelli, dietmar.eggemann, rostedt, bsegall, mgorman,
vschneid, linux-kernel, kprateek.nayak, pauld, efault,
luis.machado
On Mon, 2 Dec 2024 at 10:54, Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Nov 29, 2024 at 05:17:49PM +0100, Vincent Guittot wrote:
> > With delayed dequeued feature, a sleeping sched_entity remains queued in
> > the rq until its lag has elapsed. As a result, it stays also visible
> > in the statistics that are used to balance the system and in particular
> > the field cfs.h_nr_queued when the sched_entity is associated to a task.
> >
> > Create a new h_nr_runnable that tracks all queued and runnable tasks and
> > use it when balancing the load on the system.
> >
> > h_nr_runnable will be used in several places to make decision on load
> > balance:
> > - PELT runnable_avg
> > - deciding if a group is overloaded or has spare capacity
> > - numa stats
> > - reduced capacity management
> > - load balance
> > - nohz kick
> >
> > It should be noticed that the rq->nr_running still counts the delayed
> > dequeued tasks as delayed dequeue is a fair feature that is meaningless
> > at core level.
>
> Since you're doing a v3, could you please split this into 2 patches, one
> adding the accounting, and then a separate patch making use of it?
ok
* [PATCH 04/10 v2] sched/fair: Remove unused cfs_rq.h_nr_delayed
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (2 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 03/10 v2] sched/fair: Add new cfs_rq.h_nr_runnable Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 05/10 v2] sched/fair: Rename cfs_rq.idle_h_nr_running into h_nr_idle Vincent Guittot
` (6 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
h_nr_delayed is not used anymore. We now have:
- h_nr_runnable, which tracks tasks ready to run
- h_nr_queued, which tracks queued tasks, either ready to run or
delayed-dequeue
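For illustration, a condensed and simplified sketch of the resulting
enqueue path (based on the hunks below; the real code is split across two
loops): the per-task contribution is computed once as h_nr_runnable and
added directly, with no separate h_nr_delayed bookkeeping left:
	int h_nr_runnable = 0;

	if (task_new)
		h_nr_runnable = !se->sched_delayed;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		cfs_rq->h_nr_runnable += h_nr_runnable;
		cfs_rq->h_nr_queued++;
		/* idle accounting and throttling checks elided */
	}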
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/debug.c | 1 -
kernel/sched/fair.c | 45 ++++++++++++++------------------------------
kernel/sched/sched.h | 1 -
3 files changed, 14 insertions(+), 33 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fd711cc4d44c..56be3651605d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -846,7 +846,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
- SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
cfs_rq->idle_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d5736bde3682..c13163deb92c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5472,21 +5472,18 @@ static void set_delayed(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_runnable--;
- cfs_rq->h_nr_delayed++;
if (cfs_rq_throttled(cfs_rq))
break;
}
}
-static void clear_delayed(struct sched_entity *se, bool running)
+static void clear_delayed(struct sched_entity *se)
{
se->sched_delayed = 0;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (running)
- cfs_rq->h_nr_runnable++;
- cfs_rq->h_nr_delayed--;
+ cfs_rq->h_nr_runnable++;
if (cfs_rq_throttled(cfs_rq))
break;
}
@@ -5494,7 +5491,7 @@ static void clear_delayed(struct sched_entity *se, bool running)
static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
{
- clear_delayed(se, false);
+ se->sched_delayed = 0;
if (sched_feat(DELAY_ZERO) && se->vlag > 0)
se->vlag = 0;
}
@@ -5937,7 +5934,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
+ long queued_delta, runnable_delta, idle_task_delta, dequeue = 1;
long rq_h_nr_queued = rq->cfs.h_nr_queued;
raw_spin_lock(&cfs_b->lock);
@@ -5971,7 +5968,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
queued_delta = cfs_rq->h_nr_queued;
runnable_delta = cfs_rq->h_nr_runnable;
idle_task_delta = cfs_rq->idle_h_nr_running;
- delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
int flags;
@@ -5996,7 +5992,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_queued -= queued_delta;
qcfs_rq->h_nr_runnable -= runnable_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
- qcfs_rq->h_nr_delayed -= delayed_delta;
if (qcfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
@@ -6020,7 +6015,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_queued -= queued_delta;
qcfs_rq->h_nr_runnable -= runnable_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
- qcfs_rq->h_nr_delayed -= delayed_delta;
}
/* At this point se is NULL and we are at root level*/
@@ -6046,7 +6040,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
+ long queued_delta, runnable_delta, idle_task_delta;
long rq_h_nr_queued = rq->cfs.h_nr_queued;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6083,7 +6077,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
queued_delta = cfs_rq->h_nr_queued;
runnable_delta = cfs_rq->h_nr_runnable;
idle_task_delta = cfs_rq->idle_h_nr_running;
- delayed_delta = cfs_rq->h_nr_delayed;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
@@ -6102,7 +6095,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_queued += queued_delta;
qcfs_rq->h_nr_runnable += runnable_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
- qcfs_rq->h_nr_delayed += delayed_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
@@ -6121,7 +6113,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
qcfs_rq->h_nr_queued += queued_delta;
qcfs_rq->h_nr_runnable += runnable_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
- qcfs_rq->h_nr_delayed += delayed_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
@@ -6975,7 +6966,7 @@ requeue_delayed_entity(struct sched_entity *se)
}
update_load_avg(cfs_rq, se, 0);
- clear_delayed(se, true);
+ clear_delayed(se);
}
/*
@@ -6989,7 +6980,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p);
- int h_nr_delayed = 0;
+ int h_nr_runnable = 0;
int task_new = !(flags & ENQUEUE_WAKEUP);
int rq_h_nr_queued = rq->cfs.h_nr_queued;
u64 slice = 0;
@@ -7017,7 +7008,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
if (task_new)
- h_nr_delayed = !!se->sched_delayed;
+ h_nr_runnable = !se->sched_delayed;
for_each_sched_entity(se) {
if (se->on_rq) {
@@ -7039,11 +7030,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
enqueue_entity(cfs_rq, se, flags);
slice = cfs_rq_min_slice(cfs_rq);
- if (!h_nr_delayed)
- cfs_rq->h_nr_runnable++;
+ cfs_rq->h_nr_runnable += h_nr_runnable;
cfs_rq->h_nr_queued++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
- cfs_rq->h_nr_delayed += h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
@@ -7065,11 +7054,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se->slice = slice;
slice = cfs_rq_min_slice(cfs_rq);
- if (!h_nr_delayed)
- cfs_rq->h_nr_runnable++;
+ cfs_rq->h_nr_runnable += h_nr_runnable;
cfs_rq->h_nr_queued++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
- cfs_rq->h_nr_delayed += h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
@@ -7132,7 +7119,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
struct task_struct *p = NULL;
int idle_h_nr_running = 0;
int h_nr_queued = 0;
- int h_nr_delayed = 0;
+ int h_nr_runnable = 0;
struct cfs_rq *cfs_rq;
u64 slice = 0;
@@ -7141,7 +7128,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
h_nr_queued = 1;
idle_h_nr_running = task_has_idle_policy(p);
if (!task_sleep && !task_delayed)
- h_nr_delayed = !!se->sched_delayed;
+ h_nr_runnable = !se->sched_delayed;
} else {
cfs_rq = group_cfs_rq(se);
slice = cfs_rq_min_slice(cfs_rq);
@@ -7157,11 +7144,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
break;
}
- if (!h_nr_delayed)
- cfs_rq->h_nr_runnable -= h_nr_queued;
+ cfs_rq->h_nr_runnable -= h_nr_runnable;
cfs_rq->h_nr_queued -= h_nr_queued;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
- cfs_rq->h_nr_delayed -= h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = h_nr_queued;
@@ -7198,11 +7183,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
se->slice = slice;
slice = cfs_rq_min_slice(cfs_rq);
- if (!h_nr_delayed)
- cfs_rq->h_nr_runnable -= h_nr_queued;
+ cfs_rq->h_nr_runnable -= h_nr_runnable;
cfs_rq->h_nr_queued -= h_nr_queued;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
- cfs_rq->h_nr_delayed -= h_nr_delayed;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = h_nr_queued;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0b05ab43abd..e098b852704a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -650,7 +650,6 @@ struct cfs_rq {
unsigned int h_nr_queued;
unsigned int idle_nr_running; /* SCHED_IDLE */
unsigned int idle_h_nr_running; /* SCHED_IDLE */
- unsigned int h_nr_delayed;
s64 avg_vruntime;
u64 avg_load;
--
2.43.0
* [PATCH 05/10 v2] sched/fair: Rename cfs_rq.idle_h_nr_running into h_nr_idle
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (3 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 04/10 v2] sched/fair: Remove unused cfs_rq.h_nr_delayed Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 06/10 v2] sched/fair: Remove unused cfs_rq.idle_nr_running Vincent Guittot
` (5 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
Use the same naming convention as the other h_nr_* fields and rename
idle_h_nr_running to h_nr_idle.
The "running" part of the name is no longer correct, as the count also
includes delayed-dequeue tasks.
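For illustration (condensed from the hunks below), the renamed field keeps
the same semantics; a typical user is sched_idle_rq(), which now reads:
	/* Runqueue only has SCHED_IDLE tasks enqueued */
	static int sched_idle_rq(struct rq *rq)
	{
		return unlikely(rq->nr_running == rq->cfs.h_nr_idle &&
				rq->nr_running);
	}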
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/debug.c | 4 ++--
kernel/sched/fair.c | 52 ++++++++++++++++++++++----------------------
kernel/sched/sched.h | 2 +-
3 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 56be3651605d..867e1102c368 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -848,8 +848,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
cfs_rq->idle_nr_running);
- SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
- cfs_rq->idle_h_nr_running);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle",
+ cfs_rq->h_nr_idle);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c13163deb92c..13ee5ea13580 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5934,7 +5934,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long queued_delta, runnable_delta, idle_task_delta, dequeue = 1;
+ long queued_delta, runnable_delta, idle_delta, dequeue = 1;
long rq_h_nr_queued = rq->cfs.h_nr_queued;
raw_spin_lock(&cfs_b->lock);
@@ -5967,7 +5967,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
queued_delta = cfs_rq->h_nr_queued;
runnable_delta = cfs_rq->h_nr_runnable;
- idle_task_delta = cfs_rq->idle_h_nr_running;
+ idle_delta = cfs_rq->h_nr_idle;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
int flags;
@@ -5987,11 +5987,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
dequeue_entity(qcfs_rq, se, flags);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_queued;
+ idle_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued -= queued_delta;
qcfs_rq->h_nr_runnable -= runnable_delta;
- qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ qcfs_rq->h_nr_idle -= idle_delta;
if (qcfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
@@ -6010,11 +6010,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
se_update_runnable(se);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_queued;
+ idle_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued -= queued_delta;
qcfs_rq->h_nr_runnable -= runnable_delta;
- qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ qcfs_rq->h_nr_idle -= idle_delta;
}
/* At this point se is NULL and we are at root level*/
@@ -6040,7 +6040,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
- long queued_delta, runnable_delta, idle_task_delta;
+ long queued_delta, runnable_delta, idle_delta;
long rq_h_nr_queued = rq->cfs.h_nr_queued;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6076,7 +6076,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
queued_delta = cfs_rq->h_nr_queued;
runnable_delta = cfs_rq->h_nr_runnable;
- idle_task_delta = cfs_rq->idle_h_nr_running;
+ idle_delta = cfs_rq->h_nr_idle;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
@@ -6090,11 +6090,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_queued;
+ idle_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued += queued_delta;
qcfs_rq->h_nr_runnable += runnable_delta;
- qcfs_rq->idle_h_nr_running += idle_task_delta;
+ qcfs_rq->h_nr_idle += idle_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
@@ -6108,11 +6108,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
se_update_runnable(se);
if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_queued;
+ idle_delta = cfs_rq->h_nr_queued;
qcfs_rq->h_nr_queued += queued_delta;
qcfs_rq->h_nr_runnable += runnable_delta;
- qcfs_rq->idle_h_nr_running += idle_task_delta;
+ qcfs_rq->h_nr_idle += idle_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
@@ -6927,7 +6927,7 @@ static inline void check_update_overutilized_status(struct rq *rq) { }
/* Runqueue only has SCHED_IDLE tasks enqueued */
static int sched_idle_rq(struct rq *rq)
{
- return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
+ return unlikely(rq->nr_running == rq->cfs.h_nr_idle &&
rq->nr_running);
}
@@ -6979,7 +6979,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
- int idle_h_nr_running = task_has_idle_policy(p);
+ int h_nr_idle = task_has_idle_policy(p);
int h_nr_runnable = 0;
int task_new = !(flags & ENQUEUE_WAKEUP);
int rq_h_nr_queued = rq->cfs.h_nr_queued;
@@ -7032,10 +7032,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cfs_rq->h_nr_runnable += h_nr_runnable;
cfs_rq->h_nr_queued++;
- cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ cfs_rq->h_nr_idle += h_nr_idle;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
+ h_nr_idle = 1;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
@@ -7056,10 +7056,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cfs_rq->h_nr_runnable += h_nr_runnable;
cfs_rq->h_nr_queued++;
- cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ cfs_rq->h_nr_idle += h_nr_idle;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
+ h_nr_idle = 1;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
@@ -7117,7 +7117,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
bool task_sleep = flags & DEQUEUE_SLEEP;
bool task_delayed = flags & DEQUEUE_DELAYED;
struct task_struct *p = NULL;
- int idle_h_nr_running = 0;
+ int h_nr_idle = 0;
int h_nr_queued = 0;
int h_nr_runnable = 0;
struct cfs_rq *cfs_rq;
@@ -7126,7 +7126,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
if (entity_is_task(se)) {
p = task_of(se);
h_nr_queued = 1;
- idle_h_nr_running = task_has_idle_policy(p);
+ h_nr_idle = task_has_idle_policy(p);
if (!task_sleep && !task_delayed)
h_nr_runnable = !se->sched_delayed;
} else {
@@ -7146,10 +7146,10 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
cfs_rq->h_nr_runnable -= h_nr_runnable;
cfs_rq->h_nr_queued -= h_nr_queued;
- cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ cfs_rq->h_nr_idle -= h_nr_idle;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = h_nr_queued;
+ h_nr_idle = h_nr_queued;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
@@ -7185,10 +7185,10 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
cfs_rq->h_nr_runnable -= h_nr_runnable;
cfs_rq->h_nr_queued -= h_nr_queued;
- cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ cfs_rq->h_nr_idle -= h_nr_idle;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = h_nr_queued;
+ h_nr_idle = h_nr_queued;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
@@ -13543,7 +13543,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
}
idle_task_delta = grp_cfs_rq->h_nr_queued -
- grp_cfs_rq->idle_h_nr_running;
+ grp_cfs_rq->h_nr_idle;
if (!cfs_rq_is_idle(grp_cfs_rq))
idle_task_delta *= -1;
@@ -13553,7 +13553,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
if (!se->on_rq)
break;
- cfs_rq->idle_h_nr_running += idle_task_delta;
+ cfs_rq->h_nr_idle += idle_task_delta;
/* Already accounted at parent level and above. */
if (cfs_rq_is_idle(cfs_rq))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e098b852704a..8c57da1af378 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -649,7 +649,7 @@ struct cfs_rq {
unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
unsigned int h_nr_queued;
unsigned int idle_nr_running; /* SCHED_IDLE */
- unsigned int idle_h_nr_running; /* SCHED_IDLE */
+ unsigned int h_nr_idle; /* SCHED_IDLE */
s64 avg_vruntime;
u64 avg_load;
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* [PATCH 06/10 v2] sched/fair: Remove unused cfs_rq.idle_nr_running
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (4 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 05/10 v2] sched/fair: Rename cfs_rq.idle_h_nr_running into h_nr_idle Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 07/10 v2] sched/fair: Rename cfs_rq.nr_running into nr_queued Vincent Guittot
` (4 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
The cfs_rq.idle_nr_running field is not used anywhere, so we can remove
the now-useless associated computation.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/debug.c | 2 --
kernel/sched/fair.c | 14 +-------------
kernel/sched/sched.h | 1 -
3 files changed, 1 insertion(+), 16 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 867e1102c368..37ccba0bedf5 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -846,8 +846,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
- SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
- cfs_rq->idle_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle",
cfs_rq->h_nr_idle);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 13ee5ea13580..20fe55e95882 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3674,8 +3674,6 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#endif
cfs_rq->nr_running++;
- if (se_is_idle(se))
- cfs_rq->idle_nr_running++;
}
static void
@@ -3689,8 +3687,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#endif
cfs_rq->nr_running--;
- if (se_is_idle(se))
- cfs_rq->idle_nr_running--;
}
/*
@@ -13523,7 +13519,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se = tg->se[i];
- struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
+ struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
long idle_task_delta;
struct rq_flags rf;
@@ -13534,14 +13530,6 @@ int sched_group_set_idle(struct task_group *tg, long idle)
if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
goto next_cpu;
- if (se->on_rq) {
- parent_cfs_rq = cfs_rq_of(se);
- if (cfs_rq_is_idle(grp_cfs_rq))
- parent_cfs_rq->idle_nr_running++;
- else
- parent_cfs_rq->idle_nr_running--;
- }
-
idle_task_delta = grp_cfs_rq->h_nr_queued -
grp_cfs_rq->h_nr_idle;
if (!cfs_rq_is_idle(grp_cfs_rq))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8c57da1af378..7ece69b0fc14 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -648,7 +648,6 @@ struct cfs_rq {
unsigned int nr_running;
unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
unsigned int h_nr_queued;
- unsigned int idle_nr_running; /* SCHED_IDLE */
unsigned int h_nr_idle; /* SCHED_IDLE */
s64 avg_vruntime;
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* [PATCH 07/10 v2] sched/fair: Rename cfs_rq.nr_running into nr_queued
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (5 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 06/10 v2] sched/fair: Remove unused cfs_rq.idle_nr_running Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 08/10 v2] sched/fair: Do not try to migrate delayed dequeue task Vincent Guittot
` (3 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
Rename cfs_rq.nr_running into cfs_rq.nr_queued, which better reflects
reality: the value counts both ready-to-run entities and delayed-dequeue
entities.
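As a rough illustration of the distinction (a standalone toy model, not kernel
code; every name in it is invented for the example), a delayed-dequeue entity
has stopped running but is still queued, so "nr_queued" keeps matching what the
counter actually holds:
/*
 * Toy userspace model -- not kernel code, all names are made up -- showing
 * why "nr_queued" describes this counter better than "nr_running": a
 * delayed-dequeue entity no longer runs but remains queued.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfs_rq {
	unsigned int nr_queued;		/* ready-to-run + delayed-dequeue entities */
};

struct toy_se {
	bool sched_delayed;		/* dequeue postponed until the lag is paid off */
};

static void toy_enqueue(struct toy_cfs_rq *cfs_rq, struct toy_se *se)
{
	se->sched_delayed = false;
	cfs_rq->nr_queued++;
}

static void toy_dequeue(struct toy_cfs_rq *cfs_rq, struct toy_se *se, bool delay)
{
	if (delay) {
		/* The entity sleeps but stays on the runqueue: still counted. */
		se->sched_delayed = true;
		return;
	}
	cfs_rq->nr_queued--;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { 0 };
	struct toy_se a = { 0 }, b = { 0 };

	toy_enqueue(&cfs_rq, &a);
	toy_enqueue(&cfs_rq, &b);
	toy_dequeue(&cfs_rq, &b, true);		/* b sleeps, its dequeue is delayed */

	/* Only one entity wants to run, but two are still queued. */
	printf("nr_queued = %u\n", cfs_rq.nr_queued);	/* prints 2 */
	return 0;
}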
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
kernel/sched/fair.c | 38 +++++++++++++++++++-------------------
kernel/sched/sched.h | 4 ++--
4 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ba5e314eb99b..3571f91d4b0d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1341,7 +1341,7 @@ bool sched_can_stop_tick(struct rq *rq)
if (scx_enabled() && !scx_can_stop_tick(rq))
return false;
- if (rq->cfs.nr_running > 1)
+ if (rq->cfs.nr_queued > 1)
return false;
/*
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 37ccba0bedf5..9d9b6940687f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -843,7 +843,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SPLIT_NS(right_vruntime));
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
- SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+ SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 20fe55e95882..dc9725da033e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -915,7 +915,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
* We can safely skip eligibility check if there is only one entity
* in this cfs_rq, saving some cycles.
*/
- if (cfs_rq->nr_running == 1)
+ if (cfs_rq->nr_queued == 1)
return curr && curr->on_rq ? curr : se;
if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
@@ -1247,7 +1247,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
account_cfs_rq_runtime(cfs_rq, delta_exec);
- if (cfs_rq->nr_running == 1)
+ if (cfs_rq->nr_queued == 1)
return;
if (resched || did_preempt_short(cfs_rq, curr)) {
@@ -3673,7 +3673,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
list_add(&se->group_node, &rq->cfs_tasks);
}
#endif
- cfs_rq->nr_running++;
+ cfs_rq->nr_queued++;
}
static void
@@ -3686,7 +3686,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
list_del_init(&se->group_node);
}
#endif
- cfs_rq->nr_running--;
+ cfs_rq->nr_queued--;
}
/*
@@ -5220,7 +5220,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
- return !cfs_rq->nr_running;
+ return !cfs_rq->nr_queued;
}
#define UPDATE_TG 0x0
@@ -5276,7 +5276,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*
* EEVDF: placement strategy #1 / #2
*/
- if (sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) {
+ if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -5425,7 +5425,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
__enqueue_entity(cfs_rq, se);
se->on_rq = 1;
- if (cfs_rq->nr_running == 1) {
+ if (cfs_rq->nr_queued == 1) {
check_enqueue_throttle(cfs_rq);
if (!throttled_hierarchy(cfs_rq)) {
list_add_leaf_cfs_rq(cfs_rq);
@@ -5570,7 +5570,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (flags & DEQUEUE_DELAYED)
finish_delayed_dequeue_entity(se);
- if (cfs_rq->nr_running == 0)
+ if (cfs_rq->nr_queued == 0)
update_idle_cfs_rq_clock_pelt(cfs_rq);
return true;
@@ -5917,7 +5917,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
list_del_leaf_cfs_rq(cfs_rq);
SCHED_WARN_ON(cfs_rq->throttled_clock_self);
- if (cfs_rq->nr_running)
+ if (cfs_rq->nr_queued)
cfs_rq->throttled_clock_self = rq_clock(rq);
}
cfs_rq->throttle_count++;
@@ -6026,7 +6026,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
*/
cfs_rq->throttled = 1;
SCHED_WARN_ON(cfs_rq->throttled_clock);
- if (cfs_rq->nr_running)
+ if (cfs_rq->nr_queued)
cfs_rq->throttled_clock = rq_clock(rq);
return true;
}
@@ -6126,7 +6126,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
assert_list_leaf_cfs_rq(rq);
/* Determine whether we need to wake up potentially idle CPU: */
- if (rq->curr == rq->idle && rq->cfs.nr_running)
+ if (rq->curr == rq->idle && rq->cfs.nr_queued)
resched_curr(rq);
}
@@ -6427,7 +6427,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
if (!cfs_bandwidth_used())
return;
- if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
+ if (!cfs_rq->runtime_enabled || cfs_rq->nr_queued)
return;
__return_cfs_rq_runtime(cfs_rq);
@@ -6950,14 +6950,14 @@ requeue_delayed_entity(struct sched_entity *se)
if (sched_feat(DELAY_ZERO)) {
update_entity_lag(cfs_rq, se);
if (se->vlag > 0) {
- cfs_rq->nr_running--;
+ cfs_rq->nr_queued--;
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->vlag = 0;
place_entity(cfs_rq, se, 0);
if (se != cfs_rq->curr)
__enqueue_entity(cfs_rq, se);
- cfs_rq->nr_running++;
+ cfs_rq->nr_queued++;
}
}
@@ -8876,7 +8876,7 @@ static struct task_struct *pick_task_fair(struct rq *rq)
again:
cfs_rq = &rq->cfs;
- if (!cfs_rq->nr_running)
+ if (!cfs_rq->nr_queued)
return NULL;
do {
@@ -8993,7 +8993,7 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_stru
static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
{
- return !!dl_se->rq->cfs.nr_running;
+ return !!dl_se->rq->cfs.nr_queued;
}
static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
@@ -9783,7 +9783,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
update_tg_load_avg(cfs_rq);
- if (cfs_rq->nr_running == 0)
+ if (cfs_rq->nr_queued == 0)
update_idle_cfs_rq_clock_pelt(cfs_rq);
if (cfs_rq == &rq->cfs)
@@ -12965,7 +12965,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
* MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
* if we need to give up the CPU.
*/
- if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
+ if (rq->core->core_forceidle_count && rq->cfs.nr_queued == 1 &&
__entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
resched_curr(rq);
}
@@ -13109,7 +13109,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
if (!task_on_rq_queued(p))
return;
- if (rq->cfs.nr_running == 1)
+ if (rq->cfs.nr_queued == 1)
return;
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7ece69b0fc14..fa317662da53 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -645,7 +645,7 @@ struct balance_callback {
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
- unsigned int nr_running;
+ unsigned int nr_queued;
unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
unsigned int h_nr_queued;
unsigned int h_nr_idle; /* SCHED_IDLE */
@@ -2564,7 +2564,7 @@ static inline bool sched_rt_runnable(struct rq *rq)
static inline bool sched_fair_runnable(struct rq *rq)
{
- return rq->cfs.nr_running > 0;
+ return rq->cfs.nr_queued > 0;
}
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* [PATCH 08/10 v2] sched/fair: Do not try to migrate delayed dequeue task
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (6 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 07/10 v2] sched/fair: Rename cfs_rq.nr_running into nr_queued Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 16:17 ` [PATCH 09/10 v2] sched/fair: Fix sched_can_stop_tick() for fair tasks Vincent Guittot
` (2 subsequent siblings)
10 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
Migrating a delayed-dequeue task doesn't help balance the number of
runnable tasks in the system.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/fair.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dc9725da033e..c34874203da2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9394,11 +9394,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* We do not migrate tasks that are:
- * 1) throttled_lb_pair, or
- * 2) cannot be migrated to this CPU due to cpus_ptr, or
- * 3) running (obviously), or
- * 4) are cache-hot on their current CPU.
+ * 1) delayed dequeued unless we migrate load, or
+ * 2) throttled_lb_pair, or
+ * 3) cannot be migrated to this CPU due to cpus_ptr, or
+ * 4) running (obviously), or
+ * 5) are cache-hot on their current CPU.
*/
+ if ((p->se.sched_delayed) && (env->migration_type != migrate_load))
+ return 0;
+
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
return 0;
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* [PATCH 09/10 v2] sched/fair: Fix sched_can_stop_tick() for fair tasks
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (7 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 08/10 v2] sched/fair: Do not try to migrate delayed dequeue task Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 18:26 ` K Prateek Nayak
2024-11-29 16:17 ` [PATCH 10/10 v2] sched/fair: Fix variable declaration position Vincent Guittot
2024-12-01 13:30 ` [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Mike Galbraith
10 siblings, 1 reply; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
We can't stop the tick of an rq if there are at least 2 tasks enqueued in
the whole hierarchy, not only at the root cfs_rq.
rq->cfs.nr_queued tracks the number of sched_entities at one level,
whereas rq->cfs.h_nr_queued tracks all enqueued tasks in the
hierarchy.
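A rough standalone sketch of the difference (a toy model, not kernel code; the
names are invented for the example), with one task group holding two tasks
below the root:
/*
 * Toy userspace illustration -- not kernel code, the names are invented --
 * of per-level vs hierarchical counting with one task group holding two
 * tasks:  root cfs_rq -> group se -> group cfs_rq -> {task A, task B}.
 */
#include <stdio.h>

struct toy_cfs_rq {
	unsigned int nr_queued;		/* entities at this level only */
	unsigned int h_nr_queued;	/* tasks in the whole subtree */
};

int main(void)
{
	struct toy_cfs_rq root = { 0 }, group = { 0 };

	/* Two tasks are enqueued on the group's cfs_rq. */
	group.nr_queued = 2;
	group.h_nr_queued = 2;

	/* The root only holds the single group entity at its own level... */
	root.nr_queued = 1;
	/* ...while the hierarchical counter propagates both tasks. */
	root.h_nr_queued = group.h_nr_queued;

	/* Checking nr_queued alone would wrongly let the tick stop here. */
	printf("old check, can stop tick: %s\n", root.nr_queued > 1 ? "no" : "yes");
	printf("fixed check, can stop tick: %s\n", root.h_nr_queued > 1 ? "no" : "yes");
	return 0;
}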
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3571f91d4b0d..866a1605656c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1341,7 +1341,7 @@ bool sched_can_stop_tick(struct rq *rq)
if (scx_enabled() && !scx_can_stop_tick(rq))
return false;
- if (rq->cfs.nr_queued > 1)
+ if (rq->cfs.h_nr_queued > 1)
return false;
/*
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* Re: [PATCH 09/10 v2] sched/fair: Fix sched_can_stop_tick() for fair tasks
2024-11-29 16:17 ` [PATCH 09/10 v2] sched/fair: Fix sched_can_stop_tick() for fair tasks Vincent Guittot
@ 2024-11-29 18:26 ` K Prateek Nayak
2024-12-02 9:18 ` Vincent Guittot
0 siblings, 1 reply; 26+ messages in thread
From: K Prateek Nayak @ 2024-11-29 18:26 UTC (permalink / raw)
To: Vincent Guittot, mingo, peterz, juri.lelli, dietmar.eggemann,
rostedt, bsegall, mgorman, vschneid, linux-kernel
Cc: pauld, efault, luis.machado, Tejun Heo, David Vernet
(+ Tejun, David)
Hello Vincent,
On 11/29/2024 9:47 PM, Vincent Guittot wrote:
> We can't stop the tick of a rq if there are at least 2 tasks enqueued in
> the whole hierarchy and not only at the root cfs rq.
>
> rq->cfs.nr_queued tracks the number of sched_entity at one level
> whereas rq->cfs.h_nr_queued tracks all enqueued tasks in the
> hierarchy.
>
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
> kernel/sched/core.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 3571f91d4b0d..866a1605656c 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1341,7 +1341,7 @@ bool sched_can_stop_tick(struct rq *rq)
> if (scx_enabled() && !scx_can_stop_tick(rq))
> return false;
>
> - if (rq->cfs.nr_queued > 1)
> + if (rq->cfs.h_nr_queued > 1)
Perhaps we can move this fix to the beginning of the series and add:
Fixes: 11cc374f4643b ("sched_ext: Simplify scx_can_stop_tick() invocation in sched_can_stop_tick()")
before converting the h_nr_running to h_nr_queued since prior to that
commit, sched_can_stop_tick() used to check "rq->nr_running" and since
we check the count of DL, RR, and FIFO tasks up above, it would have
captured the number of fair tasks running before sched-ext. That way the fix
can be backported easily to LTS too. Thoughts?
> return false;
>
> /*
--
Thanks and Regards,
Prateek
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [PATCH 09/10 v2] sched/fair: Fix sched_can_stop_tick() for fair tasks
2024-11-29 18:26 ` K Prateek Nayak
@ 2024-12-02 9:18 ` Vincent Guittot
0 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 9:18 UTC (permalink / raw)
To: K Prateek Nayak
Cc: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel, pauld, efault, luis.machado,
Tejun Heo, David Vernet
On Fri, 29 Nov 2024 at 19:26, K Prateek Nayak <kprateek.nayak@amd.com> wrote:
>
> (+ Tejun, David)
>
> Hello Vincent,
>
> On 11/29/2024 9:47 PM, Vincent Guittot wrote:
> > We can't stop the tick of a rq if there are at least 2 tasks enqueued in
> > the whole hierarchy and not only at the root cfs rq.
> >
> > rq->cfs.nr_queued tracks the number of sched_entity at one level
> > whereas rq->cfs.h_nr_queued tracks all enqueued tasks in the
> > hierarchy.
> >
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> > ---
> > kernel/sched/core.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 3571f91d4b0d..866a1605656c 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -1341,7 +1341,7 @@ bool sched_can_stop_tick(struct rq *rq)
> > if (scx_enabled() && !scx_can_stop_tick(rq))
> > return false;
> >
> > - if (rq->cfs.nr_queued > 1)
> > + if (rq->cfs.h_nr_queued > 1)
>
> Perhaps we can move this fix to the beginning of the series and add:
>
> Fixes: 11cc374f4643b ("sched_ext: Simplify scx_can_stop_tick() invocation in sched_can_stop_tick()")
>
> before converting the h_nr_running to h_nr_queued since prior to that
> commit, sched_can_stop_tick() used to check "rq->nr_running" and since
> we check the count of DL, RR, and FIFO tasks up above, it would have
> captured number of fair tasks running before sched-ext. That way the fix
> can be backported easily to LTS too. Thoughts?
Yes I can do that
>
> > return false;
> >
> > /*
>
> --
> Thanks and Regards,
> Prateek
>
^ permalink raw reply [flat|nested] 26+ messages in thread
* [PATCH 10/10 v2] sched/fair: Fix variable declaration position
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (8 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 09/10 v2] sched/fair: Fix sched_can_stop_tick() for fair tasks Vincent Guittot
@ 2024-11-29 16:17 ` Vincent Guittot
2024-11-29 18:30 ` K Prateek Nayak
2024-12-01 13:30 ` [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Mike Galbraith
10 siblings, 1 reply; 26+ messages in thread
From: Vincent Guittot @ 2024-11-29 16:17 UTC (permalink / raw)
To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, efault, luis.machado, Vincent Guittot
Move the variable declaration to the beginning of the function.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/fair.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c34874203da2..87552870958c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5632,6 +5632,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
static struct sched_entity *
pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
{
+ struct sched_entity *se;
/*
* Enabling NEXT_BUDDY will affect latency but not fairness.
*/
@@ -5642,7 +5643,7 @@ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
return cfs_rq->next;
}
- struct sched_entity *se = pick_eevdf(cfs_rq);
+ se = pick_eevdf(cfs_rq);
if (se->sched_delayed) {
dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
SCHED_WARN_ON(se->sched_delayed);
--
2.43.0
^ permalink raw reply related [flat|nested] 26+ messages in thread* Re: [PATCH 10/10 v2] sched/fair: Fix variable declaration position
2024-11-29 16:17 ` [PATCH 10/10 v2] sched/fair: Fix variable declaration position Vincent Guittot
@ 2024-11-29 18:30 ` K Prateek Nayak
2024-12-02 9:19 ` Vincent Guittot
0 siblings, 1 reply; 26+ messages in thread
From: K Prateek Nayak @ 2024-11-29 18:30 UTC (permalink / raw)
To: Vincent Guittot, mingo, peterz, juri.lelli, dietmar.eggemann,
rostedt, bsegall, mgorman, vschneid, linux-kernel
Cc: pauld, efault, luis.machado
Hello Vincent,
On 11/29/2024 9:47 PM, Vincent Guittot wrote:
> Move variable declaration at the beginning of the function
>
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
> kernel/sched/fair.c | 3 ++-
> 1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index c34874203da2..87552870958c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5632,6 +5632,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
> static struct sched_entity *
> pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
> {
> + struct sched_entity *se;
> /*
> * Enabling NEXT_BUDDY will affect latency but not fairness.
> */
> @@ -5642,7 +5643,7 @@ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
> return cfs_rq->next;
> }
>
> - struct sched_entity *se = pick_eevdf(cfs_rq);
> + se = pick_eevdf(cfs_rq);
> if (se->sched_delayed) {
> dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
> SCHED_WARN_ON(se->sched_delayed);
Perhaps also squash in:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3356315d7e64..321e3f9e2ac4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5476,6 +5476,7 @@ static bool
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
bool sleep = flags & DEQUEUE_SLEEP;
+ int action = UPDATE_TG;
update_curr(cfs_rq);
@@ -5502,7 +5503,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
}
}
- int action = UPDATE_TG;
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
action |= DO_DETACH;
--
while at it :)
--
Thanks and Regards,
Prateek
^ permalink raw reply related [flat|nested] 26+ messages in thread* Re: [PATCH 10/10 v2] sched/fair: Fix variable declaration position
2024-11-29 18:30 ` K Prateek Nayak
@ 2024-12-02 9:19 ` Vincent Guittot
0 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 9:19 UTC (permalink / raw)
To: K Prateek Nayak
Cc: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel, pauld, efault, luis.machado
On Fri, 29 Nov 2024 at 19:30, K Prateek Nayak <kprateek.nayak@amd.com> wrote:
>
> Hello Vincent,
>
> On 11/29/2024 9:47 PM, Vincent Guittot wrote:
> > Move variable declaration at the beginning of the function
> >
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> > ---
> > kernel/sched/fair.c | 3 ++-
> > 1 file changed, 2 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index c34874203da2..87552870958c 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -5632,6 +5632,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
> > static struct sched_entity *
> > pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
> > {
> > + struct sched_entity *se;
> > /*
> > * Enabling NEXT_BUDDY will affect latency but not fairness.
> > */
> > @@ -5642,7 +5643,7 @@ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
> > return cfs_rq->next;
> > }
> >
> > - struct sched_entity *se = pick_eevdf(cfs_rq);
> > + se = pick_eevdf(cfs_rq);
> > if (se->sched_delayed) {
> > dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
> > SCHED_WARN_ON(se->sched_delayed);
>
> Perhaps also squash in:
Yes, I'll add it in v3
Thanks
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 3356315d7e64..321e3f9e2ac4 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5476,6 +5476,7 @@ static bool
> dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> {
> bool sleep = flags & DEQUEUE_SLEEP;
> + int action = UPDATE_TG;
>
> update_curr(cfs_rq);
>
> @@ -5502,7 +5503,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> }
> }
>
> - int action = UPDATE_TG;
> if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
> action |= DO_DETACH;
>
> --
>
> while at it :)
>
> --
> Thanks and Regards,
> Prateek
>
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-11-29 16:17 [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Vincent Guittot
` (9 preceding siblings ...)
2024-11-29 16:17 ` [PATCH 10/10 v2] sched/fair: Fix variable declaration position Vincent Guittot
@ 2024-12-01 13:30 ` Mike Galbraith
2024-12-02 9:17 ` Vincent Guittot
10 siblings, 1 reply; 26+ messages in thread
From: Mike Galbraith @ 2024-12-01 13:30 UTC (permalink / raw)
To: Vincent Guittot, mingo, peterz, juri.lelli, dietmar.eggemann,
rostedt, bsegall, mgorman, vschneid, linux-kernel
Cc: kprateek.nayak, pauld, luis.machado
Greetings,
On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
> Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
> lag has elapsed. As a result, it stays also visible in the statistics that
> are used to balance the system and in particular the field h_nr_running.
>
> This serie fixes those metrics by creating a new h_nr_queued that tracks
> all queued tasks. It renames h_nr_running into h_nr_runnable and restores
> the behavior of h_nr_running i.e. tracking the number of fair tasks that
> want to run.
>
> h_nr_runnable is used in several places to make decision on load balance:
> - PELT runnable_avg
> - deciding if a group is overloaded or has spare capacity
> - numa stats
> - reduced capacity management
> - load balance between groups
I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
runnable seems to have an off-by-one issue, causing it to wander ever
further south.
patches 1-3 applied.
.h_nr_runnable : -3046
.runnable_avg : 450189777126
full set applied.
.h_nr_runnable : -5707
.runnable_avg : 4391793519526
-Mike
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-01 13:30 ` [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue Mike Galbraith
@ 2024-12-02 9:17 ` Vincent Guittot
2024-12-02 9:23 ` Luis Machado
` (2 more replies)
0 siblings, 3 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 9:17 UTC (permalink / raw)
To: Mike Galbraith
Cc: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel, kprateek.nayak, pauld,
luis.machado
On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
>
> Greetings,
>
> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
> > Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
> > lag has elapsed. As a result, it stays also visible in the statistics that
> > are used to balance the system and in particular the field h_nr_running.
> >
> > This serie fixes those metrics by creating a new h_nr_queued that tracks
> > all queued tasks. It renames h_nr_running into h_nr_runnable and restores
> > the behavior of h_nr_running i.e. tracking the number of fair tasks that
> > want to run.
> >
> > h_nr_runnable is used in several places to make decision on load balance:
> > - PELT runnable_avg
> > - deciding if a group is overloaded or has spare capacity
> > - numa stats
> > - reduced capacity management
> > - load balance between groups
>
> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
> runnable seems to have an off-by-one issue, causing it to wander ever
> further south.
>
> patches 1-3 applied.
> .h_nr_runnable : -3046
> .runnable_avg : 450189777126
Yeah, I messed up something around finish_delayed_dequeue_entity().
I'm going to prepare a v3
>
> full set applied.
> .h_nr_runnable : -5707
> .runnable_avg : 4391793519526
>
> -Mike
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 9:17 ` Vincent Guittot
@ 2024-12-02 9:23 ` Luis Machado
2024-12-02 9:59 ` Vincent Guittot
2024-12-02 9:58 ` K Prateek Nayak
2024-12-02 18:34 ` Mike Galbraith
2 siblings, 1 reply; 26+ messages in thread
From: Luis Machado @ 2024-12-02 9:23 UTC (permalink / raw)
To: Vincent Guittot, Mike Galbraith
Cc: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel, kprateek.nayak, pauld
On 12/2/24 09:17, Vincent Guittot wrote:
> On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
>>
>> Greetings,
>>
>> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
>>> Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
>>> lag has elapsed. As a result, it stays also visible in the statistics that
>>> are used to balance the system and in particular the field h_nr_running.
>>>
>>> This serie fixes those metrics by creating a new h_nr_queued that tracks
>>> all queued tasks. It renames h_nr_running into h_nr_runnable and restores
>>> the behavior of h_nr_running i.e. tracking the number of fair tasks that
>>> want to run.
>>>
>>> h_nr_runnable is used in several places to make decision on load balance:
>>> - PELT runnable_avg
>>> - deciding if a group is overloaded or has spare capacity
>>> - numa stats
>>> - reduced capacity management
>>> - load balance between groups
>>
>> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
>> runnable seems to have an off-by-one issue, causing it to wander ever
>> further south.
>>
>> patches 1-3 applied.
>> .h_nr_runnable : -3046
>> .runnable_avg : 450189777126
>
> Yeah, I messed up something around finish_delayed_dequeue_entity().
> I'm' going to prepare a v3>
Maybe something similar to what I ran into here?
https://lore.kernel.org/lkml/6df12fde-1e0d-445f-8f8a-736d11f9ee41@arm.com/
>>
>> full set applied.
>> .h_nr_runnable : -5707
>> .runnable_avg : 4391793519526
>>
>> -Mike
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 9:23 ` Luis Machado
@ 2024-12-02 9:59 ` Vincent Guittot
2024-12-02 13:52 ` Dietmar Eggemann
0 siblings, 1 reply; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 9:59 UTC (permalink / raw)
To: Luis Machado
Cc: Mike Galbraith, mingo, peterz, juri.lelli, dietmar.eggemann,
rostedt, bsegall, mgorman, vschneid, linux-kernel, kprateek.nayak,
pauld
On Mon, 2 Dec 2024 at 10:23, Luis Machado <luis.machado@arm.com> wrote:
>
> On 12/2/24 09:17, Vincent Guittot wrote:
> > On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
> >>
> >> Greetings,
> >>
> >> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
> >>> Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
> >>> lag has elapsed. As a result, it stays also visible in the statistics that
> >>> are used to balance the system and in particular the field h_nr_running.
> >>>
> >>> This serie fixes those metrics by creating a new h_nr_queued that tracks
> >>> all queued tasks. It renames h_nr_running into h_nr_runnable and restores
> >>> the behavior of h_nr_running i.e. tracking the number of fair tasks that
> >>> want to run.
> >>>
> >>> h_nr_runnable is used in several places to make decision on load balance:
> >>> - PELT runnable_avg
> >>> - deciding if a group is overloaded or has spare capacity
> >>> - numa stats
> >>> - reduced capacity management
> >>> - load balance between groups
> >>
> >> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
> >> runnable seems to have an off-by-one issue, causing it to wander ever
> >> further south.
> >>
> >> patches 1-3 applied.
> >> .h_nr_runnable : -3046
> >> .runnable_avg : 450189777126
> >
> > Yeah, I messed up something around finish_delayed_dequeue_entity().
> > I'm' going to prepare a v3>
>
> Maybe something similar to what I ran into here?
>
> https://lore.kernel.org/lkml/6df12fde-1e0d-445f-8f8a-736d11f9ee41@arm.com/
I'm going to have a look
>
> >>
> >> full set applied.
> >> .h_nr_runnable : -5707
> >> .runnable_avg : 4391793519526
> >>
> >> -Mike
>
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 9:59 ` Vincent Guittot
@ 2024-12-02 13:52 ` Dietmar Eggemann
0 siblings, 0 replies; 26+ messages in thread
From: Dietmar Eggemann @ 2024-12-02 13:52 UTC (permalink / raw)
To: Vincent Guittot, Luis Machado
Cc: Mike Galbraith, mingo, peterz, juri.lelli, rostedt, bsegall,
mgorman, vschneid, linux-kernel, kprateek.nayak, pauld
On 02/12/2024 10:59, Vincent Guittot wrote:
> On Mon, 2 Dec 2024 at 10:23, Luis Machado <luis.machado@arm.com> wrote:
>>
>> On 12/2/24 09:17, Vincent Guittot wrote:
>>> On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
>>>>
>>>> Greetings,
>>>>
>>>> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
[...]
>>>>> h_nr_runnable is used in several places to make decision on load balance:
>>>>> - PELT runnable_avg
>>>>> - deciding if a group is overloaded or has spare capacity
>>>>> - numa stats
>>>>> - reduced capacity management
>>>>> - load balance between groups
>>>>
>>>> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
>>>> runnable seems to have an off-by-one issue, causing it to wander ever
>>>> further south.
>>>>
>>>> patches 1-3 applied.
>>>> .h_nr_runnable : -3046
>>>> .runnable_avg : 450189777126
>>>
>>> Yeah, I messed up something around finish_delayed_dequeue_entity().
>>> I'm' going to prepare a v3>
>>
>> Maybe something similar to what I ran into here?
>>
>> https://lore.kernel.org/lkml/6df12fde-1e0d-445f-8f8a-736d11f9ee41@arm.com/
>
> I'm going to have a look
Looks like this is not an issue anymore since commit 98442f0ccd82
("sched: Fix delayed_dequeue vs switched_from_fair()") removed
finish_delayed_dequeue_entity() from switched_from_fair() in the meantime.
[...]
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 9:17 ` Vincent Guittot
2024-12-02 9:23 ` Luis Machado
@ 2024-12-02 9:58 ` K Prateek Nayak
2024-12-02 11:42 ` Vincent Guittot
2024-12-02 18:34 ` Mike Galbraith
2 siblings, 1 reply; 26+ messages in thread
From: K Prateek Nayak @ 2024-12-02 9:58 UTC (permalink / raw)
To: Vincent Guittot, Mike Galbraith
Cc: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel, pauld, luis.machado
Hello Vincent, Mike,
On 12/2/2024 2:47 PM, Vincent Guittot wrote:
> On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
>>
>> Greetings,
>>
>> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
>>> Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
>>> lag has elapsed. As a result, it stays also visible in the statistics that
>>> are used to balance the system and in particular the field h_nr_running.
>>>
>>> This serie fixes those metrics by creating a new h_nr_queued that tracks
>>> all queued tasks. It renames h_nr_running into h_nr_runnable and restores
>>> the behavior of h_nr_running i.e. tracking the number of fair tasks that
>>> want to run.
>>>
>>> h_nr_runnable is used in several places to make decision on load balance:
>>> - PELT runnable_avg
>>> - deciding if a group is overloaded or has spare capacity
>>> - numa stats
>>> - reduced capacity management
>>> - load balance between groups
>>
>> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
>> runnable seems to have an off-by-one issue, causing it to wander ever
>> further south.
>>
>> patches 1-3 applied.
>> .h_nr_runnable : -3046
>> .runnable_avg : 450189777126
>
> Yeah, I messed up something around finish_delayed_dequeue_entity().
> I'm' going to prepare a v3
I was looking into this and have the below diff, which seems to
solve the post-boot negative values of h_nr_runnable on my setup; it is
only lightly tested so far:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 87552870958c..423981e65aba 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5464,6 +5464,10 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void set_delayed(struct sched_entity *se)
{
se->sched_delayed = 1;
+
+ if (!entity_is_task(se))
+ return;
+
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -5476,6 +5480,10 @@ static void set_delayed(struct sched_entity *se)
static void clear_delayed(struct sched_entity *se)
{
se->sched_delayed = 0;
+
+ if (!entity_is_task(se))
+ return;
+
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -6977,7 +6985,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int h_nr_idle = task_has_idle_policy(p);
- int h_nr_runnable = 0;
+ int h_nr_runnable = 1;
int task_new = !(flags & ENQUEUE_WAKEUP);
int rq_h_nr_queued = rq->cfs.h_nr_queued;
u64 slice = 0;
@@ -7124,8 +7132,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
p = task_of(se);
h_nr_queued = 1;
h_nr_idle = task_has_idle_policy(p);
- if (!task_sleep && !task_delayed)
- h_nr_runnable = !se->sched_delayed;
+ h_nr_runnable = !se->sched_delayed;
} else {
cfs_rq = group_cfs_rq(se);
slice = cfs_rq_min_slice(cfs_rq);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ab911d1335ba..f4ef5aaa4674 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -457,6 +457,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct t
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
+ SCHED_WARN_ON(rq->cfs.h_nr_runnable);
update_idle_core(rq);
scx_update_idle(rq, true);
schedstat_inc(rq->sched_goidle);
--
I'm not sure if the change in dequeue_entities() is completely necessary;
I added it after seeing the (DEQUEUE_SLEEP | DEQUEUE_DELAYED) in
throttle_cfs_rq(), but there the entity cannot possibly be a task, so
perhaps that part is unnecessary ¯\_(ツ)_/¯
Still testing! Will keep an eye out for v3.
>
>>
>> full set applied.
>> .h_nr_runnable : -5707
>> .runnable_avg : 4391793519526
>>
>> -Mike
--
Thanks and Regards,
Prateek
^ permalink raw reply related [flat|nested] 26+ messages in thread* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 9:58 ` K Prateek Nayak
@ 2024-12-02 11:42 ` Vincent Guittot
2024-12-02 14:24 ` Vincent Guittot
0 siblings, 1 reply; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 11:42 UTC (permalink / raw)
To: K Prateek Nayak
Cc: Mike Galbraith, mingo, peterz, juri.lelli, dietmar.eggemann,
rostedt, bsegall, mgorman, vschneid, linux-kernel, pauld,
luis.machado
On Mon, 2 Dec 2024 at 10:58, K Prateek Nayak <kprateek.nayak@amd.com> wrote:
>
> Hello Vincent, Mike,
>
> On 12/2/2024 2:47 PM, Vincent Guittot wrote:
> > On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
> >>
> >> Greetings,
> >>
> >> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
> >>> Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
> >>> lag has elapsed. As a result, it stays also visible in the statistics that
> >>> are used to balance the system and in particular the field h_nr_running.
> >>>
> >>> This serie fixes those metrics by creating a new h_nr_queued that tracks
> >>> all queued tasks. It renames h_nr_running into h_nr_runnable and restores
> >>> the behavior of h_nr_running i.e. tracking the number of fair tasks that
> >>> want to run.
> >>>
> >>> h_nr_runnable is used in several places to make decision on load balance:
> >>> - PELT runnable_avg
> >>> - deciding if a group is overloaded or has spare capacity
> >>> - numa stats
> >>> - reduced capacity management
> >>> - load balance between groups
> >>
> >> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
> >> runnable seems to have an off-by-one issue, causing it to wander ever
> >> further south.
> >>
> >> patches 1-3 applied.
> >> .h_nr_runnable : -3046
> >> .runnable_avg : 450189777126
> >
> > Yeah, I messed up something around finish_delayed_dequeue_entity().
> > I'm' going to prepare a v3
>
> I was looking into this and I have the below diff so far that seems to
> solve the post boot negative values of h_nr_runnable on my setup; it is
> only lightly tested so far:
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 87552870958c..423981e65aba 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5464,6 +5464,10 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
> static void set_delayed(struct sched_entity *se)
> {
> se->sched_delayed = 1;
> +
> + if (!entity_is_task(se))
> + return;
> +
> for_each_sched_entity(se) {
> struct cfs_rq *cfs_rq = cfs_rq_of(se);
>
> @@ -5476,6 +5480,10 @@ static void set_delayed(struct sched_entity *se)
> static void clear_delayed(struct sched_entity *se)
> {
> se->sched_delayed = 0;
> +
> + if (!entity_is_task(se))
> + return;
> +
> for_each_sched_entity(se) {
> struct cfs_rq *cfs_rq = cfs_rq_of(se);
>
> @@ -6977,7 +6985,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
> struct cfs_rq *cfs_rq;
> struct sched_entity *se = &p->se;
> int h_nr_idle = task_has_idle_policy(p);
> - int h_nr_runnable = 0;
> + int h_nr_runnable = 1;
I missed inverting the default value when moving from h_nr_delayed to h_nr_runnable
> int task_new = !(flags & ENQUEUE_WAKEUP);
> int rq_h_nr_queued = rq->cfs.h_nr_queued;
> u64 slice = 0;
> @@ -7124,8 +7132,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
> p = task_of(se);
> h_nr_queued = 1;
> h_nr_idle = task_has_idle_policy(p);
> - if (!task_sleep && !task_delayed)
> - h_nr_runnable = !se->sched_delayed;
> + h_nr_runnable = !se->sched_delayed;
> } else {
> cfs_rq = group_cfs_rq(se);
> slice = cfs_rq_min_slice(cfs_rq);
> diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
> index ab911d1335ba..f4ef5aaa4674 100644
> --- a/kernel/sched/idle.c
> +++ b/kernel/sched/idle.c
> @@ -457,6 +457,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct t
>
> static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
> {
> + SCHED_WARN_ON(rq->cfs.h_nr_runnable);
> update_idle_core(rq);
> scx_update_idle(rq, true);
> schedstat_inc(rq->sched_goidle);
> --
>
> I'm not sure if the change in dequeue_entities() is completely necessary
> but I added it after seeing the (DEQUEUE_SLEEP | DEQUEUE_DELAYED) in
> throttle_cfs_rq() but there the entity cannot possibly be a task so
> perhaps that part is unnecessary ¯\_(ツ)_/¯
>
> Still testing! Will keep an eye out for v3.
>
> >
> >>
> >> full set applied.
> >> .h_nr_runnable : -5707
> >> .runnable_avg : 4391793519526
> >>
> >> -Mike
>
> --
> Thanks and Regards,
> Prateek
>
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 11:42 ` Vincent Guittot
@ 2024-12-02 14:24 ` Vincent Guittot
0 siblings, 0 replies; 26+ messages in thread
From: Vincent Guittot @ 2024-12-02 14:24 UTC (permalink / raw)
To: K Prateek Nayak
Cc: Mike Galbraith, mingo, peterz, juri.lelli, dietmar.eggemann,
rostedt, bsegall, mgorman, vschneid, linux-kernel, pauld,
luis.machado
On Mon, 2 Dec 2024 at 12:42, Vincent Guittot <vincent.guittot@linaro.org> wrote:
>
> On Mon, 2 Dec 2024 at 10:58, K Prateek Nayak <kprateek.nayak@amd.com> wrote:
> >
> > Hello Vincent, Mike,
> >
> > On 12/2/2024 2:47 PM, Vincent Guittot wrote:
> > > On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
> > >>
> > >> Greetings,
> > >>
> > >> On Fri, 2024-11-29 at 17:17 +0100, Vincent Guittot wrote:
> > >>> Delayed dequeued feature keeps a sleeping sched_entitiy enqueued until its
> > >>> lag has elapsed. As a result, it stays also visible in the statistics that
> > >>> are used to balance the system and in particular the field h_nr_running.
> > >>>
> > >>> This serie fixes those metrics by creating a new h_nr_queued that tracks
> > >>> all queued tasks. It renames h_nr_running into h_nr_runnable and restores
> > >>> the behavior of h_nr_running i.e. tracking the number of fair tasks that
> > >>> want to run.
> > >>>
> > >>> h_nr_runnable is used in several places to make decision on load balance:
> > >>> - PELT runnable_avg
> > >>> - deciding if a group is overloaded or has spare capacity
> > >>> - numa stats
> > >>> - reduced capacity management
> > >>> - load balance between groups
> > >>
> > >> I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
> > >> runnable seems to have an off-by-one issue, causing it to wander ever
> > >> further south.
> > >>
> > >> patches 1-3 applied.
> > >> .h_nr_runnable : -3046
> > >> .runnable_avg : 450189777126
> > >
> > > Yeah, I messed up something around finish_delayed_dequeue_entity().
> > > I'm' going to prepare a v3
> >
> > I was looking into this and I have the below diff so far that seems to
> > solve the post boot negative values of h_nr_runnable on my setup; it is
> > only lightly tested so far:
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 87552870958c..423981e65aba 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -5464,6 +5464,10 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
> > static void set_delayed(struct sched_entity *se)
> > {
> > se->sched_delayed = 1;
> > +
> > + if (!entity_is_task(se))
> > + return;
> > +
> > for_each_sched_entity(se) {
> > struct cfs_rq *cfs_rq = cfs_rq_of(se);
> >
> > @@ -5476,6 +5480,10 @@ static void set_delayed(struct sched_entity *se)
> > static void clear_delayed(struct sched_entity *se)
> > {
> > se->sched_delayed = 0;
> > +
> > + if (!entity_is_task(se))
> > + return;
> > +
> > for_each_sched_entity(se) {
> > struct cfs_rq *cfs_rq = cfs_rq_of(se);
> >
> > @@ -6977,7 +6985,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
> > struct cfs_rq *cfs_rq;
> > struct sched_entity *se = &p->se;
> > int h_nr_idle = task_has_idle_policy(p);
> > - int h_nr_runnable = 0;
> > + int h_nr_runnable = 1;
>
> I miss to invert default value when moving from h_nr_delayed to h_nr_runnable
>
> > int task_new = !(flags & ENQUEUE_WAKEUP);
> > int rq_h_nr_queued = rq->cfs.h_nr_queued;
> > u64 slice = 0;
> > @@ -7124,8 +7132,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
> > p = task_of(se);
> > h_nr_queued = 1;
> > h_nr_idle = task_has_idle_policy(p);
> > - if (!task_sleep && !task_delayed)
> > - h_nr_runnable = !se->sched_delayed;
> > + h_nr_runnable = !se->sched_delayed;
And I screwed up h_nr_runnable here as well
> > } else {
> > cfs_rq = group_cfs_rq(se);
> > slice = cfs_rq_min_slice(cfs_rq);
> > diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
> > index ab911d1335ba..f4ef5aaa4674 100644
> > --- a/kernel/sched/idle.c
> > +++ b/kernel/sched/idle.c
> > @@ -457,6 +457,7 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct t
> >
> > static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
> > {
> > + SCHED_WARN_ON(rq->cfs.h_nr_runnable);
> > update_idle_core(rq);
> > scx_update_idle(rq, true);
> > schedstat_inc(rq->sched_goidle);
> > --
> >
> > I'm not sure if the change in dequeue_entities() is completely necessary
> > but I added it after seeing the (DEQUEUE_SLEEP | DEQUEUE_DELAYED) in
> > throttle_cfs_rq() but there the entity cannot possibly be a task so
> > perhaps that part is unnecessary ¯\_(ツ)_/¯
> >
> > Still testing! Will keep an eye out for v3.
> >
> > >
> > >>
> > >> full set applied.
> > >> .h_nr_runnable : -5707
> > >> .runnable_avg : 4391793519526
> > >>
> > >> -Mike
> >
> > --
> > Thanks and Regards,
> > Prateek
> >
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 0/10 v2] sched/fair: Fix statistics with delayed dequeue
2024-12-02 9:17 ` Vincent Guittot
2024-12-02 9:23 ` Luis Machado
2024-12-02 9:58 ` K Prateek Nayak
@ 2024-12-02 18:34 ` Mike Galbraith
2 siblings, 0 replies; 26+ messages in thread
From: Mike Galbraith @ 2024-12-02 18:34 UTC (permalink / raw)
To: Vincent Guittot
Cc: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
mgorman, vschneid, linux-kernel, kprateek.nayak, pauld,
luis.machado
On Mon, 2024-12-02 at 10:17 +0100, Vincent Guittot wrote:
> On Sun, 1 Dec 2024 at 14:30, Mike Galbraith <efault@gmx.de> wrote:
> >
> > I took the series for a spin in tip v6.12-10334-gb1b238fba309, but
> > runnable seems to have an off-by-one issue, causing it to wander ever
> > further south.
> >
> > patches 1-3 applied.
> > .h_nr_runnable : -3046
> > .runnable_avg : 450189777126
>
> Yeah, I messed up something around finish_delayed_dequeue_entity().
> I'm' going to prepare a v3
v3 is all better with my light config. I'll plug it into an rt tree
with an enterprise config and give it some exercise.
-Mike
^ permalink raw reply [flat|nested] 26+ messages in thread