From: Peter Zijlstra <peterz@infradead.org>
To: mingo@kernel.org, vincent.guittot@linaro.org
Cc: linux-kernel@vger.kernel.org, peterz@infradead.org,
juri.lelli@redhat.com, dietmar.eggemann@arm.com,
rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
bristot@redhat.com, corbet@lwn.net, qyousef@layalina.io,
chris.hyser@oracle.com, patrick.bellasi@matbug.net,
pjt@google.com, pavel@ucw.cz, qperret@google.com,
tim.c.chen@linux.intel.com, joshdon@google.com, timj@gnu.org,
kprateek.nayak@amd.com, yu.c.chen@intel.com,
youssefesmat@chromium.org, joel@joelfernandes.org, efault@gmx.de
Subject: [PATCH 13/17] sched: Merge latency_offset into slice
Date: Tue, 28 Mar 2023 11:26:35 +0200 [thread overview]
Message-ID: <20230328110354.494493579@infradead.org> (raw)
In-Reply-To: <20230328092622.062917921@infradead.org> (raw)
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/sched.h | 2 --
kernel/sched/core.c | 17 +++++++----------
kernel/sched/fair.c | 29 ++++++++++++-----------------
kernel/sched/sched.h | 2 +-
4 files changed, 20 insertions(+), 30 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -573,8 +573,6 @@ struct sched_entity {
/* cached value of my_q->h_nr_running */
unsigned long runnable_weight;
#endif
- /* preemption offset in ns */
- long latency_offset;
#ifdef CONFIG_SMP
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1285,9 +1285,10 @@ static void set_load_weight(struct task_
}
}
-static void set_latency_offset(struct task_struct *p)
+static inline void set_latency_prio(struct task_struct *p, int prio)
{
- p->se.latency_offset = calc_latency_offset(p->latency_prio - MAX_RT_PRIO);
+ p->latency_prio = prio;
+ set_latency_fair(&p->se, prio - MAX_RT_PRIO);
}
#ifdef CONFIG_UCLAMP_TASK
@@ -4442,7 +4443,7 @@ static void __sched_fork(unsigned long c
p->se.vlag = 0;
INIT_LIST_HEAD(&p->se.group_node);
- set_latency_offset(p);
+ set_latency_prio(p, p->latency_prio);
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = NULL;
@@ -4694,9 +4695,7 @@ int sched_fork(unsigned long clone_flags
p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
-
- p->latency_prio = NICE_TO_PRIO(0);
- set_latency_offset(p);
+ set_latency_prio(p, NICE_TO_PRIO(0));
/*
* We don't need the reset flag anymore after the fork. It has
@@ -7469,10 +7468,8 @@ static void __setscheduler_params(struct
static void __setscheduler_latency(struct task_struct *p,
const struct sched_attr *attr)
{
- if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
- p->latency_prio = NICE_TO_PRIO(attr->sched_latency_nice);
- set_latency_offset(p);
- }
+ if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE)
+ set_latency_prio(p, NICE_TO_PRIO(attr->sched_latency_nice));
}
/*
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -919,12 +919,19 @@ int sched_update_scaling(void)
}
#endif
-long calc_latency_offset(int prio)
+void set_latency_fair(struct sched_entity *se, int prio)
{
u32 weight = sched_prio_to_weight[prio];
u64 base = sysctl_sched_base_slice;
- return div_u64(base << SCHED_FIXEDPOINT_SHIFT, weight);
+ /*
+ * For EEVDF the virtual time slope is determined by w_i (iow.
+ * nice) while the request time r_i is determined by
+ * latency-nice.
+ *
+ * Smaller request gets better latency.
+ */
+ se->slice = div_u64(base << SCHED_FIXEDPOINT_SHIFT, weight);
}
/*
@@ -937,13 +944,6 @@ static void update_deadline(struct cfs_r
return;
/*
- * For EEVDF the virtual time slope is determined by w_i (iow.
- * nice) while the request time r_i is determined by
- * latency-nice.
- */
- se->slice = se->latency_offset;
-
- /*
* EEVDF: vd_i = ve_i + r_i / w_i
*/
se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
@@ -12231,7 +12231,7 @@ void init_tg_cfs_entry(struct task_group
se->my_q = cfs_rq;
- se->latency_offset = calc_latency_offset(tg->latency_prio - MAX_RT_PRIO);
+ set_latency_fair(se, tg->latency_prio - MAX_RT_PRIO);
/* guarantee group entities always have weight */
update_load_set(&se->load, NICE_0_LOAD);
@@ -12365,7 +12365,6 @@ int sched_group_set_idle(struct task_gro
int sched_group_set_latency(struct task_group *tg, int prio)
{
- long latency_offset;
int i;
if (tg == &root_task_group)
@@ -12379,13 +12378,9 @@ int sched_group_set_latency(struct task_
}
tg->latency_prio = prio;
- latency_offset = calc_latency_offset(prio - MAX_RT_PRIO);
- for_each_possible_cpu(i) {
- struct sched_entity *se = tg->se[i];
-
- WRITE_ONCE(se->latency_offset, latency_offset);
- }
+ for_each_possible_cpu(i)
+ set_latency_fair(tg->se[i], prio - MAX_RT_PRIO);
mutex_unlock(&shares_mutex);
return 0;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2477,7 +2477,7 @@ extern unsigned int sysctl_numa_balancin
extern unsigned int sysctl_numa_balancing_hot_threshold;
#endif
-extern long calc_latency_offset(int prio);
+extern void set_latency_fair(struct sched_entity *se, int prio);
#ifdef CONFIG_SCHED_HRTICK
next prev parent reply other threads:[~2023-03-28 11:07 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-28 9:26 [PATCH 00/17] sched: EEVDF using latency-nice Peter Zijlstra
2023-03-28 9:26 ` [PATCH 01/17] sched: Introduce latency-nice as a per-task attribute Peter Zijlstra
2023-03-28 9:26 ` [PATCH 02/17] sched/fair: Add latency_offset Peter Zijlstra
2023-03-28 9:26 ` [PATCH 03/17] sched/fair: Add sched group latency support Peter Zijlstra
2023-03-28 9:26 ` [PATCH 04/17] sched/fair: Add avg_vruntime Peter Zijlstra
2023-03-28 23:57 ` Josh Don
2023-03-29 7:50 ` Peter Zijlstra
2023-04-05 19:13 ` Peter Zijlstra
2023-03-28 9:26 ` [PATCH 05/17] sched/fair: Remove START_DEBIT Peter Zijlstra
2023-03-28 9:26 ` [PATCH 06/17] sched/fair: Add lag based placement Peter Zijlstra
2023-04-03 9:18 ` Chen Yu
2023-04-05 9:47 ` Peter Zijlstra
2023-04-06 3:03 ` Chen Yu
2023-04-13 15:42 ` Chen Yu
2023-04-13 15:55 ` Chen Yu
2023-03-28 9:26 ` [PATCH 07/17] rbtree: Add rb_add_augmented_cached() helper Peter Zijlstra
2023-03-28 9:26 ` [PATCH 08/17] sched/fair: Implement an EEVDF like policy Peter Zijlstra
2023-03-29 1:26 ` Josh Don
2023-03-29 8:02 ` Peter Zijlstra
2023-03-29 8:06 ` Peter Zijlstra
2023-03-29 8:22 ` Peter Zijlstra
2023-03-29 18:48 ` Josh Don
2023-03-29 8:12 ` Peter Zijlstra
2023-03-29 18:54 ` Josh Don
2023-03-29 8:18 ` Peter Zijlstra
2023-03-29 14:35 ` Vincent Guittot
2023-03-30 8:01 ` Peter Zijlstra
2023-03-30 17:05 ` Vincent Guittot
2023-04-04 12:00 ` Peter Zijlstra
2023-03-28 9:26 ` [PATCH 09/17] sched: Commit to lag based placement Peter Zijlstra
2023-03-28 9:26 ` [PATCH 10/17] sched/smp: Use lag to simplify cross-runqueue placement Peter Zijlstra
2023-03-28 9:26 ` [PATCH 11/17] sched: Commit to EEVDF Peter Zijlstra
2023-03-28 9:26 ` [PATCH 12/17] sched/debug: Rename min_granularity to base_slice Peter Zijlstra
2023-03-28 9:26 ` Peter Zijlstra [this message]
2023-03-28 9:26 ` [PATCH 14/17] sched/eevdf: Better handle mixed slice length Peter Zijlstra
2023-03-31 15:26 ` Vincent Guittot
2023-04-04 9:29 ` Peter Zijlstra
2023-04-04 13:50 ` Joel Fernandes
2023-04-05 5:41 ` Mike Galbraith
2023-04-05 8:35 ` Peter Zijlstra
2023-04-05 20:05 ` Joel Fernandes
2023-04-14 11:18 ` Phil Auld
2023-04-16 5:10 ` Joel Fernandes
[not found] ` <20230401232355.336-1-hdanton@sina.com>
2023-04-02 2:40 ` Mike Galbraith
2023-03-28 9:26 ` [PATCH 15/17] [RFC] sched/eevdf: Sleeper bonus Peter Zijlstra
2023-03-29 9:10 ` Mike Galbraith
2023-03-28 9:26 ` [PATCH 16/17] [RFC] sched/eevdf: Minimal vavg option Peter Zijlstra
2023-03-28 9:26 ` [PATCH 17/17] [DEBUG] sched/eevdf: Debug / validation crud Peter Zijlstra
2023-04-03 7:42 ` [PATCH 00/17] sched: EEVDF using latency-nice Shrikanth Hegde
2023-04-10 3:13 ` David Vernet
2023-04-11 2:09 ` David Vernet
[not found] ` <20230410082307.1327-1-hdanton@sina.com>
2023-04-11 10:15 ` Mike Galbraith
[not found] ` <20230411133333.1790-1-hdanton@sina.com>
2023-04-11 14:56 ` Mike Galbraith
[not found] ` <20230412025042.1413-1-hdanton@sina.com>
2023-04-12 4:05 ` Mike Galbraith
2023-04-25 12:32 ` Phil Auld
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230328110354.494493579@infradead.org \
--to=peterz@infradead.org \
--cc=bristot@redhat.com \
--cc=bsegall@google.com \
--cc=chris.hyser@oracle.com \
--cc=corbet@lwn.net \
--cc=dietmar.eggemann@arm.com \
--cc=efault@gmx.de \
--cc=joel@joelfernandes.org \
--cc=joshdon@google.com \
--cc=juri.lelli@redhat.com \
--cc=kprateek.nayak@amd.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mgorman@suse.de \
--cc=mingo@kernel.org \
--cc=patrick.bellasi@matbug.net \
--cc=pavel@ucw.cz \
--cc=pjt@google.com \
--cc=qperret@google.com \
--cc=qyousef@layalina.io \
--cc=rostedt@goodmis.org \
--cc=tim.c.chen@linux.intel.com \
--cc=timj@gnu.org \
--cc=vincent.guittot@linaro.org \
--cc=youssefesmat@chromium.org \
--cc=yu.c.chen@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).