From: Andrea Righi <arighi@nvidia.com>
To: Tejun Heo <tj@kernel.org>, David Vernet <void@manifault.com>,
Changwoo Min <changwoo@igalia.com>,
John Stultz <jstultz@google.com>
Cc: Ingo Molnar <mingo@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Juri Lelli <juri.lelli@redhat.com>,
Vincent Guittot <vincent.guittot@linaro.org>,
Dietmar Eggemann <dietmar.eggemann@arm.com>,
Steven Rostedt <rostedt@goodmis.org>,
Ben Segall <bsegall@google.com>, Mel Gorman <mgorman@suse.de>,
Valentin Schneider <vschneid@redhat.com>,
K Prateek Nayak <kprateek.nayak@amd.com>,
Christian Loehle <christian.loehle@arm.com>,
Koba Ko <kobak@nvidia.com>,
Joel Fernandes <joelagnelf@nvidia.com>,
sched-ext@lists.linux.dev, linux-kernel@vger.kernel.org
Subject: [PATCH 03/10] sched/ext: Split curr|donor references properly
Date: Wed, 6 May 2026 19:45:43 +0200
Message-ID: <20260506174639.535232-4-arighi@nvidia.com>
In-Reply-To: <20260506174639.535232-1-arighi@nvidia.com>
From: John Stultz <jstultz@google.com>
With proxy-exec, we want to do the accounting against the donor task most
of the time. Without proxy-exec there should be no difference, since
rq->donor and rq->curr are the same.
So rework the logic to reference rq->donor where appropriate.
Also add donor info to scx_dump_state().
Since CONFIG_SCHED_PROXY_EXEC currently depends on
!CONFIG_SCHED_CLASS_EXT, this change should have no functional effect
(other than the extra donor line in the scx_dump_state() output), but it
is one step needed to eventually remove that constraint for proxy-exec.
Signed-off-by: John Stultz <jstultz@google.com>
---
kernel/sched/ext.c | 32 ++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7ac7d10a41bef..c410afd28fb6d 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1370,17 +1370,17 @@ static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
static void update_curr_scx(struct rq *rq)
{
- struct task_struct *curr = rq->curr;
+ struct task_struct *donor = rq->donor;
s64 delta_exec;
delta_exec = update_curr_common(rq);
if (unlikely(delta_exec <= 0))
return;
- if (curr->scx.slice != SCX_SLICE_INF) {
- curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
- if (!curr->scx.slice)
- touch_core_sched(rq, curr);
+ if (donor->scx.slice != SCX_SLICE_INF) {
+ donor->scx.slice -= min_t(u64, donor->scx.slice, delta_exec);
+ if (!donor->scx.slice)
+ touch_core_sched(rq, donor);
}
dl_server_update(&rq->ext_server, delta_exec);
@@ -1504,13 +1504,14 @@ static void local_dsq_post_enq(struct scx_sched *sch, struct scx_dispatch_q *dsq
if (rq->scx.flags & SCX_RQ_IN_BALANCE)
return;
- if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
- rq->curr->sched_class == &ext_sched_class) {
- rq->curr->scx.slice = 0;
+ if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->donor &&
+ rq->donor->sched_class == &ext_sched_class) {
+ rq->donor->scx.slice = 0;
preempt = true;
}
- if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
+ if (preempt || sched_class_above(&ext_sched_class,
+ rq->donor->sched_class))
resched_curr(rq);
}
@@ -2634,7 +2635,7 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
}
/* if the destination CPU is idle, wake it up */
- if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
+ if (sched_class_above(p->sched_class, dst_rq->donor->sched_class))
resched_curr(dst_rq);
}
@@ -3150,7 +3151,7 @@ static struct task_struct *first_local_task(struct rq *rq)
static struct task_struct *
do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
{
- struct task_struct *prev = rq->curr;
+ struct task_struct *prev = rq->donor;
bool keep_prev;
struct task_struct *p;
@@ -4323,7 +4324,7 @@ static void run_deferred(struct rq *rq)
#ifdef CONFIG_NO_HZ_FULL
bool scx_can_stop_tick(struct rq *rq)
{
- struct task_struct *p = rq->curr;
+ struct task_struct *p = rq->donor;
struct scx_sched *sch = scx_task_sched(p);
if (p->sched_class != &ext_sched_class)
@@ -6355,6 +6356,9 @@ static void scx_dump_cpu(struct scx_sched *sch, struct seq_buf *s,
dump_line(&ns, " curr=%s[%d] class=%ps",
rq->curr->comm, rq->curr->pid,
rq->curr->sched_class);
+ dump_line(&ns, " donor=%s[%d] class=%ps",
+ rq->donor->comm, rq->donor->pid,
+ rq->donor->sched_class);
if (!cpumask_empty(rq->scx.cpus_to_kick))
dump_line(&ns, " cpus_to_kick : %*pb",
cpumask_pr_args(rq->scx.cpus_to_kick));
@@ -7974,7 +7978,7 @@ static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
unsigned long flags;
raw_spin_rq_lock_irqsave(rq, flags);
- cur_class = rq->curr->sched_class;
+ cur_class = rq->donor->sched_class;
/*
* During CPU hotplug, a CPU may depend on kicking itself to make
@@ -7986,7 +7990,7 @@ static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
!sched_class_above(cur_class, &ext_sched_class)) {
if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
if (cur_class == &ext_sched_class)
- rq->curr->scx.slice = 0;
+ rq->donor->scx.slice = 0;
cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
}
--
2.54.0