From: Shrikanth Hegde <sshegde@linux.ibm.com>
To: mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com,
vincent.guittot@linaro.org, tglx@linutronix.de,
yury.norov@gmail.com, maddy@linux.ibm.com
Cc: sshegde@linux.ibm.com, vschneid@redhat.com,
dietmar.eggemann@arm.com, rostedt@goodmis.org,
kprateek.nayak@amd.com, huschle@linux.ibm.com,
srikar@linux.ibm.com, linux-kernel@vger.kernel.org,
christophe.leroy@csgroup.eu, linuxppc-dev@lists.ozlabs.org,
gregkh@linuxfoundation.org
Subject: [RFC v2 7/9] sched: Add static key check for cpu_avoid
Date: Thu, 26 Jun 2025 00:41:06 +0530
Message-ID: <20250625191108.1646208-8-sshegde@linux.ibm.com>
In-Reply-To: <20250625191108.1646208-1-sshegde@linux.ibm.com>
Checking whether a CPU is marked as avoid can add a slight overhead, so
it should be done only when necessary.

Add a static key check, which makes the check almost a NOP when the key
is false. The architecture needs to enable the key when it decides to;
refer to the debug patch for an example.
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
This method avoids additional #ifdefs, so it is kept this way instead of
guarding with CONFIG_PARAVIRT.

Added a helper function for cpu_avoid(), since including sched.h in
cpumask.h fails.
kernel/sched/core.c | 8 ++++----
kernel/sched/fair.c | 5 +++--
kernel/sched/rt.c | 8 ++++----
kernel/sched/sched.h | 9 +++++++++
4 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aea4232e3ec4..51426b17ef55 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -148,9 +148,9 @@ __read_mostly int sysctl_resched_latency_warn_once = 1;
* Limited because this is done with IRQs disabled.
*/
__read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
-
__read_mostly int scheduler_running;
+DEFINE_STATIC_KEY_FALSE(paravirt_cpu_avoid_enabled);
#ifdef CONFIG_SCHED_CORE
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
@@ -2438,7 +2438,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
return false;
/* CPU marked as avoid shouldn't be chosen to run any task */
- if (cpu_avoid(cpu))
+ if (cpu_avoid_check(cpu))
return false;
/* But are allowed during online. */
@@ -5578,7 +5578,7 @@ void sched_tick(void)
sched_clock_tick();
/* Push the current task out if the CPU is marked as avoid */
- if (cpu_avoid(cpu))
+ if (cpu_avoid_check(cpu))
push_current_task(rq);
rq_lock(rq, &rf);
@@ -8048,7 +8048,7 @@ void push_current_task(struct rq *rq)
unsigned long flags;
/* idle task can't be pushed out */
- if (rq->curr == rq->idle || !cpu_avoid(rq->cpu))
+ if (rq->curr == rq->idle || !cpu_avoid_check(rq->cpu))
return;
/* Do this only for SCHED_NORMAL and RT for now */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 406288aef535..21370f76d61b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8547,7 +8547,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
rcu_read_unlock();
/* Don't select a CPU marked as avoid for wakeup */
- if (cpu_avoid(new_cpu))
+ if (cpu_avoid_check(new_cpu))
return cpu;
else
return new_cpu;
@@ -11668,7 +11668,8 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
/* Don't spread load into CPUs marked as avoid */
- cpumask_andnot(cpus, cpus, cpu_avoid_mask);
+ if (static_branch_unlikely(&paravirt_cpu_avoid_enabled))
+ cpumask_andnot(cpus, cpus, cpu_avoid_mask);
schedstat_inc(sd->lb_count[idle]);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index fd9df6f46135..0ab3fdf7a637 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1549,7 +1549,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
if (!test && target != -1 && !rt_task_fits_capacity(p, target))
goto out_unlock;
- if (cpu_avoid(target))
+ if (cpu_avoid_check(target))
goto out_unlock;
/*
* Don't bother moving it if the destination CPU is
@@ -1873,7 +1873,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
cpu = find_lowest_rq(task);
- if ((cpu == -1) || (cpu == rq->cpu) || cpu_avoid(cpu))
+ if ((cpu == -1) || (cpu == rq->cpu) || cpu_avoid_check(cpu))
break;
lowest_rq = cpu_rq(cpu);
@@ -1971,7 +1971,7 @@ static int push_rt_task(struct rq *rq, bool pull)
return 0;
cpu = find_lowest_rq(rq->curr);
- if (cpu == -1 || cpu == rq->cpu || cpu_avoid(cpu))
+ if (cpu == -1 || cpu == rq->cpu || cpu_avoid_check(cpu))
return 0;
/*
@@ -2234,7 +2234,7 @@ static void pull_rt_task(struct rq *this_rq)
if (likely(!rt_overload_count))
return;
- if (cpu_avoid(this_rq->cpu))
+ if (cpu_avoid_check(this_rq->cpu))
return;
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b9614873762e..707fdfa46772 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1710,6 +1710,15 @@ struct rq_flags {
extern struct balance_callback balance_push_callback;
void push_current_task(struct rq *rq);
+DECLARE_STATIC_KEY_FALSE(paravirt_cpu_avoid_enabled);
+
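+/* Returns true only when the arch has enabled the key and @cpu is marked as avoid */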
+static inline bool cpu_avoid_check(int cpu)
+{
+ if (static_branch_unlikely(&paravirt_cpu_avoid_enabled))
+ return cpu_avoid(cpu);
+
+ return false;
+}
#ifdef CONFIG_SCHED_CLASS_EXT
extern const struct sched_class ext_sched_class;
--
2.43.0