From: David Vernet <void@manifault.com>
To: linux-kernel@vger.kernel.org
Cc: peterz@infradead.org, mingo@redhat.com, juri.lelli@redhat.com,
vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
bristot@redhat.com, vschneid@redhat.com, tj@kernel.org,
roman.gushchin@linux.dev, gautham.shenoy@amd.com,
kprateek.nayak@amd.com, aaron.lu@intel.com,
wuyun.abel@bytedance.com, kernel-team@meta.com
Subject: [PATCH v3 4/7] sched: Enable sched_feat callbacks on enable/disable
Date: Wed, 9 Aug 2023 17:12:15 -0500 [thread overview]
Message-ID: <20230809221218.163894-5-void@manifault.com> (raw)
In-Reply-To: <20230809221218.163894-1-void@manifault.com>
When a scheduler feature is enabled or disabled, the sched_feat_enable()
and sched_feat_disable() functions are invoked respectively for that
feature. For features that don't require resetting any state, this works
fine. However, there will be an upcoming feature called shared_runq
which needs to drain all tasks from a set of global shared runqueues in
order to prevent stale tasks from remaining in the queues after the
feature has been disabled.
This patch therefore defines a new SCHED_FEAT_CALLBACK macro which
allows scheduler features to specify a callback that is invoked
whenever the feature is enabled or disabled. The SCHED_FEAT macro
assumes a NULL callback.
Signed-off-by: David Vernet <void@manifault.com>
---
kernel/sched/core.c | 4 ++--
kernel/sched/debug.c | 18 ++++++++++++++----
kernel/sched/sched.h | 16 ++++++++++------
3 files changed, 26 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dd6412a49263..385c565da87f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -124,12 +124,12 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
* sysctl_sched_features, defined in sched.h, to allow constants propagation
* at compile time and compiler optimization based on features default.
*/
-#define SCHED_FEAT(name, enabled) \
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
0;
-#undef SCHED_FEAT
+#undef SCHED_FEAT_CALLBACK
/*
* Print a warning if need_resched is set for the given duration (if
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index aeeba46a096b..803dff75c56f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -44,14 +44,14 @@ static unsigned long nsec_low(unsigned long long nsec)
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
-#define SCHED_FEAT(name, enabled) \
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) \
#name ,
static const char * const sched_feat_names[] = {
#include "features.h"
};
-#undef SCHED_FEAT
+#undef SCHED_FEAT_CALLBACK
static int sched_feat_show(struct seq_file *m, void *v)
{
@@ -72,22 +72,32 @@ static int sched_feat_show(struct seq_file *m, void *v)
#define jump_label_key__true STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE
-#define SCHED_FEAT(name, enabled) \
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) \
jump_label_key__##enabled ,
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
-#undef SCHED_FEAT
+#undef SCHED_FEAT_CALLBACK
+
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) cb,
+static const sched_feat_change_f sched_feat_cbs[__SCHED_FEAT_NR] = {
+#include "features.h"
+};
+#undef SCHED_FEAT_CALLBACK
static void sched_feat_disable(int i)
{
+ if (sched_feat_cbs[i])
+ sched_feat_cbs[i](false);
static_key_disable_cpuslocked(&sched_feat_keys[i]);
}
static void sched_feat_enable(int i)
{
+ if (sched_feat_cbs[i])
+ sched_feat_cbs[i](true);
static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 88cca7cc00cf..2631da3c8a4d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2065,6 +2065,8 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif
}
+#define SCHED_FEAT(name, enabled) SCHED_FEAT_CALLBACK(name, enabled, NULL)
+
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
@@ -2074,7 +2076,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
# define const_debug const
#endif
-#define SCHED_FEAT(name, enabled) \
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) \
__SCHED_FEAT_##name ,
enum {
@@ -2082,7 +2084,7 @@ enum {
__SCHED_FEAT_NR,
};
-#undef SCHED_FEAT
+#undef SCHED_FEAT_CALLBACK
#ifdef CONFIG_SCHED_DEBUG
@@ -2093,14 +2095,14 @@ enum {
extern const_debug unsigned int sysctl_sched_features;
#ifdef CONFIG_JUMP_LABEL
-#define SCHED_FEAT(name, enabled) \
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
return static_key_##enabled(key); \
}
#include "features.h"
-#undef SCHED_FEAT
+#undef SCHED_FEAT_CALLBACK
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
@@ -2118,17 +2120,19 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
* constants propagation at compile time and compiler optimization based on
* features default.
*/
-#define SCHED_FEAT(name, enabled) \
+#define SCHED_FEAT_CALLBACK(name, enabled, cb) \
(1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
0;
-#undef SCHED_FEAT
+#undef SCHED_FEAT_CALLBACK
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG */
+typedef void (*sched_feat_change_f)(bool enabling);
+
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;
--
2.41.0
next prev parent reply other threads:[~2023-08-09 22:13 UTC|newest]
Thread overview: 52+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-09 22:12 [PATCH v3 0/7] sched: Implement shared runqueue in CFS David Vernet
2023-08-09 22:12 ` [PATCH v3 1/7] sched: Expose move_queued_task() from core.c David Vernet
2023-08-09 22:12 ` [PATCH v3 2/7] sched: Move is_cpu_allowed() into sched.h David Vernet
2023-08-09 22:12 ` [PATCH v3 3/7] sched: Check cpu_active() earlier in newidle_balance() David Vernet
2023-08-09 22:12 ` David Vernet [this message]
2023-08-09 22:12 ` [PATCH v3 5/7] sched/fair: Add SHARED_RUNQ sched feature and skeleton calls David Vernet
2023-08-09 22:12 ` [PATCH v3 6/7] sched: Implement shared runqueue in CFS David Vernet
2023-08-10 7:11 ` kernel test robot
2023-08-10 7:41 ` kernel test robot
2023-08-30 6:46 ` K Prateek Nayak
2023-08-31 1:34 ` David Vernet
2023-08-31 3:47 ` K Prateek Nayak
2023-08-09 22:12 ` [PATCH v3 7/7] sched: Shard per-LLC shared runqueues David Vernet
2023-08-09 23:46 ` kernel test robot
2023-08-10 0:12 ` David Vernet
2023-08-10 7:11 ` kernel test robot
2023-08-30 6:17 ` Chen Yu
2023-08-31 0:01 ` David Vernet
2023-08-31 10:45 ` Chen Yu
2023-08-31 19:14 ` David Vernet
2023-09-23 6:35 ` Chen Yu
2023-08-17 8:42 ` [PATCH v3 0/7] sched: Implement shared runqueue in CFS Gautham R. Shenoy
2023-08-18 5:03 ` David Vernet
2023-08-18 8:49 ` Gautham R. Shenoy
2023-08-24 11:14 ` Gautham R. Shenoy
2023-08-24 22:51 ` David Vernet
2023-08-30 9:56 ` K Prateek Nayak
2023-08-31 2:32 ` David Vernet
2023-08-31 4:21 ` K Prateek Nayak
2023-08-31 10:45 ` [RFC PATCH 0/3] DO NOT MERGE: Breaking down the experimantal diff K Prateek Nayak
2023-08-31 10:45 ` [RFC PATCH 1/3] sched/fair: Move SHARED_RUNQ related structs and definitions into sched.h K Prateek Nayak
2023-08-31 10:45 ` [RFC PATCH 2/3] sched/fair: Improve integration of SHARED_RUNQ feature within newidle_balance K Prateek Nayak
2023-08-31 18:45 ` David Vernet
2023-08-31 19:47 ` K Prateek Nayak
2023-08-31 10:45 ` [RFC PATCH 3/3] sched/fair: Add a per-shard overload flag K Prateek Nayak
2023-08-31 19:11 ` David Vernet
2023-08-31 20:23 ` K Prateek Nayak
2023-09-29 17:01 ` David Vernet
2023-10-04 4:21 ` K Prateek Nayak
2023-10-04 17:20 ` David Vernet
2023-10-05 3:50 ` K Prateek Nayak
2023-09-27 4:23 ` K Prateek Nayak
2023-09-27 6:59 ` Chen Yu
2023-09-27 8:36 ` K Prateek Nayak
2023-09-28 8:41 ` Chen Yu
2023-10-03 21:05 ` David Vernet
2023-10-07 2:10 ` Chen Yu
2023-09-27 13:08 ` David Vernet
2023-11-27 8:28 ` [PATCH v3 0/7] sched: Implement shared runqueue in CFS Aboorva Devarajan
2023-11-27 19:49 ` David Vernet
2023-12-07 6:00 ` Aboorva Devarajan
2023-12-04 19:30 ` David Vernet
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230809221218.163894-5-void@manifault.com \
--to=void@manifault.com \
--cc=aaron.lu@intel.com \
--cc=bristot@redhat.com \
--cc=bsegall@google.com \
--cc=dietmar.eggemann@arm.com \
--cc=gautham.shenoy@amd.com \
--cc=juri.lelli@redhat.com \
--cc=kernel-team@meta.com \
--cc=kprateek.nayak@amd.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mgorman@suse.de \
--cc=mingo@redhat.com \
--cc=peterz@infradead.org \
--cc=roman.gushchin@linux.dev \
--cc=rostedt@goodmis.org \
--cc=tj@kernel.org \
--cc=vincent.guittot@linaro.org \
--cc=vschneid@redhat.com \
--cc=wuyun.abel@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox