linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] sched/rt: pass rq and task_struct pointers to update_stats functions
@ 2025-09-03 15:08 Yajun Deng
  0 siblings, 0 replies; only message in thread
From: Yajun Deng @ 2025-09-03 15:08 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
	rostedt, bsegall, mgorman, vschneid
  Cc: linux-kernel, Yajun Deng

These __update_stats functions only require the rq and task_struct
parameters. All update_stats functions convert the rt_rq to rq and the
rt_se to task_struct. And all callers of update_stats will get the
rt_rq from the rq and the rt_se from the task_struct.

The rq_of_rt_rq() always returns the top-level rq, which is unique for
each CPU. The rt_task_of() is only valid for an entity that represents a
task. The callers perform these conversions and the callees convert back,
but the round trip doesn't change anything.

Just pass the rq and task_struct pointers to these update_stats functions.
If the caller doesn't already have a task_struct pointer, call
rt_entity_is_task() first to ensure the entity actually represents a task.

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
 kernel/sched/rt.c | 91 ++++++++++-------------------------------------
 1 file changed, 18 insertions(+), 73 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 390f3d08abbe..feef4508efca 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1212,107 +1212,56 @@ static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_arr
 	rt_se->on_list = 0;
 }
 
-static inline struct sched_statistics *
-__schedstats_from_rt_se(struct sched_rt_entity *rt_se)
-{
-	/* schedstats is not supported for rt group. */
-	if (!rt_entity_is_task(rt_se))
-		return NULL;
-
-	return &rt_task_of(rt_se)->stats;
-}
-
 static inline void
-update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+update_stats_wait_start_rt(struct rq *rq, struct task_struct *p)
 {
-	struct sched_statistics *stats;
-	struct task_struct *p = NULL;
-
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
-	stats = __schedstats_from_rt_se(rt_se);
-	if (!stats)
-		return;
-
-	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
+	__update_stats_wait_start(rq, p, &p->stats);
 }
 
 static inline void
-update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+update_stats_enqueue_sleeper_rt(struct rq *rq, struct task_struct *p)
 {
-	struct sched_statistics *stats;
-	struct task_struct *p = NULL;
-
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
-	stats = __schedstats_from_rt_se(rt_se);
-	if (!stats)
-		return;
-
-	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
+	__update_stats_enqueue_sleeper(rq, p, &p->stats);
 }
 
 static inline void
-update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
-			int flags)
+update_stats_enqueue_rt(struct rq *rq, struct task_struct *p, int flags)
 {
-	if (!schedstat_enabled())
-		return;
-
 	if (flags & ENQUEUE_WAKEUP)
-		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
+		update_stats_enqueue_sleeper_rt(rq, p);
 }
 
 static inline void
-update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+update_stats_wait_end_rt(struct rq *rq, struct task_struct *p)
 {
-	struct sched_statistics *stats;
-	struct task_struct *p = NULL;
-
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
-	stats = __schedstats_from_rt_se(rt_se);
-	if (!stats)
-		return;
-
-	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
+	__update_stats_wait_end(rq, p, &p->stats);
 }
 
 static inline void
-update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
-			int flags)
+update_stats_dequeue_rt(struct rq *rq, struct task_struct *p, int flags)
 {
-	struct task_struct *p = NULL;
 
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
 	if ((flags & DEQUEUE_SLEEP) && p) {
 		unsigned int state;
 
 		state = READ_ONCE(p->__state);
 		if (state & TASK_INTERRUPTIBLE)
-			__schedstat_set(p->stats.sleep_start,
-					rq_clock(rq_of_rt_rq(rt_rq)));
+			__schedstat_set(p->stats.sleep_start, rq_clock(rq));
 
 		if (state & TASK_UNINTERRUPTIBLE)
-			__schedstat_set(p->stats.block_start,
-					rq_clock(rq_of_rt_rq(rt_rq)));
+			__schedstat_set(p->stats.block_start, rq_clock(rq));
 	}
 }
 
@@ -1392,7 +1341,8 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);
 
-	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
+	if (rt_entity_is_task(rt_se))
+		update_stats_enqueue_rt(rq, rt_task_of(rt_se), flags);
 
 	dequeue_rt_stack(rt_se, flags);
 	for_each_sched_rt_entity(rt_se)
@@ -1404,7 +1354,8 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);
 
-	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
+	if (rt_entity_is_task(rt_se))
+		update_stats_dequeue_rt(rq, rt_task_of(rt_se), flags);
 
 	dequeue_rt_stack(rt_se, flags);
 
@@ -1429,7 +1380,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 		rt_se->timeout = 0;
 
 	check_schedstat_required();
-	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
+	update_stats_wait_start_rt(rq_of_rt_se(rt_se), p);
 
 	enqueue_rt_entity(rt_se, flags);
 
@@ -1631,12 +1582,9 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 
 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
 {
-	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq = &rq->rt;
-
 	p->se.exec_start = rq_clock_task(rq);
 	if (on_rt_rq(&p->rt))
-		update_stats_wait_end_rt(rt_rq, rt_se);
+		update_stats_wait_end_rt(rq, p);
 
 	/* The running task is never eligible for pushing */
 	dequeue_pushable_task(rq, p);
@@ -1702,11 +1650,8 @@ static struct task_struct *pick_task_rt(struct rq *rq)
 
 static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
 {
-	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq = &rq->rt;
-
 	if (on_rt_rq(&p->rt))
-		update_stats_wait_start_rt(rt_rq, rt_se);
+		update_stats_wait_start_rt(rq, p);
 
 	update_curr_rt(rq);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2025-09-03 15:09 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-09-03 15:08 [PATCH] sched/rt: pass rq and task_struct pointers to update_stats functions Yajun Deng

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).