linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] sched/rt: Remove the unnecessary CONFIG_RT_GROUP_SCHED
@ 2025-08-18  6:17 Yajun Deng
  2025-08-26  1:45 ` Yajun Deng
  0 siblings, 1 reply; 2+ messages in thread
From: Yajun Deng @ 2025-08-18  6:17 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
	rostedt, bsegall, mgorman, vschneid
  Cc: linux-kernel, Yajun Deng

After commit 5f6bd380c7bd ("sched/rt: Remove default bandwidth control"),
these bandwidth control members are only initialized when
CONFIG_RT_GROUP_SCHED is enabled.

Remove the unnecessary CONFIG_RT_GROUP_SCHED from init_rt_rq() and
initialize the members in init_tg_rt_entry().

In sched_init(), the rt_runtime of rt_bandwidth will be initialized by
global_rt_runtime(), so we can unify the rt_runtime of rt_rq by it in
init_tg_rt_entry().

Also, remove the unnecessary CONFIG_RT_GROUP_SCHED in rt_se_prio().

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
 kernel/sched/core.c |  6 ------
 kernel/sched/rt.c   | 17 +++++------------
 2 files changed, 5 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index be00629f0ba4..e9d6ceead9f4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8772,12 +8772,6 @@ void __init sched_init(void)
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
-		/*
-		 * This is required for init cpu because rt.c:__enable_runtime()
-		 * starts working after scheduler_running, which is not the case
-		 * yet.
-		 */
-		rq->rt.rt_runtime = global_rt_runtime();
 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 		rq->sd = NULL;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7936d4333731..390f3d08abbe 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -84,14 +84,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	plist_head_init(&rt_rq->pushable_tasks);
 	/* We start in dequeued state, because no RT tasks are queued */
 	rt_rq->rt_queued = 0;
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	rt_rq->rt_time = 0;
-	rt_rq->rt_throttled = 0;
-	rt_rq->rt_runtime = 0;
-	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-	rt_rq->tg = &root_task_group;
-#endif
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -229,10 +221,14 @@ void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
+	rt_rq->rt_time = 0;
+	rt_rq->rt_throttled = 0;
 	rt_rq->rt_nr_boosted = 0;
+	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+
 	rt_rq->rq = rq;
 	rt_rq->tg = tg;
+	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 
 	tg->rt_rq[cpu] = rt_rq;
 	tg->rt_se[cpu] = rt_se;
@@ -280,7 +276,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 			goto err_free_rq;
 
 		init_rt_rq(rt_rq);
-		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 	}
 
@@ -957,12 +952,10 @@ static void __disable_runtime(struct rq *rq) { }
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_RT_GROUP_SCHED
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 	if (rt_rq)
 		return rt_rq->highest_prio.curr;
-#endif
 
 	return rt_task_of(rt_se)->prio;
 }
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] sched/rt: Remove the unnecessary CONFIG_RT_GROUP_SCHED
  2025-08-18  6:17 [PATCH] sched/rt: Remove the unnecessary CONFIG_RT_GROUP_SCHED Yajun Deng
@ 2025-08-26  1:45 ` Yajun Deng
  0 siblings, 0 replies; 2+ messages in thread
From: Yajun Deng @ 2025-08-26  1:45 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
	rostedt, bsegall, mgorman, vschneid
  Cc: linux-kernel

Hi all,

Gentle ping.

Thanks

On 2025/8/18 14:17, Yajun Deng wrote:
> After commit 5f6bd380c7bd ("sched/rt: Remove default bandwidth control"),
> these bandwidth control members are only initialized when
> CONFIG_RT_GROUP_SCHED is enabled.
>
> Remove the unnecessary CONFIG_RT_GROUP_SCHED from init_rt_rq() and
> initialize the members in init_tg_rt_entry().
>
> In sched_init(), the rt_runtime of rt_bandwidth will be initialized by
> global_rt_runtime(), so we can unify the rt_runtime of rt_rq by it in
> init_tg_rt_entry().
>
> Also, remove the unnecessary CONFIG_RT_GROUP_SCHED in rt_se_prio().
>
> Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
> ---
>   kernel/sched/core.c |  6 ------
>   kernel/sched/rt.c   | 17 +++++------------
>   2 files changed, 5 insertions(+), 18 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index be00629f0ba4..e9d6ceead9f4 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -8772,12 +8772,6 @@ void __init sched_init(void)
>   #endif /* CONFIG_FAIR_GROUP_SCHED */
>   
>   #ifdef CONFIG_RT_GROUP_SCHED
> -		/*
> -		 * This is required for init cpu because rt.c:__enable_runtime()
> -		 * starts working after scheduler_running, which is not the case
> -		 * yet.
> -		 */
> -		rq->rt.rt_runtime = global_rt_runtime();
>   		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
>   #endif
>   		rq->sd = NULL;
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 7936d4333731..390f3d08abbe 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -84,14 +84,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
>   	plist_head_init(&rt_rq->pushable_tasks);
>   	/* We start in dequeued state, because no RT tasks are queued */
>   	rt_rq->rt_queued = 0;
> -
> -#ifdef CONFIG_RT_GROUP_SCHED
> -	rt_rq->rt_time = 0;
> -	rt_rq->rt_throttled = 0;
> -	rt_rq->rt_runtime = 0;
> -	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
> -	rt_rq->tg = &root_task_group;
> -#endif
>   }
>   
>   #ifdef CONFIG_RT_GROUP_SCHED
> @@ -229,10 +221,14 @@ void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
>   {
>   	struct rq *rq = cpu_rq(cpu);
>   
> -	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
> +	rt_rq->rt_time = 0;
> +	rt_rq->rt_throttled = 0;
>   	rt_rq->rt_nr_boosted = 0;
> +	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
> +
>   	rt_rq->rq = rq;
>   	rt_rq->tg = tg;
> +	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
>   
>   	tg->rt_rq[cpu] = rt_rq;
>   	tg->rt_se[cpu] = rt_se;
> @@ -280,7 +276,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
>   			goto err_free_rq;
>   
>   		init_rt_rq(rt_rq);
> -		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
>   		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
>   	}
>   
> @@ -957,12 +952,10 @@ static void __disable_runtime(struct rq *rq) { }
>   
>   static inline int rt_se_prio(struct sched_rt_entity *rt_se)
>   {
> -#ifdef CONFIG_RT_GROUP_SCHED
>   	struct rt_rq *rt_rq = group_rt_rq(rt_se);
>   
>   	if (rt_rq)
>   		return rt_rq->highest_prio.curr;
> -#endif
>   
>   	return rt_task_of(rt_se)->prio;
>   }

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2025-08-26  1:45 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-08-18  6:17 [PATCH] sched/rt: Remove the unnecessary CONFIG_RT_GROUP_SCHED Yajun Deng
2025-08-26  1:45 ` Yajun Deng

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).