public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] sched/pelt: rename init_entity_runnable_average()
@ 2022-06-22 16:20 Zhaoyu Liu
  2022-07-15 14:31 ` Zackary Liu
  0 siblings, 1 reply; 2+ messages in thread
From: Zhaoyu Liu @ 2022-06-22 16:20 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
	rostedt, bsegall, mgorman, bristot, vschneid
  Cc: linux-kernel

In commit 0dacee1bfa70 ("sched/pelt: Remove unused runnable load average"),
sa->runnable_load_avg was deleted from init_entity_runnable_average(),
and now it only initializes sa->load_avg inside the function,
so renaming the function to init_entity_load_average() is more appropriate.

Signed-off-by: Zhaoyu Liu <zackary.liu.pro@gmail.com>
---
 kernel/sched/core.c  | 3 +--
 kernel/sched/fair.c  | 6 +++---
 kernel/sched/sched.h | 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 263d76489a48..224ca514e9d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4580,8 +4580,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	else
 		p->sched_class = &fair_sched_class;
 
-	init_entity_runnable_average(&p->se);
-
+	init_entity_load_average(&p->se);
 
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ed32f66bbd3d..6bc7eaf88c59 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ static unsigned long task_h_load(struct task_struct *p);
 static unsigned long capacity_of(int cpu);
 
 /* Give new sched_entity start runnable values to heavy its load in infant time */
-void init_entity_runnable_average(struct sched_entity *se)
+void init_entity_load_average(struct sched_entity *se)
 {
 	struct sched_avg *sa = &se->avg;
 
@@ -871,7 +871,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 }
 
 #else /* !CONFIG_SMP */
-void init_entity_runnable_average(struct sched_entity *se)
+void init_entity_load_average(struct sched_entity *se)
 {
 }
 void post_init_entity_util_avg(struct task_struct *p)
@@ -11510,7 +11510,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
-		init_entity_runnable_average(se);
+		init_entity_load_average(se);
 	}
 
 	return 1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b14b6b4495d..349b423ea8a6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2330,7 +2330,7 @@ extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
 #define MAX_BW			((1ULL << MAX_BW_BITS) - 1)
 unsigned long to_ratio(u64 period, u64 runtime);
 
-extern void init_entity_runnable_average(struct sched_entity *se);
+extern void init_entity_load_average(struct sched_entity *se);
 extern void post_init_entity_util_avg(struct task_struct *p);
 
 #ifdef CONFIG_NO_HZ_FULL
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] sched/pelt: rename init_entity_runnable_average()
  2022-06-22 16:20 [PATCH] sched/pelt: rename init_entity_runnable_average() Zhaoyu Liu
@ 2022-07-15 14:31 ` Zackary Liu
  0 siblings, 0 replies; 2+ messages in thread
From: Zackary Liu @ 2022-07-15 14:31 UTC (permalink / raw)
  To: mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
	bristot@redhat.com, vschneid@redhat.com
  Cc: linux-kernel@vger.kernel.org


On Jun 23 2022, at 12:20 am, Zhaoyu Liu <zackary.liu.pro@gmail.com> wrote:

> In commit 0dacee1bfa70 ("sched/pelt: Remove unused runnable load average"),
> sa->runnable_load_avg was deleted from init_entity_runnable_average(),
> and now it only initializes sa->load_avg inside the function,
> so renaming the function to init_entity_load_average() is more appropriate.
> 
> Signed-off-by: Zhaoyu Liu <zackary.liu.pro@gmail.com>
> ---
> kernel/sched/core.c  | 3 +--
> kernel/sched/fair.c  | 6 +++---
> kernel/sched/sched.h | 2 +-
> 3 files changed, 5 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 263d76489a48..224ca514e9d8 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4580,8 +4580,7 @@ int sched_fork(unsigned long clone_flags, struct
> task_struct *p)
> 	else
> 		p->sched_class = &fair_sched_class;
> 
> -	init_entity_runnable_average(&p->se);
> -
> +	init_entity_load_average(&p->se);
> 
> #ifdef CONFIG_SCHED_INFO
> 	if (likely(sched_info_on()))
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index ed32f66bbd3d..6bc7eaf88c59 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -784,7 +784,7 @@ static unsigned long task_h_load(struct
> task_struct *p);
> static unsigned long capacity_of(int cpu);
> 
> /* Give new sched_entity start runnable values to heavy its load in
> infant time */
> -void init_entity_runnable_average(struct sched_entity *se)
> +void init_entity_load_average(struct sched_entity *se)
> {
> 	struct sched_avg *sa = &se->avg;
> 
> @@ -871,7 +871,7 @@ void post_init_entity_util_avg(struct task_struct *p)
> }
> 
> #else /* !CONFIG_SMP */
> -void init_entity_runnable_average(struct sched_entity *se)
> +void init_entity_load_average(struct sched_entity *se)
> {
> }
> void post_init_entity_util_avg(struct task_struct *p)
> @@ -11510,7 +11510,7 @@ int alloc_fair_sched_group(struct task_group
> *tg, struct task_group *parent)
> 
> 		init_cfs_rq(cfs_rq);
> 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
> -		init_entity_runnable_average(se);
> +		init_entity_load_average(se);
> 	}
> 
> 	return 1;
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 5b14b6b4495d..349b423ea8a6 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -2330,7 +2330,7 @@ extern void init_dl_inactive_task_timer(struct
> sched_dl_entity *dl_se);
> #define MAX_BW			((1ULL << MAX_BW_BITS) - 1)
> unsigned long to_ratio(u64 period, u64 runtime);
> 
> -extern void init_entity_runnable_average(struct sched_entity *se);
> +extern void init_entity_load_average(struct sched_entity *se);
> extern void post_init_entity_util_avg(struct task_struct *p);
> 
> #ifdef CONFIG_NO_HZ_FULL
> -- 
> 2.17.1
> 
> 

I sent a patch a couple of days ago but have not yet received a reply.
I am looking forward to your response,
thank you

--
zackary

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2022-07-15 14:31 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-06-22 16:20 [PATCH] sched/pelt: rename init_entity_runnable_average() Zhaoyu Liu
2022-07-15 14:31 ` Zackary Liu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox