public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] sched/core: clean up sched_init() a bit
@ 2019-06-04 20:46 Qian Cai
  2019-06-19 14:36 ` Qian Cai
  0 siblings, 1 reply; 2+ messages in thread
From: Qian Cai @ 2019-06-04 20:46 UTC (permalink / raw)
  To: akpm; +Cc: mingo, peterz, torvalds, tglx, linux-kernel, Qian Cai

Compiling a kernel with both FAIR_GROUP_SCHED=n and RT_GROUP_SCHED=n
will generate a warning using W=1:

  kernel/sched/core.c: In function 'sched_init':
  kernel/sched/core.c:5906:32: warning: variable 'ptr' set but not used

Use this opportunity to tidy up the code a bit by removing unnecessary
indentations, #endif comments and lines.

Signed-off-by: Qian Cai <cai@lca.pw>
---

v2: Fix an oversight when both FAIR_GROUP_SCHED and RT_GROUP_SCHED are
    selected, which was found by the 0day kernel testing robot.

 kernel/sched/core.c | 50 +++++++++++++++++++++++---------------------------
 1 file changed, 23 insertions(+), 27 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 874c427742a9..edebd5e97542 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5903,36 +5903,31 @@ int in_sched_functions(unsigned long addr)
 void __init sched_init(void)
 {
 	int i, j;
-	unsigned long alloc_size = 0, ptr;
-
-	wait_bit_init();
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
+	unsigned long alloc_size = 4 * nr_cpu_ids * sizeof(void **);
+	unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
+#elif defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
+	unsigned long alloc_size = 2 * nr_cpu_ids * sizeof(void **);
+	unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 #endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-	if (alloc_size) {
-		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
+	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		root_task_group.se = (struct sched_entity **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
+	root_task_group.se = (struct sched_entity **)ptr;
+	ptr += nr_cpu_ids * sizeof(void **);
 
-		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
+	root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+	ptr += nr_cpu_ids * sizeof(void **);
 
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif
 #ifdef CONFIG_RT_GROUP_SCHED
-		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
+	root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+	ptr += nr_cpu_ids * sizeof(void **);
 
-		root_task_group.rt_rq = (struct rt_rq **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
+	root_task_group.rt_rq = (struct rt_rq **)ptr;
+	ptr += nr_cpu_ids * sizeof(void **);
 
-#endif /* CONFIG_RT_GROUP_SCHED */
-	}
+#endif
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	for_each_possible_cpu(i) {
 		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
@@ -5940,7 +5935,7 @@ void __init sched_init(void)
 		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+#endif
 
 	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
 	init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
@@ -5950,9 +5945,9 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&root_task_group.rt_bandwidth,
-			global_rt_period(), global_rt_runtime());
-#endif /* CONFIG_RT_GROUP_SCHED */
+	init_rt_bandwidth(&root_task_group.rt_bandwidth, global_rt_period(),
+			  global_rt_runtime());
+#endif
 
 #ifdef CONFIG_CGROUP_SCHED
 	task_group_cache = KMEM_CACHE(task_group, 0);
@@ -5961,7 +5956,7 @@ void __init sched_init(void)
 	INIT_LIST_HEAD(&root_task_group.children);
 	INIT_LIST_HEAD(&root_task_group.siblings);
 	autogroup_init(&init_task);
-#endif /* CONFIG_CGROUP_SCHED */
+#endif
 
 	for_each_possible_cpu(i) {
 		struct rq *rq;
@@ -6031,6 +6026,7 @@ void __init sched_init(void)
 		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
 #endif
+
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH v2] sched/core: clean up sched_init() a bit
  2019-06-04 20:46 [PATCH v2] sched/core: clean up sched_init() a bit Qian Cai
@ 2019-06-19 14:36 ` Qian Cai
  0 siblings, 0 replies; 2+ messages in thread
From: Qian Cai @ 2019-06-19 14:36 UTC (permalink / raw)
  To: akpm; +Cc: mingo, peterz, torvalds, tglx, linux-kernel

Ping.

On Tue, 2019-06-04 at 16:46 -0400, Qian Cai wrote:
> Compiling a kernel with both FAIR_GROUP_SCHED=n and RT_GROUP_SCHED=n
> will generate a warning using W=1:
> 
>   kernel/sched/core.c: In function 'sched_init':
>   kernel/sched/core.c:5906:32: warning: variable 'ptr' set but not used
> 
> Use this opportunity to tidy up the code a bit by removing unnecessary
> indentations, #endif comments and lines.
> 
> Signed-off-by: Qian Cai <cai@lca.pw>
> ---
> 
> v2: Fix an oversight when both FAIR_GROUP_SCHED and RT_GROUP_SCHED are
>     selected, which was found by the 0day kernel testing robot.
> 
>  kernel/sched/core.c | 50 +++++++++++++++++++++++---------------------------
>  1 file changed, 23 insertions(+), 27 deletions(-)
> 
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 874c427742a9..edebd5e97542 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5903,36 +5903,31 @@ int in_sched_functions(unsigned long addr)
>  void __init sched_init(void)
>  {
>  	int i, j;
> -	unsigned long alloc_size = 0, ptr;
> -
> -	wait_bit_init();
> -
> -#ifdef CONFIG_FAIR_GROUP_SCHED
> -	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
> +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
> +	unsigned long alloc_size = 4 * nr_cpu_ids * sizeof(void **);
> +	unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
> +#elif defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
> +	unsigned long alloc_size = 2 * nr_cpu_ids * sizeof(void **);
> +	unsigned long ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
>  #endif
> -#ifdef CONFIG_RT_GROUP_SCHED
> -	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
> -#endif
> -	if (alloc_size) {
> -		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
> +	wait_bit_init();
>  
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -		root_task_group.se = (struct sched_entity **)ptr;
> -		ptr += nr_cpu_ids * sizeof(void **);
> +	root_task_group.se = (struct sched_entity **)ptr;
> +	ptr += nr_cpu_ids * sizeof(void **);
>  
> -		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
> -		ptr += nr_cpu_ids * sizeof(void **);
> +	root_task_group.cfs_rq = (struct cfs_rq **)ptr;
> +	ptr += nr_cpu_ids * sizeof(void **);
>  
> -#endif /* CONFIG_FAIR_GROUP_SCHED */
> +#endif
>  #ifdef CONFIG_RT_GROUP_SCHED
> -		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
> -		ptr += nr_cpu_ids * sizeof(void **);
> +	root_task_group.rt_se = (struct sched_rt_entity **)ptr;
> +	ptr += nr_cpu_ids * sizeof(void **);
>  
> -		root_task_group.rt_rq = (struct rt_rq **)ptr;
> -		ptr += nr_cpu_ids * sizeof(void **);
> +	root_task_group.rt_rq = (struct rt_rq **)ptr;
> +	ptr += nr_cpu_ids * sizeof(void **);
>  
> -#endif /* CONFIG_RT_GROUP_SCHED */
> -	}
> +#endif
>  #ifdef CONFIG_CPUMASK_OFFSTACK
>  	for_each_possible_cpu(i) {
>  		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
> @@ -5940,7 +5935,7 @@ void __init sched_init(void)
>  		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
>  			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
>  	}
> -#endif /* CONFIG_CPUMASK_OFFSTACK */
> +#endif
>  
>  	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(),
> global_rt_runtime());
>  	init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(),
> global_rt_runtime());
> @@ -5950,9 +5945,9 @@ void __init sched_init(void)
>  #endif
>  
>  #ifdef CONFIG_RT_GROUP_SCHED
> -	init_rt_bandwidth(&root_task_group.rt_bandwidth,
> -			global_rt_period(), global_rt_runtime());
> -#endif /* CONFIG_RT_GROUP_SCHED */
> +	init_rt_bandwidth(&root_task_group.rt_bandwidth, global_rt_period(),
> +			  global_rt_runtime());
> +#endif
>  
>  #ifdef CONFIG_CGROUP_SCHED
>  	task_group_cache = KMEM_CACHE(task_group, 0);
> @@ -5961,7 +5956,7 @@ void __init sched_init(void)
>  	INIT_LIST_HEAD(&root_task_group.children);
>  	INIT_LIST_HEAD(&root_task_group.siblings);
>  	autogroup_init(&init_task);
> -#endif /* CONFIG_CGROUP_SCHED */
> +#endif
>  
>  	for_each_possible_cpu(i) {
>  		struct rq *rq;
> @@ -6031,6 +6026,7 @@ void __init sched_init(void)
>  		rq->last_blocked_load_update_tick = jiffies;
>  		atomic_set(&rq->nohz_flags, 0);
>  #endif
> +
>  #endif /* CONFIG_SMP */
>  		hrtick_rq_init(rq);
>  		atomic_set(&rq->nr_iowait, 0);

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2019-06-19 14:36 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-06-04 20:46 [PATCH v2] sched/core: clean up sched_init() a bit Qian Cai
2019-06-19 14:36 ` Qian Cai

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox