public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] sched/core: Address classes via __begin_sched_classes
@ 2022-05-17  3:00 Kees Cook
  2022-05-17  3:33 ` Kees Cook
  2022-05-17  6:42 ` [PATCH v3] sched/core: Address classes via __begin_sched_classes Peter Zijlstra
  0 siblings, 2 replies; 7+ messages in thread
From: Kees Cook @ 2022-05-17  3:00 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Kees Cook, Christophe de Dinechin, Ingo Molnar, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-kernel,
	linux-hardening

GCC 12 is very sensitive about array checking, and views all negative
array accesses as unsafe (a not unreasonable position). Redefine
sched_class_highest in terms of its location from __begin_sched_classes,
and redefine sched_class_lowest to the actual lowest sched class instead
of one lower. This also means the for_class_range() must be redefined to
be inclusive, which means its 1 caller must be adjusted to have its
"lowest" argument bumped up one position. Silences this warning:

In file included from kernel/sched/core.c:81:
kernel/sched/core.c: In function ‘set_rq_online.part.0’:
kernel/sched/sched.h:2197:52: error: array subscript -1 is outside array bounds of ‘struct sched_class[44343134792571037]’
 [-Werror=array-bounds]
 2197 | #define sched_class_lowest  (__begin_sched_classes - 1)
      |                                                    ^
kernel/sched/sched.h:2200:41: note: in definition of macro ‘for_class_range’
 2200 |         for (class = (_from); class != (_to); class--)
      |                                         ^~~
kernel/sched/sched.h:2203:53: note: in expansion of macro ‘sched_class_lowest’
 2203 |for_class_range(class, sched_class_highest, sched_class_lowest)
      |                                            ^~~~~~~~~~~~~~~~~~
kernel/sched/core.c:9115:17: note: in expansion of macro ‘for_each_class’
 9115 |                 for_each_class(class) {
      |                 ^~~~~~~~~~~~~~
kernel/sched/sched.h:2193:27: note: at offset -208 into object ‘__begin_sched_classes’ of size [0, 9223372036854775807]
 2193 | extern struct sched_class __begin_sched_classes[];
      |                           ^~~~~~~~~~~~~~~~~~~~~

The introduction and use of sched_class_higher() could just be a bare "+ 1",
but this code's backwards walking and non-inclusive for loop was weird
enough, it seemed best to explicitly describe the manipulation
happening.

These can't just be object pointers because GCC still sees it as an
address of a single struct.

The resulting instruction output is identical to before except that one
less register is used in set_rq_online(), where an immediate can now
be used, resulting in a small instruction count savings:

│  set_rq_online():
│ -	push   %r12
│  	push   %rbp
│  	push   %rbx
│  	mov    0x9a0(%rdi),%rax
│  	mov    0xa10(%rdi),%edx
│  	lock bts %rdx,0x20(%rax)
│  	movabs $0x0,%rbx
│   R_X86_64_64	__end_sched_classes-0xd0
│ -	movabs $0x0,%r12
│ - R_X86_64_64	__begin_sched_classes-0xd0
│  	movl   $0x1,0xa14(%rdi)
│ -	cmp    %r12,%rbx
│ -	je     31ea <set_rq_online.part.0+0x5a>
│ -	mov    %rdi,%rbp
│ +	cmp    $0x0,%rbx
│ + R_X86_64_32S	__begin_sched_classes
│ +	jb     31e6 <set_rq_online.part.0+0x56>
│ +	mov    %rdi,%rbp
│  	mov    0x70(%rbx),%rax
│  	test   %rax,%rax
│ -	je     31de <set_rq_online.part.0+0x4e>
│ +	je     31d6 <set_rq_online.part.0+0x46>
│  	mov    %rbp,%rdi
│ -	call   31de <set_rq_online.part.0+0x4e>
│ +	call   31d6 <set_rq_online.part.0+0x46>
│   R_X86_64_PLT32	__x86_indirect_thunk_rax-0x4
│  	sub    $0xd0,%rbx
│ -	cmp    %r12,%rbx
│ -	jne    31cd <set_rq_online.part.0+0x3d>
│ +	cmp    $0x0,%rbx
│ + R_X86_64_32S	__begin_sched_classes
│ +	jae    31c5 <set_rq_online.part.0+0x35>
│  	pop    %rbx
│  	pop    %rbp
│ -	pop    %r12
│  	retq

Reported-by: Christophe de Dinechin <dinechin@redhat.com>
Link: https://lore.kernel.org/lkml/20220414150855.2407137-2-dinechin@redhat.com/
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
---
v1: https://lore.kernel.org/lkml/20220516194241.3064242-1-keescook@chromium.org
v2: https://lore.kernel.org/lkml/20220517000630.3383144-1-keescook@chromium.org
v3:
 - Add missing increment to the one for_class_range() user
 - Provide instruction sequence change analysis in commit log
---
 kernel/sched/core.c  |  3 ++-
 kernel/sched/sched.h | 11 ++++++++---
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d58c0389eb23..f2bcc7f15381 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5665,7 +5665,8 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 	 * We can terminate the balance pass as soon as we know there is
 	 * a runnable task of @class priority or higher.
 	 */
-	for_class_range(class, prev->sched_class, &idle_sched_class) {
+	for_class_range(class, prev->sched_class,
+			sched_class_higher(&idle_sched_class)) {
 		if (class->balance(rq, prev, rf))
 			break;
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8dccb34eb190..c757bd26b01a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2193,11 +2193,16 @@ const struct sched_class name##_sched_class \
 extern struct sched_class __begin_sched_classes[];
 extern struct sched_class __end_sched_classes[];
 
-#define sched_class_highest (__end_sched_classes - 1)
-#define sched_class_lowest  (__begin_sched_classes - 1)
+#define sched_class_higher(class) ((class) + 1)
 
+#define sched_class_highest (&__begin_sched_classes[__end_sched_classes     \
+						    - __begin_sched_classes \
+						    - 1])
+#define sched_class_lowest  (&__begin_sched_classes[0])
+
+/* For each class, inclusive from _from down to _to. */
 #define for_class_range(class, _from, _to) \
-	for (class = (_from); class != (_to); class--)
+	for (class = (_from); class >= (_to); class--)
 
 #define for_each_class(class) \
 	for_class_range(class, sched_class_highest, sched_class_lowest)
-- 
2.32.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2022-05-19 21:57 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-05-17  3:00 [PATCH v3] sched/core: Address classes via __begin_sched_classes Kees Cook
2022-05-17  3:33 ` Kees Cook
2022-05-17 11:46   ` Peter Zijlstra
2022-05-17 17:35     ` Kees Cook
2022-05-17 22:22       ` Peter Zijlstra
2022-05-19 21:57     ` [tip: sched/core] sched: Reverse sched_class layout tip-bot2 for Peter Zijlstra
2022-05-17  6:42 ` [PATCH v3] sched/core: Address classes via __begin_sched_classes Peter Zijlstra

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox