* [PATCH v3 1/4] jump_label,module: Don't alloc static_key_mod for __ro_after_init keys
2024-03-13 18:01 [PATCH v3 0/4] jump_label: Fix __ro_after_init keys for modules & annotate some keys Valentin Schneider
@ 2024-03-13 18:01 ` Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 2/4] context_tracking: Make context_tracking_key __ro_after_init Valentin Schneider
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Valentin Schneider @ 2024-03-13 18:01 UTC (permalink / raw)
To: linux-kernel, kvm, linux-arch, x86
Cc: Peter Zijlstra, Josh Poimboeuf, Thomas Gleixner, Borislav Petkov,
Pawan Gupta, Ingo Molnar, Dave Hansen, H. Peter Anvin,
Paolo Bonzini, Wanpeng Li, Vitaly Kuznetsov, Arnd Bergmann,
Jason Baron, Steven Rostedt, Ard Biesheuvel, Frederic Weisbecker,
Paul E. McKenney, Feng Tang, Andrew Morton, Mike Rapoport (IBM),
Vlastimil Babka, David Hildenbrand, ndesaulniers@google.com,
Michael Kelley, Masami Hiramatsu (Google)
From: Peter Zijlstra <peterz@infradead.org>
When a static_key is marked ro_after_init, its state will never change
(after init), therefore jump_label_update() will never need to iterate
the entries, and thus module load won't actually need to track this --
avoiding the static_key::next write.
Therefore, mark these keys such that jump_label_add_module() might
recognise them and avoid the modification.
Use the special state: 'static_key_linked(key) && !static_key_mod(key)'
to denote such keys.
jump_label_add_module() does not exist under CONFIG_JUMP_LABEL=n, so the
newly-introduced jump_label_ro() can be defined as a nop for that
configuration.
Link: http://lore.kernel.org/r/20230705204142.GB2813335@hirez.programming.kicks-ass.net
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[Added comments and build fix]
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
include/asm-generic/sections.h | 5 ++++
include/linux/jump_label.h | 3 ++
init/main.c | 1 +
kernel/jump_label.c | 53 ++++++++++++++++++++++++++++++++++
4 files changed, 62 insertions(+)
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index db13bb620f527..c768de6f19a9a 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -180,6 +180,11 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+static inline bool is_kernel_ro_after_init(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_ro_after_init &&
+ addr < (unsigned long)__end_ro_after_init;
+}
/**
* is_kernel_inittext - checks if the pointer address is located in the
* .init.text section
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f0a949b7c9733..3b103d88c139e 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -216,6 +216,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -265,6 +266,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static __always_inline void jump_label_ro(void) { }
+
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely_notrace(static_key_count(key) > 0))
diff --git a/init/main.c b/init/main.c
index 7dce08198b133..6fc421b4d5fdb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1412,6 +1412,7 @@ static void mark_readonly(void)
* insecure pages which are W+X.
*/
rcu_barrier();
+ jump_label_ro();
mark_rodata_ro();
rodata_test();
} else
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index d9c822bbffb8d..7e3e8d1a0fea7 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -530,6 +530,45 @@ void __init jump_label_init(void)
cpus_read_unlock();
}
+static inline bool static_key_sealed(struct static_key *key)
+{
+ return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);
+}
+
+static inline void static_key_seal(struct static_key *key)
+{
+ unsigned long type = key->type & JUMP_TYPE_TRUE;
+ key->type = JUMP_TYPE_LINKED | type;
+}
+
+void jump_label_ro(void)
+{
+ struct jump_entry *iter_start = __start___jump_table;
+ struct jump_entry *iter_stop = __stop___jump_table;
+ struct jump_entry *iter;
+
+ if (WARN_ON_ONCE(!static_key_initialized))
+ return;
+
+ cpus_read_lock();
+ jump_label_lock();
+
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ struct static_key *iterk = jump_entry_key(iter);
+
+ if (!is_kernel_ro_after_init((unsigned long)iterk))
+ continue;
+
+ if (static_key_sealed(iterk))
+ continue;
+
+ static_key_seal(iterk);
+ }
+
+ jump_label_unlock();
+ cpus_read_unlock();
+}
+
#ifdef CONFIG_MODULES
enum jump_label_type jump_label_init_type(struct jump_entry *entry)
@@ -650,6 +689,15 @@ static int jump_label_add_module(struct module *mod)
static_key_set_entries(key, iter);
continue;
}
+
+ /*
+ * If the key was sealed at init, then there's no need to keep a
+ * reference to its module entries - just patch them now and be
+ * done with it.
+ */
+ if (static_key_sealed(key))
+ goto do_poke;
+
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
if (!jlm)
return -ENOMEM;
@@ -675,6 +723,7 @@ static int jump_label_add_module(struct module *mod)
static_key_set_linked(key);
/* Only update if we've changed from our initial state */
+do_poke:
if (jump_label_type(iter) != jump_label_init_type(iter))
__jump_label_update(key, iter, iter_stop, true);
}
@@ -699,6 +748,10 @@ static void jump_label_del_module(struct module *mod)
if (within_module((unsigned long)key, mod))
continue;
+ /* No @jlm allocated because key was sealed at init. */
+ if (static_key_sealed(key))
+ continue;
+
/* No memory during module load */
if (WARN_ON(!static_key_linked(key)))
continue;
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* [PATCH v3 2/4] context_tracking: Make context_tracking_key __ro_after_init
2024-03-13 18:01 [PATCH v3 0/4] jump_label: Fix __ro_after_init keys for modules & annotate some keys Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 1/4] jump_label,module: Don't alloc static_key_mod for __ro_after_init keys Valentin Schneider
@ 2024-03-13 18:01 ` Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 3/4] x86/kvm: Make kvm_async_pf_enabled __ro_after_init Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 4/4] x86/tsc: Make __use_tsc __ro_after_init Valentin Schneider
3 siblings, 0 replies; 5+ messages in thread
From: Valentin Schneider @ 2024-03-13 18:01 UTC (permalink / raw)
To: linux-kernel, kvm, linux-arch, x86
Cc: Josh Poimboeuf, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Ingo Molnar, Dave Hansen, H. Peter Anvin,
Paolo Bonzini, Wanpeng Li, Vitaly Kuznetsov, Arnd Bergmann,
Jason Baron, Steven Rostedt, Ard Biesheuvel, Frederic Weisbecker,
Paul E. McKenney, Feng Tang, Andrew Morton, Mike Rapoport (IBM),
Vlastimil Babka, David Hildenbrand, ndesaulniers@google.com,
Michael Kelley, Masami Hiramatsu (Google)
context_tracking_key is only ever enabled in __init ct_cpu_tracker_user(),
so mark it as __ro_after_init.
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
kernel/context_tracking.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 70ae70d038233..24b1e11432608 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -432,7 +432,7 @@ static __always_inline void ct_kernel_enter(bool user, int offset) { }
#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>
-DEFINE_STATIC_KEY_FALSE(context_tracking_key);
+DEFINE_STATIC_KEY_FALSE_RO(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);
static noinstr bool context_tracking_recursion_enter(void)
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* [PATCH v3 3/4] x86/kvm: Make kvm_async_pf_enabled __ro_after_init
2024-03-13 18:01 [PATCH v3 0/4] jump_label: Fix __ro_after_init keys for modules & annotate some keys Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 1/4] jump_label,module: Don't alloc static_key_mod for __ro_after_init keys Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 2/4] context_tracking: Make context_tracking_key __ro_after_init Valentin Schneider
@ 2024-03-13 18:01 ` Valentin Schneider
2024-03-13 18:01 ` [PATCH v3 4/4] x86/tsc: Make __use_tsc __ro_after_init Valentin Schneider
3 siblings, 0 replies; 5+ messages in thread
From: Valentin Schneider @ 2024-03-13 18:01 UTC (permalink / raw)
To: linux-kernel, kvm, linux-arch, x86
Cc: Sean Christopherson, Josh Poimboeuf, Thomas Gleixner,
Borislav Petkov, Peter Zijlstra, Pawan Gupta, Ingo Molnar,
Dave Hansen, H. Peter Anvin, Paolo Bonzini, Wanpeng Li,
Vitaly Kuznetsov, Arnd Bergmann, Jason Baron, Steven Rostedt,
Ard Biesheuvel, Frederic Weisbecker, Paul E. McKenney, Feng Tang,
Andrew Morton, Mike Rapoport (IBM), Vlastimil Babka,
David Hildenbrand, ndesaulniers@google.com, Michael Kelley,
Masami Hiramatsu (Google)
kvm_async_pf_enabled is only ever enabled in __init kvm_guest_init(), so
mark it as __ro_after_init.
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/kernel/kvm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 101a7c1bf2008..6c6ff015b99fd 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -44,7 +44,7 @@
#include <asm/svm.h>
#include <asm/e820/api.h>
-DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled);
static int kvmapf = 1;
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH v3 4/4] x86/tsc: Make __use_tsc __ro_after_init
2024-03-13 18:01 [PATCH v3 0/4] jump_label: Fix __ro_after_init keys for modules & annotate some keys Valentin Schneider
` (2 preceding siblings ...)
2024-03-13 18:01 ` [PATCH v3 3/4] x86/kvm: Make kvm_async_pf_enabled __ro_after_init Valentin Schneider
@ 2024-03-13 18:01 ` Valentin Schneider
3 siblings, 0 replies; 5+ messages in thread
From: Valentin Schneider @ 2024-03-13 18:01 UTC (permalink / raw)
To: linux-kernel, kvm, linux-arch, x86
Cc: Josh Poimboeuf, Thomas Gleixner, Borislav Petkov, Peter Zijlstra,
Pawan Gupta, Ingo Molnar, Dave Hansen, H. Peter Anvin,
Paolo Bonzini, Wanpeng Li, Vitaly Kuznetsov, Arnd Bergmann,
Jason Baron, Steven Rostedt, Ard Biesheuvel, Frederic Weisbecker,
Paul E. McKenney, Feng Tang, Andrew Morton, Mike Rapoport (IBM),
Vlastimil Babka, David Hildenbrand, ndesaulniers@google.com,
Michael Kelley, Masami Hiramatsu (Google)
__use_tsc is only ever enabled in __init tsc_enable_sched_clock(), so mark
it as __ro_after_init.
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/kernel/tsc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 5a69a49acc963..0f7624ed1d1d0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(tsc_khz);
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;
-static DEFINE_STATIC_KEY_FALSE(__use_tsc);
+static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
int tsc_clocksource_reliable;
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread