* [PATCH 0/3] powerpc: Enable dynamic preemption
@ 2024-11-25 4:22 Shrikanth Hegde
2024-11-25 4:22 ` [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm Shrikanth Hegde
` (2 more replies)
0 siblings, 3 replies; 17+ messages in thread
From: Shrikanth Hegde @ 2024-11-25 4:22 UTC (permalink / raw)
To: mpe, linuxppc-dev
Cc: sshegde, npiggin, christophe.leroy, maddy, bigeasy, ankur.a.arora,
linux-kernel, mark.rutland, vschneid, peterz
Now that lazy preemption is supported, it makes sense to enable dynamic
preemption so that the preemption model can be changed at runtime
without rebuilding the kernel.
The overhead of dynamic preemption seems reasonable for several
workloads. Tested with a db2 database workload, unixbench, schbench
and hackbench. Except for hackbench pipe, all show numbers similar to
those without dynamic preemption, especially for preempt=none.
These patches *depend* on the lazy preemption patches[1] being applied
first on the tip/sched/core tree.
The reason for adding arch/powerpc/include/asm/preempt.h is to allow
arch-specific preempt changes. There is also a plan to move the preempt
count into the paca on 64-bit systems, as discussed in [2].
[1] https://lore.kernel.org/all/20241116192306.88217-1-sshegde@linux.ibm.com/#t
[2] https://lore.kernel.org/all/14d4584d-a087-4674-9e2b-810e96078b3a@linux.ibm.com/
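As a purely illustrative sketch of that paca idea (the preempt_count
field in paca_struct is hypothetical and not part of this series; the
point is that the count becomes reachable off r13 without going through
current_thread_info()):
	/* Hypothetical sketch only: assumes a new 'preempt_count' field
	 * is added to struct paca_struct on 64-bit.
	 */
	static __always_inline int preempt_count(void)
	{
		return READ_ONCE(local_paca->preempt_count);
	}
	static __always_inline void __preempt_count_add(int val)
	{
		local_paca->preempt_count += val;
	}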
Shrikanth Hegde (3):
powerpc: copy preempt.h into arch/include/asm
powerpc: support dynamic preemption
powerpc: print right preemption model in die
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/preempt.h | 101 +++++++++++++++++++++++++++++
arch/powerpc/kernel/interrupt.c | 6 +-
arch/powerpc/kernel/traps.c | 6 +-
arch/powerpc/lib/vmx-helper.c | 2 +-
5 files changed, 113 insertions(+), 3 deletions(-)
create mode 100644 arch/powerpc/include/asm/preempt.h
--
2.39.3
* [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-11-25 4:22 [PATCH 0/3] powerpc: Enable dynamic preemption Shrikanth Hegde
@ 2024-11-25 4:22 ` Shrikanth Hegde
2024-11-26 10:49 ` Christophe Leroy
2024-11-27 6:37 ` Christophe Leroy
2024-11-25 4:22 ` [PATCH 2/3] powerpc: support dynamic preemption Shrikanth Hegde
2024-11-25 4:22 ` [PATCH 3/3] powerpc: print right preemption model in die Shrikanth Hegde
2 siblings, 2 replies; 17+ messages in thread
From: Shrikanth Hegde @ 2024-11-25 4:22 UTC (permalink / raw)
To: mpe, linuxppc-dev
Cc: sshegde, npiggin, christophe.leroy, maddy, bigeasy, ankur.a.arora,
linux-kernel, mark.rutland, vschneid, peterz
PowerPC currently uses the asm-generic preempt definitions.
Copy them into arch/powerpc/include/asm so that arch-specific changes
can be made. This helps the next patch enable dynamic preemption.
No functional changes intended.
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
1 file changed, 100 insertions(+)
create mode 100644 arch/powerpc/include/asm/preempt.h
diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
new file mode 100644
index 000000000000..51f8f3881523
--- /dev/null
+++ b/arch/powerpc/include/asm/preempt.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED (0)
+
+static __always_inline int preempt_count(void)
+{
+ return READ_ONCE(current_thread_info()->preempt_count);
+}
+
+static __always_inline volatile int *preempt_count_ptr(void)
+{
+ return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ *preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ /*
+ * Because of load-store architectures cannot do per-cpu atomic
+ * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+ * lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
+{
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
+}
+
+#ifdef CONFIG_PREEMPTION
+extern asmlinkage void preempt_schedule(void);
+extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
+
+#define __preempt_schedule() preempt_schedule()
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
+#endif /* CONFIG_PREEMPTION */
+
+#endif /* __ASM_PREEMPT_H */
--
2.39.3
* [PATCH 2/3] powerpc: support dynamic preemption
2024-11-25 4:22 [PATCH 0/3] powerpc: Enable dynamic preemption Shrikanth Hegde
2024-11-25 4:22 ` [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm Shrikanth Hegde
@ 2024-11-25 4:22 ` Shrikanth Hegde
2024-11-26 10:48 ` Christophe Leroy
2024-11-27 6:44 ` Christophe Leroy
2024-11-25 4:22 ` [PATCH 3/3] powerpc: print right preemption model in die Shrikanth Hegde
2 siblings, 2 replies; 17+ messages in thread
From: Shrikanth Hegde @ 2024-11-25 4:22 UTC (permalink / raw)
To: mpe, linuxppc-dev
Cc: sshegde, npiggin, christophe.leroy, maddy, bigeasy, ankur.a.arora,
linux-kernel, mark.rutland, vschneid, peterz
Once lazy preemption is supported, it is desirable to be able to change
the preemption model at runtime. So this change adds support for
dynamic preemption using DYNAMIC_KEY.
In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
decision. The other way would be a static-key-based decision. Keeping
it simpler, since the key-based change didn't show a performance
improvement.
Tested lightly on a Power10 LPAR. Performance numbers indicate that
preempt=none (non-dynamic) and preempt=none (dynamic) are similar.
Only hackbench pipe shows a regression: there is a slight overhead from
the code that checks whether the kernel is preemptible, and hackbench
pipe is prone to such patterns[1].
cat /sys/kernel/debug/sched/preempt
(none) voluntary full lazy
perf stat -e probe:__cond_resched -a sleep 1
Performance counter stats for 'system wide':
1,253 probe:__cond_resched
echo full > /sys/kernel/debug/sched/preempt
cat /sys/kernel/debug/sched/preempt
none voluntary (full) lazy
perf stat -e probe:__cond_resched -a sleep 1
Performance counter stats for 'system wide':
0 probe:__cond_resched
echo lazy > /sys/kernel/debug/sched/preempt
cat /sys/kernel/debug/sched/preempt
none voluntary full (lazy)
perf stat -e probe:__cond_resched -a sleep 1
Performance counter stats for 'system wide':
0 probe:__cond_resched
[1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/preempt.h | 1 +
arch/powerpc/kernel/interrupt.c | 6 +++++-
arch/powerpc/lib/vmx-helper.c | 2 +-
4 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6d6bbd93abab..01c58f5258c9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -270,6 +270,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_RETHOOK if KPROBES
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
index 51f8f3881523..c0a19ff3f78c 100644
--- a/arch/powerpc/include/asm/preempt.h
+++ b/arch/powerpc/include/asm/preempt.h
@@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void);
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule() dynamic_preempt_schedule()
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 8f4acc55407b..0fb01019d7e0 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
}
#endif
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
+
/*
* local irqs must be disabled. Returns false if the caller must re-enable
* them, check for new work, and try again.
@@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
- if (IS_ENABLED(CONFIG_PREEMPTION)) {
+ if (preempt_model_preemptible()) {
/* Return to preemptible kernel context */
if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 58ed6bd613a6..7b069c832ce2 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
* set and we are preemptible. The hack here is to schedule a
* decrementer to fire here and reschedule for us if necessary.
*/
- if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
+ if (preempt_model_preemptible() && need_resched())
set_dec(1);
return 0;
}
--
2.39.3
* [PATCH 3/3] powerpc: print right preemption model in die
2024-11-25 4:22 [PATCH 0/3] powerpc: Enable dynamic preemption Shrikanth Hegde
2024-11-25 4:22 ` [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm Shrikanth Hegde
2024-11-25 4:22 ` [PATCH 2/3] powerpc: support dynamic preemption Shrikanth Hegde
@ 2024-11-25 4:22 ` Shrikanth Hegde
2024-12-04 6:44 ` Christophe Leroy
2 siblings, 1 reply; 17+ messages in thread
From: Shrikanth Hegde @ 2024-11-25 4:22 UTC (permalink / raw)
To: mpe, linuxppc-dev
Cc: sshegde, npiggin, christophe.leroy, maddy, bigeasy, ankur.a.arora,
linux-kernel, mark.rutland, vschneid, peterz
With dynamic preemption in place, the preemption model can change at
runtime. So use the preempt_model_*() helpers instead of relying on
CONFIG_PREEMPT to decide whether full preemption is enabled.
While there, fix the die() output to print the preemption model
correctly.
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
arch/powerpc/kernel/traps.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index edf5cabe5dfd..2556fa8ec019 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -266,7 +266,11 @@ static int __die(const char *str, struct pt_regs *regs, long err)
printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
PAGE_SIZE / 1024, get_mmu_str(),
- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
+ preempt_model_none() ? "none" :
+ preempt_model_voluntary() ? "voluntary" :
+ preempt_model_full() ? "full" :
+ preempt_model_lazy() ? "lazy" :
+ "",
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
--
2.39.3
* Re: [PATCH 2/3] powerpc: support dynamic preemption
2024-11-25 4:22 ` [PATCH 2/3] powerpc: support dynamic preemption Shrikanth Hegde
@ 2024-11-26 10:48 ` Christophe Leroy
2024-11-26 11:15 ` Shrikanth Hegde
2024-11-27 6:44 ` Christophe Leroy
1 sibling, 1 reply; 17+ messages in thread
From: Christophe Leroy @ 2024-11-26 10:48 UTC (permalink / raw)
To: Shrikanth Hegde, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> Once the lazy preemption is supported, it would be desirable to change
> the preemption models at runtime. So this change adds support for dynamic
> preemption using DYNAMIC_KEY.
>
> In irq-exit to kernel path, use preempt_model_preemptible for decision.
> Other way would be using static key based decision. Keeping it
> simpler since key based change didn't show performance improvement.
>
> Tested lightly on Power10 LPAR. Performance numbers indicate that,
> preempt=none(no dynamic) and preempt=none(dynamic) are similar.
> Only hackbench pipe shows a regression. There is slight overhead of code
> check if it is preemptible kernel. hackbench pipe is prone to such
> patterns[1]
>
> cat /sys/kernel/debug/sched/preempt
> (none) voluntary full lazy
> perf stat -e probe:__cond_resched -a sleep 1
> Performance counter stats for 'system wide':
> 1,253 probe:__cond_resched
>
> echo full > /sys/kernel/debug/sched/preempt
> cat /sys/kernel/debug/sched/preempt
> none voluntary (full) lazy
> perf stat -e probe:__cond_resched -a sleep 1
> Performance counter stats for 'system wide':
> 0 probe:__cond_resched
>
> echo lazy > /sys/kernel/debug/sched/preempt
> cat /sys/kernel/debug/sched/preempt
> none voluntary full (lazy)
> perf stat -e probe:__cond_resched -a sleep 1
> Performance counter stats for 'system wide':
> 0 probe:__cond_resched
>
> [1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/
>
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
> arch/powerpc/Kconfig | 1 +
> arch/powerpc/include/asm/preempt.h | 1 +
> arch/powerpc/kernel/interrupt.c | 6 +++++-
> arch/powerpc/lib/vmx-helper.c | 2 +-
> 4 files changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 6d6bbd93abab..01c58f5258c9 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -270,6 +270,7 @@ config PPC
> select HAVE_PERF_EVENTS_NMI if PPC64
> select HAVE_PERF_REGS
> select HAVE_PERF_USER_STACK_DUMP
> + select HAVE_PREEMPT_DYNAMIC_KEY
> select HAVE_RETHOOK if KPROBES
> select HAVE_REGS_AND_STACK_ACCESS_API
> select HAVE_RELIABLE_STACKTRACE
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> index 51f8f3881523..c0a19ff3f78c 100644
> --- a/arch/powerpc/include/asm/preempt.h
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
>
> #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>
> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> void dynamic_preempt_schedule(void);
> void dynamic_preempt_schedule_notrace(void);
> #define __preempt_schedule() dynamic_preempt_schedule()
> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
> index 8f4acc55407b..0fb01019d7e0 100644
> --- a/arch/powerpc/kernel/interrupt.c
> +++ b/arch/powerpc/kernel/interrupt.c
> @@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
> }
> #endif
>
> +#ifdef CONFIG_PREEMPT_DYNAMIC
> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> +#endif
Why is that needed at all? It isn't used.
> +
> /*
> * local irqs must be disabled. Returns false if the caller must re-enable
> * them, check for new work, and try again.
> @@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
> /* Returning to a kernel context with local irqs enabled. */
> WARN_ON_ONCE(!(regs->msr & MSR_EE));
> again:
> - if (IS_ENABLED(CONFIG_PREEMPTION)) {
> + if (preempt_model_preemptible()) {
> /* Return to preemptible kernel context */
> if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
> if (preempt_count() == 0)
> diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
> index 58ed6bd613a6..7b069c832ce2 100644
> --- a/arch/powerpc/lib/vmx-helper.c
> +++ b/arch/powerpc/lib/vmx-helper.c
> @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
> * set and we are preemptible. The hack here is to schedule a
> * decrementer to fire here and reschedule for us if necessary.
> */
> - if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
> + if (preempt_model_preemptible() && need_resched())
> set_dec(1);
> return 0;
> }
* Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-11-25 4:22 ` [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm Shrikanth Hegde
@ 2024-11-26 10:49 ` Christophe Leroy
2024-11-27 6:37 ` Christophe Leroy
1 sibling, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2024-11-26 10:49 UTC (permalink / raw)
To: Shrikanth Hegde, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> PowerPC uses asm-generic preempt definitions as of now.
> Copy that into arch/asm so that arch specific changes can be done.
> This would help the next patch for enabling dynamic preemption.
I can't see any valid use in the following patches. The only
modification you make to that file is in patch 2, and it is unused.
>
> No functional changes intended.
>
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
> arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
> 1 file changed, 100 insertions(+)
> create mode 100644 arch/powerpc/include/asm/preempt.h
>
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> new file mode 100644
> index 000000000000..51f8f3881523
> --- /dev/null
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -0,0 +1,100 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_PREEMPT_H
> +#define __ASM_PREEMPT_H
> +
> +#include <linux/thread_info.h>
> +
> +#define PREEMPT_ENABLED (0)
> +
> +static __always_inline int preempt_count(void)
> +{
> + return READ_ONCE(current_thread_info()->preempt_count);
> +}
> +
> +static __always_inline volatile int *preempt_count_ptr(void)
> +{
> + return &current_thread_info()->preempt_count;
> +}
> +
> +static __always_inline void preempt_count_set(int pc)
> +{
> + *preempt_count_ptr() = pc;
> +}
> +
> +/*
> + * must be macros to avoid header recursion hell
> + */
> +#define init_task_preempt_count(p) do { \
> + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
> +} while (0)
> +
> +#define init_idle_preempt_count(p, cpu) do { \
> + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
> +} while (0)
> +
> +static __always_inline void set_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline void clear_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline bool test_preempt_need_resched(void)
> +{
> + return false;
> +}
> +
> +/*
> + * The various preempt_count add/sub methods
> + */
> +
> +static __always_inline void __preempt_count_add(int val)
> +{
> + *preempt_count_ptr() += val;
> +}
> +
> +static __always_inline void __preempt_count_sub(int val)
> +{
> + *preempt_count_ptr() -= val;
> +}
> +
> +static __always_inline bool __preempt_count_dec_and_test(void)
> +{
> + /*
> + * Because of load-store architectures cannot do per-cpu atomic
> + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
> + * lost.
> + */
> + return !--*preempt_count_ptr() && tif_need_resched();
> +}
> +
> +/*
> + * Returns true when we need to resched and can (barring IRQ state).
> + */
> +static __always_inline bool should_resched(int preempt_offset)
> +{
> + return unlikely(preempt_count() == preempt_offset &&
> + tif_need_resched());
> +}
> +
> +#ifdef CONFIG_PREEMPTION
> +extern asmlinkage void preempt_schedule(void);
> +extern asmlinkage void preempt_schedule_notrace(void);
> +
> +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> +
> +void dynamic_preempt_schedule(void);
> +void dynamic_preempt_schedule_notrace(void);
> +#define __preempt_schedule() dynamic_preempt_schedule()
> +#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
> +
> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +
> +#define __preempt_schedule() preempt_schedule()
> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
> +
> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +#endif /* CONFIG_PREEMPTION */
> +
> +#endif /* __ASM_PREEMPT_H */
* Re: [PATCH 2/3] powerpc: support dynamic preemption
2024-11-26 10:48 ` Christophe Leroy
@ 2024-11-26 11:15 ` Shrikanth Hegde
2024-11-27 6:28 ` Christophe Leroy
0 siblings, 1 reply; 17+ messages in thread
From: Shrikanth Hegde @ 2024-11-26 11:15 UTC (permalink / raw)
To: Christophe Leroy
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz, mpe, linuxppc-dev
On 11/26/24 16:18, Christophe Leroy wrote:
>
>
Hi Christophe, thanks for taking a look at this.
> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>> Once the lazy preemption is supported, it would be desirable to change
>> the preemption models at runtime. So this change adds support for dynamic
>> preemption using DYNAMIC_KEY.
>>
>> In irq-exit to kernel path, use preempt_model_preemptible for decision.
>> Other way would be using static key based decision. Keeping it
>> simpler since key based change didn't show performance improvement.
>>
>> Tested lightly on Power10 LPAR. Performance numbers indicate that,
>> preempt=none(no dynamic) and preempt=none(dynamic) are similar.
>> Only hackbench pipe shows a regression. There is slight overhead of code
>> check if it is preemptible kernel. hackbench pipe is prone to such
>> patterns[1]
>>
>> cat /sys/kernel/debug/sched/preempt
>> (none) voluntary full lazy
>> perf stat -e probe:__cond_resched -a sleep 1
>> Performance counter stats for 'system wide':
>> 1,253 probe:__cond_resched
>>
>> echo full > /sys/kernel/debug/sched/preempt
>> cat /sys/kernel/debug/sched/preempt
>> none voluntary (full) lazy
>> perf stat -e probe:__cond_resched -a sleep 1
>> Performance counter stats for 'system wide':
>> 0 probe:__cond_resched
>>
>> echo lazy > /sys/kernel/debug/sched/preempt
>> cat /sys/kernel/debug/sched/preempt
>> none voluntary full (lazy)
>> perf stat -e probe:__cond_resched -a sleep 1
>> Performance counter stats for 'system wide':
>> 0 probe:__cond_resched
>>
>> [1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-
>> e4b93eb077b8@linux.ibm.com/
>>
>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>> ---
>> arch/powerpc/Kconfig | 1 +
>> arch/powerpc/include/asm/preempt.h | 1 +
>> arch/powerpc/kernel/interrupt.c | 6 +++++-
>> arch/powerpc/lib/vmx-helper.c | 2 +-
>> 4 files changed, 8 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 6d6bbd93abab..01c58f5258c9 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -270,6 +270,7 @@ config PPC
>> select HAVE_PERF_EVENTS_NMI if PPC64
>> select HAVE_PERF_REGS
>> select HAVE_PERF_USER_STACK_DUMP
>> + select HAVE_PREEMPT_DYNAMIC_KEY
>> select HAVE_RETHOOK if KPROBES
>> select HAVE_REGS_AND_STACK_ACCESS_API
>> select HAVE_RELIABLE_STACKTRACE
>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/
>> include/asm/preempt.h
>> index 51f8f3881523..c0a19ff3f78c 100644
>> --- a/arch/powerpc/include/asm/preempt.h
>> +++ b/arch/powerpc/include/asm/preempt.h
>> @@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
>> #if defined(CONFIG_PREEMPT_DYNAMIC) &&
>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> void dynamic_preempt_schedule(void);
>> void dynamic_preempt_schedule_notrace(void);
>> #define __preempt_schedule() dynamic_preempt_schedule()
>> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/
>> interrupt.c
>> index 8f4acc55407b..0fb01019d7e0 100644
>> --- a/arch/powerpc/kernel/interrupt.c
>> +++ b/arch/powerpc/kernel/interrupt.c
>> @@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
>> }
>> #endif
>> +#ifdef CONFIG_PREEMPT_DYNAMIC
>> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> +#endif
>
> Why is that needed at all ? It isn't used.
This is needed, else compilation fails.
It has to be defined by the arch if it doesn't use the kernel's generic
entry/exit infrastructure. So if an arch enables
CONFIG_HAVE_PREEMPT_DYNAMIC_KEY, it has to define this key as well; the
generic sched/core code enables it.
This was one of the points I was requesting an answer for: either use
preempt_model_preemptible() or define macros based on this key. Other
archs do the latter, which is why the generic code enables this key.
It can be done either way; if we do it the latter way, then this
variable will be used as well.
I hope this answers the question on patch 1 as well. Otherwise I would
have to declare it in one of the other arch asm/*.h files.
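To make the two options concrete, the irq-exit decision would look
roughly like this (an illustrative sketch, not the exact patch):
	/* Option A (this series): ask the current preemption model. */
	if (preempt_model_preemptible()) {
		/* Return to preemptible kernel context. */
	}
	/* Option B (generic entry style with HAVE_PREEMPT_DYNAMIC_KEY):
	 * test the static key directly, which is why the key must be
	 * defined somewhere.
	 */
	if (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched)) {
		/* Return to preemptible kernel context. */
	}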
>
>> +
>> /*
>> * local irqs must be disabled. Returns false if the caller must re-
>> enable
>> * them, check for new work, and try again.
>> @@ -396,7 +400,7 @@ notrace unsigned long
>> interrupt_exit_kernel_prepare(struct pt_regs *regs)
>> /* Returning to a kernel context with local irqs enabled. */
>> WARN_ON_ONCE(!(regs->msr & MSR_EE));
>> again:
>> - if (IS_ENABLED(CONFIG_PREEMPTION)) {
>> + if (preempt_model_preemptible()) {
>> /* Return to preemptible kernel context */
>> if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
>> if (preempt_count() == 0)
>> diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-
>> helper.c
>> index 58ed6bd613a6..7b069c832ce2 100644
>> --- a/arch/powerpc/lib/vmx-helper.c
>> +++ b/arch/powerpc/lib/vmx-helper.c
>> @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
>> * set and we are preemptible. The hack here is to schedule a
>> * decrementer to fire here and reschedule for us if necessary.
>> */
>> - if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
>> + if (preempt_model_preemptible() && need_resched())
>> set_dec(1);
>> return 0;
>> }
* Re: [PATCH 2/3] powerpc: support dynamic preemption
2024-11-26 11:15 ` Shrikanth Hegde
@ 2024-11-27 6:28 ` Christophe Leroy
0 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2024-11-27 6:28 UTC (permalink / raw)
To: Shrikanth Hegde
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz, mpe, linuxppc-dev
Le 26/11/2024 à 12:15, Shrikanth Hegde a écrit :
>
>
> On 11/26/24 16:18, Christophe Leroy wrote:
>>
>>
>
> Hi Christophe, Thanks for taking a look at this.
>
>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>> Once the lazy preemption is supported, it would be desirable to change
>>> the preemption models at runtime. So this change adds support for
>>> dynamic
>>> preemption using DYNAMIC_KEY.
>>>
>>> In irq-exit to kernel path, use preempt_model_preemptible for decision.
>>> Other way would be using static key based decision. Keeping it
>>> simpler since key based change didn't show performance improvement.
>>>
>>> Tested lightly on Power10 LPAR. Performance numbers indicate that,
>>> preempt=none(no dynamic) and preempt=none(dynamic) are similar.
>>> Only hackbench pipe shows a regression. There is slight overhead of code
>>> check if it is preemptible kernel. hackbench pipe is prone to such
>>> patterns[1]
>>>
>>> cat /sys/kernel/debug/sched/preempt
>>> (none) voluntary full lazy
>>> perf stat -e probe:__cond_resched -a sleep 1
>>> Performance counter stats for 'system wide':
>>> 1,253 probe:__cond_resched
>>>
>>> echo full > /sys/kernel/debug/sched/preempt
>>> cat /sys/kernel/debug/sched/preempt
>>> none voluntary (full) lazy
>>> perf stat -e probe:__cond_resched -a sleep 1
>>> Performance counter stats for 'system wide':
>>> 0 probe:__cond_resched
>>>
>>> echo lazy > /sys/kernel/debug/sched/preempt
>>> cat /sys/kernel/debug/sched/preempt
>>> none voluntary full (lazy)
>>> perf stat -e probe:__cond_resched -a sleep 1
>>> Performance counter stats for 'system wide':
>>> 0 probe:__cond_resched
>>>
>>> [1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/
>>>
>>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>>> ---
>>> arch/powerpc/Kconfig | 1 +
>>> arch/powerpc/include/asm/preempt.h | 1 +
>>> arch/powerpc/kernel/interrupt.c | 6 +++++-
>>> arch/powerpc/lib/vmx-helper.c | 2 +-
>>> 4 files changed, 8 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>> index 6d6bbd93abab..01c58f5258c9 100644
>>> --- a/arch/powerpc/Kconfig
>>> +++ b/arch/powerpc/Kconfig
>>> @@ -270,6 +270,7 @@ config PPC
>>> select HAVE_PERF_EVENTS_NMI if PPC64
>>> select HAVE_PERF_REGS
>>> select HAVE_PERF_USER_STACK_DUMP
>>> + select HAVE_PREEMPT_DYNAMIC_KEY
>>> select HAVE_RETHOOK if KPROBES
>>> select HAVE_REGS_AND_STACK_ACCESS_API
>>> select HAVE_RELIABLE_STACKTRACE
>>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/
>>> include/asm/preempt.h
>>> index 51f8f3881523..c0a19ff3f78c 100644
>>> --- a/arch/powerpc/include/asm/preempt.h
>>> +++ b/arch/powerpc/include/asm/preempt.h
>>> @@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
>>> #if defined(CONFIG_PREEMPT_DYNAMIC) &&
>>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>>> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>> void dynamic_preempt_schedule(void);
>>> void dynamic_preempt_schedule_notrace(void);
>>> #define __preempt_schedule() dynamic_preempt_schedule()
>>> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/
>>> interrupt.c
>>> index 8f4acc55407b..0fb01019d7e0 100644
>>> --- a/arch/powerpc/kernel/interrupt.c
>>> +++ b/arch/powerpc/kernel/interrupt.c
>>> @@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
>>> }
>>> #endif
>>> +#ifdef CONFIG_PREEMPT_DYNAMIC
>>> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>> +#endif
>>
>> Why is that needed at all ? It isn't used.
>
> This is needed else compilation fails.
>
> It has be defined by arch if it doesn't use kernel infra of entry/exit.
> So if an arch does enable, CONFIG_HAVE_PREEMPT_DYNAMIC_KEY it has to be
> define this key has well. The generic sched/core enables this flag.
>
> This was one of the point I was requesting answer for. Either to use
> preempt_model_preemptible or define macros based on this key. Other
> archs are doing the later and hence the generic code enables this key.
>
> It can be done in either way. if we do the later way, then this variable
> will be used as well.
>
Ah right, I did a grep on sk_dynamic_irqentry_exit_cond_resched but
indeed it is triggered by static_key_enable(&sk_dynamic_##f.key)
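For reference, the generic code flips these keys roughly like this when
the model is switched at runtime (paraphrased from kernel/sched/core.c
from memory, so check the tree for the exact form):
	#define preempt_dynamic_enable(f)  static_key_enable(&sk_dynamic_##f.key)
	#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
	/* e.g. switching to full preemption enables the irq-exit key: */
	preempt_dynamic_enable(irqentry_exit_cond_resched);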
Christophe
* Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-11-25 4:22 ` [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm Shrikanth Hegde
2024-11-26 10:49 ` Christophe Leroy
@ 2024-11-27 6:37 ` Christophe Leroy
2024-12-02 14:05 ` Shrikanth Hegde
1 sibling, 1 reply; 17+ messages in thread
From: Christophe Leroy @ 2024-11-27 6:37 UTC (permalink / raw)
To: Shrikanth Hegde, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> PowerPC uses asm-generic preempt definitions as of now.
> Copy that into arch/asm so that arch specific changes can be done.
> This would help the next patch for enabling dynamic preemption.
Instead of copying all the content of the asm-generic version, can you
just create a receptacle for your new macros that includes
asm-generic/preempt.h?
Look at arch/powerpc/include/asm/percpu.h for example.
>
> No functional changes intended.
>
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
> arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
> 1 file changed, 100 insertions(+)
> create mode 100644 arch/powerpc/include/asm/preempt.h
>
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> new file mode 100644
> index 000000000000..51f8f3881523
> --- /dev/null
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -0,0 +1,100 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_PREEMPT_H
> +#define __ASM_PREEMPT_H
Should be __ASM_POWERPC_PREEMPT_H
> +
> +#include <linux/thread_info.h>
> +
> +#define PREEMPT_ENABLED (0)
> +
> +static __always_inline int preempt_count(void)
> +{
> + return READ_ONCE(current_thread_info()->preempt_count);
> +}
> +
> +static __always_inline volatile int *preempt_count_ptr(void)
> +{
> + return &current_thread_info()->preempt_count;
> +}
> +
> +static __always_inline void preempt_count_set(int pc)
> +{
> + *preempt_count_ptr() = pc;
> +}
> +
> +/*
> + * must be macros to avoid header recursion hell
> + */
> +#define init_task_preempt_count(p) do { \
> + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
> +} while (0)
> +
> +#define init_idle_preempt_count(p, cpu) do { \
> + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
> +} while (0)
> +
> +static __always_inline void set_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline void clear_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline bool test_preempt_need_resched(void)
> +{
> + return false;
> +}
> +
> +/*
> + * The various preempt_count add/sub methods
> + */
> +
> +static __always_inline void __preempt_count_add(int val)
> +{
> + *preempt_count_ptr() += val;
> +}
> +
> +static __always_inline void __preempt_count_sub(int val)
> +{
> + *preempt_count_ptr() -= val;
> +}
> +
> +static __always_inline bool __preempt_count_dec_and_test(void)
> +{
> + /*
> + * Because of load-store architectures cannot do per-cpu atomic
> + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
> + * lost.
> + */
> + return !--*preempt_count_ptr() && tif_need_resched();
> +}
> +
> +/*
> + * Returns true when we need to resched and can (barring IRQ state).
> + */
> +static __always_inline bool should_resched(int preempt_offset)
> +{
> + return unlikely(preempt_count() == preempt_offset &&
> + tif_need_resched());
> +}
> +
> +#ifdef CONFIG_PREEMPTION
> +extern asmlinkage void preempt_schedule(void);
> +extern asmlinkage void preempt_schedule_notrace(void);
> +
> +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> +
> +void dynamic_preempt_schedule(void);
> +void dynamic_preempt_schedule_notrace(void);
> +#define __preempt_schedule() dynamic_preempt_schedule()
> +#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
> +
> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +
> +#define __preempt_schedule() preempt_schedule()
> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
> +
> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +#endif /* CONFIG_PREEMPTION */
> +
> +#endif /* __ASM_PREEMPT_H */
* Re: [PATCH 2/3] powerpc: support dynamic preemption
2024-11-25 4:22 ` [PATCH 2/3] powerpc: support dynamic preemption Shrikanth Hegde
2024-11-26 10:48 ` Christophe Leroy
@ 2024-11-27 6:44 ` Christophe Leroy
2024-12-01 19:45 ` Shrikanth Hegde
1 sibling, 1 reply; 17+ messages in thread
From: Christophe Leroy @ 2024-11-27 6:44 UTC (permalink / raw)
To: Shrikanth Hegde, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> Once the lazy preemption is supported, it would be desirable to change
> the preemption models at runtime. So this change adds support for dynamic
> preemption using DYNAMIC_KEY.
>
> In irq-exit to kernel path, use preempt_model_preemptible for decision.
> Other way would be using static key based decision. Keeping it
> simpler since key based change didn't show performance improvement.
What about static_call, wouldn't it improve performance?
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 6d6bbd93abab..01c58f5258c9 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -270,6 +270,7 @@ config PPC
> select HAVE_PERF_EVENTS_NMI if PPC64
> select HAVE_PERF_REGS
> select HAVE_PERF_USER_STACK_DUMP
> + select HAVE_PREEMPT_DYNAMIC_KEY
Can you use HAVE_PREEMPT_DYNAMIC_CALL instead? That should be more
performant.
I know static calls are not in for PPC64 yet, you can restart from
http://patchwork.ozlabs.org/project/linuxppc-dev/cover/20221010002957.128276-1-bgray@linux.ibm.com/
and https://github.com/linuxppc/issues/issues/416
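With the static_call flavour the call sites get patched instead of
testing a key at runtime; roughly, from the HAVE_PREEMPT_DYNAMIC_CALL
side of kernel/sched/core.c (quoted from memory, not powerpc code):
	DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
	#define preempt_dynamic_enable(f)  static_call_update(f, f##_dynamic_enabled)
	#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
So a model switch rewrites the branch at each call site, leaving no
test in the hot path.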
Christophe
* Re: [PATCH 2/3] powerpc: support dynamic preemption
2024-11-27 6:44 ` Christophe Leroy
@ 2024-12-01 19:45 ` Shrikanth Hegde
2024-12-03 19:53 ` Christophe Leroy
0 siblings, 1 reply; 17+ messages in thread
From: Shrikanth Hegde @ 2024-12-01 19:45 UTC (permalink / raw)
To: Christophe Leroy, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
On 11/27/24 12:14, Christophe Leroy wrote:
>
>
> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>> Once the lazy preemption is supported, it would be desirable to change
>> the preemption models at runtime. So this change adds support for dynamic
>> preemption using DYNAMIC_KEY.
>>
>> In irq-exit to kernel path, use preempt_model_preemptible for decision.
>> Other way would be using static key based decision. Keeping it
>> simpler since key based change didn't show performance improvement.
>
> What about static_call, wouldn't it improve performance ?
>
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 6d6bbd93abab..01c58f5258c9 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -270,6 +270,7 @@ config PPC
>> select HAVE_PERF_EVENTS_NMI if PPC64
>> select HAVE_PERF_REGS
>> select HAVE_PERF_USER_STACK_DUMP
>> + select HAVE_PREEMPT_DYNAMIC_KEY
>
> Can you use HAVE_PREEPT_DYNAMIC_CALL instead ? That should be more
> performant.
>
> I know static calls are not in for PPC64 yet, you can restart from
> http://patchwork.ozlabs.org/project/linuxppc-dev/
> cover/20221010002957.128276-1-bgray@linux.ibm.com/ and https://
> github.com/linuxppc/issues/issues/416
>
Thanks Christophe, I will take a look and understand.
Maybe a stupid question: are the concerns of arm64 still valid for ppc64/ppc32 out-of-line static calls?
https://lore.kernel.org/all/20220214165216.2231574-6-mark.rutland@arm.com/
As I understood it, that is the reason they went ahead with DYNAMIC_KEY.
> Christophe
* Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-11-27 6:37 ` Christophe Leroy
@ 2024-12-02 14:05 ` Shrikanth Hegde
2024-12-02 18:17 ` Christophe Leroy
0 siblings, 1 reply; 17+ messages in thread
From: Shrikanth Hegde @ 2024-12-02 14:05 UTC (permalink / raw)
To: Christophe Leroy, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz, Michael Ellerman
On 11/27/24 12:07, Christophe Leroy wrote:
>
>
> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>> PowerPC uses asm-generic preempt definitions as of now.
>> Copy that into arch/asm so that arch specific changes can be done.
>> This would help the next patch for enabling dynamic preemption.
>
The reason I wanted the full content instead was to allow future
patches where I thought of making the preempt count per paca, for ppc64
at least; the generic code assumes it is per thread. If this change is
to be done at that point, that is fair too. I am okay with it.
> Instead of copying all the content of asm-generic version, can you just
> create a receptacle for your new macros, that will include asm-generic/
> preempt.h ?
>
> Look at arch/powerpc/include/asm/percpu.h for exemple.
>
You mean something like the below, right?
#ifndef __ASM_POWERPC_PREEMPT_H
#define __ASM_POWERPC_PREEMPT_H
#include <asm-generic/preempt.h>
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#endif
#endif /* __ASM_POWERPC_PREEMPT_H */
>>
>> No functional changes intended.
>>
>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>> ---
>> arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
>> 1 file changed, 100 insertions(+)
>> create mode 100644 arch/powerpc/include/asm/preempt.h
>>
>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/
>> include/asm/preempt.h
>> new file mode 100644
>> index 000000000000..51f8f3881523
>> --- /dev/null
>> +++ b/arch/powerpc/include/asm/preempt.h
>> @@ -0,0 +1,100 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +#ifndef __ASM_PREEMPT_H
>> +#define __ASM_PREEMPT_H
>
> Should be __ASM_POWERPC_PREEMPT_H
thanks for catching this.
>
>> +
>> +#include <linux/thread_info.h>
>> +
>> +#define PREEMPT_ENABLED (0)
>> +
>> +static __always_inline int preempt_count(void)
>> +{
>> + return READ_ONCE(current_thread_info()->preempt_count);
>> +}
>> +
>> +static __always_inline volatile int *preempt_count_ptr(void)
>> +{
>> + return &current_thread_info()->preempt_count;
>> +}
>> +
>> +static __always_inline void preempt_count_set(int pc)
>> +{
>> + *preempt_count_ptr() = pc;
>> +}
>> +
>> +/*
>> + * must be macros to avoid header recursion hell
>> + */
>> +#define init_task_preempt_count(p) do { \
>> + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
>> +} while (0)
>> +
>> +#define init_idle_preempt_count(p, cpu) do { \
>> + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
>> +} while (0)
>> +
>> +static __always_inline void set_preempt_need_resched(void)
>> +{
>> +}
>> +
>> +static __always_inline void clear_preempt_need_resched(void)
>> +{
>> +}
>> +
>> +static __always_inline bool test_preempt_need_resched(void)
>> +{
>> + return false;
>> +}
>> +
>> +/*
>> + * The various preempt_count add/sub methods
>> + */
>> +
>> +static __always_inline void __preempt_count_add(int val)
>> +{
>> + *preempt_count_ptr() += val;
>> +}
>> +
>> +static __always_inline void __preempt_count_sub(int val)
>> +{
>> + *preempt_count_ptr() -= val;
>> +}
>> +
>> +static __always_inline bool __preempt_count_dec_and_test(void)
>> +{
>> + /*
>> + * Because of load-store architectures cannot do per-cpu atomic
>> + * operations; we cannot use PREEMPT_NEED_RESCHED because it
>> might get
>> + * lost.
>> + */
>> + return !--*preempt_count_ptr() && tif_need_resched();
>> +}
>> +
>> +/*
>> + * Returns true when we need to resched and can (barring IRQ state).
>> + */
>> +static __always_inline bool should_resched(int preempt_offset)
>> +{
>> + return unlikely(preempt_count() == preempt_offset &&
>> + tif_need_resched());
>> +}
>> +
>> +#ifdef CONFIG_PREEMPTION
>> +extern asmlinkage void preempt_schedule(void);
>> +extern asmlinkage void preempt_schedule_notrace(void);
>> +
>> +#if defined(CONFIG_PREEMPT_DYNAMIC) &&
>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> +
>> +void dynamic_preempt_schedule(void);
>> +void dynamic_preempt_schedule_notrace(void);
>> +#define __preempt_schedule() dynamic_preempt_schedule()
>> +#define __preempt_schedule_notrace()
>> dynamic_preempt_schedule_notrace()
>> +
>> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
>> +
>> +#define __preempt_schedule() preempt_schedule()
>> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
>> +
>> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
>> +#endif /* CONFIG_PREEMPTION */
>> +
>> +#endif /* __ASM_PREEMPT_H */
* Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-12-02 14:05 ` Shrikanth Hegde
@ 2024-12-02 18:17 ` Christophe Leroy
2024-12-03 14:00 ` Shrikanth Hegde
0 siblings, 1 reply; 17+ messages in thread
From: Christophe Leroy @ 2024-12-02 18:17 UTC (permalink / raw)
To: Shrikanth Hegde, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz, Michael Ellerman
Le 02/12/2024 à 15:05, Shrikanth Hegde a écrit :
>
>
> On 11/27/24 12:07, Christophe Leroy wrote:
>>
>>
>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>> PowerPC uses asm-generic preempt definitions as of now.
>>> Copy that into arch/asm so that arch specific changes can be done.
>>> This would help the next patch for enabling dynamic preemption.
>>
>
> The reason I want the content instead was to allow future patches where
> I thought of making preempt count per paca for ppc64 atleast. generic
> code assumes it is per thread. If this change is to be done at that
> point, that is fair too. I am okay with it.
I think it is better to keep the series minimal and consistent. If you
have a future plan, no problem: keep it for the future and do
everything at once then, unless it is heavy and better done in two
steps.
As we say in French, a lot of water will have flowed under the bridge by
then.
I'm sure there will be a lot of discussion when you do that and maybe at
the end you will end up with something completely different than what
you have in mind at the moment.
>
>
>> Instead of copying all the content of asm-generic version, can you
>> just create a receptacle for your new macros, that will include asm-
>> generic/ preempt.h ?
>>
>> Look at arch/powerpc/include/asm/percpu.h for exemple.
>>
>
> You mean something like below right?
>
>
> #ifndef __ASM_POWERPC_PREEMPT_H
> #define __ASM_POWERPC_PREEMPT_H
>
> #include <asm-generic/preempt.h>
>
> #if defined(CONFIG_PREEMPT_DYNAMIC) &&
> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> #endif
>
> #endif /* __ASM_POWERPC_PREEMPT_H */
Yes exactly.
* Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-12-02 18:17 ` Christophe Leroy
@ 2024-12-03 14:00 ` Shrikanth Hegde
2024-12-03 19:47 ` Christophe Leroy
0 siblings, 1 reply; 17+ messages in thread
From: Shrikanth Hegde @ 2024-12-03 14:00 UTC (permalink / raw)
To: Christophe Leroy
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz, Michael Ellerman, linuxppc-dev
On 12/2/24 23:47, Christophe Leroy wrote:
>
>
> Le 02/12/2024 à 15:05, Shrikanth Hegde a écrit :
>>
>>
>> On 11/27/24 12:07, Christophe Leroy wrote:
>>>
>>>
>>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>>> PowerPC uses asm-generic preempt definitions as of now.
>>>> Copy that into arch/asm so that arch specific changes can be done.
>>>> This would help the next patch for enabling dynamic preemption.
>>>
>>
>> The reason I want the content instead was to allow future patches
>> where I thought of making preempt count per paca for ppc64 atleast.
>> generic code assumes it is per thread. If this change is to be done at
>> that point, that is fair too. I am okay with it.
>
> I think it is better to keep series minimal and consistent. If you have
> a futur plan, no problem, keep it future and do everything at once
> unless it is heavy and better done in two steps.
>
> As we say in French, a lot of water will have flowed under the bridge by
> then.
>
> I'm sure there will be a lot of discussion when you do that and maybe at
> the end you will end up with something completely different than what
> you have in mind at the moment.
>
ok.
>>
>>
>>> Instead of copying all the content of asm-generic version, can you
>>> just create a receptacle for your new macros, that will include asm-
>>> generic/ preempt.h ?
>>>
>>> Look at arch/powerpc/include/asm/percpu.h for exemple.
>>>
>>
>> You mean something like below right?
>>
>>
>> #ifndef __ASM_POWERPC_PREEMPT_H
>> #define __ASM_POWERPC_PREEMPT_H
>>
>> #include <asm-generic/preempt.h>
>>
>> #if defined(CONFIG_PREEMPT_DYNAMIC) &&
>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> #endif
>>
>> #endif /* __ASM_POWERPC_PREEMPT_H */
>
> Yes exactly.
>
>
Should I send v2 with this and using DYNAMIC_KEY?
* Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
2024-12-03 14:00 ` Shrikanth Hegde
@ 2024-12-03 19:47 ` Christophe Leroy
0 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2024-12-03 19:47 UTC (permalink / raw)
To: Shrikanth Hegde
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz, Michael Ellerman, linuxppc-dev
Le 03/12/2024 à 15:00, Shrikanth Hegde a écrit :
>
>
> On 12/2/24 23:47, Christophe Leroy wrote:
>>
>>
>> Le 02/12/2024 à 15:05, Shrikanth Hegde a écrit :
>>>
>>>
>>> On 11/27/24 12:07, Christophe Leroy wrote:
>>>>
>>>>
>>>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>>>> PowerPC uses asm-generic preempt definitions as of now.
>>>>> Copy that into arch/asm so that arch specific changes can be done.
>>>>> This would help the next patch for enabling dynamic preemption.
>>>>
>>>
>>> The reason I want the content instead was to allow future patches
>>> where I thought of making preempt count per paca for ppc64 atleast.
>>> generic code assumes it is per thread. If this change is to be done
>>> at that point, that is fair too. I am okay with it.
>>
>> I think it is better to keep series minimal and consistent. If you
>> have a futur plan, no problem, keep it future and do everything at
>> once unless it is heavy and better done in two steps.
>>
>> As we say in French, a lot of water will have flowed under the bridge
>> by then.
>>
>> I'm sure there will be a lot of discussion when you do that and maybe
>> at the end you will end up with something completely different than
>> what you have in mind at the moment.
>>
>
> ok.
>
>>>
>>>
>>>> Instead of copying all the content of asm-generic version, can you
>>>> just create a receptacle for your new macros, that will include asm-
>>>> generic/ preempt.h ?
>>>>
>>>> Look at arch/powerpc/include/asm/percpu.h for exemple.
>>>>
>>>
>>> You mean something like below right?
>>>
>>>
>>> #ifndef __ASM_POWERPC_PREEMPT_H
>>> #define __ASM_POWERPC_PREEMPT_H
>>>
>>> #include <asm-generic/preempt.h>
>>>
>>> #if defined(CONFIG_PREEMPT_DYNAMIC) &&
>>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>>> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>> #endif
>>>
>>> #endif /* __ASM_POWERPC_PREEMPT_H */
>>
>> Yes exactly.
>>
>>
>
> Should I send v2 with this and using DYNAMIC_KEY?
Yes, you can do that, but I guess it is not urgent, as it requires the
lazy patches to be merged first and to spend some time in linux-next?
Christophe
* Re: [PATCH 2/3] powerpc: support dynamic preemption
2024-12-01 19:45 ` Shrikanth Hegde
@ 2024-12-03 19:53 ` Christophe Leroy
0 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2024-12-03 19:53 UTC (permalink / raw)
To: Shrikanth Hegde, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
Le 01/12/2024 à 20:45, Shrikanth Hegde a écrit :
>
>
> On 11/27/24 12:14, Christophe Leroy wrote:
>>
>>
>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>> Once the lazy preemption is supported, it would be desirable to change
>>> the preemption models at runtime. So this change adds support for
>>> dynamic
>>> preemption using DYNAMIC_KEY.
>>>
>>> In irq-exit to kernel path, use preempt_model_preemptible for decision.
>>> Other way would be using static key based decision. Keeping it
>>> simpler since key based change didn't show performance improvement.
>>
>> What about static_call, wouldn't it improve performance ?
>>
>>>
>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>> index 6d6bbd93abab..01c58f5258c9 100644
>>> --- a/arch/powerpc/Kconfig
>>> +++ b/arch/powerpc/Kconfig
>>> @@ -270,6 +270,7 @@ config PPC
>>> select HAVE_PERF_EVENTS_NMI if PPC64
>>> select HAVE_PERF_REGS
>>> select HAVE_PERF_USER_STACK_DUMP
>>> + select HAVE_PREEMPT_DYNAMIC_KEY
>>
>> Can you use HAVE_PREEMPT_DYNAMIC_CALL instead? That should be more
>> performant.
>>
>> I know static calls are not in for PPC64 yet, you can restart from
>> http://patchwork.ozlabs.org/project/linuxppc-dev/cover/20221010002957.128276-1-bgray@linux.ibm.com/
>> and https://github.com/linuxppc/issues/issues/416
>>
>
> Thanks Christophe, I will take a look and understand.
>
> May be stupid question, do the concerns of arm still valid for ppc64/
> ppc32 out-line static calls?
Not sure, I don't know what they call a landing pad.
Limited branch range is a limitation for sure, but whatever the method,
an indirect call is required when the called function is too far away.
On powerpc the maximum direct-branch distance is 32 MB, which is quite
big for a standard kernel. Only modules should be concerned, but do we
have scheduling code in modules?
About CFI, I don't know.
Anyway, I resurrected the series I sent to implement inline static
calls on PPC32. I'm sure we can then extend it to PPC64.
> https://lore.kernel.org/all/20220214165216.2231574-6-mark.rutland@arm.com/
>
> As I understood, that is the reason they went ahead with DYNAMIC_KEY.
In their commit they have comparisons of assembly code. Can you do the
same for powerpc, to get a better picture of what we are talking about?
Christophe
* Re: [PATCH 3/3] powerpc: print right preemption model in die
2024-11-25 4:22 ` [PATCH 3/3] powerpc: print right preemption model in die Shrikanth Hegde
@ 2024-12-04 6:44 ` Christophe Leroy
0 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2024-12-04 6:44 UTC (permalink / raw)
To: Shrikanth Hegde, mpe, linuxppc-dev
Cc: npiggin, maddy, bigeasy, ankur.a.arora, linux-kernel,
mark.rutland, vschneid, peterz
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> Preemption models can change at runtime with dynamic preemption in
> place. So need to use the right methods instead of relying on
> CONFIG_PREEMPT to decide whether its full preemption or not.
Then this patch should go _before_ activating dynamic preemption.
In the end, though, with the change to avoid the full copy of
preempt.h, the amount of changes is small and all three patches can be
squashed into a single one.
>
> While there, fix it to print preemption model correctly.
>
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
> arch/powerpc/kernel/traps.c | 6 +++++-
> 1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
> index edf5cabe5dfd..2556fa8ec019 100644
> --- a/arch/powerpc/kernel/traps.c
> +++ b/arch/powerpc/kernel/traps.c
> @@ -266,7 +266,11 @@ static int __die(const char *str, struct pt_regs *regs, long err)
> printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
> IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
> PAGE_SIZE / 1024, get_mmu_str(),
> - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
> + preempt_model_none() ? "none" :
> + preempt_model_voluntary() ? "voluntary" :
> + preempt_model_full() ? "full" :
> + preempt_model_lazy() ? "lazy" :
> + "",
> IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
> IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
> debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
Thread overview: 17+ messages
2024-11-25 4:22 [PATCH 0/3] powerpc: Enable dynamic preemption Shrikanth Hegde
2024-11-25 4:22 ` [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm Shrikanth Hegde
2024-11-26 10:49 ` Christophe Leroy
2024-11-27 6:37 ` Christophe Leroy
2024-12-02 14:05 ` Shrikanth Hegde
2024-12-02 18:17 ` Christophe Leroy
2024-12-03 14:00 ` Shrikanth Hegde
2024-12-03 19:47 ` Christophe Leroy
2024-11-25 4:22 ` [PATCH 2/3] powerpc: support dynamic preemption Shrikanth Hegde
2024-11-26 10:48 ` Christophe Leroy
2024-11-26 11:15 ` Shrikanth Hegde
2024-11-27 6:28 ` Christophe Leroy
2024-11-27 6:44 ` Christophe Leroy
2024-12-01 19:45 ` Shrikanth Hegde
2024-12-03 19:53 ` Christophe Leroy
2024-11-25 4:22 ` [PATCH 3/3] powerpc: print right preemption model in die Shrikanth Hegde
2024-12-04 6:44 ` Christophe Leroy