* [RFC RESEND v10 02/14] preempt: Introduce __preempt_count_{sub, add}_return()
       [not found] <20250527222254.565881-1-lyude@redhat.com>
@ 2025-05-27 22:21 ` Lyude Paul
  2025-05-28  6:37   ` Heiko Carstens
  0 siblings, 1 reply; 2+ messages in thread
From: Lyude Paul @ 2025-05-27 22:21 UTC (permalink / raw)
  To: rust-for-linux, Thomas Gleixner, Boqun Feng, linux-kernel,
	Daniel Almeida
  Cc: Catalin Marinas, Will Deacon, Heiko Carstens, Vasily Gorbik,
	Alexander Gordeev, Christian Borntraeger, Sven Schnelle,
	Ingo Molnar, Borislav Petkov, Dave Hansen,
	maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT), H. Peter Anvin,
	Arnd Bergmann, Juergen Christ, Uros Bizjak, Brian Gerst,
	moderated list:ARM64 PORT (AARCH64 ARCHITECTURE),
	open list:S390 ARCHITECTURE,
	open list:GENERIC INCLUDE/ASM HEADER FILES

From: Boqun Feng <boqun.feng@gmail.com>

In order to use preempt_count() to track the interrupt disable
nesting level, __preempt_count_{add,sub}_return() are introduced. As
their names suggest, these primitives return the new value of
preempt_count() after changing it. The following example shows their
usage in local_interrupt_disable():

	// increase the HARDIRQ_DISABLE count
	new_count = __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET);

	// if this is the first increment, disable interrupts at the
	// hardware level.
	if ((new_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
		local_irq_save(flags);
		raw_cpu_write(local_interrupt_disable_state.flags, flags);
	}
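
The matching enable path would be symmetric: decrement, and only
restore interrupts at the hardware level once the outermost disable
is dropped. A minimal sketch (hypothetical; this patch only adds the
primitives, and the names below simply mirror the example above):

	// decrease the HARDIRQ_DISABLE count
	new_count = __preempt_count_sub_return(HARDIRQ_DISABLE_OFFSET);

	// if this was the outermost disable, restore the saved
	// interrupt flags at the hardware level.
	if ((new_count & HARDIRQ_DISABLE_MASK) == 0) {
		flags = raw_cpu_read(local_interrupt_disable_state.flags);
		local_irq_restore(flags);
	}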

Having these primitives avoids a separate read of preempt_count()
after changing it on certain architectures.
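
For example, without an add-return primitive a caller would otherwise
have to do (a sketch of the pattern being avoided, written against the
generic helpers):

	/* update, then separately re-read the counter */
	__preempt_count_add(HARDIRQ_DISABLE_OFFSET);
	new_count = preempt_count();

versus a single read-modify-write that hands back the result:

	new_count = __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET);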

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>

---
V10:
* Add commit message I forgot
* Rebase against latest pcpu_hot changes

Signed-off-by: Lyude Paul <lyude@redhat.com>
---
 arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
 arch/s390/include/asm/preempt.h  | 19 +++++++++++++++++++
 arch/x86/include/asm/preempt.h   | 10 ++++++++++
 include/asm-generic/preempt.h    | 14 ++++++++++++++
 4 files changed, 61 insertions(+)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f0..49cb886c8e1dd 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -56,6 +56,24 @@ static inline void __preempt_count_sub(int val)
 	WRITE_ONCE(current_thread_info()->preempt.count, pc);
 }
 
+static inline int __preempt_count_add_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc += val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
+static inline int __preempt_count_sub_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc -= val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
 static inline bool __preempt_count_dec_and_test(void)
 {
 	struct thread_info *ti = current_thread_info();
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 6ccd033acfe52..67a6e265e9fff 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -98,6 +98,25 @@ static __always_inline bool should_resched(int preempt_offset)
 	return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	/*
+	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
+	 * enabled, gcc 12 fails to handle __builtin_constant_p().
+	 */
+	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
+		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
+			return val + __atomic_add_const(val, &get_lowcore()->preempt_count);
+		}
+	}
+	return val + __atomic_add(val, &get_lowcore()->preempt_count);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return __preempt_count_add_return(-val);
+}
+
 #define init_task_preempt_count(p)	do { } while (0)
 /* Deferred to CPU bringup time */
 #define init_idle_preempt_count(p, cpu)	do { } while (0)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 578441db09f0b..1220656f3370b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -85,6 +85,16 @@ static __always_inline void __preempt_count_sub(int val)
 	raw_cpu_add_4(__preempt_count, -val);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	return raw_cpu_add_return_4(__preempt_count, val);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return raw_cpu_add_return_4(__preempt_count, -val);
+}
+
 /*
  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
  * a decrement which hits zero means we have no preempt_count and should
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 51f8f3881523a..c8683c046615d 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -59,6 +59,20 @@ static __always_inline void __preempt_count_sub(int val)
 	*preempt_count_ptr() -= val;
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	*preempt_count_ptr() += val;
+
+	return *preempt_count_ptr();
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	*preempt_count_ptr() -= val;
+
+	return *preempt_count_ptr();
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	/*
-- 
2.49.0



* Re: [RFC RESEND v10 02/14] preempt: Introduce __preempt_count_{sub, add}_return()
  2025-05-27 22:21 ` [RFC RESEND v10 02/14] preempt: Introduce __preempt_count_{sub, add}_return() Lyude Paul
@ 2025-05-28  6:37   ` Heiko Carstens
  0 siblings, 0 replies; 2+ messages in thread
From: Heiko Carstens @ 2025-05-28  6:37 UTC (permalink / raw)
  To: Lyude Paul
  Cc: rust-for-linux, Thomas Gleixner, Boqun Feng, linux-kernel,
	Daniel Almeida, Catalin Marinas, Will Deacon, Vasily Gorbik,
	Alexander Gordeev, Christian Borntraeger, Sven Schnelle,
	Ingo Molnar, Borislav Petkov, Dave Hansen,
	maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT), H. Peter Anvin,
	Arnd Bergmann, Juergen Christ, Uros Bizjak, Brian Gerst,
	moderated list:ARM64 PORT (AARCH64 ARCHITECTURE),
	open list:S390 ARCHITECTURE,
	open list:GENERIC INCLUDE/ASM HEADER FILES

On Tue, May 27, 2025 at 06:21:43PM -0400, Lyude Paul wrote:
> From: Boqun Feng <boqun.feng@gmail.com>
> 
> In order to use preempt_count() to track the interrupt disable
> nesting level, __preempt_count_{add,sub}_return() are introduced. As
> their names suggest, these primitives return the new value of
> preempt_count() after changing it. The following example shows their
> usage in local_interrupt_disable():
> 
> 	// increase the HARDIRQ_DISABLE count
> 	new_count = __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET);
> 
> 	// if this is the first increment, disable interrupts at the
> 	// hardware level.
> 	if ((new_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
> 		local_irq_save(flags);
> 		raw_cpu_write(local_interrupt_disable_state.flags, flags);
> 	}
> 
> Having these primitives avoids a separate read of preempt_count()
> after changing it on certain architectures.
> 
> Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
> 
> ---
> V10:
> * Add commit message I forgot
> * Rebase against latest pcpu_hot changes
> 
> Signed-off-by: Lyude Paul <lyude@redhat.com>
> ---
>  arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
>  arch/s390/include/asm/preempt.h  | 19 +++++++++++++++++++
>  arch/x86/include/asm/preempt.h   | 10 ++++++++++
>  include/asm-generic/preempt.h    | 14 ++++++++++++++
>  4 files changed, 61 insertions(+)

...

> diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
> index 6ccd033acfe52..67a6e265e9fff 100644
> --- a/arch/s390/include/asm/preempt.h
> +++ b/arch/s390/include/asm/preempt.h
> @@ -98,6 +98,25 @@ static __always_inline bool should_resched(int preempt_offset)
>  	return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
>  }
>  
> +static __always_inline int __preempt_count_add_return(int val)
> +{
> +	/*
> +	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
> +	 * enabled, gcc 12 fails to handle __builtin_constant_p().
> +	 */
> +	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
> +		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
> +			return val + __atomic_add_const(val, &get_lowcore()->preempt_count);
> +		}
> +	}
> +	return val + __atomic_add(val, &get_lowcore()->preempt_count);
> +}

This is still wrong: __atomic_add_const() does not return a value (on
s390 it is implemented with asi/agsi, which do not fetch the old
value), so it cannot be used to compute the new count. It needs to be
changed to:

static __always_inline int __preempt_count_add_return(int val)
{
	return val + __atomic_add(val, &get_lowcore()->preempt_count);
}
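
For illustration, a hedged sketch of the semantics this relies on,
assuming s390's __atomic_add() is a fetch-and-add that returns the
value *before* the addition:

	/* hypothetical illustration, not part of the patch */
	int old = __atomic_add(val, &get_lowcore()->preempt_count);
	int new_count = old + val;	/* the updated preempt_count */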
