All of lore.kernel.org
 help / color / mirror / Atom feed
From: Boqun Feng <boqun@kernel.org>
To: Peter Zijlstra <peterz@infradead.org>
Cc: "Catalin Marinas" <catalin.marinas@arm.com>,
	"Will Deacon" <will@kernel.org>,
	"Jonas Bonn" <jonas@southpole.se>,
	"Stefan Kristiansson" <stefan.kristiansson@saunalahti.fi>,
	"Stafford Horne" <shorne@gmail.com>,
	"Heiko Carstens" <hca@linux.ibm.com>,
	"Vasily Gorbik" <gor@linux.ibm.com>,
	"Alexander Gordeev" <agordeev@linux.ibm.com>,
	"Christian Borntraeger" <borntraeger@linux.ibm.com>,
	"Sven Schnelle" <svens@linux.ibm.com>,
	"Thomas Gleixner" <tglx@kernel.org>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Borislav Petkov" <bp@alien8.de>,
	"Dave Hansen" <dave.hansen@linux.intel.com>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	"Arnd Bergmann" <arnd@arndb.de>,
	"Juri Lelli" <juri.lelli@redhat.com>,
	"Vincent Guittot" <vincent.guittot@linaro.org>,
	"Dietmar Eggemann" <dietmar.eggemann@arm.com>,
	"Steven Rostedt" <rostedt@goodmis.org>,
	"Ben Segall" <bsegall@google.com>, "Mel Gorman" <mgorman@suse.de>,
	"Valentin Schneider" <vschneid@redhat.com>,
	"K Prateek Nayak" <kprateek.nayak@amd.com>,
	"Boqun Feng" <boqun@kernel.org>,
	"Waiman Long" <longman@redhat.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Miguel Ojeda" <ojeda@kernel.org>, "Gary Guo" <gary@garyguo.net>,
	"Björn Roy Baron" <bjorn3_gh@protonmail.com>,
	"Benno Lossin" <lossin@kernel.org>,
	"Andreas Hindborg" <a.hindborg@kernel.org>,
	"Alice Ryhl" <aliceryhl@google.com>,
	"Trevor Gross" <tmgross@umich.edu>,
	"Danilo Krummrich" <dakr@kernel.org>,
	"Jinjie Ruan" <ruanjinjie@huawei.com>,
	"Ada Couprie Diaz" <ada.coupriediaz@arm.com>,
	"Lyude Paul" <lyude@redhat.com>,
	"Sohil Mehta" <sohil.mehta@intel.com>,
	"Pawan Gupta" <pawan.kumar.gupta@linux.intel.com>,
	"Xin Li (Intel)" <xin@zytor.com>,
	"Sean Christopherson" <seanjc@google.com>,
	"Nikunj A Dadhania" <nikunj@amd.com>,
	"Joel Fernandes" <joelagnelf@nvidia.com>,
	"Andy Shevchenko" <andriy.shevchenko@linux.intel.com>,
	"Randy Dunlap" <rdunlap@infradead.org>,
	"Yury Norov" <ynorov@nvidia.com>,
	"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, linux-openrisc@vger.kernel.org,
	linux-s390@vger.kernel.org, linux-arch@vger.kernel.org,
	rust-for-linux@vger.kernel.org,
	"Boqun Feng" <boqun.feng@gmail.com>
Subject: [PATCH 03/11] preempt: Introduce __preempt_count_{sub, add}_return()
Date: Thu,  7 May 2026 21:21:03 -0700	[thread overview]
Message-ID: <20260508042111.24358-4-boqun@kernel.org> (raw)
In-Reply-To: <20260508042111.24358-1-boqun@kernel.org>

From: Boqun Feng <boqun.feng@gmail.com>

In order to use preempt_count() to track the interrupt disable
nesting level, __preempt_count_{add,sub}_return() are introduced; as
their names suggest, these primitives return the new value of the
preempt_count() after changing it. The following example shows the usage
of it in local_interrupt_disable():

	// increase the HARDIRQ_DISABLE bit
	new_count = __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET);

	// if it's the first-time increment, then disable the interrupt
	// at hardware level.
	if ((new_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
		local_irq_save(flags);
		raw_cpu_write(local_interrupt_disable_state.flags, flags);
	}

Having these primitives will avoid a read of preempt_count() after
changing preempt_count() on certain architectures.

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Boqun Feng <boqun@kernel.org>
Link: https://patch.msgid.link/20260121223933.1568682-4-lyude@redhat.com
---
 arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
 arch/s390/include/asm/preempt.h  | 10 ++++++++++
 arch/x86/include/asm/preempt.h   | 10 ++++++++++
 include/asm-generic/preempt.h    | 14 ++++++++++++++
 4 files changed, 52 insertions(+)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 932ea4b62042..0dd8221d1bef 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -55,6 +55,24 @@ static inline void __preempt_count_sub(int val)
 	WRITE_ONCE(current_thread_info()->preempt.count, pc);
 }
 
+static inline int __preempt_count_add_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc += val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
+static inline int __preempt_count_sub_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc -= val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
 static inline bool __preempt_count_dec_and_test(void)
 {
 	struct thread_info *ti = current_thread_info();
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 6e5821bb047e..0a25d4648b4c 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -139,6 +139,16 @@ static __always_inline bool should_resched(int preempt_offset)
 	return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	return val + __atomic_add(val, &get_lowcore()->preempt_count);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return __preempt_count_add_return(-val);
+}
+
 #define init_task_preempt_count(p)	do { } while (0)
 /* Deferred to CPU bringup time */
 #define init_idle_preempt_count(p, cpu)	do { } while (0)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 578441db09f0..1220656f3370 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -85,6 +85,16 @@ static __always_inline void __preempt_count_sub(int val)
 	raw_cpu_add_4(__preempt_count, -val);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	return raw_cpu_add_return_4(__preempt_count, val);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return raw_cpu_add_return_4(__preempt_count, -val);
+}
+
 /*
  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
  * a decrement which hits zero means we have no preempt_count and should
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 51f8f3881523..c8683c046615 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -59,6 +59,20 @@ static __always_inline void __preempt_count_sub(int val)
 	*preempt_count_ptr() -= val;
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	*preempt_count_ptr() += val;
+
+	return *preempt_count_ptr();
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	*preempt_count_ptr() -= val;
+
+	return *preempt_count_ptr();
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	/*
-- 
2.50.1 (Apple Git-155)


  parent reply	other threads:[~2026-05-08  4:21 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-08  4:21 [PATCH 00/11] Refcounted interrupt disable and SpinLockIrq for rust (Part 1) Boqun Feng
2026-05-08  4:21 ` [PATCH 01/11] preempt: Introduce HARDIRQ_DISABLE_BITS Boqun Feng
2026-05-12 16:28   ` Steven Rostedt
2026-05-08  4:21 ` [PATCH 02/11] preempt: Track NMI nesting to separate per-CPU counter Boqun Feng
2026-05-12 16:30   ` Steven Rostedt
2026-05-12 19:22     ` Joel Fernandes
2026-05-12 22:16       ` Boqun Feng
2026-05-12 23:33         ` Joel Fernandes
2026-05-08  4:21 ` Boqun Feng [this message]
2026-05-09 18:09   ` [PATCH 03/11] preempt: Introduce __preempt_count_{sub, add}_return() Heiko Carstens
2026-05-08  4:21 ` [PATCH 04/11] openrisc: Include <linux/cpumask.h> in smp.h Boqun Feng
2026-05-08  4:21 ` [PATCH 05/11] irq & spin_lock: Add counted interrupt disabling/enabling Boqun Feng
2026-05-08  4:21 ` [PATCH 06/11] irq: Add KUnit test for refcounted interrupt enable/disable Boqun Feng
2026-05-08  4:21 ` [PATCH 07/11] locking: Switch to _irq_{disable,enable}() variants in cleanup guards Boqun Feng
2026-05-08  4:21 ` [PATCH 08/11] sched: Remove the unused preempt_offset parameter of __cant_sleep() Boqun Feng
2026-05-08  4:21 ` [PATCH 09/11] sched: Avoid signed comparison of preempt_count() in __cant_migrate() Boqun Feng
2026-05-08  4:21 ` [PATCH 10/11] preempt: Introduce PREEMPT_COUNT_64BIT Boqun Feng
2026-05-08  4:21 ` [PATCH 11/11] arm64: sched/preempt: Enable PREEMPT_COUNT_64BIT Boqun Feng
2026-05-08  8:22   ` Mark Rutland
2026-05-08 14:48     ` Boqun Feng
2026-05-09 18:12 ` [PATCH 00/11] Refcounted interrupt disable and SpinLockIrq for rust (Part 1) Heiko Carstens
2026-05-09 18:21   ` Boqun Feng

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260508042111.24358-4-boqun@kernel.org \
    --to=boqun@kernel.org \
    --cc=a.hindborg@kernel.org \
    --cc=ada.coupriediaz@arm.com \
    --cc=agordeev@linux.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=aliceryhl@google.com \
    --cc=andriy.shevchenko@linux.intel.com \
    --cc=arnd@arndb.de \
    --cc=bigeasy@linutronix.de \
    --cc=bjorn3_gh@protonmail.com \
    --cc=boqun.feng@gmail.com \
    --cc=borntraeger@linux.ibm.com \
    --cc=bp@alien8.de \
    --cc=bsegall@google.com \
    --cc=catalin.marinas@arm.com \
    --cc=dakr@kernel.org \
    --cc=dave.hansen@linux.intel.com \
    --cc=dietmar.eggemann@arm.com \
    --cc=gary@garyguo.net \
    --cc=gor@linux.ibm.com \
    --cc=hca@linux.ibm.com \
    --cc=hpa@zytor.com \
    --cc=joelagnelf@nvidia.com \
    --cc=jonas@southpole.se \
    --cc=juri.lelli@redhat.com \
    --cc=kprateek.nayak@amd.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-openrisc@vger.kernel.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=longman@redhat.com \
    --cc=lossin@kernel.org \
    --cc=lyude@redhat.com \
    --cc=mgorman@suse.de \
    --cc=mingo@redhat.com \
    --cc=nikunj@amd.com \
    --cc=ojeda@kernel.org \
    --cc=pawan.kumar.gupta@linux.intel.com \
    --cc=peterz@infradead.org \
    --cc=rdunlap@infradead.org \
    --cc=rostedt@goodmis.org \
    --cc=ruanjinjie@huawei.com \
    --cc=rust-for-linux@vger.kernel.org \
    --cc=seanjc@google.com \
    --cc=shorne@gmail.com \
    --cc=sohil.mehta@intel.com \
    --cc=stefan.kristiansson@saunalahti.fi \
    --cc=svens@linux.ibm.com \
    --cc=tglx@kernel.org \
    --cc=tmgross@umich.edu \
    --cc=vincent.guittot@linaro.org \
    --cc=vschneid@redhat.com \
    --cc=will@kernel.org \
    --cc=x86@kernel.org \
    --cc=xin@zytor.com \
    --cc=ynorov@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.