From: Boqun Feng <boqun.feng@gmail.com>
To: "Thomas Gleixner" <tglx@linutronix.de>
Cc: "Dirk Behme" <dirk.behme@gmail.com>,
"Lyude Paul" <lyude@redhat.com>,
rust-for-linux@vger.kernel.org,
"Danilo Krummrich" <dakr@redhat.com>,
airlied@redhat.com, "Ingo Molnar" <mingo@redhat.com>,
will@kernel.org, "Waiman Long" <longman@redhat.com>,
"Peter Zijlstra" <peterz@infradead.org>,
linux-kernel@vger.kernel.org, "Miguel Ojeda" <ojeda@kernel.org>,
"Alex Gaynor" <alex.gaynor@gmail.com>,
wedsonaf@gmail.com, "Gary Guo" <gary@garyguo.net>,
"Björn Roy Baron" <bjorn3_gh@protonmail.com>,
"Benno Lossin" <benno.lossin@proton.me>,
"Andreas Hindborg" <a.hindborg@samsung.com>,
aliceryhl@google.com, "Trevor Gross" <tmgross@umich.edu>,
"Boqun Feng" <boqun.feng@gmail.com>
Subject: [POC 1/6] irq & spin_lock: Add counted interrupt disabling/enabling
Date: Thu, 17 Oct 2024 22:51:20 -0700 [thread overview]
Message-ID: <20241018055125.2784186-2-boqun.feng@gmail.com> (raw)
In-Reply-To: <20241018055125.2784186-1-boqun.feng@gmail.com>
Currently, nested interrupt disabling and enabling is provided by the
_irqsave() and _irqrestore() APIs, which are relatively unsafe, for
example:
    <interrupts are enabled at the beginning>
    spin_lock_irqsave(l1, flags1);
    spin_lock_irqsave(l2, flags2);
    spin_unlock_irqrestore(l1, flags1);
    <l2 is still held but interrupts are enabled>
    // accesses to interrupt-disable-protected data will cause races.
This is even easier to trigger with the guard facilities:

    unsigned long flags2;

    scoped_guard(spin_lock_irqsave, l1) {
        spin_lock_irqsave(l2, flags2);
    }
    // l2 is locked but interrupts are enabled.
    spin_unlock_irqrestore(l2, flags2);

(Hand-over-hand locking critical sections are not uncommon in a
fine-grained lock design.)
Because of this unsafety, Rust cannot easily wrap interrupt-disabling
locks in a safe API, which complicates the design.
To resolve this, introduce a new set of interrupt-disabling APIs:

    * local_interrupt_disable();
    * local_interrupt_enable();
These work like local_irq_save() and local_irq_restore(), except that 1)
the outermost local_interrupt_disable() call saves the interrupt state
into a percpu variable, so that the outermost local_interrupt_enable()
can restore it, and 2) a percpu counter records the nesting level of
these calls, so that interrupts are not accidentally enabled inside the
outermost critical section.
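
For example, the intended nesting semantics (a usage sketch for
illustration only, not part of the diff below):

    local_interrupt_disable();  // count 0 -> 1, saves the irq flags
    local_interrupt_disable();  // count 1 -> 2, flags left untouched
    local_interrupt_enable();   // count 2 -> 1, interrupts stay disabled
    local_interrupt_enable();   // count 1 -> 0, restores the saved flags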
Also add the corresponding spinlock primitives, spin_lock_irq_disable()
and spin_unlock_irq_enable(). As a result, code like the following:

    spin_lock_irq_disable(l1);
    spin_lock_irq_disable(l2);
    spin_unlock_irq_enable(l1);
    // Interrupts are still disabled.
    spin_unlock_irq_enable(l2);

no longer accidentally enables interrupts while a lock is still held.
This also makes it easier to design a safe Rust wrapper for
interrupt-disabling locks.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
---
include/linux/irqflags.h | 32 +++++++++++++++++++++++++++++++-
include/linux/irqflags_types.h | 6 ++++++
include/linux/spinlock.h | 13 +++++++++++++
include/linux/spinlock_api_smp.h | 29 +++++++++++++++++++++++++++++
include/linux/spinlock_rt.h | 10 ++++++++++
kernel/locking/spinlock.c | 16 ++++++++++++++++
kernel/softirq.c | 3 +++
7 files changed, 108 insertions(+), 1 deletion(-)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3f003d5fde53..7840f326514b 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -225,7 +225,6 @@ extern void warn_bogus_irq_restore(void);
raw_safe_halt(); \
} while (0)
-
#else /* !CONFIG_TRACE_IRQFLAGS */
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
@@ -254,6 +253,37 @@ extern void warn_bogus_irq_restore(void);
#define irqs_disabled() raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+DECLARE_PER_CPU(struct interrupt_disable_state, local_interrupt_disable_state);
+
+static inline void local_interrupt_disable(void)
+{
+ unsigned long flags;
+ long new_count;
+
+ local_irq_save(flags);
+
+ new_count = raw_cpu_inc_return(local_interrupt_disable_state.count);
+
+ if (new_count == 1)
+ raw_cpu_write(local_interrupt_disable_state.flags, flags);
+}
+
+static inline void local_interrupt_enable(void)
+{
+ long new_count;
+
+ new_count = raw_cpu_dec_return(local_interrupt_disable_state.count);
+
+ if (new_count == 0) {
+ unsigned long flags;
+
+ flags = raw_cpu_read(local_interrupt_disable_state.flags);
+ local_irq_restore(flags);
+ } else if (unlikely(new_count < 0)) {
+ /* XXX: BUG() here? */
+ }
+}
+
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
index c13f0d915097..277433f7f53e 100644
--- a/include/linux/irqflags_types.h
+++ b/include/linux/irqflags_types.h
@@ -19,4 +19,10 @@ struct irqtrace_events {
#endif
+/* Per-cpu interrupt disabling state for local_interrupt_{disable,enable}() */
+struct interrupt_disable_state {
+ unsigned long flags;
+ long count;
+};
+
#endif /* _LINUX_IRQFLAGS_TYPES_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 63dd8cf3c3c2..c1cbf5d5ebe0 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -272,9 +272,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#endif
#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irq_disable(lock) _raw_spin_lock_irq_disable(lock)
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+#define raw_spin_unlock_irq_enable(lock) _raw_spin_unlock_irq_enable(lock)
#define raw_spin_unlock_irqrestore(lock, flags) \
do { \
@@ -376,6 +378,11 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
raw_spin_lock_irq(&lock->rlock);
}
+static __always_inline void spin_lock_irq_disable(spinlock_t *lock)
+{
+ raw_spin_lock_irq_disable(&lock->rlock);
+}
+
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
@@ -401,6 +408,12 @@ static __always_inline void spin_unlock_irq(spinlock_t *lock)
raw_spin_unlock_irq(&lock->rlock);
}
+static __always_inline void spin_unlock_irq_enable(spinlock_t *lock)
+{
+ raw_spin_unlock_irq_enable(&lock->rlock);
+}
+
+
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 89eb6f4c659c..e96482c23044 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -28,6 +28,8 @@ _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
__acquires(lock);
+void __lockfunc _raw_spin_lock_irq_disable(raw_spinlock_t *lock)
+ __acquires(lock);
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
__acquires(lock);
@@ -39,6 +41,7 @@ int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq_enable(raw_spinlock_t *lock) __releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
__releases(lock);
@@ -55,6 +58,11 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
+/* Use the same config as spin_lock_irq() temporarily. */
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
+#define _raw_spin_lock_irq_disable(lock) __raw_spin_lock_irq_disable(lock)
+#endif
+
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
@@ -79,6 +87,11 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
+/* Use the same config as spin_unlock_irq() temporarily. */
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+#define _raw_spin_unlock_irq_enable(lock) __raw_spin_unlock_irq_enable(lock)
+#endif
+
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
@@ -120,6 +133,14 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
+static inline void __raw_spin_lock_irq_disable(raw_spinlock_t *lock)
+{
+ local_interrupt_disable();
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
@@ -160,6 +181,14 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
preempt_enable();
}
+static inline void __raw_spin_unlock_irq_enable(raw_spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, _RET_IP_);
+ do_raw_spin_unlock(lock);
+ local_interrupt_enable();
+ preempt_enable();
+}
+
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, _RET_IP_);
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 61c49b16f69a..c05be2cb4564 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -94,6 +94,11 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
rt_spin_lock(lock);
}
+static __always_inline void spin_lock_irq_disable(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
#define spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
@@ -117,6 +122,11 @@ static __always_inline void spin_unlock_irq(spinlock_t *lock)
rt_spin_unlock(lock);
}
+static __always_inline void spin_unlock_irq_enable(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
{
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 7685defd7c52..a2e01ec4a0c8 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -172,6 +172,14 @@ noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+noinline void __lockfunc _raw_spin_lock_irq_disable(raw_spinlock_t *lock)
+{
+ __raw_spin_lock_irq_disable(lock);
+}
+EXPORT_SYMBOL_GPL(_raw_spin_lock_irq_disable);
+#endif
+
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
@@ -204,6 +212,14 @@ noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+noinline void __lockfunc _raw_spin_unlock_irq_enable(raw_spinlock_t *lock)
+{
+ __raw_spin_unlock_irq_enable(lock);
+}
+EXPORT_SYMBOL_GPL(_raw_spin_unlock_irq_enable);
+#endif
+
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b756d6b3fd09..fcbf700963c4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -88,6 +88,9 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
+DEFINE_PER_CPU(struct interrupt_disable_state, local_interrupt_disable_state);
+EXPORT_PER_CPU_SYMBOL_GPL(local_interrupt_disable_state);
+
/*
* SOFTIRQ_OFFSET usage:
*
--
2.45.2