From: Joel Fernandes <joelagnelf@nvidia.com>
To: Lyude Paul <lyude@redhat.com>
Cc: rust-for-linux@vger.kernel.org,
"Thomas Gleixner" <tglx@linutronix.de>,
"Boqun Feng" <boqun.feng@gmail.com>,
linux-kernel@vger.kernel.org,
"Daniel Almeida" <daniel.almeida@collabora.com>,
"Ingo Molnar" <mingo@redhat.com>,
"Peter Zijlstra" <peterz@infradead.org>,
"Juri Lelli" <juri.lelli@redhat.com>,
"Vincent Guittot" <vincent.guittot@linaro.org>,
"Dietmar Eggemann" <dietmar.eggemann@arm.com>,
"Steven Rostedt" <rostedt@goodmis.org>,
"Ben Segall" <bsegall@google.com>, "Mel Gorman" <mgorman@suse.de>,
"Valentin Schneider" <vschneid@redhat.com>,
"Will Deacon" <will@kernel.org>,
"Waiman Long" <longman@redhat.com>,
"Miguel Ojeda" <ojeda@kernel.org>,
"Alex Gaynor" <alex.gaynor@gmail.com>,
"Gary Guo" <gary@garyguo.net>,
"Björn Roy Baron" <bjorn3_gh@protonmail.com>,
"Benno Lossin" <lossin@kernel.org>,
"Andreas Hindborg" <a.hindborg@kernel.org>,
"Alice Ryhl" <aliceryhl@google.com>,
"Trevor Gross" <tmgross@umich.edu>,
"Danilo Krummrich" <dakr@kernel.org>,
"David Woodhouse" <dwmw@amazon.co.uk>,
"Jens Axboe" <axboe@kernel.dk>,
"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
NeilBrown <neilb@suse.de>,
"Caleb Sander Mateos" <csander@purestorage.com>,
"Ryo Takakura" <ryotkkr98@gmail.com>,
"K Prateek Nayak" <kprateek.nayak@amd.com>
Subject: Re: [RFC RESEND v10 03/14] irq & spin_lock: Add counted interrupt disabling/enabling
Date: Mon, 16 Jun 2025 14:10:01 -0400
Message-ID: <20250616181001.GA905960@joelnvbox>
In-Reply-To: <20250527222254.565881-4-lyude@redhat.com>
On Tue, May 27, 2025 at 06:21:44PM -0400, Lyude Paul wrote:
> From: Boqun Feng <boqun.feng@gmail.com>
>
> Currently, nested interrupt disabling and enabling is provided by the
> _irqsave() and _irqrestore() APIs, which are relatively unsafe, for
> example:
[...]
> diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
> index c13f0d915097a..277433f7f53eb 100644
> --- a/include/linux/irqflags_types.h
> +++ b/include/linux/irqflags_types.h
> @@ -19,4 +19,10 @@ struct irqtrace_events {
>
> #endif
>
> +/* Per-cpu interrupt disabling state for local_interrupt_{disable,enable}() */
> +struct interrupt_disable_state {
> + unsigned long flags;
> + long count;
Is count unused? I found it in earlier versions of this series but not in
this one. Since the nesting count now lives in the preempt counter (under
HARDIRQ_DISABLE_MASK), it looks like the per-cpu state only needs flags.
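To check my understanding, here is a minimal user-space sketch of the scheme
as I read it (hypothetical code with made-up names and bit positions, not the
actual kernel implementation):

#include <stdio.h>

#define HARDIRQ_DISABLE_SHIFT	24	/* made-up bit position */
#define HARDIRQ_DISABLE_OFFSET	(1U << HARDIRQ_DISABLE_SHIFT)
#define HARDIRQ_DISABLE_MASK	(0xfU << HARDIRQ_DISABLE_SHIFT)

static unsigned int fake_preempt_count;	/* stand-in for preempt_count() */
static unsigned long saved_flags;	/* the only per-CPU state needed */

static void model_interrupt_disable(void)
{
	fake_preempt_count += HARDIRQ_DISABLE_OFFSET;
	/* Outermost disable: save flags and mask interrupts for real. */
	if ((fake_preempt_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
		saved_flags = 0x200;	/* models local_irq_save() */
		printf("irqs off, flags saved\n");
	}
}

static void model_interrupt_enable(void)
{
	fake_preempt_count -= HARDIRQ_DISABLE_OFFSET;
	/* Outermost enable: restore the flags the first disable saved. */
	if ((fake_preempt_count & HARDIRQ_DISABLE_MASK) == 0)
		printf("irqs restored, flags=%#lx\n", saved_flags);
}

int main(void)
{
	model_interrupt_disable();	/* outer: saves flags */
	model_interrupt_disable();	/* nested: count only */
	model_interrupt_enable();	/* nested: count only */
	model_interrupt_enable();	/* outer: restores flags */
	return 0;
}

In this model the nesting depth never leaves the (fake) preempt counter,
which is why the per-cpu count field above looks dead to me.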
Sorry if I missed it in some other patch in this series. Thanks,
- Joel
> +};
> +
> #endif /* _LINUX_IRQFLAGS_TYPES_H */
> diff --git a/include/linux/preempt.h b/include/linux/preempt.h
> index 809af7b57470a..c1c5795be5d0f 100644
> --- a/include/linux/preempt.h
> +++ b/include/linux/preempt.h
> @@ -148,6 +148,10 @@ static __always_inline unsigned char interrupt_context_level(void)
> #define in_softirq() (softirq_count())
> #define in_interrupt() (irq_count())
>
> +#define hardirq_disable_count() ((preempt_count() & HARDIRQ_DISABLE_MASK) >> HARDIRQ_DISABLE_SHIFT)
> +#define hardirq_disable_enter() __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET)
> +#define hardirq_disable_exit() __preempt_count_sub_return(HARDIRQ_DISABLE_OFFSET)
> +
> /*
> * The preempt_count offset after preempt_disable();
> */
> diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
> index d3561c4a080e2..b21da4bd51a42 100644
> --- a/include/linux/spinlock.h
> +++ b/include/linux/spinlock.h
> @@ -272,9 +272,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
> #endif
>
> #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
> +#define raw_spin_lock_irq_disable(lock) _raw_spin_lock_irq_disable(lock)
> #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
> #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
> #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
> +#define raw_spin_unlock_irq_enable(lock) _raw_spin_unlock_irq_enable(lock)
>
> #define raw_spin_unlock_irqrestore(lock, flags) \
> do { \
> @@ -300,11 +302,56 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
> 1 : ({ local_irq_restore(flags); 0; }); \
> })
>
> +#define raw_spin_trylock_irq_disable(lock) \
> +({ \
> + local_interrupt_disable(); \
> + raw_spin_trylock(lock) ? \
> + 1 : ({ local_interrupt_enable(); 0; }); \
> +})
> +
> #ifndef CONFIG_PREEMPT_RT
> /* Include rwlock functions for !RT */
> #include <linux/rwlock.h>
> #endif
>
> +DECLARE_PER_CPU(struct interrupt_disable_state, local_interrupt_disable_state);
> +
> +static inline void local_interrupt_disable(void)
> +{
> + unsigned long flags;
> + int new_count;
> +
> + new_count = hardirq_disable_enter();
> +
> + if ((new_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
> + local_irq_save(flags);
> + raw_cpu_write(local_interrupt_disable_state.flags, flags);
> + }
> +}
> +
> +static inline void local_interrupt_enable(void)
> +{
> + int new_count;
> +
> + new_count = hardirq_disable_exit();
> +
> + if ((new_count & HARDIRQ_DISABLE_MASK) == 0) {
> + unsigned long flags;
> +
> + flags = raw_cpu_read(local_interrupt_disable_state.flags);
> + local_irq_restore(flags);
> + /*
> + * TODO: re-read preempt count can be avoided, but it needs
> + * should_resched() taking another parameter as the current
> + * preempt count
> + */
> +#ifdef CONFIG_PREEMPTION
> + if (should_resched(0))
> + __preempt_schedule();
> +#endif
> + }
> +}
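(Side note, for anyone following along: as I understand it, the payoff of
the counted API is that callers no longer have to thread a flags variable
through nested critical sections. A hypothetical user, with a made-up
struct foo, would just be:

static void consumer(struct foo *f)
{
	spin_lock_irq_disable(&f->lock);
	/*
	 * Interrupts stay disabled here even if the caller had already
	 * disabled them; the count in preempt_count handles the nesting.
	 */
	spin_unlock_irq_enable(&f->lock);
}

No flags to pass around, and the function behaves the same whether or not
interrupts were already disabled on entry.)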
> +
> /*
> * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
> */
> @@ -376,6 +423,11 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
> raw_spin_lock_irq(&lock->rlock);
> }
>
> +static __always_inline void spin_lock_irq_disable(spinlock_t *lock)
> +{
> + raw_spin_lock_irq_disable(&lock->rlock);
> +}
> +
> #define spin_lock_irqsave(lock, flags) \
> do { \
> raw_spin_lock_irqsave(spinlock_check(lock), flags); \
> @@ -401,6 +453,11 @@ static __always_inline void spin_unlock_irq(spinlock_t *lock)
> raw_spin_unlock_irq(&lock->rlock);
> }
>
> +static __always_inline void spin_unlock_irq_enable(spinlock_t *lock)
> +{
> + raw_spin_unlock_irq_enable(&lock->rlock);
> +}
> +
> static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
> {
> raw_spin_unlock_irqrestore(&lock->rlock, flags);
> @@ -421,6 +478,11 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
> raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
> })
>
> +static __always_inline int spin_trylock_irq_disable(spinlock_t *lock)
> +{
> + return raw_spin_trylock_irq_disable(&lock->rlock);
> +}
> +
> /**
> * spin_is_locked() - Check whether a spinlock is locked.
> * @lock: Pointer to the spinlock.
> diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
> index 9ecb0ab504e32..92532103b9eaa 100644
> --- a/include/linux/spinlock_api_smp.h
> +++ b/include/linux/spinlock_api_smp.h
> @@ -28,6 +28,8 @@ _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
> void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
> void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
> __acquires(lock);
> +void __lockfunc _raw_spin_lock_irq_disable(raw_spinlock_t *lock)
> + __acquires(lock);
>
> unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
> __acquires(lock);
> @@ -39,6 +41,7 @@ int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
> void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
> void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
> void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
> +void __lockfunc _raw_spin_unlock_irq_enable(raw_spinlock_t *lock) __releases(lock);
> void __lockfunc
> _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
> __releases(lock);
> @@ -55,6 +58,11 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
> #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
> #endif
>
> +/* Use the same config as spin_lock_irq() temporarily. */
> +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
> +#define _raw_spin_lock_irq_disable(lock) __raw_spin_lock_irq_disable(lock)
> +#endif
> +
> #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
> #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
> #endif
> @@ -79,6 +87,11 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
> #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
> #endif
>
> +/* Use the same config as spin_unlock_irq() temporarily. */
> +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
> +#define _raw_spin_unlock_irq_enable(lock) __raw_spin_unlock_irq_enable(lock)
> +#endif
> +
> #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
> #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
> #endif
> @@ -120,6 +133,13 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
> LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
> }
>
> +static inline void __raw_spin_lock_irq_disable(raw_spinlock_t *lock)
> +{
> + local_interrupt_disable();
> + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
> + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
> +}
> +
> static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
> {
> __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
> @@ -160,6 +180,13 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
> preempt_enable();
> }
>
> +static inline void __raw_spin_unlock_irq_enable(raw_spinlock_t *lock)
> +{
> + spin_release(&lock->dep_map, _RET_IP_);
> + do_raw_spin_unlock(lock);
> + local_interrupt_enable();
> +}
> +
> static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
> {
> spin_release(&lock->dep_map, _RET_IP_);
> diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
> index 819aeba1c87e6..d02a73671713b 100644
> --- a/include/linux/spinlock_api_up.h
> +++ b/include/linux/spinlock_api_up.h
> @@ -36,6 +36,9 @@
> #define __LOCK_IRQ(lock) \
> do { local_irq_disable(); __LOCK(lock); } while (0)
>
> +#define __LOCK_IRQ_DISABLE(lock) \
> + do { local_interrupt_disable(); __LOCK(lock); } while (0)
> +
> #define __LOCK_IRQSAVE(lock, flags) \
> do { local_irq_save(flags); __LOCK(lock); } while (0)
>
> @@ -52,6 +55,9 @@
> #define __UNLOCK_IRQ(lock) \
> do { local_irq_enable(); __UNLOCK(lock); } while (0)
>
> +#define __UNLOCK_IRQ_ENABLE(lock) \
> + do { __UNLOCK(lock); local_interrupt_enable(); } while (0)
> +
> #define __UNLOCK_IRQRESTORE(lock, flags) \
> do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
>
> @@ -64,6 +70,7 @@
> #define _raw_read_lock_bh(lock) __LOCK_BH(lock)
> #define _raw_write_lock_bh(lock) __LOCK_BH(lock)
> #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
> +#define _raw_spin_lock_irq_disable(lock) __LOCK_IRQ_DISABLE(lock)
> #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
> #define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
> #define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
> @@ -80,6 +87,7 @@
> #define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
> #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
> #define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
> +#define _raw_spin_unlock_irq_enable(lock) __UNLOCK_IRQ_ENABLE(lock)
> #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
> #define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
> #define _raw_spin_unlock_irqrestore(lock, flags) \
> diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
> index f6499c37157df..6ea08fafa6d7b 100644
> --- a/include/linux/spinlock_rt.h
> +++ b/include/linux/spinlock_rt.h
> @@ -93,6 +93,11 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
> rt_spin_lock(lock);
> }
>
> +static __always_inline void spin_lock_irq_disable(spinlock_t *lock)
> +{
> + rt_spin_lock(lock);
> +}
> +
> #define spin_lock_irqsave(lock, flags) \
> do { \
> typecheck(unsigned long, flags); \
> @@ -116,6 +121,11 @@ static __always_inline void spin_unlock_irq(spinlock_t *lock)
> rt_spin_unlock(lock);
> }
>
> +static __always_inline void spin_unlock_irq_enable(spinlock_t *lock)
> +{
> + rt_spin_unlock(lock);
> +}
> +
> static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
> unsigned long flags)
> {
> diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
> index 7685defd7c526..13f91117794fd 100644
> --- a/kernel/locking/spinlock.c
> +++ b/kernel/locking/spinlock.c
> @@ -125,6 +125,21 @@ static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
> */
> BUILD_LOCK_OPS(spin, raw_spinlock);
>
> +/* No rwlock_t variants for now, so just build this function by hand */
> +static void __lockfunc __raw_spin_lock_irq_disable(raw_spinlock_t *lock)
> +{
> + for (;;) {
> + preempt_disable();
> + local_interrupt_disable();
> + if (likely(do_raw_spin_trylock(lock)))
> + break;
> + local_interrupt_enable();
> + preempt_enable();
> +
> + arch_spin_relax(&lock->raw_lock);
> + }
> +}
> +
> #ifndef CONFIG_PREEMPT_RT
> BUILD_LOCK_OPS(read, rwlock);
> BUILD_LOCK_OPS(write, rwlock);
> @@ -172,6 +187,14 @@ noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
> EXPORT_SYMBOL(_raw_spin_lock_irq);
> #endif
>
> +#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
> +noinline void __lockfunc _raw_spin_lock_irq_disable(raw_spinlock_t *lock)
> +{
> + __raw_spin_lock_irq_disable(lock);
> +}
> +EXPORT_SYMBOL_GPL(_raw_spin_lock_irq_disable);
> +#endif
> +
> #ifndef CONFIG_INLINE_SPIN_LOCK_BH
> noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
> {
> @@ -204,6 +227,14 @@ noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
> EXPORT_SYMBOL(_raw_spin_unlock_irq);
> #endif
>
> +#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
> +noinline void __lockfunc _raw_spin_unlock_irq_enable(raw_spinlock_t *lock)
> +{
> + __raw_spin_unlock_irq_enable(lock);
> +}
> +EXPORT_SYMBOL_GPL(_raw_spin_unlock_irq_enable);
> +#endif
> +
> #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
> noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
> {
> diff --git a/kernel/softirq.c b/kernel/softirq.c
> index 513b1945987cc..f7a2ff4d123be 100644
> --- a/kernel/softirq.c
> +++ b/kernel/softirq.c
> @@ -88,6 +88,9 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
> EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
> #endif
>
> +DEFINE_PER_CPU(struct interrupt_disable_state, local_interrupt_disable_state);
> +EXPORT_PER_CPU_SYMBOL_GPL(local_interrupt_disable_state);
> +
> /*
> * SOFTIRQ_OFFSET usage:
> *
> --
> 2.49.0
>