From: Peter Zijlstra <peterz@infradead.org>
To: torvalds@linux-foundation.org, keescook@chromium.org,
gregkh@linuxfoundation.org, pbonzini@redhat.com
Cc: masahiroy@kernel.org, nathan@kernel.org, ndesaulniers@google.com,
nicolas@fjasle.eu, catalin.marinas@arm.com, will@kernel.org,
vkoul@kernel.org, trix@redhat.com, ojeda@kernel.org,
peterz@infradead.org, mingo@redhat.com, longman@redhat.com,
boqun.feng@gmail.com, dennis@kernel.org, tj@kernel.org,
cl@linux.com, acme@kernel.org, mark.rutland@arm.com,
alexander.shishkin@linux.intel.com, jolsa@kernel.org,
namhyung@kernel.org, irogers@google.com, adrian.hunter@intel.com,
juri.lelli@redhat.com, vincent.guittot@linaro.org,
dietmar.eggemann@arm.com, rostedt@goodmis.org,
bsegall@google.com, mgorman@suse.de, bristot@redhat.com,
vschneid@redhat.com, paulmck@kernel.org, frederic@kernel.org,
quic_neeraju@quicinc.com, joel@joelfernandes.org,
josh@joshtriplett.org, mathieu.desnoyers@efficios.com,
jiangshanlai@gmail.com, rientjes@google.com, vbabka@suse.cz,
roman.gushchin@linux.dev, 42.hyeyoo@gmail.com, apw@canonical.com,
joe@perches.com, dwaipayanray1@gmail.com,
lukas.bulwahn@gmail.com, john.johansen@canonical.com,
paul@paul-moore.com, jmorris@namei.org, serge@hallyn.com,
linux-kbuild@vger.kernel.org, linux-kernel@vger.kernel.org,
dmaengine@vger.kernel.org, llvm@lists.linux.dev,
linux-perf-users@vger.kernel.org, rcu@vger.kernel.org,
linux-security-module@vger.kernel.org, tglx@linutronix.de,
ravi.bangoria@amd.com, error27@gmail.com,
luc.vanoostenryck@gmail.com
Subject: [PATCH v3 09/57] sched: Simplify ttwu()
Date: Mon, 12 Jun 2023 11:07:22 +0200
Message-ID: <20230612093538.076428270@infradead.org>
In-Reply-To: <20230612090713.652690195@infradead.org>
Use guards to reduce gotos and simplify control flow.
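
The conversion builds on the cleanup.h guard infrastructure introduced
earlier in this series: guard() takes the lock (or disables preemption /
enters an RCU read-side section) and releases it automatically when the
enclosing scope ends, while scoped_guard() wraps the critical section in
its own block so that an early 'break' leaves the section and still drops
the lock. A rough before/after sketch of the pattern (illustration only,
simplified from the pi_lock hunk below):

	/* before: explicit pairing, early exits need a goto */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!ttwu_state_match(p, state, &success))
		goto unlock;
	/* ... */
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	/* after: the unlock runs at scope exit; break replaces goto */
	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
		if (!ttwu_state_match(p, state, &success))
			break;
		/* ... */
	}

The guard(preempt)() and guard(rcu)() conversions follow the same pattern
for preempt_disable()/preempt_enable() and rcu_read_lock()/rcu_read_unlock().
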
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/sched/core.c | 220 +++++++++++++++++++++++++---------------------------
1 file changed, 108 insertions(+), 112 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3664,16 +3664,15 @@ ttwu_stat(struct task_struct *p, int cpu
__schedstat_inc(p->stats.nr_wakeups_local);
} else {
struct sched_domain *sd;
+ guard(rcu)();
__schedstat_inc(p->stats.nr_wakeups_remote);
- rcu_read_lock();
for_each_domain(rq->cpu, sd) {
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
__schedstat_inc(sd->ttwu_wake_remote);
break;
}
}
- rcu_read_unlock();
}
if (wake_flags & WF_MIGRATED)
@@ -4135,10 +4134,9 @@ bool ttwu_state_match(struct task_struct
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
- unsigned long flags;
+ guard(preempt)();
int cpu, success = 0;
- preempt_disable();
if (p == current) {
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
@@ -4165,129 +4163,127 @@ try_to_wake_up(struct task_struct *p, un
* reordered with p->state check below. This pairs with smp_store_mb()
* in set_current_state() that the waiting thread does.
*/
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- smp_mb__after_spinlock();
- if (!ttwu_state_match(p, state, &success))
- goto unlock;
+ scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
+ smp_mb__after_spinlock();
+ if (!ttwu_state_match(p, state, &success))
+ break;
- trace_sched_waking(p);
+ trace_sched_waking(p);
- /*
- * Ensure we load p->on_rq _after_ p->state, otherwise it would
- * be possible to, falsely, observe p->on_rq == 0 and get stuck
- * in smp_cond_load_acquire() below.
- *
- * sched_ttwu_pending() try_to_wake_up()
- * STORE p->on_rq = 1 LOAD p->state
- * UNLOCK rq->lock
- *
- * __schedule() (switch to task 'p')
- * LOCK rq->lock smp_rmb();
- * smp_mb__after_spinlock();
- * UNLOCK rq->lock
- *
- * [task p]
- * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
- *
- * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
- * __schedule(). See the comment for smp_mb__after_spinlock().
- *
- * A similar smb_rmb() lives in try_invoke_on_locked_down_task().
- */
- smp_rmb();
- if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
- goto unlock;
+ /*
+ * Ensure we load p->on_rq _after_ p->state, otherwise it would
+ * be possible to, falsely, observe p->on_rq == 0 and get stuck
+ * in smp_cond_load_acquire() below.
+ *
+ * sched_ttwu_pending() try_to_wake_up()
+ * STORE p->on_rq = 1 LOAD p->state
+ * UNLOCK rq->lock
+ *
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * UNLOCK rq->lock
+ *
+ * [task p]
+ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
+ *
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
+ *
+ * A similar smb_rmb() lives in try_invoke_on_locked_down_task().
+ */
+ smp_rmb();
+ if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
+ break;
#ifdef CONFIG_SMP
- /*
- * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
- * possible to, falsely, observe p->on_cpu == 0.
- *
- * One must be running (->on_cpu == 1) in order to remove oneself
- * from the runqueue.
- *
- * __schedule() (switch to task 'p') try_to_wake_up()
- * STORE p->on_cpu = 1 LOAD p->on_rq
- * UNLOCK rq->lock
- *
- * __schedule() (put 'p' to sleep)
- * LOCK rq->lock smp_rmb();
- * smp_mb__after_spinlock();
- * STORE p->on_rq = 0 LOAD p->on_cpu
- *
- * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
- * __schedule(). See the comment for smp_mb__after_spinlock().
- *
- * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
- * schedule()'s deactivate_task() has 'happened' and p will no longer
- * care about it's own p->state. See the comment in __schedule().
- */
- smp_acquire__after_ctrl_dep();
+ /*
+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+ * possible to, falsely, observe p->on_cpu == 0.
+ *
+ * One must be running (->on_cpu == 1) in order to remove oneself
+ * from the runqueue.
+ *
+ * __schedule() (switch to task 'p') try_to_wake_up()
+ * STORE p->on_cpu = 1 LOAD p->on_rq
+ * UNLOCK rq->lock
+ *
+ * __schedule() (put 'p' to sleep)
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * STORE p->on_rq = 0 LOAD p->on_cpu
+ *
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
+ *
+ * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
+ * schedule()'s deactivate_task() has 'happened' and p will no longer
+ * care about it's own p->state. See the comment in __schedule().
+ */
+ smp_acquire__after_ctrl_dep();
- /*
- * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
- * == 0), which means we need to do an enqueue, change p->state to
- * TASK_WAKING such that we can unlock p->pi_lock before doing the
- * enqueue, such as ttwu_queue_wakelist().
- */
- WRITE_ONCE(p->__state, TASK_WAKING);
+ /*
+ * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
+ * == 0), which means we need to do an enqueue, change p->state to
+ * TASK_WAKING such that we can unlock p->pi_lock before doing the
+ * enqueue, such as ttwu_queue_wakelist().
+ */
+ WRITE_ONCE(p->__state, TASK_WAKING);
- /*
- * If the owning (remote) CPU is still in the middle of schedule() with
- * this task as prev, considering queueing p on the remote CPUs wake_list
- * which potentially sends an IPI instead of spinning on p->on_cpu to
- * let the waker make forward progress. This is safe because IRQs are
- * disabled and the IPI will deliver after on_cpu is cleared.
- *
- * Ensure we load task_cpu(p) after p->on_cpu:
- *
- * set_task_cpu(p, cpu);
- * STORE p->cpu = @cpu
- * __schedule() (switch to task 'p')
- * LOCK rq->lock
- * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
- * STORE p->on_cpu = 1 LOAD p->cpu
- *
- * to ensure we observe the correct CPU on which the task is currently
- * scheduling.
- */
- if (smp_load_acquire(&p->on_cpu) &&
- ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
- goto unlock;
+ /*
+ * If the owning (remote) CPU is still in the middle of schedule() with
+ * this task as prev, considering queueing p on the remote CPUs wake_list
+ * which potentially sends an IPI instead of spinning on p->on_cpu to
+ * let the waker make forward progress. This is safe because IRQs are
+ * disabled and the IPI will deliver after on_cpu is cleared.
+ *
+ * Ensure we load task_cpu(p) after p->on_cpu:
+ *
+ * set_task_cpu(p, cpu);
+ * STORE p->cpu = @cpu
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock
+ * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
+ * STORE p->on_cpu = 1 LOAD p->cpu
+ *
+ * to ensure we observe the correct CPU on which the task is currently
+ * scheduling.
+ */
+ if (smp_load_acquire(&p->on_cpu) &&
+ ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
+ break;
- /*
- * If the owning (remote) CPU is still in the middle of schedule() with
- * this task as prev, wait until it's done referencing the task.
- *
- * Pairs with the smp_store_release() in finish_task().
- *
- * This ensures that tasks getting woken will be fully ordered against
- * their previous state and preserve Program Order.
- */
- smp_cond_load_acquire(&p->on_cpu, !VAL);
+ /*
+ * If the owning (remote) CPU is still in the middle of schedule() with
+ * this task as prev, wait until it's done referencing the task.
+ *
+ * Pairs with the smp_store_release() in finish_task().
+ *
+ * This ensures that tasks getting woken will be fully ordered against
+ * their previous state and preserve Program Order.
+ */
+ smp_cond_load_acquire(&p->on_cpu, !VAL);
- cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
- if (task_cpu(p) != cpu) {
- if (p->in_iowait) {
- delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
- }
+ cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
+ if (task_cpu(p) != cpu) {
+ if (p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
- wake_flags |= WF_MIGRATED;
- psi_ttwu_dequeue(p);
- set_task_cpu(p, cpu);
- }
+ wake_flags |= WF_MIGRATED;
+ psi_ttwu_dequeue(p);
+ set_task_cpu(p, cpu);
+ }
#else
- cpu = task_cpu(p);
+ cpu = task_cpu(p);
#endif /* CONFIG_SMP */
- ttwu_queue(p, cpu, wake_flags);
-unlock:
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ ttwu_queue(p, cpu, wake_flags);
+ }
out:
if (success)
ttwu_stat(p, task_cpu(p), wake_flags);
- preempt_enable();
return success;
}