From: Peter Zijlstra <peterz@infradead.org>
To: torvalds@linux-foundation.org, keescook@chromium.org,
gregkh@linuxfoundation.org, pbonzini@redhat.com
Cc: masahiroy@kernel.org, nathan@kernel.org, ndesaulniers@google.com,
nicolas@fjasle.eu, catalin.marinas@arm.com, will@kernel.org,
vkoul@kernel.org, trix@redhat.com, ojeda@kernel.org,
peterz@infradead.org, mingo@redhat.com, longman@redhat.com,
boqun.feng@gmail.com, dennis@kernel.org, tj@kernel.org,
cl@linux.com, acme@kernel.org, mark.rutland@arm.com,
alexander.shishkin@linux.intel.com, jolsa@kernel.org,
namhyung@kernel.org, irogers@google.com, adrian.hunter@intel.com,
juri.lelli@redhat.com, vincent.guittot@linaro.org,
dietmar.eggemann@arm.com, rostedt@goodmis.org,
bsegall@google.com, mgorman@suse.de, bristot@redhat.com,
vschneid@redhat.com, paulmck@kernel.org, frederic@kernel.org,
quic_neeraju@quicinc.com, joel@joelfernandes.org,
josh@joshtriplett.org, mathieu.desnoyers@efficios.com,
jiangshanlai@gmail.com, rientjes@google.com, vbabka@suse.cz,
roman.gushchin@linux.dev, 42.hyeyoo@gmail.com, apw@canonical.com,
joe@perches.com, dwaipayanray1@gmail.com,
lukas.bulwahn@gmail.com, john.johansen@canonical.com,
paul@paul-moore.com, jmorris@namei.org, serge@hallyn.com,
linux-kbuild@vger.kernel.org, linux-kernel@vger.kernel.org,
dmaengine@vger.kernel.org, llvm@lists.linux.dev,
linux-perf-users@vger.kernel.org, rcu@vger.kernel.org,
linux-security-module@vger.kernel.org, tglx@linutronix.de,
ravi.bangoria@amd.com, error27@gmail.com,
luc.vanoostenryck@gmail.com
Subject: [PATCH v3 21/57] sched: Misc cleanups
Date: Mon, 12 Jun 2023 11:07:34 +0200
Message-ID: <20230612093539.014199820@infradead.org>
In-Reply-To: <20230612090713.652690195@infradead.org>
Convert the remaining assorted lock/unlock pairs in kernel/sched/core.c
(rcu_read_lock(), preempt_disable(), uclamp_mutex, rq and task-rq locks)
over to scope-based guard() / scoped_guard() use, and let the implied
unlock-on-return drop a few goto/out labels along the way.
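
The helpers come from the __cleanup() based infrastructure added in
patch 03/57: guard(type)(args) acquires a lock in the constructor of a
compiler-generated variable and releases it in the destructor, which
GCC/clang run via __attribute__((cleanup)) when the variable goes out
of scope; scoped_guard (type, args) { } does the same for an explicit
block. A minimal sketch of the pattern, modelled on the in-tree mutex
guard -- my_lock, my_lock_acquire() and my_lock_release() are made-up
names for illustration:

  DEFINE_GUARD(my_lock, struct my_lock *,
               my_lock_acquire(_T), my_lock_release(_T))

  static int example(struct my_lock *l)
  {
          guard(my_lock)(l);        /* my_lock_acquire(l) */

          if (!l->ready)            /* made-up condition */
                  return -EBUSY;    /* my_lock_release(l) runs here */

          return 0;                 /* ... and on this path too */
  }

Because every return path runs the destructor, the conversions below
can replace "unlock; return" sequences with plain returns, while
scoped_guard() is used where the critical section has to end before
the function does.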
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/sched/core.c | 163 ++++++++++++++++++++--------------------------------
1 file changed, 63 insertions(+), 100 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1454,16 +1454,12 @@ static void __uclamp_update_util_min_rt_
static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
- struct rq_flags rf;
- struct rq *rq;
-
if (!rt_task(p))
return;
/* Protect updates to p->uclamp_* */
- rq = task_rq_lock(p, &rf);
+ guard(task_rq_lock)(p);
__uclamp_update_util_min_rt_default(p);
- task_rq_unlock(rq, p, &rf);
}
static inline struct uclamp_se
@@ -1759,9 +1755,8 @@ static void uclamp_update_root_tg(void)
uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
sysctl_sched_uclamp_util_max, false);
- rcu_read_lock();
+ guard(rcu)();
cpu_util_update_eff(&root_task_group.css);
- rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
@@ -1788,10 +1783,9 @@ static void uclamp_sync_util_min_rt_defa
smp_mb__after_spinlock();
read_unlock(&tasklist_lock);
- rcu_read_lock();
+ guard(rcu)();
for_each_process_thread(g, p)
uclamp_update_util_min_rt_default(p);
- rcu_read_unlock();
}
static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
@@ -2243,10 +2237,9 @@ void migrate_disable(void)
return;
}
- preempt_disable();
+ guard(preempt)();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
- preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
@@ -2270,7 +2263,7 @@ void migrate_enable(void)
* Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
*/
- preempt_disable();
+ guard(preempt)();
if (p->cpus_ptr != &p->cpus_mask)
__set_cpus_allowed_ptr(p, &ac);
/*
@@ -2281,7 +2274,6 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
- preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
@@ -3449,13 +3441,11 @@ unsigned long wait_task_inactive(struct
*/
void kick_process(struct task_struct *p)
{
- int cpu;
+ guard(preempt)();
+ int cpu = task_cpu(p);
- preempt_disable();
- cpu = task_cpu(p);
if ((cpu != smp_processor_id()) && task_curr(p))
smp_send_reschedule(cpu);
- preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
@@ -6300,8 +6290,9 @@ static void sched_core_balance(struct rq
struct sched_domain *sd;
int cpu = cpu_of(rq);
- preempt_disable();
- rcu_read_lock();
+ guard(preempt)();
+ guard(rcu)();
+
raw_spin_rq_unlock_irq(rq);
for_each_domain(cpu, sd) {
if (need_resched())
@@ -6311,8 +6302,6 @@ static void sched_core_balance(struct rq
break;
}
raw_spin_rq_lock_irq(rq);
- rcu_read_unlock();
- preempt_enable();
}
static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
@@ -8169,8 +8158,6 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pi
#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
- int ret = 0;
-
/*
* If the task isn't a deadline task or admission control is
* disabled then we don't care about affinity changes.
@@ -8184,11 +8171,11 @@ int dl_task_check_affinity(struct task_s
* tasks allowed to run on all the CPUs in the task's
* root_domain.
*/
- rcu_read_lock();
+ guard(rcu)();
if (!cpumask_subset(task_rq(p)->rd->span, mask))
- ret = -EBUSY;
- rcu_read_unlock();
- return ret;
+ return -EBUSY;
+
+ return 0;
}
#endif
@@ -9197,10 +9184,8 @@ int task_can_attach(struct task_struct *
* success of set_cpus_allowed_ptr() on all attached tasks
* before cpus_mask may be changed.
*/
- if (p->flags & PF_NO_SETAFFINITY) {
- ret = -EINVAL;
- goto out;
- }
+ if (p->flags & PF_NO_SETAFFINITY)
+ return -EINVAL;
if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
cs_effective_cpus)) {
@@ -9211,7 +9196,6 @@ int task_can_attach(struct task_struct *
ret = dl_cpu_busy(cpu, p);
}
-out:
return ret;
}
@@ -10433,11 +10417,9 @@ static int cpu_cgroup_css_online(struct
#ifdef CONFIG_UCLAMP_TASK_GROUP
/* Propagate the effective uclamp value for the new group */
- mutex_lock(&uclamp_mutex);
- rcu_read_lock();
+ guard(mutex)(&uclamp_mutex);
+ guard(rcu)();
cpu_util_update_eff(css);
- rcu_read_unlock();
- mutex_unlock(&uclamp_mutex);
#endif
return 0;
@@ -10588,8 +10570,8 @@ static ssize_t cpu_uclamp_write(struct k
static_branch_enable(&sched_uclamp_used);
- mutex_lock(&uclamp_mutex);
- rcu_read_lock();
+ guard(mutex)(&uclamp_mutex);
+ guard(rcu)();
tg = css_tg(of_css(of));
if (tg->uclamp_req[clamp_id].value != req.util)
@@ -10604,9 +10586,6 @@ static ssize_t cpu_uclamp_write(struct k
/* Update effective clamps to track the most restrictive value */
cpu_util_update_eff(of_css(of));
- rcu_read_unlock();
- mutex_unlock(&uclamp_mutex);
-
return nbytes;
}
@@ -10632,10 +10611,10 @@ static inline void cpu_uclamp_print(stru
u64 percent;
u32 rem;
- rcu_read_lock();
- tg = css_tg(seq_css(sf));
- util_clamp = tg->uclamp_req[clamp_id].value;
- rcu_read_unlock();
+ scoped_guard (rcu) {
+ tg = css_tg(seq_css(sf));
+ util_clamp = tg->uclamp_req[clamp_id].value;
+ }
if (util_clamp == SCHED_CAPACITY_SCALE) {
seq_puts(sf, "max\n");
@@ -10952,7 +10931,6 @@ static int tg_cfs_schedulable_down(struc
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
- int ret;
struct cfs_schedulable_data data = {
.tg = tg,
.period = period,
@@ -10964,11 +10942,8 @@ static int __cfs_schedulable(struct task
do_div(data.quota, NSEC_PER_USEC);
}
- rcu_read_lock();
- ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
- rcu_read_unlock();
-
- return ret;
+ guard(rcu)();
+ return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
}
static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
@@ -11529,14 +11504,12 @@ int __sched_mm_cid_migrate_from_fetch_ci
* are not the last task to be migrated from this cpu for this mm, so
* there is no need to move src_cid to the destination cpu.
*/
- rcu_read_lock();
+ guard(rcu)();
src_task = rcu_dereference(src_rq->curr);
if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
- rcu_read_unlock();
t->last_mm_cid = -1;
return -1;
}
- rcu_read_unlock();
return src_cid;
}
@@ -11580,18 +11553,17 @@ int __sched_mm_cid_migrate_from_try_stea
* the lazy-put flag, this task will be responsible for transitioning
* from lazy-put flag set to MM_CID_UNSET.
*/
- rcu_read_lock();
- src_task = rcu_dereference(src_rq->curr);
- if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
- rcu_read_unlock();
- /*
- * We observed an active task for this mm, there is therefore
- * no point in moving this cid to the destination cpu.
- */
- t->last_mm_cid = -1;
- return -1;
+ scoped_guard (rcu) {
+ src_task = rcu_dereference(src_rq->curr);
+ if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
+ /*
+ * We observed an active task for this mm, there is therefore
+ * no point in moving this cid to the destination cpu.
+ */
+ t->last_mm_cid = -1;
+ return -1;
+ }
}
- rcu_read_unlock();
/*
* The src_cid is unused, so it can be unset.
@@ -11664,7 +11636,6 @@ static void sched_mm_cid_remote_clear(st
{
struct rq *rq = cpu_rq(cpu);
struct task_struct *t;
- unsigned long flags;
int cid, lazy_cid;
cid = READ_ONCE(pcpu_cid->cid);
@@ -11699,23 +11670,21 @@ static void sched_mm_cid_remote_clear(st
* the lazy-put flag, that task will be responsible for transitioning
* from lazy-put flag set to MM_CID_UNSET.
*/
- rcu_read_lock();
- t = rcu_dereference(rq->curr);
- if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
- rcu_read_unlock();
- return;
+ scoped_guard (rcu) {
+ t = rcu_dereference(rq->curr);
+ if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
+ return;
}
- rcu_read_unlock();
/*
* The cid is unused, so it can be unset.
* Disable interrupts to keep the window of cid ownership without rq
* lock small.
*/
- local_irq_save(flags);
- if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
- __mm_cid_put(mm, cid);
- local_irq_restore(flags);
+ scoped_guard (irqsave) {
+ if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
+ __mm_cid_put(mm, cid);
+ }
}
static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
@@ -11737,14 +11706,13 @@ static void sched_mm_cid_remote_clear_ol
* snapshot associated with this cid if an active task using the mm is
* observed on this rq.
*/
- rcu_read_lock();
- curr = rcu_dereference(rq->curr);
- if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
- WRITE_ONCE(pcpu_cid->time, rq_clock);
- rcu_read_unlock();
- return;
+ scoped_guard (rcu) {
+ curr = rcu_dereference(rq->curr);
+ if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
+ WRITE_ONCE(pcpu_cid->time, rq_clock);
+ return;
+ }
}
- rcu_read_unlock();
if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
return;
@@ -11838,7 +11806,6 @@ void task_tick_mm_cid(struct rq *rq, str
void sched_mm_cid_exit_signals(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq_flags rf;
struct rq *rq;
if (!mm)
@@ -11846,7 +11813,7 @@ void sched_mm_cid_exit_signals(struct ta
preempt_disable();
rq = this_rq();
- rq_lock_irqsave(rq, &rf);
+ guard(rq_lock_irqsave)(rq);
preempt_enable_no_resched(); /* holding spinlock */
WRITE_ONCE(t->mm_cid_active, 0);
/*
@@ -11856,13 +11823,11 @@ void sched_mm_cid_exit_signals(struct ta
smp_mb();
mm_cid_put(mm);
t->last_mm_cid = t->mm_cid = -1;
- rq_unlock_irqrestore(rq, &rf);
}
void sched_mm_cid_before_execve(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq_flags rf;
struct rq *rq;
if (!mm)
@@ -11870,7 +11835,7 @@ void sched_mm_cid_before_execve(struct t
preempt_disable();
rq = this_rq();
- rq_lock_irqsave(rq, &rf);
+ guard(rq_lock_irqsave)(rq);
preempt_enable_no_resched(); /* holding spinlock */
WRITE_ONCE(t->mm_cid_active, 0);
/*
@@ -11880,13 +11845,11 @@ void sched_mm_cid_before_execve(struct t
smp_mb();
mm_cid_put(mm);
t->last_mm_cid = t->mm_cid = -1;
- rq_unlock_irqrestore(rq, &rf);
}
void sched_mm_cid_after_execve(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq_flags rf;
struct rq *rq;
if (!mm)
@@ -11894,16 +11857,16 @@ void sched_mm_cid_after_execve(struct ta
preempt_disable();
rq = this_rq();
- rq_lock_irqsave(rq, &rf);
- preempt_enable_no_resched(); /* holding spinlock */
- WRITE_ONCE(t->mm_cid_active, 1);
- /*
- * Store t->mm_cid_active before loading per-mm/cpu cid.
- * Matches barrier in sched_mm_cid_remote_clear_old().
- */
- smp_mb();
- t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
- rq_unlock_irqrestore(rq, &rf);
+ scoped_guard (rq_lock_irqsave, rq) {
+ preempt_enable_no_resched(); /* holding spinlock */
+ WRITE_ONCE(t->mm_cid_active, 1);
+ /*
+ * Store t->mm_cid_active before loading per-mm/cpu cid.
+ * Matches barrier in sched_mm_cid_remote_clear_old().
+ */
+ smp_mb();
+ t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
+ }
rseq_set_notify_resume(t);
}