From: Peter Zijlstra <peterz@infradead.org>
To: Namhyung Kim <namhyung@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>, Will Deacon <will@kernel.org>,
Waiman Long <longman@redhat.com>,
Boqun Feng <boqun.feng@gmail.com>,
LKML <linux-kernel@vger.kernel.org>,
Thomas Gleixner <tglx@linutronix.de>,
Steven Rostedt <rostedt@goodmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Byungchul Park <byungchul.park@lge.com>,
"Paul E. McKenney" <paulmck@kernel.org>,
Arnd Bergmann <arnd@arndb.de>,
linux-arch@vger.kernel.org, bpf@vger.kernel.org,
Radoslaw Burny <rburny@google.com>
Subject: Re: [PATCH 2/4] locking: Apply contention tracepoints in the slow path
Date: Tue, 1 Mar 2022 10:03:54 +0100 [thread overview]
Message-ID: <Yh3heodejlBiwqLj@hirez.programming.kicks-ass.net> (raw)
In-Reply-To: <20220301010412.431299-3-namhyung@kernel.org>
On Mon, Feb 28, 2022 at 05:04:10PM -0800, Namhyung Kim wrote:
> @@ -171,9 +172,12 @@ bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
> if (try)
> return false;
>
> + trace_contention_begin(sem, _RET_IP_,
> + LCB_F_READ | LCB_F_PERCPU | TASK_UNINTERRUPTIBLE);
That is a bit unwieldy, isn't it?
> preempt_enable();
> percpu_rwsem_wait(sem, /* .reader = */ true);
> preempt_disable();
> + trace_contention_end(sem);
>
> return true;
> }
> @@ -224,8 +228,13 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
> * Try set sem->block; this provides writer-writer exclusion.
> * Having sem->block set makes new readers block.
> */
> - if (!__percpu_down_write_trylock(sem))
> + if (!__percpu_down_write_trylock(sem)) {
> + unsigned int flags = LCB_F_WRITE | LCB_F_PERCPU | TASK_UNINTERRUPTIBLE;
> +
> + trace_contention_begin(sem, _RET_IP_, flags);
> percpu_rwsem_wait(sem, /* .reader = */ false);
> + trace_contention_end(sem);
> + }
>
> /* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */
>
Wouldn't it be easier to stick all that inside percpu_rwsem_wait() and
have it only once? You can even re-frob the wait loop such that the
tracepoint can use current->__state or something.
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index c9fdae94e098..ca01f8ff88e5 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -154,13 +154,16 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
}
spin_unlock_irq(&sem->waiters.lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ trace_contention_begin(sem, _RET_IP_, LCB_F_PERCPU | LCB_F_WRITE*!reader);
while (wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
if (!smp_load_acquire(&wq_entry.private))
break;
schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
+ trace_contention_end(sem);
}
bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
> diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
> index 8555c4efe97c..e49f5d2a232b 100644
> --- a/kernel/locking/rtmutex.c
> +++ b/kernel/locking/rtmutex.c
> @@ -24,6 +24,8 @@
> #include <linux/sched/wake_q.h>
> #include <linux/ww_mutex.h>
>
> +#include <trace/events/lock.h>
> +
> #include "rtmutex_common.h"
>
> #ifndef WW_RT
> @@ -1652,10 +1654,16 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
> static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
> unsigned int state)
> {
> + int ret;
> +
> if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
> return 0;
>
> - return rt_mutex_slowlock(lock, NULL, state);
> + trace_contention_begin(lock, _RET_IP_, LCB_F_RT | state);
> + ret = rt_mutex_slowlock(lock, NULL, state);
> + trace_contention_end(lock);
> +
> + return ret;
> }
> #endif /* RT_MUTEX_BUILD_MUTEX */
>
> @@ -1718,9 +1726,11 @@ static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
> {
> unsigned long flags;
>
> + trace_contention_begin(lock, _RET_IP_, LCB_F_RT | TASK_RTLOCK_WAIT);
> raw_spin_lock_irqsave(&lock->wait_lock, flags);
> rtlock_slowlock_locked(lock);
> raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
> + trace_contention_end(lock);
> }
Same, if you do it one level in, you can have the tracepoint itself look
at current->__state. Also, you seem to have forgotten to trace the
return value. Now you can't tell whether the lock was acquired, was denied
(ww_mutex), or we were interrupted.
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8555c4efe97c..18b9f4bf6f34 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1579,6 +1579,8 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
set_current_state(state);
+ trace_contention_begin(lock, _RET_IP_, LCB_F_RT);
+
ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
if (likely(!ret))
ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
@@ -1601,6 +1603,9 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
+
+ trace_contention_end(lock, ret);
+
return ret;
}
@@ -1683,6 +1688,8 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
/* Save current state and set state to TASK_RTLOCK_WAIT */
current_save_and_set_rtlock_wait_state();
+ trace_contention_begin(lock, _RET_IP_, LCB_F_RT);
+
task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
for (;;) {
@@ -1703,6 +1710,8 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
set_current_state(TASK_RTLOCK_WAIT);
}
+ trace_contention_end(lock, 0);
+
/* Restore the task state */
current_restore_rtlock_saved_state();
next prev parent reply other threads:[~2022-03-01 9:04 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-01 1:04 [RFC 0/4] locking: Add new lock contention tracepoints (v2) Namhyung Kim
2022-03-01 1:04 ` [PATCH 1/4] locking: Add lock contention tracepoints Namhyung Kim
2022-03-01 1:04 ` [PATCH 2/4] locking: Apply contention tracepoints in the slow path Namhyung Kim
2022-03-01 8:43 ` Peter Zijlstra
2022-03-01 18:03 ` Namhyung Kim
2022-03-01 9:03 ` Peter Zijlstra [this message]
2022-03-01 14:45 ` Steven Rostedt
2022-03-01 18:14 ` Namhyung Kim
2022-03-01 18:11 ` Namhyung Kim
2022-03-14 21:44 ` Namhyung Kim
2022-03-01 1:04 ` [PATCH 3/4] locking/mutex: Pass proper call-site ip Namhyung Kim
2022-03-01 9:05 ` Peter Zijlstra
2022-03-01 14:53 ` Steven Rostedt
2022-03-01 19:47 ` Peter Zijlstra
2022-03-04 7:28 ` Namhyung Kim
2022-03-01 18:25 ` Namhyung Kim
2022-03-01 1:04 ` [PATCH 4/4] locking/rwsem: " Namhyung Kim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Yh3heodejlBiwqLj@hirez.programming.kicks-ass.net \
--to=peterz@infradead.org \
--cc=arnd@arndb.de \
--cc=boqun.feng@gmail.com \
--cc=bpf@vger.kernel.org \
--cc=byungchul.park@lge.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=longman@redhat.com \
--cc=mathieu.desnoyers@efficios.com \
--cc=mingo@kernel.org \
--cc=namhyung@kernel.org \
--cc=paulmck@kernel.org \
--cc=rburny@google.com \
--cc=rostedt@goodmis.org \
--cc=tglx@linutronix.de \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox