* [PATCH] locking/rtmutex: Annotate API and implementation
@ 2026-05-02 1:53 Bart Van Assche
2026-05-04 6:00 ` Marco Elver
2026-05-04 9:39 ` Peter Zijlstra
0 siblings, 2 replies; 6+ messages in thread
From: Bart Van Assche @ 2026-05-02 1:53 UTC (permalink / raw)
To: Peter Zijlstra
Cc: Marco Elver, linux-kernel, Bart Van Assche, Ingo Molnar,
Will Deacon, Boqun Feng, Joel Granados, Alexei Starovoitov,
Sebastian Andrzej Siewior, Vlastimil Babka
Add lock context annotations to the rtmutex API and implementation and
enable lock context analysis.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
include/linux/rtmutex.h | 22 +++++++++++++++-------
kernel/locking/Makefile | 1 +
kernel/locking/rtmutex.c | 4 ++++
kernel/locking/rtmutex_api.c | 10 ++++++++--
4 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 78e7e588817c..9e1f012f89db 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -56,6 +56,8 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
#endif
extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+context_lock_struct(rt_mutex);
+
/**
* The rt_mutex structure
*
@@ -108,8 +110,10 @@ do { \
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
-extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+ __acquires(lock);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
+ __acquires(lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#define rt_mutex_lock_nest_lock(lock, nest_lock) \
do { \
@@ -118,15 +122,19 @@ extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *
} while (0)
#else
-extern void rt_mutex_lock(struct rt_mutex *lock);
+extern void rt_mutex_lock(struct rt_mutex *lock) __acquires(lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_lock_killable(struct rt_mutex *lock);
-extern int rt_mutex_trylock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ __cond_acquires(0, lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock)
+ __cond_acquires(0, lock);
+extern int rt_mutex_trylock(struct rt_mutex *lock)
+ __cond_acquires(true, lock);
-extern void rt_mutex_unlock(struct rt_mutex *lock);
+extern void rt_mutex_unlock(struct rt_mutex *lock)
+ __releases(lock);
#endif
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index cee1901d4cff..24dc00e12aa6 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -4,6 +4,7 @@
KCOV_INSTRUMENT := n
CONTEXT_ANALYSIS_mutex.o := y
+CONTEXT_ANALYSIS_rtmutex.o := y
CONTEXT_ANALYSIS_rtmutex_api.o := y
CONTEXT_ANALYSIS_ww_rt_mutex.o := y
CONTEXT_ANALYSIS_rwsem.o := y
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f386ea6c792..9147d6a31b78 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -484,6 +484,7 @@ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_nod
static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -492,6 +493,7 @@ rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -1092,6 +1094,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -1319,6 +1322,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
*/
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
struct rt_mutex_waiter *waiter;
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 124219aea46e..09675b9cb9bd 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -41,6 +41,7 @@ static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
unsigned int state,
struct lockdep_map *nest_lock,
unsigned int subclass)
+ __cond_acquires(0, lock)
{
int ret;
@@ -66,12 +67,14 @@ EXPORT_SYMBOL(rt_mutex_base_init);
* @subclass: the lockdep subclass
*/
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+ __no_context_analysis /* ignoring the return value below is fine in this case */
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
+ __no_context_analysis /* ignoring the return value below is fine in this case */
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
@@ -157,6 +160,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
+ __release(lock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -182,6 +186,7 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
*/
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
struct rt_wake_q_head *wqh)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -312,6 +317,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
int ret;
@@ -526,7 +532,7 @@ static __always_inline int __mutex_lock_common(struct mutex *lock,
unsigned int subclass,
struct lockdep_map *nest_lock,
unsigned long ip)
- __acquires(lock) __no_context_analysis
+ __acquires(lock)
{
int ret;
@@ -648,7 +654,7 @@ EXPORT_SYMBOL(mutex_trylock);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
void __sched mutex_unlock(struct mutex *lock)
- __releases(lock) __no_context_analysis
+ __releases(lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] locking/rtmutex: Annotate API and implementation
2026-05-02 1:53 [PATCH] locking/rtmutex: Annotate API and implementation Bart Van Assche
@ 2026-05-04 6:00 ` Marco Elver
2026-05-04 6:23 ` Bart Van Assche
2026-05-04 9:39 ` Peter Zijlstra
1 sibling, 1 reply; 6+ messages in thread
From: Marco Elver @ 2026-05-04 6:00 UTC (permalink / raw)
To: Bart Van Assche
Cc: Peter Zijlstra, linux-kernel, Ingo Molnar, Will Deacon,
Boqun Feng, Joel Granados, Alexei Starovoitov,
Sebastian Andrzej Siewior, Vlastimil Babka
On Sat, 2 May 2026 at 03:54, Bart Van Assche <bvanassche@acm.org> wrote:
>
> Add lock context annotations to the rtmutex API and implementation and
> enable lock context analysis.
>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
If I'm not mistaken, Peter was working on this concurrently?
https://lore.kernel.org/all/20260121111213.851599178@infradead.org/ ?
> ---
> include/linux/rtmutex.h | 22 +++++++++++++++-------
> kernel/locking/Makefile | 1 +
> kernel/locking/rtmutex.c | 4 ++++
> kernel/locking/rtmutex_api.c | 10 ++++++++--
> 4 files changed, 28 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
> index 78e7e588817c..9e1f012f89db 100644
> --- a/include/linux/rtmutex.h
> +++ b/include/linux/rtmutex.h
> @@ -56,6 +56,8 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
> #endif
> extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
>
> +context_lock_struct(rt_mutex);
> +
> /**
> * The rt_mutex structure
> *
> @@ -108,8 +110,10 @@ do { \
> extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
>
> #ifdef CONFIG_DEBUG_LOCK_ALLOC
> -extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
> -extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
> +extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
> + __acquires(lock);
> +extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
> + __acquires(lock);
> #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
> #define rt_mutex_lock_nest_lock(lock, nest_lock) \
> do { \
> @@ -118,15 +122,19 @@ extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *
> } while (0)
>
> #else
> -extern void rt_mutex_lock(struct rt_mutex *lock);
> +extern void rt_mutex_lock(struct rt_mutex *lock) __acquires(lock);
> #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
> #define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
> #endif
>
> -extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
> -extern int rt_mutex_lock_killable(struct rt_mutex *lock);
> -extern int rt_mutex_trylock(struct rt_mutex *lock);
> +extern int rt_mutex_lock_interruptible(struct rt_mutex *lock)
> + __cond_acquires(0, lock);
> +extern int rt_mutex_lock_killable(struct rt_mutex *lock)
> + __cond_acquires(0, lock);
> +extern int rt_mutex_trylock(struct rt_mutex *lock)
> + __cond_acquires(true, lock);
>
> -extern void rt_mutex_unlock(struct rt_mutex *lock);
> +extern void rt_mutex_unlock(struct rt_mutex *lock)
> + __releases(lock);
>
> #endif
> diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
> index cee1901d4cff..24dc00e12aa6 100644
> --- a/kernel/locking/Makefile
> +++ b/kernel/locking/Makefile
> @@ -4,6 +4,7 @@
> KCOV_INSTRUMENT := n
>
> CONTEXT_ANALYSIS_mutex.o := y
> +CONTEXT_ANALYSIS_rtmutex.o := y
> CONTEXT_ANALYSIS_rtmutex_api.o := y
> CONTEXT_ANALYSIS_ww_rt_mutex.o := y
> CONTEXT_ANALYSIS_rwsem.o := y
> diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
> index 4f386ea6c792..9147d6a31b78 100644
> --- a/kernel/locking/rtmutex.c
> +++ b/kernel/locking/rtmutex.c
> @@ -484,6 +484,7 @@ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_nod
>
> static __always_inline void
> rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
> + __must_hold(&lock->wait_lock)
> {
> lockdep_assert_held(&lock->wait_lock);
>
> @@ -492,6 +493,7 @@ rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
>
> static __always_inline void
> rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
> + __must_hold(&lock->wait_lock)
> {
> lockdep_assert_held(&lock->wait_lock);
>
> @@ -1092,6 +1094,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
> static int __sched
> try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
> struct rt_mutex_waiter *waiter)
> + __must_hold(&lock->wait_lock)
> {
> lockdep_assert_held(&lock->wait_lock);
>
> @@ -1319,6 +1322,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
> */
> static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
> struct rt_mutex_base *lock)
> + __must_hold(&lock->wait_lock)
> {
> struct rt_mutex_waiter *waiter;
>
> diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
> index 124219aea46e..09675b9cb9bd 100644
> --- a/kernel/locking/rtmutex_api.c
> +++ b/kernel/locking/rtmutex_api.c
> @@ -41,6 +41,7 @@ static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
> unsigned int state,
> struct lockdep_map *nest_lock,
> unsigned int subclass)
> + __cond_acquires(0, lock)
> {
> int ret;
>
> @@ -66,12 +67,14 @@ EXPORT_SYMBOL(rt_mutex_base_init);
> * @subclass: the lockdep subclass
> */
> void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
> + __no_context_analysis /* ignoring the return value below is fine in this case */
> {
> __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
> }
> EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
>
> void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
> + __no_context_analysis /* ignoring the return value below is fine in this case */
> {
> __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
> }
> @@ -157,6 +160,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
> {
> mutex_release(&lock->dep_map, _RET_IP_);
> __rt_mutex_unlock(&lock->rtmutex);
> + __release(lock);
> }
> EXPORT_SYMBOL_GPL(rt_mutex_unlock);
>
> @@ -182,6 +186,7 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
> */
> bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
> struct rt_wake_q_head *wqh)
> + __must_hold(&lock->wait_lock)
> {
> lockdep_assert_held(&lock->wait_lock);
>
> @@ -312,6 +317,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
> struct rt_mutex_waiter *waiter,
> struct task_struct *task,
> struct wake_q_head *wake_q)
> + __must_hold(&lock->wait_lock)
> {
> int ret;
>
> @@ -526,7 +532,7 @@ static __always_inline int __mutex_lock_common(struct mutex *lock,
> unsigned int subclass,
> struct lockdep_map *nest_lock,
> unsigned long ip)
> - __acquires(lock) __no_context_analysis
> + __acquires(lock)
> {
> int ret;
>
> @@ -648,7 +654,7 @@ EXPORT_SYMBOL(mutex_trylock);
> #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
>
> void __sched mutex_unlock(struct mutex *lock)
> - __releases(lock) __no_context_analysis
> + __releases(lock)
> {
> mutex_release(&lock->dep_map, _RET_IP_);
> __rt_mutex_unlock(&lock->rtmutex);
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] locking/rtmutex: Annotate API and implementation
2026-05-04 6:00 ` Marco Elver
@ 2026-05-04 6:23 ` Bart Van Assche
0 siblings, 0 replies; 6+ messages in thread
From: Bart Van Assche @ 2026-05-04 6:23 UTC (permalink / raw)
To: Marco Elver
Cc: Peter Zijlstra, linux-kernel, Ingo Molnar, Will Deacon,
Boqun Feng, Joel Granados, Alexei Starovoitov,
Sebastian Andrzej Siewior, Vlastimil Babka
On 5/4/26 8:00 AM, Marco Elver wrote:
> On Sat, 2 May 2026 at 03:54, Bart Van Assche <bvanassche@acm.org> wrote:
>>
>> Add lock context annotations to the rtmutex API and implementation and
>> enable lock context analysis.
>>
>> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
>
> If I'm not mistaken, Peter was working on this concurrently?
> https://lore.kernel.org/all/20260121111213.851599178@infradead.org/ ?
Hmm ... I don't see CONTEXT_ANALYSIS_rtmutex.o := y in any of Peter's
rtmutex patches. Did I perhaps overlook something? According to the
results of this query my patch is the first patch that enables context
analysis for rtmutex.c:
https://lore.kernel.org/all/?q=CONTEXT_ANALYSIS_rtmutex
Thanks,
Bart.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] locking/rtmutex: Annotate API and implementation
2026-05-02 1:53 [PATCH] locking/rtmutex: Annotate API and implementation Bart Van Assche
2026-05-04 6:00 ` Marco Elver
@ 2026-05-04 9:39 ` Peter Zijlstra
2026-05-04 11:56 ` Bart Van Assche
1 sibling, 1 reply; 6+ messages in thread
From: Peter Zijlstra @ 2026-05-04 9:39 UTC (permalink / raw)
To: Bart Van Assche
Cc: Marco Elver, linux-kernel, Ingo Molnar, Will Deacon, Boqun Feng,
Joel Granados, Alexei Starovoitov, Sebastian Andrzej Siewior,
Vlastimil Babka
On Fri, May 01, 2026 at 06:53:49PM -0700, Bart Van Assche wrote:
> Add lock context annotations to the rtmutex API and implementation and
> enable lock context analysis.
>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
This does not build, I need at least the below, but then PREEMPT_RT is
still not happy.
---
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -20,7 +20,7 @@
extern int max_lock_depth;
-struct rt_mutex_base {
+context_lock_struct(rt_mutex_base) {
raw_spinlock_t wait_lock;
struct rb_root_cached waiters __guarded_by(&wait_lock);
struct task_struct *owner __guarded_by(&wait_lock);
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -314,6 +314,7 @@ static __always_inline bool rt_mutex_cmp
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
+ __cond_acquires(true, lock)
{
/*
* With debug enabled rt_mutex_cmpxchg trylock() will always fail.
@@ -1095,6 +1096,7 @@ static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
__must_hold(&lock->wait_lock)
+ __cond_acquires(nonzero, lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -1625,6 +1627,7 @@ static int __sched rt_mutex_slowlock_blo
struct rt_mutex_waiter *waiter,
struct wake_q_head *wake_q)
__releases(&lock->wait_lock) __acquires(&lock->wait_lock)
+ __cond_acquires(0, lock)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct task_struct *owner;
@@ -1715,6 +1718,7 @@ static int __sched __rt_mutex_slowlock(s
struct rt_mutex_waiter *waiter,
struct wake_q_head *wake_q)
__must_hold(&lock->wait_lock)
+ __cond_acquires(0, lock)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct ww_mutex *ww = ww_container_of(rtm);
@@ -1773,6 +1777,7 @@ static inline int __rt_mutex_slowlock_lo
unsigned int state,
struct wake_q_head *wake_q)
__must_hold(&lock->wait_lock)
+ __cond_acquires(0, lock)
{
struct rt_mutex_waiter waiter;
int ret;
@@ -1797,6 +1802,7 @@ static inline int __rt_mutex_slowlock_lo
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state)
+ __cond_acquires(0, lock)
{
DEFINE_WAKE_Q(wake_q);
unsigned long flags;
@@ -1829,6 +1835,7 @@ static int __sched rt_mutex_slowlock(str
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
unsigned int state)
+ __cond_acquires(0, lock)
{
lockdep_assert(!current->pi_blocked_on);
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -88,6 +88,7 @@ EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lo
* @lock: the rt_mutex to be locked
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
+ __no_context_analysis /* ignoring the return value below is fine in this case */
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
@@ -532,7 +533,7 @@ static __always_inline int __mutex_lock_
unsigned int subclass,
struct lockdep_map *nest_lock,
unsigned long ip)
- __acquires(lock)
+ __cond_acquires(0, lock)
{
int ret;
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] locking/rtmutex: Annotate API and implementation
2026-05-04 9:39 ` Peter Zijlstra
@ 2026-05-04 11:56 ` Bart Van Assche
2026-05-04 12:41 ` Peter Zijlstra
0 siblings, 1 reply; 6+ messages in thread
From: Bart Van Assche @ 2026-05-04 11:56 UTC (permalink / raw)
To: Peter Zijlstra
Cc: Marco Elver, linux-kernel, Ingo Molnar, Will Deacon, Boqun Feng,
Joel Granados, Alexei Starovoitov, Sebastian Andrzej Siewior,
Vlastimil Babka
On 5/4/26 11:39 AM, Peter Zijlstra wrote:
> This does not build, I need at least the below, but then PREEMPT_RT is
> still not happy.
Apparently I had tested my patch with PREEMPT_RT=n only. The patch below
should support both PREEMPT_RT=n and PREEMPT_RT=y:
---
include/linux/rtmutex.h | 22 +++++++++++++++-------
kernel/locking/Makefile | 1 +
kernel/locking/rtmutex.c | 5 +++++
kernel/locking/rtmutex_api.c | 6 ++++++
4 files changed, 27 insertions(+), 7 deletions(-)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 78e7e588817c..9e1f012f89db 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -56,6 +56,8 @@ static inline struct task_struct
*rt_mutex_owner(struct rt_mutex_base *lock)
#endif
extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+context_lock_struct(rt_mutex);
+
/**
* The rt_mutex structure
*
@@ -108,8 +110,10 @@ do { \
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name,
struct lock_class_key *key);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int
subclass);
-extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct
lockdep_map *nest_lock);
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int
subclass)
+ __acquires(lock);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct
lockdep_map *nest_lock)
+ __acquires(lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#define rt_mutex_lock_nest_lock(lock, nest_lock) \
do { \
@@ -118,15 +122,19 @@ extern void _rt_mutex_lock_nest_lock(struct
rt_mutex *lock, struct lockdep_map *
} while (0)
#else
-extern void rt_mutex_lock(struct rt_mutex *lock);
+extern void rt_mutex_lock(struct rt_mutex *lock) __acquires(lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_lock_killable(struct rt_mutex *lock);
-extern int rt_mutex_trylock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ __cond_acquires(0, lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock)
+ __cond_acquires(0, lock);
+extern int rt_mutex_trylock(struct rt_mutex *lock)
+ __cond_acquires(true, lock);
-extern void rt_mutex_unlock(struct rt_mutex *lock);
+extern void rt_mutex_unlock(struct rt_mutex *lock)
+ __releases(lock);
#endif
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index cee1901d4cff..24dc00e12aa6 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -4,6 +4,7 @@
KCOV_INSTRUMENT := n
CONTEXT_ANALYSIS_mutex.o := y
+CONTEXT_ANALYSIS_rtmutex.o := y
CONTEXT_ANALYSIS_rtmutex_api.o := y
CONTEXT_ANALYSIS_ww_rt_mutex.o := y
CONTEXT_ANALYSIS_rwsem.o := y
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f386ea6c792..69759fde7d10 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -484,6 +484,7 @@ static __always_inline bool __waiter_less(struct
rb_node *a, const struct rb_nod
static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter
*waiter)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -492,6 +493,7 @@ rt_mutex_enqueue(struct rt_mutex_base *lock, struct
rt_mutex_waiter *waiter)
static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter
*waiter)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -1092,6 +1094,7 @@ static int __sched
rt_mutex_adjust_prio_chain(struct task_struct *task,
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -1319,6 +1322,7 @@ static int __sched task_blocks_on_rt_mutex(struct
rt_mutex_base *lock,
*/
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
struct rt_mutex_waiter *waiter;
@@ -1479,6 +1483,7 @@ static void __sched rt_mutex_slowunlock(struct
rt_mutex_base *lock)
}
static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
+ __no_context_analysis
{
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
return;
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 124219aea46e..23ad997ddd65 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -41,6 +41,7 @@ static __always_inline int
__rt_mutex_lock_common(struct rt_mutex *lock,
unsigned int state,
struct lockdep_map *nest_lock,
unsigned int subclass)
+ __cond_acquires(0, lock)
{
int ret;
@@ -66,12 +67,14 @@ EXPORT_SYMBOL(rt_mutex_base_init);
* @subclass: the lockdep subclass
*/
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int
subclass)
+ __no_context_analysis /* ignoring the return value below is fine in
this case */
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct
lockdep_map *nest_lock)
+ __no_context_analysis /* ignoring the return value below is fine in
this case */
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
@@ -157,6 +160,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
+ __release(lock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -182,6 +186,7 @@ int __sched __rt_mutex_futex_trylock(struct
rt_mutex_base *lock)
*/
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
struct rt_wake_q_head *wqh)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
@@ -312,6 +317,7 @@ int __sched __rt_mutex_start_proxy_lock(struct
rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
int ret;
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] locking/rtmutex: Annotate API and implementation
2026-05-04 11:56 ` Bart Van Assche
@ 2026-05-04 12:41 ` Peter Zijlstra
0 siblings, 0 replies; 6+ messages in thread
From: Peter Zijlstra @ 2026-05-04 12:41 UTC (permalink / raw)
To: Bart Van Assche
Cc: Marco Elver, linux-kernel, Ingo Molnar, Will Deacon, Boqun Feng,
Joel Granados, Alexei Starovoitov, Sebastian Andrzej Siewior,
Vlastimil Babka
On Mon, May 04, 2026 at 01:56:03PM +0200, Bart Van Assche wrote:
> On 5/4/26 11:39 AM, Peter Zijlstra wrote:
> > This does not build, I need at least the below, but then PREEMPT_RT is
> > still not happy.
> Apparently I had tested my patch with PREEMPT_RT=n only. The patch below
> should support both PREEMPT_RT=n and PREEMPT_RT=y:
Is whitespace mangled :/
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2026-05-04 12:41 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-02 1:53 [PATCH] locking/rtmutex: Annotate API and implementation Bart Van Assche
2026-05-04 6:00 ` Marco Elver
2026-05-04 6:23 ` Bart Van Assche
2026-05-04 9:39 ` Peter Zijlstra
2026-05-04 11:56 ` Bart Van Assche
2026-05-04 12:41 ` Peter Zijlstra
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox