The Linux Kernel Mailing List
 help / color / mirror / Atom feed
* [PATCH v3] locking/rtmutex: Annotate API and implementation
@ 2026-05-08 17:45 Bart Van Assche
  0 siblings, 0 replies; only message in thread
From: Bart Van Assche @ 2026-05-08 17:45 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Marco Elver, Nathan Chancellor, linux-kernel, Bart Van Assche,
	Ingo Molnar, Will Deacon, Boqun Feng, Sebastian Andrzej Siewior,
	Clark Williams, Steven Rostedt, Joel Granados, Alexei Starovoitov,
	Vlastimil Babka

Enable context analysis for struct rt_mutex and annotate all functions that
accept a struct rt_mutex pointer. In the __rt_mutex_lock_common() callers,
instead of adding the __no_context_analysis annotation, emit a runtime
warning if the __rt_mutex_lock_common() return value is not zero and add an
__acquire() statement.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---

Changes compared to v2:
 - Fixed the CONFIG_DEBUG_LOCK_ALLOC=n build.
 - Removed "CONTEXT_ANALYSIS_rtmutex.o := y" from the Makefile because it is not necessary.
 - Converted the "__no_context_analysis" annotations on __rt_mutex_lock_common() callers
   into a WARN_ON_ONCE(ret != 0) + __acquire().
 - Removed __no_context_analysis from __rt_mutex_unlock().

Changes compared to v1:
 - Fixed the CONFIG_PREEMPT_RT=y build.


 include/linux/rtmutex.h      | 22 +++++++++++++++-------
 kernel/locking/rtmutex.c     |  4 ++++
 kernel/locking/rtmutex_api.c | 31 ++++++++++++++++++++++++++++---
 3 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 78e7e588817c..9e1f012f89db 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -56,6 +56,8 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
 #endif
 extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
 
+context_lock_struct(rt_mutex);
+
 /**
  * The rt_mutex structure
  *
@@ -108,8 +110,10 @@ do { \
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
-extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+	__acquires(lock);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
+	__acquires(lock);
 #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
 #define rt_mutex_lock_nest_lock(lock, nest_lock)			\
 	do {								\
@@ -118,15 +122,19 @@ extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *
 	} while (0)
 
 #else
-extern void rt_mutex_lock(struct rt_mutex *lock);
+extern void rt_mutex_lock(struct rt_mutex *lock) __acquires(lock);
 #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
 #define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
 #endif
 
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_lock_killable(struct rt_mutex *lock);
-extern int rt_mutex_trylock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock)
+	__cond_acquires(0, lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock)
+	__cond_acquires(0, lock);
+extern int rt_mutex_trylock(struct rt_mutex *lock)
+	__cond_acquires(true, lock);
 
-extern void rt_mutex_unlock(struct rt_mutex *lock);
+extern void rt_mutex_unlock(struct rt_mutex *lock)
+	__releases(lock);
 
 #endif
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f386ea6c792..9147d6a31b78 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -484,6 +484,7 @@ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_nod
 
 static __always_inline void
 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
+	__must_hold(&lock->wait_lock)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -492,6 +493,7 @@ rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
 
 static __always_inline void
 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
+	__must_hold(&lock->wait_lock)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -1092,6 +1094,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
 static int __sched
 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
 		     struct rt_mutex_waiter *waiter)
+	__must_hold(&lock->wait_lock)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -1319,6 +1322,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
  */
 static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
 					    struct rt_mutex_base *lock)
+	__must_hold(&lock->wait_lock)
 {
 	struct rt_mutex_waiter *waiter;
 
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 124219aea46e..7c40b91422a0 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -41,6 +41,7 @@ static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
 						  unsigned int state,
 						  struct lockdep_map *nest_lock,
 						  unsigned int subclass)
+	__cond_acquires(0, lock)
 {
 	int ret;
 
@@ -67,13 +68,27 @@ EXPORT_SYMBOL(rt_mutex_base_init);
  */
 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 {
-	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
+	if (__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass) == 0)
+		return;
+	/*
+	 * The code below is never reached because __rt_mutex_lock_common() only
+	 * returns an error code if interrupted by a signal or upon a timeout.
+	 */
+	WARN_ON_ONCE(true);
+	__acquire(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
 
 void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
 {
-	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
+	if (__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0) == 0)
+		return;
+	/*
+	 * The code below is never reached because __rt_mutex_lock_common() only
+	 * returns an error code if interrupted by a signal or upon a timeout.
+	 */
+	WARN_ON_ONCE(true);
+	__acquire(lock);
 }
 EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
 
@@ -86,7 +101,14 @@ EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
+	if (__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0) == 0)
+		return;
+	/*
+	 * The code below is never reached because __rt_mutex_lock_common() only
+	 * returns an error code if interrupted by a signal or upon a timeout.
+	 */
+	WARN_ON_ONCE(true);
+	__acquire(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 #endif
@@ -157,6 +179,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
 	__rt_mutex_unlock(&lock->rtmutex);
+	__release(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
@@ -182,6 +205,7 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
  */
 bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
 				     struct rt_wake_q_head *wqh)
+	__must_hold(&lock->wait_lock)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -312,6 +336,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
 					struct rt_mutex_waiter *waiter,
 					struct task_struct *task,
 					struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
 	int ret;
 

^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2026-05-08 17:45 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-08 17:45 [PATCH v3] locking/rtmutex: Annotate API and implementation Bart Van Assche

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox