From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>, Will Deacon <will@kernel.org>,
	Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: [patch 12/14] locking/rtmutex: Consolidate the fast/slowpath invocation
Date: Tue, 23 Mar 2021 22:30:31 +0100
Message-ID: <20210323213708.699640478@linutronix.de>
In-Reply-To: <20210323213019.217008708@linutronix.de>

The indirection via a function pointer (which is at least optimized into a
tail call by the compiler) makes the code hard to read.

Clean it up and move the futex-related trylock functions down to the futex
section.

Move the wake_q wakeup into rt_mutex_slowunlock(). There is no point in
handing it to the caller, as the futex code uses a different function.
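
Condensed from the hunks below (the lockless fast-unlock retry loop and
debug details are elided), the unlock slow path then owns the wake_q from
start to finish:

static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* [fast-unlock retry loop, see the second hunk below] */

	/* Queue the top waiter for wakeup once wait_lock is dropped */
	mark_wakeup_next_waiter(&wake_q, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* wake_up_q() and the pairing preempt_enable() */
	rt_mutex_postunlock(&wake_q);
}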

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/locking/rtmutex.c |  131 +++++++++++++++++------------------------------
 1 file changed, 49 insertions(+), 82 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1299,13 +1299,22 @@ static int __sched rt_mutex_slowtrylock(
 }
 
 /*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
+{
+	wake_up_q(wake_q);
+
+	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+	preempt_enable();
+}
+
+/*
  * Slow path to release a rt-mutex.
- *
- * Return whether the current task needs to call rt_mutex_postunlock().
  */
-static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-					struct wake_q_head *wake_q)
+static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
 {
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 
 	/* irqsave required to support early boot calls */
@@ -1347,7 +1356,7 @@ static bool __sched rt_mutex_slowunlock(
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
 		if (unlock_rt_mutex_safe(lock, flags) == true)
-			return false;
+			return;
 		/* Relock the rtmutex and try again */
 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	}
@@ -1358,10 +1367,10 @@ static bool __sched rt_mutex_slowunlock(
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(wake_q, lock);
+	mark_wakeup_next_waiter(&wake_q, lock);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	return true; /* call rt_mutex_postunlock() */
+	rt_mutex_postunlock(&wake_q);
 }
 
 /*
@@ -1370,60 +1379,20 @@ static bool __sched rt_mutex_slowunlock(
  * The atomic acquire/release ops are compiled away, when either the
  * architecture does not support cmpxchg or when debugging is enabled.
  */
-static __always_inline int
-rt_mutex_fastlock(struct rt_mutex *lock, int state,
-		  int (*slowfn)(struct rt_mutex *lock, int state,
-				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state)
 {
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 0;
+	int ret;
 
-	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-}
+	might_sleep();
 
-static __always_inline int
-rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock))
-{
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 1;
-
-	return slowfn(lock);
-}
-
-/*
- * Performs the wakeup of the top-waiter and re-enables preemption.
- */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
-{
-	wake_up_q(wake_q);
-
-	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-	preempt_enable();
-}
-
-static __always_inline void
-rt_mutex_fastunlock(struct rt_mutex *lock,
-		    bool (*slowfn)(struct rt_mutex *lock,
-				   struct wake_q_head *wqh))
-{
-	DEFINE_WAKE_Q(wake_q);
-
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-		return;
-
-	if (slowfn(lock, &wake_q))
-		rt_mutex_postunlock(&wake_q);
-}
+		return 0;
 
-static __always_inline void __rt_mutex_lock(struct rt_mutex *lock,
-					    unsigned int subclass)
-{
-	might_sleep();
+	ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	if (ret)
+		mutex_release(&lock->dep_map, _RET_IP_);
 
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	return ret;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1435,7 +1404,8 @@ static __always_inline void __rt_mutex_l
  */
 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 {
-	__rt_mutex_lock(lock, subclass);
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	__rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
 
@@ -1448,7 +1418,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	__rt_mutex_lock(lock, 0);
+	__rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 #endif
@@ -1464,32 +1434,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, _RET_IP_);
-
-	return ret;
+	return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
-/*
- * Futex variant, must not use fastpath.
- */
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-	return rt_mutex_slowtrylock(lock);
-}
-
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-	return __rt_mutex_slowtrylock(lock);
-}
-
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
@@ -1508,7 +1456,10 @@ int __sched rt_mutex_trylock(struct rt_m
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+		return 1;
+
+	ret = rt_mutex_slowtrylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1524,10 +1475,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
-	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+		return;
+
+	rt_mutex_slowunlock(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
+/*
+ * Futex variants, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return rt_mutex_slowtrylock(lock);
+}
+
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * __rt_mutex_futex_unlock - Futex variant, that since futex variants
  * do not use the fast-path, can be simple and will not need to retry.

