linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Mike Galbraith <efault@gmx.de>
To: Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Thomas Gleixner <tglx@linutronix.de>
Cc: LKML <linux-kernel@vger.kernel.org>,
	linux-rt-users <linux-rt-users@vger.kernel.org>,
	Steven Rostedt <rostedt@goodmis.org>
Subject: [patch-rt] rtmutex: Fix lock stealing logic
Date: Sun, 18 Jun 2017 19:01:57 +0200	[thread overview]
Message-ID: <1497805317.6229.3.camel@gmx.de> (raw)
In-Reply-To: <20170616105610.rbc6itylcrsla56l@linutronix.de>


1. When trying to acquire an rtmutex, we first try to grab it without
queueing the waiter, and explicitly check for that initial attempt
in the !waiter path of __try_to_take_rt_mutex().  Checking whether
the lock taker is the top waiter before allowing a steal attempt in
that path is a thinko: the lock taker has not yet blocked.

2. It seems wrong to change the definition of rt_mutex_waiter_less()
to mean "less than, or perhaps equal" when we already have a separate
rt_mutex_waiter_equal().

Remove the thinko, and implement rt_mutex_stealable(steal_mode) using
a restored rt_mutex_waiter_less() and rt_mutex_waiter_equal().

Signed-off-by: Mike Galbraith <efault@gmx.de>
Fixes: f34d077c6f28 ("rt: Add the preempt-rt lock replacement APIs")
---
 kernel/locking/rtmutex.c |   69 +++++++++++++++++++++++------------------------
 1 file changed, 35 insertions(+), 34 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -236,26 +236,19 @@ static inline bool unlock_rt_mutex_safe(
 }
 #endif
 
-#define STEAL_NORMAL  0
-#define STEAL_LATERAL 1
-
 /*
  * Only use with rt_mutex_waiter_{less,equal}()
  */
-#define task_to_waiter(p)	\
-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+#define task_to_waiter(p) &(struct rt_mutex_waiter) \
+	{ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
 
 static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
-		     struct rt_mutex_waiter *right, int mode)
+		     struct rt_mutex_waiter *right)
 {
-	if (mode == STEAL_NORMAL) {
-		if (left->prio < right->prio)
-			return 1;
-	} else {
-		if (left->prio <= right->prio)
-			return 1;
-	}
+	if (left->prio < right->prio)
+		return 1;
+
 	/*
 	 * If both waiters have dl_prio(), we check the deadlines of the
 	 * associated tasks.
@@ -287,6 +280,25 @@ rt_mutex_waiter_equal(struct rt_mutex_wa
 	return 1;
 }
 
+#define STEAL_NORMAL  0
+#define STEAL_LATERAL 1
+
+static inline int rt_mutex_stealable(struct rt_mutex_waiter *thief,
+				     struct rt_mutex_waiter *victim, int mode)
+{
+	if (rt_mutex_waiter_less(thief, victim))
+		return 1;
+
+	/*
+	 * Note that RT tasks are excluded from lateral-steals
+	 * to prevent the introduction of an unbounded latency.
+	 */
+	if (mode == STEAL_NORMAL || rt_task(thief->task))
+		return 0;
+
+	return rt_mutex_waiter_equal(thief, victim);
+}
+
 static void
 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 {
@@ -298,7 +310,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
 	while (*link) {
 		parent = *link;
 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
-		if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+		if (rt_mutex_waiter_less(waiter, entry)) {
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
@@ -337,7 +349,7 @@ rt_mutex_enqueue_pi(struct task_struct *
 	while (*link) {
 		parent = *link;
 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
-		if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+		if (rt_mutex_waiter_less(waiter, entry)) {
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
@@ -847,6 +859,7 @@ static int rt_mutex_adjust_prio_chain(st
  * @task:   The task which wants to acquire the lock
  * @waiter: The waiter that is queued to the lock's wait tree if the
  *	    callsite called task_blocked_on_lock(), otherwise NULL
+ * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
  */
 static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 				  struct task_struct *task,
@@ -890,10 +903,9 @@ static int __try_to_take_rt_mutex(struct
 		 * @lock, give up.
 		 */
 		if (waiter != rt_mutex_top_waiter(lock)) {
-			/* XXX rt_mutex_waiter_less() ? */
+			/* XXX rt_mutex_stealable() ? */
 			return 0;
 		}
-
 		/*
 		 * We can acquire the lock. Remove the waiter from the
 		 * lock waiters tree.
@@ -910,25 +922,14 @@ static int __try_to_take_rt_mutex(struct
 		 * not need to be dequeued.
 		 */
 		if (rt_mutex_has_waiters(lock)) {
-			struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-
-			if (task != pown)
-				return 0;
-
-			/*
-			 * Note that RT tasks are excluded from lateral-steals
-			 * to prevent the introduction of an unbounded latency.
-			 */
-			if (rt_task(task))
-				mode = STEAL_NORMAL;
 			/*
-			 * If @task->prio is greater than or equal to
-			 * the top waiter priority (kernel view),
-			 * @task lost.
+			 * If @task->prio is greater than the top waiter
+			 * priority (kernel view), or equal to it when
+			 * lateral steal is forbidden, @task lost.
 			 */
-			if (!rt_mutex_waiter_less(task_to_waiter(task),
-						  rt_mutex_top_waiter(lock),
-						  mode))
+			if (!rt_mutex_stealable(task_to_waiter(task),
+						rt_mutex_top_waiter(lock),
+						mode))
 				return 0;
 			/*
 			 * The current top waiter stays enqueued. We

  parent reply	other threads:[~2017-06-18 17:02 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-06-16 10:56 [ANNOUNCE] v4.11.5-rt1 Sebastian Andrzej Siewior
2017-06-17  8:14 ` Mike Galbraith
2017-06-18  6:46   ` Mike Galbraith
2017-06-19  8:52   ` Sebastian Andrzej Siewior
2017-06-19 10:14     ` Mike Galbraith
2017-06-19 10:44       ` Sebastian Andrzej Siewior
2017-06-19 11:31         ` Mike Galbraith
2017-06-19 11:50           ` Sebastian Andrzej Siewior
2017-06-19 12:55             ` Mike Galbraith
2017-06-19 14:06               ` Sebastian Andrzej Siewior
2017-06-19 14:36                 ` Mike Galbraith
2017-06-19 15:03                   ` Sebastian Andrzej Siewior
2017-06-19 16:14                     ` Mike Galbraith
2017-06-19 16:27                       ` Sebastian Andrzej Siewior
2017-06-19 16:46                         ` Mike Galbraith
2017-06-19 14:08       ` Steven Rostedt
2017-06-19 14:13         ` Sebastian Andrzej Siewior
2017-06-19 14:41           ` Steven Rostedt
2017-06-19 16:29             ` Mike Galbraith
2017-06-20  7:45               ` Mike Galbraith
2017-06-22 16:34                 ` Sebastian Andrzej Siewior
2017-06-22 17:30                   ` Mike Galbraith
2017-06-22 21:36                     ` Thomas Gleixner
2017-06-23  2:00                     ` Mike Galbraith
2017-06-23 12:48                     ` Sebastian Andrzej Siewior
2017-06-18 17:01 ` Mike Galbraith [this message]
2017-06-23  7:37   ` [patch-rt v2] rtmutex: Fix lock stealing logic Mike Galbraith
2017-06-23 10:07     ` Mike Galbraith
2017-06-26 13:47       ` Sebastian Andrzej Siewior
2017-06-23 13:33     ` Steven Rostedt
2017-06-23 14:07       ` Mike Galbraith
2017-06-23 14:14         ` Steven Rostedt

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1497805317.6229.3.camel@gmx.de \
    --to=efault@gmx.de \
    --cc=bigeasy@linutronix.de \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-rt-users@vger.kernel.org \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).