From: Daniel Walker <dwalker@mvista.com>
To: linux-kernel@vger.kernel.org
Cc: Ulrich Drepper <drepper@gmail.com>,
Thomas Gleixner <tglx@linutronix.de>,
Arjan van de Ven <arjan@infradead.org>
Subject: [PATCH 4/5] rtmutex: add generic blocked_on usage
Date: Wed, 11 Jun 2008 13:49:20 -0700 [thread overview]
Message-ID: <20080611204917.396070661@mvista.com> (raw)
In-Reply-To: 20080611204916.271608740@mvista.com
[-- Attachment #1: blocked_on-rtmutex.patch --]
[-- Type: text/plain, Size: 5712 bytes --]
Modify the rtmutex to use the generic blocked_on field.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
include/linux/sched.h | 4 ++--
kernel/fork.c | 1 -
kernel/rtmutex.c | 35 ++++++++++++++++++++++++-----------
3 files changed, 26 insertions(+), 14 deletions(-)
Index: linux-2.6.25/include/linux/sched.h
===================================================================
--- linux-2.6.25.orig/include/linux/sched.h
+++ linux-2.6.25/include/linux/sched.h
@@ -1026,12 +1026,14 @@ struct sched_rt_entity {
enum lock_waiter_type {
MUTEX_WAITER = 1,
+ RT_MUTEX_WAITER,
};
struct lock_waiter_state {
enum lock_waiter_type lock_type;
union {
struct mutex_waiter *mutex_blocked_on;
+ struct rt_mutex_waiter *rt_blocked_on;
};
};
@@ -1218,8 +1220,6 @@ struct task_struct {
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct plist_head pi_waiters;
- /* Deadlock detection and priority inheritance handling */
- struct rt_mutex_waiter *pi_blocked_on;
#endif
/*
Index: linux-2.6.25/kernel/fork.c
===================================================================
--- linux-2.6.25.orig/kernel/fork.c
+++ linux-2.6.25/kernel/fork.c
@@ -850,7 +850,6 @@ static void rt_mutex_init_task(struct ta
spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
plist_head_init(&p->pi_waiters, &p->pi_lock);
- p->pi_blocked_on = NULL;
#endif
}
Index: linux-2.6.25/kernel/rtmutex.c
===================================================================
--- linux-2.6.25.orig/kernel/rtmutex.c
+++ linux-2.6.25/kernel/rtmutex.c
@@ -74,6 +74,14 @@ static void fixup_rt_mutex_waiters(struc
clear_rt_mutex_waiters(lock);
}
+static
+struct rt_mutex_waiter *rt_mutex_get_waiter(struct task_struct *task)
+{
+ if (task->blocked_on && task->blocked_on->lock_type == RT_MUTEX_WAITER)
+ return task->blocked_on->rt_blocked_on;
+ return NULL;
+}
+
/*
* We can speed up the acquire/release, if the architecture
* supports cmpxchg and if there's no debugging state to be set up
@@ -197,7 +205,7 @@ static int rt_mutex_adjust_prio_chain(st
*/
spin_lock_irqsave(&task->pi_lock, flags);
- waiter = task->pi_blocked_on;
+ waiter = rt_mutex_get_waiter(task);
/*
* Check whether the end of the boosting chain has been
* reached or the state of the chain has changed while we
@@ -411,6 +419,7 @@ static int try_to_take_rt_mutex(struct r
*/
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
+ struct lock_waiter_state *lock_waiter,
int detect_deadlock)
{
struct task_struct *owner = rt_mutex_owner(lock);
@@ -430,7 +439,7 @@ static int task_blocks_on_rt_mutex(struc
top_waiter = rt_mutex_top_waiter(lock);
plist_add(&waiter->list_entry, &lock->wait_list);
- current->pi_blocked_on = waiter;
+ current->blocked_on = lock_waiter;
spin_unlock_irqrestore(&current->pi_lock, flags);
@@ -440,7 +449,7 @@ static int task_blocks_on_rt_mutex(struc
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
+ if (rt_mutex_get_waiter(owner))
chain_walk = 1;
spin_unlock_irqrestore(&owner->pi_lock, flags);
}
@@ -501,7 +510,7 @@ static void wakeup_next_waiter(struct rt
spin_unlock_irqrestore(&current->pi_lock, flags);
/*
- * Clear the pi_blocked_on variable and enqueue a possible
+ * Clear the blocked_on variable and enqueue a possible
* waiter into the pi_waiters list of the pending owner. This
* prevents that in case the pending owner gets unboosted a
* waiter with higher priority than pending-owner->normal_prio
@@ -509,11 +518,12 @@ static void wakeup_next_waiter(struct rt
*/
spin_lock_irqsave(&pendowner->pi_lock, flags);
- WARN_ON(!pendowner->pi_blocked_on);
- WARN_ON(pendowner->pi_blocked_on != waiter);
- WARN_ON(pendowner->pi_blocked_on->lock != lock);
+ WARN_ON(!pendowner->blocked_on);
+ WARN_ON(pendowner->blocked_on->lock_type != RT_MUTEX_WAITER);
+ WARN_ON(pendowner->blocked_on->rt_blocked_on != waiter);
+ WARN_ON(pendowner->blocked_on->rt_blocked_on->lock != lock);
- pendowner->pi_blocked_on = NULL;
+ pendowner->blocked_on = NULL;
if (rt_mutex_has_waiters(lock)) {
struct rt_mutex_waiter *next;
@@ -542,7 +552,7 @@ static void remove_waiter(struct rt_mute
spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
waiter->task = NULL;
- current->pi_blocked_on = NULL;
+ current->blocked_on = NULL;
spin_unlock_irqrestore(&current->pi_lock, flags);
if (first && owner != current) {
@@ -559,7 +569,7 @@ static void remove_waiter(struct rt_mute
}
__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
+ if (rt_mutex_get_waiter(owner))
chain_walk = 1;
spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -592,7 +602,7 @@ void rt_mutex_adjust_pi(struct task_stru
spin_lock_irqsave(&task->pi_lock, flags);
- waiter = task->pi_blocked_on;
+ waiter = rt_mutex_get_waiter(task);
if (!waiter || waiter->list_entry.prio == task->prio) {
spin_unlock_irqrestore(&task->pi_lock, flags);
return;
@@ -614,6 +624,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
int detect_deadlock)
{
struct rt_mutex_waiter waiter;
+ struct lock_waiter_state lock_waiter = {
+ .lock_type = RT_MUTEX_WAITER, { .rt_blocked_on = &waiter} };
int ret = 0;
debug_rt_mutex_init_waiter(&waiter);
@@ -663,6 +675,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
*/
if (!waiter.task) {
ret = task_blocks_on_rt_mutex(lock, &waiter,
+ &lock_waiter,
detect_deadlock);
/*
* If we got woken up by the owner then start loop
--
next prev parent reply other threads:[~2008-06-11 20:56 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-06-11 20:49 [PATCH 1/5] futex: checkpatch cleanup Daniel Walker
2008-06-11 20:49 ` [PATCH 2/5] futex: update prio on requeue Daniel Walker
2008-06-12 5:22 ` Peter Zijlstra
2008-06-11 20:49 ` [PATCH 3/5] mutex debug: add generic blocked_on usage Daniel Walker
2008-06-12 5:25 ` Peter Zijlstra
2008-06-12 13:21 ` Daniel Walker
2008-06-11 20:49 ` Daniel Walker [this message]
2008-06-11 20:49 ` [PATCH 5/5] futex: fix miss ordered wakeups Daniel Walker
2008-06-12 6:07 ` Peter Zijlstra
2008-06-12 13:22 ` Daniel Walker
2008-06-12 13:57 ` Peter Zijlstra
2008-06-12 14:04 ` Daniel Walker
2008-06-12 8:56 ` Thomas Gleixner
2008-06-12 13:30 ` Daniel Walker
2008-06-12 13:33 ` Thomas Gleixner
2008-06-12 13:44 ` Daniel Walker
2008-06-12 15:24 ` Thomas Gleixner
2008-06-12 15:56 ` Daniel Walker
2008-06-12 19:55 ` Thomas Gleixner
2008-06-12 22:09 ` Daniel Walker
2008-06-12 22:43 ` Thomas Gleixner
2008-06-12 23:06 ` Daniel Walker
2008-06-12 23:30 ` Thomas Gleixner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20080611204917.396070661@mvista.com \
--to=dwalker@mvista.com \
--cc=arjan@infradead.org \
--cc=drepper@gmail.com \
--cc=linux-kernel@vger.kernel.org \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox