public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/4] lock stat for 2.6.19-rt1
@ 2006-12-04  1:54 Bill Huey
  2006-12-04  1:56 ` [PATCH 2/4] lock stat (rt/rtmutex.c mods) " Bill Huey
  0 siblings, 1 reply; 4+ messages in thread
From: Bill Huey @ 2006-12-04  1:54 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Steven Rostedt, Thomas Gleixner, linux-kernel, Eric W. Biederman,
	Peter Zijlstra, Paul E. McKenney, Bill Huey (hui)

[-- Attachment #1: Type: text/plain, Size: 129 bytes --]


This hooks into the preexisting lock definitions in the -rt kernel and
hijacks parts of lockdep for the object hash key.

bill


[-- Attachment #2: lockpatch.diff --]
[-- Type: text/plain, Size: 11998 bytes --]

============================================================
--- include/linux/mutex.h	d231debc2848a8344e1b04055ef22e489702e648
+++ include/linux/mutex.h	734c89362a3d77d460eb20eec3107e7b76fed938
@@ -15,6 +15,7 @@
 #include <linux/rt_lock.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
+#include <linux/lock_stat.h>
 
 #include <asm/atomic.h>
 
@@ -35,7 +36,8 @@ extern void
 	}
 
 extern void
-_mutex_init(struct mutex *lock, char *name, struct lock_class_key *key);
+_mutex_init(struct mutex *lock, char *name, struct lock_class_key *key
+					__COMMA_LOCK_STAT_NOTE_PARAM_DECL);
 
 extern void __lockfunc _mutex_lock(struct mutex *lock);
 extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
@@ -56,11 +58,15 @@ extern void __lockfunc _mutex_unlock(str
 # define mutex_lock_nested(l, s)	_mutex_lock(l)
 #endif
 
+#define __mutex_init(l,n)		__rt_mutex_init(&(l)->lock,	\
+					n				\
+					__COMMA_LOCK_STAT_NOTE)
+
 # define mutex_init(mutex)				\
 do {							\
 	static struct lock_class_key __key;		\
 							\
-	_mutex_init((mutex), #mutex, &__key);		\
+	_mutex_init((mutex), #mutex, &__key __COMMA_LOCK_STAT_NOTE);	\
 } while (0)
 
 #else
============================================================
--- include/linux/rt_lock.h	d7515027865666075d3e285bcec8c36e9b6cfc47
+++ include/linux/rt_lock.h	297792307de5b4aef2c7e472e2a32c727e5de3f1
@@ -13,6 +13,7 @@
 #include <linux/rtmutex.h>
 #include <asm/atomic.h>
 #include <linux/spinlock_types.h>
+#include <linux/lock_stat.h>
 
 #ifdef CONFIG_PREEMPT_RT
 /*
@@ -28,8 +29,8 @@ typedef struct {
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __SPIN_LOCK_UNLOCKED(name) \
-	(spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) \
-	, .save_state = 1, .file = __FILE__, .line = __LINE__ } }
+	(spinlock_t) { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) \
+	, .save_state = 1, .file = __FILE__, .line = __LINE__ __COMMA_LOCK_STAT_INITIALIZER} }
 #else
 # define __SPIN_LOCK_UNLOCKED(name) \
 	(spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) } }
@@ -92,7 +93,7 @@ typedef struct {
 # ifdef CONFIG_DEBUG_RT_MUTEXES
 #  define __RW_LOCK_UNLOCKED(name) (rwlock_t) \
 	{ .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name), \
-	 .save_state = 1, .file = __FILE__, .line = __LINE__ } }
+	 .save_state = 1, .file = __FILE__, .line = __LINE__ __COMMA_LOCK_STAT_INITIALIZER } }
 # else
 #  define __RW_LOCK_UNLOCKED(name) (rwlock_t) \
 	{ .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) } }
@@ -139,14 +140,16 @@ struct semaphore name = \
  */
 #define DECLARE_MUTEX_LOCKED COMPAT_DECLARE_MUTEX_LOCKED
 
-extern void fastcall __sema_init(struct semaphore *sem, int val, char *name, char *file, int line);
+extern void fastcall __sema_init(struct semaphore *sem, int val, char *name
+				__COMMA_LOCK_STAT_FN_DECL, char *_file, int _line);
 
 #define rt_sema_init(sem, val) \
-		__sema_init(sem, val, #sem, __FILE__, __LINE__)
+		__sema_init(sem, val, #sem __COMMA_LOCK_STAT_NOTE_FN, __FILE__, __LINE__)
 
-extern void fastcall __init_MUTEX(struct semaphore *sem, char *name, char *file, int line);
+extern void fastcall __init_MUTEX(struct semaphore *sem, char *name
+				__COMMA_LOCK_STAT_FN_DECL, char *_file, int _line);
 #define rt_init_MUTEX(sem) \
-		__init_MUTEX(sem, #sem, __FILE__, __LINE__)
+		__init_MUTEX(sem, #sem __COMMA_LOCK_STAT_NOTE_FN, __FILE__, __LINE__)
 
 extern void there_is_no_init_MUTEX_LOCKED_for_RT_semaphores(void);
 
@@ -247,13 +250,14 @@ extern void fastcall __rt_rwsem_init(str
 	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
 
 extern void fastcall __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
-				     struct lock_class_key *key);
+				     struct lock_class_key *key
+					__COMMA_LOCK_STAT_NOTE_PARAM_DECL);
 
 # define rt_init_rwsem(sem)				\
 do {							\
 	static struct lock_class_key __key;		\
 							\
-	__rt_rwsem_init((sem), #sem, &__key);		\
+	__rt_rwsem_init((sem), #sem, &__key __COMMA_LOCK_STAT_NOTE);		\
 } while (0)
 
 extern void fastcall rt_down_write(struct rw_semaphore *rwsem);
============================================================
--- include/linux/rtmutex.h	e6fa10297e6c20d27edba172aeb078a60c64488e
+++ include/linux/rtmutex.h	55cd2de44a52e049fa8a0da63bde6449cefeb8fe
@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <linux/plist.h>
 #include <linux/spinlock_types.h>
+#include <linux/lock_stat.h>
 
 /*
  * The rt_mutex structure
@@ -33,6 +34,9 @@ struct rt_mutex {
 	int			line;
 	void			*magic;
 #endif
+#ifdef CONFIG_LOCK_STAT
+	struct lock_stat	*lock_stat;
+#endif
 };
 
 struct rt_mutex_waiter;
@@ -54,11 +58,13 @@ struct hrtimer_sleeper;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, __FUNCTION__)
+# define rt_mutex_init(mutex)		__rt_mutex_init(mutex, __FUNCTION__ \
+						__COMMA_LOCK_STAT_NOTE_DECL_FLLN)
  extern void rt_mutex_debug_task_free(struct task_struct *tsk);
 #else
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL)
+# define rt_mutex_init(mutex)		__rt_mutex_init(mutex, NULL		\
+						__COMMA_LOCK_STAT_NOTE_DECL_FLLN)
 # define rt_mutex_debug_task_free(t)			do { } while (0)
 #endif
 
@@ -66,6 +72,7 @@ struct hrtimer_sleeper;
 	{ .wait_lock = RAW_SPIN_LOCK_UNLOCKED(mutexname) \
 	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
+	__COMMA_LOCK_STAT_INITIALIZER	\
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
 
 #define DEFINE_RT_MUTEX(mutexname) \
@@ -82,10 +89,19 @@ static inline int rt_mutex_is_locked(str
 	return lock->owner != NULL;
 }
 
-extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void __rt_mutex_init(struct rt_mutex *lock,
+				const char *name
+				__COMMA_LOCK_STAT_NOTE_PARAM_DECL);
+#ifdef CONFIG_LOCK_STAT
+extern void __rt_mutex_init_annotated(struct rt_mutex *lock, const char *name,
+					LOCK_STAT_NOTE_PARAM_DECL,
+					struct lock_stat *lsobject);
+#endif
+
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
+
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
 						int detect_deadlock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
@@ -96,6 +112,20 @@ extern void rt_mutex_unlock(struct rt_mu
 
 extern void rt_mutex_unlock(struct rt_mutex *lock);
 
+#ifdef CONFIG_LOCK_STAT
+extern void rt_mutex_lock_with_ip(struct rt_mutex *lock,
+					unsigned long ip);
+extern int rt_mutex_lock_interruptible_with_ip(struct rt_mutex *lock,
+						int detect_deadlock,
+						unsigned long ip);
+extern int rt_mutex_timed_lock_with_ip(struct rt_mutex *lock,
+					struct hrtimer_sleeper *timeout,
+					int detect_deadlock,
+					unsigned long ip);
+extern int rt_mutex_trylock_with_ip(struct rt_mutex *lock,
+					unsigned long ip);
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 # define INIT_RT_MUTEXES(tsk)						\
 	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock),	\
============================================================
--- include/linux/spinlock.h	14bc1c854dc566f95ef972f0821415de7630a573
+++ include/linux/spinlock.h	f373183297a6575c36da57dbeed5d8631a7c604a
@@ -90,6 +90,7 @@
 #include <linux/cache.h>
 #include <linux/stringify.h>
 #include <linux/irqflags.h>
+#include <linux/lock_stat.h>
 
 #include <asm/system.h>
 
@@ -152,8 +153,17 @@ extern void
 extern int __bad_rwlock_type(void);
 
 extern void
-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key
+			__COMMA_LOCK_STAT_NOTE_PARAM_DECL);
 
+#ifdef CONFIG_LOCK_STAT
+extern void
+__rt_spin_lock_init_annotated(spinlock_t *lock, char *name,
+				struct lock_class_key *key,
+				LOCK_STAT_NOTE_PARAM_DECL,
+				struct lock_stat *lsobject);
+#endif
+
 extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
@@ -170,6 +180,10 @@ extern void __lockfunc __rt_spin_unlock(
 extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
 extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
 
+#ifdef CONFIG_LOCK_STAT
+extern void __lockfunc __rt_spin_lock_with_ip(struct rt_mutex *lock, unsigned long _ip);
+#endif
+
 #ifdef CONFIG_PREEMPT_RT
 # define _spin_lock(l)			rt_spin_lock(l)
 # define _spin_lock_nested(l, s)	rt_spin_lock_nested(l, s)
@@ -216,9 +230,18 @@ do {							\
 do {							\
 	static struct lock_class_key __key;		\
 							\
-	__rt_spin_lock_init(sl, n, &__key);		\
+	__rt_spin_lock_init(sl, n, &__key __COMMA_LOCK_STAT_NOTE);		\
 } while (0)
 
+#ifdef CONFIG_LOCK_STAT
+#define _spin_lock_init_annotated(sl, n, f, l, lsobj)	\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	__rt_spin_lock_init_annotated(sl, n, &__key, f, __func__, l, lsobj);		\
+} while (0)
+#endif
+
 # ifdef CONFIG_PREEMPT_RT
 #  define _spin_can_lock(l)		(!rt_mutex_is_locked(&(l)->lock))
 #  define _spin_is_locked(l)		rt_mutex_is_locked(&(l)->lock)
@@ -299,15 +322,31 @@ extern void
 extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
 extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
 extern void
-__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key
+					__COMMA_LOCK_STAT_NOTE_PARAM_DECL);
 
 #define _rwlock_init(rwl, n, f, l)			\
 do {							\
 	static struct lock_class_key __key;		\
 							\
-	__rt_rwlock_init(rwl, n, &__key);		\
+	__rt_rwlock_init(rwl, n, &__key __COMMA_LOCK_STAT_NOTE);		\
 } while (0)
 
+#ifdef CONFIG_LOCK_STAT
+extern void
+__rt_rwlock_init_annotated(rwlock_t *rwlock, char *name,
+				struct lock_class_key *key,
+				LOCK_STAT_NOTE_PARAM_DECL,
+				struct lock_stat *lsobject);
+
+#define _rwlock_init_annotated(rwl, n, f, l, lsobject)	\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	__rt_rwlock_init_annotated(rwl, n, &__key, LOCK_STAT_NOTE, lsobject);	\
+} while (0)
+#endif
+
 #ifdef CONFIG_PREEMPT_RT
 # define rt_read_can_lock(rwl)	(!rt_mutex_is_locked(&(rwl)->lock))
 # define rt_write_can_lock(rwl)	(!rt_mutex_is_locked(&(rwl)->lock))
@@ -425,6 +464,19 @@ do {									\
 
 #define spin_lock_init(lock)		PICK_OP_INIT(_lock_init, lock)
 
+#ifdef CONFIG_LOCK_STAT
+#define PICK_OP_INIT_ANNOTATED(op, lock, lsobj)				\
+do {									\
+	if (TYPE_EQUAL((lock), raw_spinlock_t))				\
+		_raw_spin##op((raw_spinlock_t *)(lock));		\
+	else if (TYPE_EQUAL(lock, spinlock_t))				\
+		_spin##op##_annotated((spinlock_t *)(lock), #lock, __FILE__, __LINE__, lsobj); \
+	else __bad_spinlock_type();					\
+} while (0)
+
+#define spin_lock_init_annotated(lock, lsobj)	PICK_OP_INIT_ANNOTATED(_lock_init, lock, lsobj)
+#endif
+
 #ifdef CONFIG_DEBUG_SPINLOCK
   extern void __raw_rwlock_init(raw_rwlock_t *lock, const char *name,
 				struct lock_class_key *key);
============================================================
--- include/linux/wait.h	12da8de69f1f2660443a04c3df199e5d851ea2ca
+++ include/linux/wait.h	9b7448af82583bd11d18032aedfa8f2af44345f4
@@ -81,7 +81,7 @@ extern void init_waitqueue_head(wait_que
 
 extern void init_waitqueue_head(wait_queue_head_t *q);
 
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LOCK_STAT)
 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
 	({ init_waitqueue_head(&name); name; })
 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
============================================================
--- init/main.c	268ab0d5f5bdc422e2864cadf35a7bb95958de10
+++ init/main.c	9d14ac66cb0fe3b90334512c0659146aec5e241c
@@ -608,6 +608,7 @@ asmlinkage void __init start_kernel(void
 #ifdef CONFIG_PROC_FS
 	proc_root_init();
 #endif
+	lock_stat_sys_init(); //--billh
 	cpuset_init();
 	taskstats_init_early();
 	delayacct_init();

^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 2/4] lock stat (rt/rtmutex.c mods) for 2.6.19-rt1
  2006-12-04  1:54 [PATCH 1/4] lock stat for 2.6.19-rt1 Bill Huey
@ 2006-12-04  1:56 ` Bill Huey
  2006-12-04  2:00   ` [PATCH 3/4] " Bill Huey
  0 siblings, 1 reply; 4+ messages in thread
From: Bill Huey @ 2006-12-04  1:56 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Steven Rostedt, Thomas Gleixner, linux-kernel, Eric W. Biederman,
	Peter Zijlstra, Paul E. McKenney, Bill Huey (hui)

[-- Attachment #1: Type: text/plain, Size: 35 bytes --]


Mods to rt.c and rtmutex.c

bill


[-- Attachment #2: lockpatch2.diff --]
[-- Type: text/plain, Size: 25102 bytes --]

============================================================
--- init/main.c	268ab0d5f5bdc422e2864cadf35a7bb95958de10
+++ init/main.c	9d14ac66cb0fe3b90334512c0659146aec5e241c
@@ -608,6 +608,7 @@ asmlinkage void __init start_kernel(void
 #ifdef CONFIG_PROC_FS
 	proc_root_init();
 #endif
+	lock_stat_sys_init(); //--billh
 	cpuset_init();
 	taskstats_init_early();
 	delayacct_init();
============================================================
--- kernel/rt.c	5fc97ed10d5053f52488dddfefdb92e6aee2b148
+++ kernel/rt.c	3b86109e8e4163223f17c7d13a5bf53df0e04d70
@@ -66,6 +66,7 @@
 #include <linux/plist.h>
 #include <linux/fs.h>
 #include <linux/futex.h>
+#include <linux/lock_stat.h>
 
 #include "rtmutex_common.h"
 
@@ -75,6 +76,42 @@
 # include "rtmutex.h"
 #endif
 
+#ifdef CONFIG_LOCK_STAT
+#define __LOCK_STAT_RT_MUTEX_LOCK(a)				\
+	rt_mutex_lock_with_ip(a,				\
+		(unsigned long) __builtin_return_address(0))
+#else
+#define __LOCK_STAT_RT_MUTEX_LOCK(a)				\
+	rt_mutex_lock(a)
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+#define __LOCK_STAT_RT_MUTEX_LOCK_INTERRUPTIBLE(a, b)		\
+	rt_mutex_lock_interruptible_with_ip(a, b,		\
+		(unsigned long) __builtin_return_address(0))
+#else
+#define __LOCK_STAT_RT_MUTEX_LOCK_INTERRUPTIBLE(a, b)	\
+	rt_mutex_lock_interruptible(a, b)
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+#define __LOCK_STAT_RT_MUTEX_TRYLOCK(a)				\
+	rt_mutex_trylock_with_ip(a,				\
+		(unsigned long) __builtin_return_address(0))
+#else
+#define __LOCK_STAT_RT_MUTEX_TRYLOCK(a)				\
+	rt_mutex_trylock(a)
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+#define __LOCK_STAT_RT_SPIN_LOCK(a)				\
+	__rt_spin_lock_with_ip(a,				\
+		(unsigned long) __builtin_return_address(0))
+#else
+#define __LOCK_STAT_RT_SPIN_LOCK(a)				\
+	__rt_spin_lock(a)
+#endif
+
 #ifdef CONFIG_PREEMPT_RT
 /*
  * Unlock these on crash:
@@ -88,7 +125,8 @@ void zap_rt_locks(void)
 /*
  * struct mutex functions
  */
-void _mutex_init(struct mutex *lock, char *name, struct lock_class_key *key)
+void _mutex_init(struct mutex *lock, char *name, struct lock_class_key *key
+			__COMMA_LOCK_STAT_NOTE_PARAM_DECL)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	/*
@@ -97,14 +135,15 @@ void _mutex_init(struct mutex *lock, cha
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	__rt_mutex_init(&lock->lock, name);
+	__rt_mutex_init(&lock->lock, name __COMMA_LOCK_STAT_NOTE_VARS);
 }
 EXPORT_SYMBOL(_mutex_init);
 
 void __lockfunc _mutex_lock(struct mutex *lock)
 {
 	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	rt_mutex_lock(&lock->lock);
+
+	__LOCK_STAT_RT_MUTEX_LOCK(&lock->lock);
 }
 EXPORT_SYMBOL(_mutex_lock);
 
@@ -124,14 +163,14 @@ void __lockfunc _mutex_lock_nested(struc
 void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
 {
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_lock(&lock->lock);
+	__LOCK_STAT_RT_MUTEX_LOCK(&lock->lock);
 }
 EXPORT_SYMBOL(_mutex_lock_nested);
 #endif
 
 int __lockfunc _mutex_trylock(struct mutex *lock)
 {
-	int ret = rt_mutex_trylock(&lock->lock);
+	int ret = __LOCK_STAT_RT_MUTEX_TRYLOCK(&lock->lock);
 
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
@@ -152,7 +191,7 @@ int __lockfunc rt_write_trylock(rwlock_t
  */
 int __lockfunc rt_write_trylock(rwlock_t *rwlock)
 {
-	int ret = rt_mutex_trylock(&rwlock->lock);
+	int ret = __LOCK_STAT_RT_MUTEX_TRYLOCK(&rwlock->lock);
 
 	if (ret)
 		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
@@ -179,7 +218,7 @@ int __lockfunc rt_read_trylock(rwlock_t 
 	}
 	spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	ret = rt_mutex_trylock(lock);
+	ret = __LOCK_STAT_RT_MUTEX_TRYLOCK(lock);
 	if (ret)
 		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
 
@@ -190,7 +229,7 @@ void __lockfunc rt_write_lock(rwlock_t *
 void __lockfunc rt_write_lock(rwlock_t *rwlock)
 {
 	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-	__rt_spin_lock(&rwlock->lock);
+	__LOCK_STAT_RT_SPIN_LOCK(&rwlock->lock);
 }
 EXPORT_SYMBOL(rt_write_lock);
 
@@ -210,11 +249,44 @@ void __lockfunc rt_read_lock(rwlock_t *r
 		return;
 	}
 	spin_unlock_irqrestore(&lock->wait_lock, flags);
-	__rt_spin_lock(lock);
+	__LOCK_STAT_RT_SPIN_LOCK(lock);
 }
 
 EXPORT_SYMBOL(rt_read_lock);
 
+#ifdef CONFIG_LOCK_STAT
+void __lockfunc rt_write_lock_with_ip(rwlock_t *rwlock, unsigned long ip)
+{
+	rwlock_acquire(&rwlock->dep_map, 0, 0, ip);
+	__rt_spin_lock_with_ip(&rwlock->lock, ip);
+}
+EXPORT_SYMBOL(rt_write_lock_with_ip);
+
+void __lockfunc rt_read_lock_with_ip(rwlock_t *rwlock, unsigned long ip)
+{
+	unsigned long flags;
+	struct rt_mutex *lock = &rwlock->lock;
+
+	/*
+	 * NOTE: we handle it as a write-lock:
+	 */
+	rwlock_acquire(&rwlock->dep_map, 0, 0, ip);
+	/*
+	 * Read locks within the write lock succeed.
+	 */
+	spin_lock_irqsave(&lock->wait_lock, flags);
+	if (rt_mutex_real_owner(lock) == current) {
+		spin_unlock_irqrestore(&lock->wait_lock, flags);
+		rwlock->read_depth++;
+		return;
+	}
+	spin_unlock_irqrestore(&lock->wait_lock, flags);
+	__rt_spin_lock_with_ip(lock, ip);
+}
+
+EXPORT_SYMBOL(rt_read_lock_with_ip);
+#endif
+
 void __lockfunc rt_write_unlock(rwlock_t *rwlock)
 {
 	/* NOTE: we always pass in '1' for nested, for simplicity */
@@ -246,7 +318,12 @@ unsigned long __lockfunc rt_write_lock_i
 
 unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
 {
+#ifdef CONFIG_LOCK_STAT
+	rt_write_lock_with_ip(rwlock,
+				(unsigned long) __builtin_return_address(0));
+#else
 	rt_write_lock(rwlock);
+#endif
 
 	return 0;
 }
@@ -254,13 +331,19 @@ unsigned long __lockfunc rt_read_lock_ir
 
 unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
 {
+#ifdef CONFIG_LOCK_STAT
+	rt_read_lock_with_ip(rwlock,
+				(unsigned long) __builtin_return_address(0));
+#else
 	rt_read_lock(rwlock);
+#endif
 
 	return 0;
 }
 EXPORT_SYMBOL(rt_read_lock_irqsave);
 
-void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key
+			__COMMA_LOCK_STAT_NOTE_PARAM_DECL)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	/*
@@ -269,11 +352,30 @@ void __rt_rwlock_init(rwlock_t *rwlock, 
 	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
 	lockdep_init_map(&rwlock->dep_map, name, key, 0);
 #endif
-	__rt_mutex_init(&rwlock->lock, name);
+	__rt_mutex_init(&rwlock->lock, name __COMMA_LOCK_STAT_NOTE_VARS);
 	rwlock->read_depth = 0;
 }
 EXPORT_SYMBOL(__rt_rwlock_init);
 
+#ifdef CONFIG_LOCK_STAT
+void __rt_rwlock_init_annotated(rwlock_t *rwlock, char *name,
+				struct lock_class_key *key,
+				LOCK_STAT_NOTE_PARAM_DECL,
+				struct lock_stat *lsobject)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+	lockdep_init_map(&rwlock->dep_map, name, key, 0);
+#endif
+	__rt_mutex_init_annotated(&rwlock->lock, name, LOCK_STAT_NOTE_VARS, lsobject);
+	rwlock->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwlock_init_annotated);
+#endif
+
 /*
  * rw_semaphores
  */
@@ -335,7 +437,7 @@ int fastcall rt_down_write_trylock(struc
 
 int fastcall rt_down_write_trylock(struct rw_semaphore *rwsem)
 {
-	int ret = rt_mutex_trylock(&rwsem->lock);
+	int ret = __LOCK_STAT_RT_MUTEX_TRYLOCK(&rwsem->lock);
 
 	if (ret)
 		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
@@ -346,14 +448,14 @@ void fastcall rt_down_write(struct rw_se
 void fastcall rt_down_write(struct rw_semaphore *rwsem)
 {
 	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
-	rt_mutex_lock(&rwsem->lock);
+	__LOCK_STAT_RT_MUTEX_LOCK(&rwsem->lock);
 }
 EXPORT_SYMBOL(rt_down_write);
 
 void fastcall rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
 {
 	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_lock(&rwsem->lock);
+	__LOCK_STAT_RT_MUTEX_LOCK(&rwsem->lock);
 }
 EXPORT_SYMBOL(rt_down_write_nested);
 
@@ -374,7 +476,7 @@ int fastcall rt_down_read_trylock(struct
 	}
 	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
 
-	ret = rt_mutex_trylock(&rwsem->lock);
+	ret = __LOCK_STAT_RT_MUTEX_TRYLOCK(&rwsem->lock);
 	if (ret)
 		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
 	return ret;
@@ -398,7 +500,7 @@ static void __rt_down_read(struct rw_sem
 		return;
 	}
 	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
-	rt_mutex_lock(&rwsem->lock);
+	__LOCK_STAT_RT_MUTEX_LOCK(&rwsem->lock);
 }
 
 void fastcall rt_down_read(struct rw_semaphore *rwsem)
@@ -433,14 +535,15 @@ void fastcall rt_down_read_non_owner(str
 		return;
 	}
 	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
-	rt_mutex_lock(&rwsem->lock);
+	__LOCK_STAT_RT_MUTEX_LOCK(&rwsem->lock);
 }
 EXPORT_SYMBOL(rt_down_read_non_owner);
 
 #endif
 
 void fastcall __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
-			      struct lock_class_key *key)
+			      struct lock_class_key *key
+				__COMMA_LOCK_STAT_NOTE_PARAM_DECL)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	/*
@@ -449,7 +552,7 @@ void fastcall __rt_rwsem_init(struct rw_
 	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
 	lockdep_init_map(&rwsem->dep_map, name, key, 0);
 #endif
-	__rt_mutex_init(&rwsem->lock, name);
+	__rt_mutex_init(&rwsem->lock, name __COMMA_LOCK_STAT_NOTE_VARS);
 	rwsem->read_depth = 0;
 }
 EXPORT_SYMBOL(__rt_rwsem_init);
@@ -478,7 +581,7 @@ void fastcall rt_down(struct semaphore *
 
 void fastcall rt_down(struct semaphore *sem)
 {
-	rt_mutex_lock(&sem->lock);
+	__LOCK_STAT_RT_MUTEX_LOCK(&sem->lock);
 	__down_complete(sem);
 }
 EXPORT_SYMBOL(rt_down);
@@ -487,7 +590,7 @@ int fastcall rt_down_interruptible(struc
 {
 	int ret;
 
-	ret = rt_mutex_lock_interruptible(&sem->lock, 0);
+	ret = __LOCK_STAT_RT_MUTEX_LOCK_INTERRUPTIBLE(&sem->lock, 0);
 	if (ret)
 		return ret;
 	__down_complete(sem);
@@ -507,7 +610,7 @@ int fastcall rt_down_trylock(struct sema
 	 * embedded mutex internally. It would be quite complex to remove
 	 * these transient failures so lets try it the simple way first:
 	 */
-	if (rt_mutex_trylock(&sem->lock)) {
+	if (__LOCK_STAT_RT_MUTEX_TRYLOCK(&sem->lock)) {
 		__down_complete(sem);
 		return 0;
 	}
@@ -534,26 +637,26 @@ EXPORT_SYMBOL(rt_up);
 }
 EXPORT_SYMBOL(rt_up);
 
-void fastcall __sema_init(struct semaphore *sem, int val,
-			  char *name, char *file, int line)
+void fastcall __sema_init(struct semaphore *sem, int val, char *name
+			__COMMA_LOCK_STAT_FN_DECL, char *_file, int _line)
 {
 	atomic_set(&sem->count, val);
 	switch (val) {
 	case 0:
-		__rt_mutex_init(&sem->lock, name);
-		rt_mutex_lock(&sem->lock);
+		__rt_mutex_init(&sem->lock, name __COMMA_LOCK_STAT_NOTE_VARS);
+		rt_mutex_lock_with_ip(&sem->lock, (unsigned long) __builtin_return_address(0));
 		break;
 	default:
-		__rt_mutex_init(&sem->lock, name);
+		__rt_mutex_init(&sem->lock, name __COMMA_LOCK_STAT_NOTE_VARS);
 		break;
 	}
 }
 EXPORT_SYMBOL(__sema_init);
 
-void fastcall __init_MUTEX(struct semaphore *sem, char *name, char *file,
-			   int line)
+void fastcall __init_MUTEX(struct semaphore *sem, char *name
+			__COMMA_LOCK_STAT_FN_DECL, char *_file, int _line)
 {
-	__sema_init(sem, 1, name, file, line);
+	__sema_init(sem, 1, name __COMMA_LOCK_STAT_FN_VAR, _file, _line);
 }
 EXPORT_SYMBOL(__init_MUTEX);
 
============================================================
--- kernel/rtmutex.c	5fbb6943266e0a2de638851c887e331999eaa16d
+++ kernel/rtmutex.c	e7b2ec607eeb126d358ca71e97feb3cdc2799cd6
@@ -15,6 +15,10 @@
 #include <linux/sched.h>
 #include <linux/timer.h>
 
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/lock_stat.h>
+
 #include "rtmutex_common.h"
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -68,6 +72,11 @@ rt_mutex_set_owner(struct rt_mutex *lock
 	lock->owner = (struct task_struct *)val;
 }
 
+static inline void rt_mutex_clear_owner(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *) NULL;
+}
+
 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 {
 	lock->owner = (struct task_struct *)
@@ -146,6 +155,7 @@ static void __rt_mutex_adjust_prio(struc
 
 	if (task->prio != prio)
 		rt_mutex_setprio(task, prio);
+	/* reschedules task if the priority is lower, holds the run queue lock */
 }
 
 /*
@@ -623,12 +633,14 @@ rt_spin_lock_fastlock(struct rt_mutex *l
 
 static inline void
 rt_spin_lock_fastlock(struct rt_mutex *lock,
-		void fastcall (*slowfn)(struct rt_mutex *lock))
+		void fastcall (*slowfn)(struct rt_mutex *lock
+						__COMMA_LOCK_STAT_IP_DECL)
+				__COMMA_LOCK_STAT_IP_DECL)
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
 		rt_mutex_deadlock_account_lock(lock, current);
 	else
-		slowfn(lock);
+		slowfn(lock __COMMA_LOCK_STAT_IP);
 }
 
 static inline void
@@ -652,11 +664,15 @@ static void fastcall noinline __sched
  * sleep/wakeup event loops.
  */
 static void fastcall noinline __sched
-rt_spin_lock_slowlock(struct rt_mutex *lock)
+rt_spin_lock_slowlock(struct rt_mutex *lock __COMMA_LOCK_STAT_IP_DECL)
 {
 	struct rt_mutex_waiter waiter;
 	unsigned long saved_state, state, flags;
 
+#ifdef CONFIG_LOCK_STAT
+	lock_stat_note_contention(&lock->lock_stat, _ip);
+#endif
+
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
 
@@ -775,22 +791,40 @@ void __lockfunc rt_spin_lock(spinlock_t 
 
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock
+						__COMMA_LOCK_STAT_RET_IP);
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(rt_spin_lock);
 
 void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
 {
-	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock
+						__COMMA_LOCK_STAT_RET_IP);
 }
 EXPORT_SYMBOL(__rt_spin_lock);
 
+#ifdef CONFIG_LOCK_STAT
+void __lockfunc rt_spin_lock_with_ip(spinlock_t *lock, unsigned long _ip)
+{
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, _ip);
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock_with_ip);
+
+void __lockfunc __rt_spin_lock_with_ip(struct rt_mutex *lock, unsigned long _ip)
+{
+	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, _ip);
+}
+EXPORT_SYMBOL(__rt_spin_lock_with_ip);
+#endif
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 {
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock
+						__COMMA_LOCK_STAT_RET_IP);
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(rt_spin_lock_nested);
@@ -823,10 +857,20 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
 }
 EXPORT_SYMBOL(rt_spin_unlock_wait);
 
+#if 0
+//#ifdef CONFIG_LOCK_STAT
+#define __RT_MUTEX_TRYLOCK_WITH_IP(a)	\
+	rt_mutex_trylock_with_ip(a, (unsigned long) __builtin_return_address(0));
+#else
+#define __RT_MUTEX_TRYLOCK_WITH_IP(a)	\
+	rt_mutex_trylock(a);
+#endif
+
 int __lockfunc rt_spin_trylock(spinlock_t *lock)
 {
-	int ret = rt_mutex_trylock(&lock->lock);
+	int ret;
 
+	ret = __RT_MUTEX_TRYLOCK_WITH_IP(&lock->lock)
 	if (ret)
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -839,7 +883,8 @@ int __lockfunc rt_spin_trylock_irqsave(s
 	int ret;
 
 	*flags = 0;
-	ret = rt_mutex_trylock(&lock->lock);
+
+	ret = __RT_MUTEX_TRYLOCK_WITH_IP(&lock->lock)
 	if (ret)
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -861,7 +906,8 @@ void
 EXPORT_SYMBOL(_atomic_dec_and_spin_lock);
 
 void
-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key
+			__COMMA_LOCK_STAT_NOTE_PARAM_DECL)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	/*
@@ -870,12 +916,31 @@ __rt_spin_lock_init(spinlock_t *lock, ch
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	__rt_mutex_init(&lock->lock, name);
+	__rt_mutex_init(&lock->lock, name __COMMA_LOCK_STAT_NOTE_VARS);
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 
+#ifdef CONFIG_LOCK_STAT
+void
+__rt_spin_lock_init_annotated(spinlock_t *lock, char *name,
+				struct lock_class_key *key,
+				LOCK_STAT_NOTE_PARAM_DECL,
+				struct lock_stat *lsobject)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
+	__rt_mutex_init_annotated(&lock->lock, name, LOCK_STAT_NOTE_VARS, lsobject);
+}
+EXPORT_SYMBOL(__rt_spin_lock_init_annotated);
+#endif
 
+#endif
+
 #ifdef CONFIG_PREEMPT_BKL
 
 static inline int rt_release_bkl(struct rt_mutex *lock, unsigned long flags)
@@ -914,12 +979,16 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock)
+		  int detect_deadlock
+			__COMMA_LOCK_STAT_IP_DECL)
 {
 	int ret = 0, saved_lock_depth = -1;
 	struct rt_mutex_waiter waiter;
 	unsigned long flags;
 
+#ifdef CONFIG_LOCK_STAT
+	lock_stat_note_contention(&lock->lock_stat, _ip);
+#endif
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
 
@@ -937,7 +1006,11 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 	 * possible deadlock in the scheduler.
 	 */
 	if (unlikely(current->lock_depth >= 0))
+#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = rt_release_bkl(lock, flags);
+#else
+		saved_lock_depth = rt_release_bkl(lock);
+#endif
 
 	set_current_state(state);
 
@@ -1040,11 +1113,16 @@ static inline int
  * Slow path try-lock function:
  */
 static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+rt_mutex_slowtrylock(struct rt_mutex *lock __COMMA_LOCK_STAT_IP_DECL)
 {
 	unsigned long flags;
 	int ret = 0;
 
+#ifdef CONFIG_LOCK_STAT
+//	lock_stat_note_contention(&lock->lock_stat, _ip);
+#endif
+
 	spin_lock_irqsave(&lock->wait_lock, flags);
 
 	if (likely(rt_mutex_owner(lock) != current)) {
@@ -1103,38 +1181,62 @@ rt_mutex_fastlock(struct rt_mutex *lock,
 		  int detect_deadlock,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock))
+				int detect_deadlock __COMMA_LOCK_STAT_IP_DECL)
+				__COMMA_LOCK_STAT_IP_DECL)
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock);
+		return slowfn(lock, state, NULL, detect_deadlock
+							__COMMA_LOCK_STAT_RET_IP);
 }
 
+#ifdef CONFIG_LOCK_STAT
 static inline int
+rt_mutex_fastlock_with_ip(struct rt_mutex *lock, int state,
+		  int detect_deadlock,
+		  int (*slowfn)(struct rt_mutex *lock, int state,
+				struct hrtimer_sleeper *timeout,
+				int detect_deadlock, unsigned long _ip),
+		  unsigned long _ip)
+{
+	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+		rt_mutex_deadlock_account_lock(lock, current);
+		return 0;
+	} else
+		return slowfn(lock, state, NULL, detect_deadlock, _ip);
+}
+#endif
+
+static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout, int detect_deadlock,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock))
+				      int detect_deadlock
+					__COMMA_LOCK_STAT_IP_DECL)
+			__COMMA_LOCK_STAT_IP_DECL)
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock);
+		return slowfn(lock, state, timeout, detect_deadlock
+							__COMMA_LOCK_STAT_IP);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock))
+		     int (*slowfn)(struct rt_mutex *lock
+					__COMMA_LOCK_STAT_IP_DECL)
+			__COMMA_LOCK_STAT_IP_DECL)
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
-	return slowfn(lock);
+	return slowfn(lock __COMMA_LOCK_STAT_IP);
 }
 
 static inline void
@@ -1147,6 +1249,17 @@ rt_mutex_fastunlock(struct rt_mutex *loc
 		slowfn(lock);
 }
 
+#define PANIC_IF_IN_ATOMIC()					\
+	if (							\
+	    (system_state == SYSTEM_RUNNING) &&			\
+	     in_atomic() &&					\
+	     !oops_in_progress &&				\
+	     !current->exit_state				\
+	     ) {						\
+		panic("%s: in atomic: " "%s/0x%08x/%d\n",	\
+			__func__, current->comm, preempt_count(), current->pid);	\
+	}
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -1155,11 +1268,26 @@ void __sched rt_mutex_lock(struct rt_mut
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
+	PANIC_IF_IN_ATOMIC();
 
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock
+							__COMMA_LOCK_STAT_RET_IP);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
+#ifdef CONFIG_LOCK_STAT
+void __sched rt_mutex_lock_with_ip(struct rt_mutex *lock, unsigned long ip)
+{
+	might_sleep();
+	PANIC_IF_IN_ATOMIC();
+
+	rt_mutex_fastlock_with_ip(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock,
+										ip);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_with_ip);
+#endif
+
+
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
  *
@@ -1175,11 +1303,23 @@ int __sched rt_mutex_lock_interruptible(
 						 int detect_deadlock)
 {
 	might_sleep();
+	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
+				 detect_deadlock, rt_mutex_slowlock
+							__COMMA_LOCK_STAT_RET_IP);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
+#ifdef CONFIG_LOCK_STAT
+int __sched rt_mutex_lock_interruptible_with_ip(struct rt_mutex *lock,
+						int detect_deadlock,
+						unsigned long ip)
+{
+	might_sleep();
 	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
-				 detect_deadlock, rt_mutex_slowlock);
+				 detect_deadlock, rt_mutex_slowlock, ip);
 }
-EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible_with_ip);
+#endif
 
 /**
  * rt_mutex_lock_interruptible_ktime - lock a rt_mutex interruptible
@@ -1203,7 +1343,8 @@ rt_mutex_timed_lock(struct rt_mutex *loc
 	might_sleep();
 
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				       detect_deadlock, rt_mutex_slowlock);
+				       detect_deadlock, rt_mutex_slowlock
+					__COMMA_LOCK_STAT_RET_IP);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1216,7 +1357,8 @@ int __sched rt_mutex_trylock(struct rt_m
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock
+							__COMMA_LOCK_STAT_RET_IP);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
@@ -1231,6 +1373,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
+#ifdef CONFIG_LOCK_STAT
+int __sched rt_mutex_trylock_with_ip(struct rt_mutex *lock, unsigned long _ip)
+{
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock, _ip);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_trylock_with_ip);
+#endif
+
 /***
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
@@ -1258,16 +1408,50 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  *
  * Initializing of a locked rt lock is not allowed
  */
-void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name
+			__COMMA_LOCK_STAT_NOTE_PARAM_DECL)
 {
-	lock->owner = NULL;
-	spin_lock_init(&lock->wait_lock);
+	rt_mutex_clear_owner(lock);
+	spin_lock_init(&lock->wait_lock); /* raw spinlock here */
 	plist_head_init(&lock->wait_list, &lock->wait_lock);
 
 	debug_rt_mutex_init(lock, name);
+#ifdef CONFIG_LOCK_STAT
+	lock->lock_stat = NULL;
+	lock_stat_scoped_attach(&lock->lock_stat, LOCK_STAT_NOTE_VARS);
+#endif
 }
 EXPORT_SYMBOL_GPL(__rt_mutex_init);
 
+/*
+ * Annotated version of lock_stat
+ */
+#ifdef CONFIG_LOCK_STAT
+void __rt_mutex_init_annotated(struct rt_mutex *lock, const char *name,
+				LOCK_STAT_NOTE_PARAM_DECL,
+				struct lock_stat *lsobject)
+{
+	rt_mutex_clear_owner(lock);
+	spin_lock_init(&lock->wait_lock); /* raw spinlock here */
+	plist_head_init(&lock->wait_list, &lock->wait_lock);
+
+	debug_rt_mutex_init(lock, name);
+
+	BUG_ON(!lsobject);
+	lock->lock_stat = lsobject;
+
+	if(!lock_stat_is_initialized(&lock->lock_stat))
+	{
+		ksym_strcpy(lock->lock_stat->function, _function);
+		lock->lock_stat->file	= (char *) _file;
+		lock->lock_stat->line	= _line;
+	}
+
+	atomic_inc(&lock->lock_stat->ninlined);
+}
+#endif
+EXPORT_SYMBOL_GPL(__rt_mutex_init_annotated);
+
 /**
  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
  *				proxy owner
@@ -1281,7 +1465,7 @@ void rt_mutex_init_proxy_locked(struct r
 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
-	__rt_mutex_init(lock, NULL);
+	__rt_mutex_init(lock, NULL __COMMA_LOCK_STAT_NOTE);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner, 0);
 	rt_mutex_deadlock_account_lock(lock, proxy_owner);

^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 3/4] lock stat (rt/rtmutex.c mods) for 2.6.19-rt1
  2006-12-04  1:56 ` [PATCH 2/4] lock stat (rt/rtmutex.c mods) " Bill Huey
@ 2006-12-04  2:00   ` Bill Huey
  2006-12-04  2:01     ` Bill Huey
  0 siblings, 1 reply; 4+ messages in thread
From: Bill Huey @ 2006-12-04  2:00 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Steven Rostedt, Thomas Gleixner, linux-kernel, Eric W. Biederman,
	Peter Zijlstra, Paul E. McKenney, Bill Huey (hui)

[-- Attachment #1: Type: text/plain, Size: 292 bytes --]


Rudimentary annotations to the lock initializers to avoid the binary
tree search before attachment. For things like inodes that are created
and destroyed constantly this might be useful to get around some
overhead.

Sorry about the patch numbering order. I think I screwed up on it.

bill


[-- Attachment #2: annotation.diff --]
[-- Type: text/plain, Size: 2439 bytes --]

============================================================
--- arch/xtensa/platform-iss/network.c	eee47b0ca011d1c327ce7aff0c9a7547695d3a1f
+++ arch/xtensa/platform-iss/network.c	76b16d29a46677a45d56b64983e0783959aa2160
@@ -648,6 +648,8 @@ static int iss_net_configure(int index, 
 		.have_mac		= 0,
 		});
 
+	spin_lock_init(&lp->lock);
+
 	/*
 	 * Try all transport protocols.
 	 * Note: more protocols can be added by adding '&& !X_init(lp, eth)'.
============================================================
--- fs/dcache.c	20226054e6d6b080847e7a892d0b47a7ad042288
+++ fs/dcache.c	64d2b2b78b50dc2da7e409f2a9721b80c8fbbaf3
@@ -884,7 +884,7 @@ struct dentry *d_alloc(struct dentry * p
 
 	atomic_set(&dentry->d_count, 1);
 	dentry->d_flags = DCACHE_UNHASHED;
-	spin_lock_init(&dentry->d_lock);
+	spin_lock_init_annotated(&dentry->d_lock, &_lock_stat_d_alloc_entry);
 	dentry->d_inode = NULL;
 	dentry->d_parent = NULL;
 	dentry->d_sb = NULL;
============================================================
--- fs/xfs/support/ktrace.c	1136cf72f9273718da47405b594caebaa59b66d3
+++ fs/xfs/support/ktrace.c	122729d6084fa84115b8f8f75cc55c585bfe3676
@@ -162,6 +162,7 @@ ktrace_enter(
 
 	ASSERT(ktp != NULL);
 
+	spin_lock_init(&wrap_lock); //--billh
 	/*
 	 * Grab an entry by pushing the index up to the next one.
 	 */
============================================================
--- include/linux/eventpoll.h	bd142a622609d04952fac6215586fff353dab729
+++ include/linux/eventpoll.h	43271ded1a3b9f40beb37aaff9e02fadeecb4655
@@ -15,6 +15,7 @@
 #define _LINUX_EVENTPOLL_H
 
 #include <linux/types.h>
+#include <linux/lock_stat.h>
 
 
 /* Valid opcodes to issue to sys_epoll_ctl() */
@@ -55,7 +56,7 @@ static inline void eventpoll_init_file(s
 static inline void eventpoll_init_file(struct file *file)
 {
 	INIT_LIST_HEAD(&file->f_ep_links);
-	spin_lock_init(&file->f_ep_lock);
+	spin_lock_init_annotated(&file->f_ep_lock, &_lock_stat_eventpoll_init_file_entry);
 }
 
 
============================================================
--- net/tipc/node.c	d6ddb08c5332517b0eff3b72ee0adc48f47801ff
+++ net/tipc/node.c	9712633ceb8f939fc14a0a4861f7121840beff1d
@@ -77,7 +77,7 @@ struct node *tipc_node_create(u32 addr)
 		
 	memset(n_ptr, 0, sizeof(*n_ptr));
 	n_ptr->addr = addr;
-                spin_lock_init(&n_ptr->lock);
+	spin_lock_init(&n_ptr->lock);
 	INIT_LIST_HEAD(&n_ptr->nsub);
 	n_ptr->owner = c_ptr;
 	tipc_cltr_attach_node(c_ptr, n_ptr);

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH 3/4] lock stat (rt/rtmutex.c mods) for 2.6.19-rt1
  2006-12-04  2:00   ` [PATCH 3/4] " Bill Huey
@ 2006-12-04  2:01     ` Bill Huey
  0 siblings, 0 replies; 4+ messages in thread
From: Bill Huey @ 2006-12-04  2:01 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Steven Rostedt, Thomas Gleixner, linux-kernel, Eric W. Biederman,
	Peter Zijlstra, Paul E. McKenney, Bill Huey (hui)

On Sun, Dec 03, 2006 at 06:00:09PM -0800, Bill Huey wrote:
> Rudimentary annotations to the lock initializers to avoid the binary
> tree search before attachment. For things like inodes that are created
> and destroyed constantly this might be useful to get around some
> overhead.
> 
> Sorry about the patch numbering order. I think I screwed up on it.

I also screwed up on the title for the email contents. Sorry about that.

bill


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2006-12-04  2:01 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-12-04  1:54 [PATCH 1/4] lock stat for 2.6.19-rt1 Bill Huey
2006-12-04  1:56 ` [PATCH 2/4] lock stat (rt/rtmutex.c mods) " Bill Huey
2006-12-04  2:00   ` [PATCH 3/4] " Bill Huey
2006-12-04  2:01     ` Bill Huey

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox