* [RFC 1/4] lockdep: additional lock specific information when dumping locks
@ 2015-01-12 14:57 Sasha Levin
2015-01-12 14:57 ` [RFC 2/4] locking/mutex: additional lock " Sasha Levin
` (3 more replies)
0 siblings, 4 replies; 10+ messages in thread
From: Sasha Levin @ 2015-01-12 14:57 UTC (permalink / raw)
To: linux-kernel; +Cc: peterz, mingo, Sasha Levin
When dumping held locks, it might be useful to get additional lock-specific
information about each lock.
This is mainly useful to figure out who really holds each lock rather than
who's just waiting on it, but it could be extended further or extended to
the other lockdep users if required.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
include/linux/lockdep.h | 10 ++++++++++
kernel/locking/lockdep.c | 21 +++++++++++++++++++--
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 74ab231..2f4c3fe 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -143,6 +143,8 @@ struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
+enum LOCK_TYPE { LOCKTYPE_NONE, };
+
/*
* Map the lock object (the lock instance) to the lock-class object.
* This is embedded into specific lock instances:
@@ -151,6 +153,7 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
+ enum LOCK_TYPE type;
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
@@ -279,6 +282,10 @@ extern void lockdep_on(void);
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass,
+ enum LOCK_TYPE t);
+
/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
@@ -301,6 +308,8 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
#define lockdep_set_subclass(lock, sub) \
lockdep_init_map(&(lock)->dep_map, #lock, \
(lock)->dep_map.key, sub)
+#define lockdep_set_class_type(lock, key, type) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, type)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
@@ -395,6 +404,7 @@ static inline void lockdep_on(void)
#define lockdep_set_class_and_subclass(lock, key, sub) \
do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub) do { } while (0)
+#define lockdep_set_class_type(lock, key, type) do { } while (0)
#define lockdep_set_novalidate_class(lock) do { } while (0)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 88d0d44..de4c9aa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -508,6 +508,13 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
usage[i] = '\0';
}
+static void get_lock_info(enum LOCK_TYPE t, struct lockdep_map *map)
+{
+ switch (t) {
+ case LOCKTYPE_NONE: return;
+ }
+}
+
static void __print_lock_name(struct lock_class *class)
{
char str[KSYM_NAME_LEN];
@@ -554,6 +561,7 @@ static void print_lock(struct held_lock *hlock)
print_lock_name(hlock_class(hlock));
printk(", at: ");
print_ip_sym(hlock->acquire_ip);
+ get_lock_info(hlock->instance->type, hlock->instance);
}
static void lockdep_print_held_locks(struct task_struct *curr)
@@ -2951,8 +2959,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
* Initialize a lock instance's lock-class mapping info:
*/
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass)
+void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass,
+ enum LOCK_TYPE t)
{
int i;
@@ -2974,6 +2983,7 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
}
lock->name = name;
+ lock->type = t;
/*
* No key, no joy, we need to hash something.
@@ -2999,6 +3009,13 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (subclass)
register_lock_class(lock, subclass, 1);
}
+EXPORT_SYMBOL_GPL(lockdep_init_map_type);
+
+void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+{
+ lockdep_init_map_type(lock, name, key, subclass, LOCKTYPE_NONE);
+}
EXPORT_SYMBOL_GPL(lockdep_init_map);
struct lock_class_key __lockdep_no_validate__;
--
1.7.10.4
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC 2/4] locking/mutex: additional lock information when dumping locks
2015-01-12 14:57 [RFC 1/4] lockdep: additional lock specific information when dumping locks Sasha Levin
@ 2015-01-12 14:57 ` Sasha Levin
2015-01-12 14:57 ` [RFC 3/4] locking/rwsem: " Sasha Levin
` (2 subsequent siblings)
3 siblings, 0 replies; 10+ messages in thread
From: Sasha Levin @ 2015-01-12 14:57 UTC (permalink / raw)
To: linux-kernel; +Cc: peterz, mingo, Sasha Levin
Show the counter and the owner of the lock when dumping held locks in the
system.
This is useful to figure out who really holds a lock and how many waiters
it has.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
drivers/usb/storage/usb.c | 2 +-
fs/inode.c | 3 ++-
fs/super.c | 3 ++-
include/linux/lockdep.h | 2 +-
include/linux/mutex-debug.h | 2 ++
include/linux/mutex.h | 2 +-
kernel/events/core.c | 3 ++-
kernel/locking/lockdep.c | 8 +++++++-
kernel/locking/mutex-debug.c | 14 +++++++++++++-
9 files changed, 31 insertions(+), 8 deletions(-)
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index d468d02..b0a2565 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -165,7 +165,7 @@ static void us_set_lock_class(struct mutex *mutex,
BUG_ON(i == config->desc.bNumInterfaces);
- lockdep_set_class(mutex, &us_interface_key[i]);
+ lockdep_set_class_type(mutex, &us_interface_key[i], LOCKTYPE_MUTEX);
}
#else
diff --git a/fs/inode.c b/fs/inode.c
index 3a53b1d..8e24ff7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -160,7 +160,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
mutex_init(&inode->i_mutex);
- lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+ lockdep_set_class_type(&inode->i_mutex, &sb->s_type->i_mutex_key,
+ LOCKTYPE_MUTEX);
atomic_set(&inode->i_dio_count, 0);
diff --git a/fs/super.c b/fs/super.c
index eae088f..d6b585c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -217,7 +217,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
s->s_count = 1;
atomic_set(&s->s_active, 1);
mutex_init(&s->s_vfs_rename_mutex);
- lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
+ lockdep_set_class_type(&s->s_vfs_rename_mutex,
+ &type->s_vfs_rename_key, LOCKTYPE_MUTEX);
mutex_init(&s->s_dquot.dqio_mutex);
mutex_init(&s->s_dquot.dqonoff_mutex);
s->s_maxbytes = MAX_NON_LFS;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2f4c3fe..cab929b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -143,7 +143,7 @@ struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
-enum LOCK_TYPE { LOCKTYPE_NONE, };
+enum LOCK_TYPE { LOCKTYPE_NONE, LOCKTYPE_MUTEX, };
/*
* Map the lock object (the lock instance) to the lock-class object.
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 4ac8b19..ed125d3 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -21,4 +21,6 @@ do { \
extern void mutex_destroy(struct mutex *lock);
+extern void mutex_print_debug(const struct mutex *lock);
+
#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index cc31498..cba876d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -102,7 +102,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
+ , .dep_map = { .name = #lockname, .type = LOCKTYPE_MUTEX }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c1ee7f..fef4343 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6777,7 +6777,8 @@ skip_type:
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx);
- lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
+ lockdep_set_class_type(&cpuctx->ctx.mutex, &cpuctx_mutex,
+ LOCKTYPE_MUTEX);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index de4c9aa..07c337d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -510,8 +510,14 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
static void get_lock_info(enum LOCK_TYPE t, struct lockdep_map *map)
{
+ struct mutex *mtx;
+
switch (t) {
- case LOCKTYPE_NONE: return;
+ case LOCKTYPE_NONE: break;
+ case LOCKTYPE_MUTEX:
+ mtx = container_of(map, struct mutex, dep_map);
+ mutex_print_debug(mtx);
+ break;
}
}
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 5cf6731..e97f763 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -98,7 +98,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
+ lockdep_init_map_type(&lock->dep_map, name, key, 0, LOCKTYPE_MUTEX);
#endif
lock->magic = lock;
}
@@ -118,3 +118,15 @@ void mutex_destroy(struct mutex *lock)
}
EXPORT_SYMBOL_GPL(mutex_destroy);
+
+/* Dump a mutex's counter and owner for lockdep's held-locks report. */
+void mutex_print_debug(const struct mutex *lock)
+{
+	/* Snapshot owner once: it may be cleared concurrently by unlock,
+	 * so re-reading lock->owner after the NULL check could crash. */
+	struct task_struct *owner = ACCESS_ONCE(lock->owner);
+	int c = atomic_read(&lock->count);
+
+	printk("Mutex: counter: %d owner: %s\n", c,
+	       owner ? owner->comm : "None");
+}
+EXPORT_SYMBOL_GPL(mutex_print_debug);
--
1.7.10.4
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC 3/4] locking/rwsem: additional lock information when dumping locks
2015-01-12 14:57 [RFC 1/4] lockdep: additional lock specific information when dumping locks Sasha Levin
2015-01-12 14:57 ` [RFC 2/4] locking/mutex: additional lock " Sasha Levin
@ 2015-01-12 14:57 ` Sasha Levin
2015-01-12 14:57 ` [RFC 4/4] locking/spinlock: " Sasha Levin
2015-01-12 15:06 ` [RFC 1/4] lockdep: additional lock specific " Peter Zijlstra
3 siblings, 0 replies; 10+ messages in thread
From: Sasha Levin @ 2015-01-12 14:57 UTC (permalink / raw)
To: linux-kernel; +Cc: peterz, mingo, Sasha Levin
Show the counter and the owner of the lock when dumping held locks in the
system.
This is useful to figure out who really holds a lock and how many waiters
it has.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
include/linux/lockdep.h | 2 +-
include/linux/rwsem.h | 3 ++-
kernel/locking/lockdep.c | 5 +++++
kernel/locking/rwsem-spinlock.c | 7 ++++++-
kernel/locking/rwsem-xadd.c | 2 +-
kernel/locking/rwsem.c | 15 +++++++++++++++
6 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cab929b..4527f99 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -143,7 +143,7 @@ struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
-enum LOCK_TYPE { LOCKTYPE_NONE, LOCKTYPE_MUTEX, };
+enum LOCK_TYPE { LOCKTYPE_NONE, LOCKTYPE_MUTEX, LOCKTYPE_RWSEM, };
/*
* Map the lock object (the lock instance) to the lock-class object.
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8f498cd..68732af 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -60,7 +60,8 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+extern void rwsem_print_debug(const struct rw_semaphore *sem);
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname, .type = LOCKTYPE_RWSEM }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 07c337d..e75f83b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -511,6 +511,7 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
static void get_lock_info(enum LOCK_TYPE t, struct lockdep_map *map)
{
struct mutex *mtx;
+ struct rw_semaphore *rw;
switch (t) {
case LOCKTYPE_NONE: break;
@@ -518,6 +519,10 @@ static void get_lock_info(enum LOCK_TYPE t, struct lockdep_map *map)
mtx = container_of(map, struct mutex, dep_map);
mutex_print_debug(mtx);
break;
+ case LOCKTYPE_RWSEM:
+ rw = container_of(map, struct rw_semaphore, dep_map);
+ rwsem_print_debug(rw);
+ break;
}
}
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2c93571..b07029e 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key, 0);
+ lockdep_init_map_type(&sem->dep_map, name, key, 0, LOCKTYPE_RWSEM);
#endif
sem->count = 0;
raw_spin_lock_init(&sem->wait_lock);
@@ -294,3 +294,8 @@ void __downgrade_write(struct rw_semaphore *sem)
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
+void rwsem_print_debug(const struct rw_semaphore *sem)
+{
+	/* Generic (spinlock-based) rwsem: count is __s32, print with %d. */
+	printk("RWsem: count: %d\n", sem->count);
+}
+EXPORT_SYMBOL_GPL(rwsem_print_debug);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 7628c3f..8b00656 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -77,7 +77,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key, 0);
+ lockdep_init_map_type(&sem->dep_map, name, key, 0, LOCKTYPE_RWSEM);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
raw_spin_lock_init(&sem->wait_lock);
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index e2d3bc7..24183cc 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -181,6 +181,21 @@ void up_read_non_owner(struct rw_semaphore *sem)
EXPORT_SYMBOL(up_read_non_owner);
+void rwsem_print_debug(const struct rw_semaphore *sem)
+{
+ const char *owner = "Unknown";
+
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ if (sem->owner)
+ owner = sem->owner->comm;
+ else
+ owner = "None";
+#endif
+
+ printk("RWsem: count: %ld owner: %s\n", sem->count, owner);
+}
+EXPORT_SYMBOL_GPL(rwsem_print_debug);
+
#endif
--
1.7.10.4
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC 4/4] locking/spinlock: additional lock information when dumping locks
2015-01-12 14:57 [RFC 1/4] lockdep: additional lock specific information when dumping locks Sasha Levin
2015-01-12 14:57 ` [RFC 2/4] locking/mutex: additional lock " Sasha Levin
2015-01-12 14:57 ` [RFC 3/4] locking/rwsem: " Sasha Levin
@ 2015-01-12 14:57 ` Sasha Levin
2015-01-12 15:06 ` [RFC 1/4] lockdep: additional lock specific " Peter Zijlstra
3 siblings, 0 replies; 10+ messages in thread
From: Sasha Levin @ 2015-01-12 14:57 UTC (permalink / raw)
To: linux-kernel; +Cc: peterz, mingo, Sasha Levin
Show the owner cpu and task of the lock when dumping held locks in the system.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
include/linux/lockdep.h | 2 +-
include/linux/spinlock.h | 4 ++++
include/linux/spinlock_types.h | 2 +-
kernel/locking/lockdep.c | 5 +++++
kernel/locking/spinlock_debug.c | 17 ++++++++++++++++-
5 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4527f99..01e0b14 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -143,7 +143,7 @@ struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
-enum LOCK_TYPE { LOCKTYPE_NONE, LOCKTYPE_MUTEX, LOCKTYPE_RWSEM, };
+enum LOCK_TYPE { LOCKTYPE_NONE, LOCKTYPE_MUTEX, LOCKTYPE_RWSEM, LOCKTYPE_SPINLOCK, };
/*
* Map the lock object (the lock instance) to the lock-class object.
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3e18379..963f7626 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -409,6 +409,10 @@ static inline int spin_can_lock(spinlock_t *lock)
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void spinlock_print_debug(const spinlock_t *lock);
+#endif
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb..efbe667 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -36,7 +36,7 @@ typedef struct raw_spinlock {
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname, .type = LOCKTYPE_SPINLOCK }
#else
# define SPIN_DEP_MAP_INIT(lockname)
#endif
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e75f83b..b3d4151 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -512,6 +512,7 @@ static void get_lock_info(enum LOCK_TYPE t, struct lockdep_map *map)
{
struct mutex *mtx;
struct rw_semaphore *rw;
+ spinlock_t *spin;
switch (t) {
case LOCKTYPE_NONE: break;
@@ -523,6 +524,10 @@ static void get_lock_info(enum LOCK_TYPE t, struct lockdep_map *map)
rw = container_of(map, struct rw_semaphore, dep_map);
rwsem_print_debug(rw);
break;
+ case LOCKTYPE_SPINLOCK:
+ spin = container_of(map, struct spinlock, dep_map);
+ spinlock_print_debug(spin);
+ break;
}
}
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a59..52798c0 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -21,7 +21,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
+ lockdep_init_map_type(&lock->dep_map, name, key, 0, LOCKTYPE_SPINLOCK);
#endif
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
@@ -300,3 +300,18 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
+
+/* Dump a spinlock's owner cpu/task for lockdep's held-locks report. */
+void spinlock_print_debug(const spinlock_t *lock)
+{
+	/* "None" keeps the output consistent with mutex_print_debug()
+	 * instead of relying on printk's "(null)" fallback for %s. */
+	const char *owner_comm = "None";
+	const struct task_struct *owner;
+	unsigned int cpu = lock->rlock.owner_cpu;
+
+	owner = lock->rlock.owner;
+
+	if (owner && owner != SPINLOCK_OWNER_INIT)
+		owner_comm = owner->comm;
+
+	printk("Spinlock: owner cpu: %u owner: %s\n", cpu, owner_comm);
+}
+EXPORT_SYMBOL_GPL(spinlock_print_debug);
--
1.7.10.4
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [RFC 1/4] lockdep: additional lock specific information when dumping locks
2015-01-12 14:57 [RFC 1/4] lockdep: additional lock specific information when dumping locks Sasha Levin
` (2 preceding siblings ...)
2015-01-12 14:57 ` [RFC 4/4] locking/spinlock: " Sasha Levin
@ 2015-01-12 15:06 ` Peter Zijlstra
2015-01-12 15:12 ` Sasha Levin
3 siblings, 1 reply; 10+ messages in thread
From: Peter Zijlstra @ 2015-01-12 15:06 UTC (permalink / raw)
To: Sasha Levin; +Cc: linux-kernel, mingo
On Mon, Jan 12, 2015 at 09:57:08AM -0500, Sasha Levin wrote:
> When dumping held locks, it might be useful to get additional lock-specific
> information about each lock.
>
> This is mainly useful to figure out who really holds each lock rather then
> who's just waiting on it, but it could be extended further or extended to
> the other lockdep users if required.
I really don't see the point in this; I would much rather have something
useful like:
http://lwn.net/Articles/579849/
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [RFC 1/4] lockdep: additional lock specific information when dumping locks
2015-01-12 15:06 ` [RFC 1/4] lockdep: additional lock specific " Peter Zijlstra
@ 2015-01-12 15:12 ` Sasha Levin
2015-01-12 15:37 ` Peter Zijlstra
0 siblings, 1 reply; 10+ messages in thread
From: Sasha Levin @ 2015-01-12 15:12 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: linux-kernel, mingo
On 01/12/2015 10:06 AM, Peter Zijlstra wrote:
> On Mon, Jan 12, 2015 at 09:57:08AM -0500, Sasha Levin wrote:
>> When dumping held locks, it might be useful to get additional lock-specific
>> information about each lock.
>>
>> This is mainly useful to figure out who really holds each lock rather then
>> who's just waiting on it, but it could be extended further or extended to
>> the other lockdep users if required.
>
> I really don't see the point in this; I would much rather have something
> useful like:
>
> http://lwn.net/Articles/579849/
As far as I can tell, they have completely different purposes.
The reason for my patch is simple: I'm fuzzing with hundreds of worker threads
which at some point trigger a complete system lockup for some reason.
When lockdep dumps the list of held locks it shows that pretty much every one
of those threads is holding the lock which caused the lockup, which is incorrect
because it considers locks in the process of getting acquired as "held".
This is my solution to that issue. I wanted to know which one of the threads is
really holding the lock rather than just waiting on it.
Is there a better way to solve that problem?
Thanks,
Sasha
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [RFC 1/4] lockdep: additional lock specific information when dumping locks
2015-01-12 15:12 ` Sasha Levin
@ 2015-01-12 15:37 ` Peter Zijlstra
2015-01-12 16:06 ` Sasha Levin
0 siblings, 1 reply; 10+ messages in thread
From: Peter Zijlstra @ 2015-01-12 15:37 UTC (permalink / raw)
To: Sasha Levin; +Cc: linux-kernel, mingo
On Mon, Jan 12, 2015 at 10:12:38AM -0500, Sasha Levin wrote:
> The reason for my patch is simple:
That might have maybe been good changelog material?
> I'm fuzzing with hundreds of worker threads
> which at some point trigger a complete system lockup for some reason.
>
> When lockdep dumps the list of held locks it shows that pretty much every one
> of those threads is holding the lock which caused the lockup, which is incorrect
> because it considers locks in the process of getting acquired as "held".
>
> This is my solution to that issue. I wanted to know which one of the threads is
> really holding the lock rather than just waiting on it.
>
> Is there a better way to solve that problem?
Sure, think moar, if the accompanying stack trace is in the middle
of the blocking primitive, ignore the top held lock ;-)
Alternatively, make better/more use of lock_acquired() and track the
acquire vs acquired information in the held_lock (1 bit) and look at it
when printing.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [RFC 1/4] lockdep: additional lock specific information when dumping locks
2015-01-12 15:37 ` Peter Zijlstra
@ 2015-01-12 16:06 ` Sasha Levin
2015-01-12 16:23 ` Peter Zijlstra
0 siblings, 1 reply; 10+ messages in thread
From: Sasha Levin @ 2015-01-12 16:06 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: linux-kernel, mingo
On 01/12/2015 10:37 AM, Peter Zijlstra wrote:
> On Mon, Jan 12, 2015 at 10:12:38AM -0500, Sasha Levin wrote:
>> The reason for my patch is simple:
>
> That might have maybe been good changelog material?
>
>> I'm fuzzing with hundreds of worker threads
>> which at some point trigger a complete system lockup for some reason.
>>
>> When lockdep dumps the list of held locks it shows that pretty much every one
>> of those threads is holding the lock which caused the lockup, which is incorrect
>> because it considers locks in the process of getting acquired as "held".
>>
>> This is my solution to that issue. I wanted to know which one of the threads is
>> really holding the lock rather than just waiting on it.
>>
>> Is there a better way to solve that problem?
>
> Sure, think moar, if the accompanying stack trace is in the middle
> of the blocking primitive, ignore the top held lock ;-)
Tried that, it's a pain.
Consider this scenario:
Process A | Process B | Process C-[...]
----------------|-----------------------|----------------
mutex_lock(x) | |
[busy working] | |
| mutex_lock(z) |
| mutex_lock(x) |
| [waiting on x] |
| | mutex_lock(z)
| | [waiting on z]
So at the end of all of that I have 1000 processes waiting on 'z', while
the process that has 'z' is waiting on 'x'. So if I look at which processes
are not stuck inside a blocking primitive I'll miss process B, and its
link between process A and process B.
> Alternatively, make better/more use of lock_acquired() and track the
> acquire vs acquired information in the held_lock (1 bit) and look at it
> when printing.
We could do that, but then we'd lose the ability to get information out of
locks, what's the benefit of doing that?
Thanks,
Sasha
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [RFC 1/4] lockdep: additional lock specific information when dumping locks
2015-01-12 16:06 ` Sasha Levin
@ 2015-01-12 16:23 ` Peter Zijlstra
2015-01-13 5:18 ` Sasha Levin
0 siblings, 1 reply; 10+ messages in thread
From: Peter Zijlstra @ 2015-01-12 16:23 UTC (permalink / raw)
To: Sasha Levin; +Cc: linux-kernel, mingo
On Mon, Jan 12, 2015 at 11:06:17AM -0500, Sasha Levin wrote:
> On 01/12/2015 10:37 AM, Peter Zijlstra wrote:
> > On Mon, Jan 12, 2015 at 10:12:38AM -0500, Sasha Levin wrote:
> >> The reason for my patch is simple:
> >
> > That might have maybe been good changelog material?
> >
> >> I'm fuzzing with hundreds of worker threads
> >> which at some point trigger a complete system lockup for some reason.
> >>
> >> When lockdep dumps the list of held locks it shows that pretty much every one
> >> of those threads is holding the lock which caused the lockup, which is incorrect
> >> because it considers locks in the process of getting acquired as "held".
> >>
> >> This is my solution to that issue. I wanted to know which one of the threads is
> >> really holding the lock rather than just waiting on it.
> >>
> >> Is there a better way to solve that problem?
> >
> > Sure, think moar, if the accompanying stack trace is in the middle
> > of the blocking primitive, ignore the top held lock ;-)
>
> Tried that, it's a pain.
>
> Consider this scenario:
>
> Process A | Process B | Process C-[...]
> ----------------|-----------------------|----------------
> mutex_lock(x) | |
> [busy working] | |
> | mutex_lock(z) |
> | mutex_lock(x) |
> | [waiting on x] |
> | | mutex_lock(z)
> | | [waiting on z]
>
> So at the end of all of that I have 1000 processes waiting on 'z', while
> the process that has 'z' is waiting on 'x'. So if I look at which processes
> are not stuck inside a blocking primitive I'll miss on process B., and it's
> link between process A and process B.
I never said to ignore everything for tasks blocked inside locking
primitives, only ignore the top held.
But sure, I can relate how large numbers make this painful.
> > Alternatively, make better/more use of lock_acquired() and track the
> > acquire vs acquired information in the held_lock (1 bit) and look at it
> > when printing.
>
> We could do that, but then we'd lose the ability to get information out of
> locks, what's the benefit of doing that?
That's mission creep; you never stated that as a goal.
One of the reasons i'm not particularly keen on it is because it creates
a circular dependency between lock implementations and lockdep. It also
creates asymmetry between lock types/capabilty.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [RFC 1/4] lockdep: additional lock specific information when dumping locks
2015-01-12 16:23 ` Peter Zijlstra
@ 2015-01-13 5:18 ` Sasha Levin
0 siblings, 0 replies; 10+ messages in thread
From: Sasha Levin @ 2015-01-13 5:18 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: linux-kernel, mingo
On 01/12/2015 11:23 AM, Peter Zijlstra wrote:
>>> > > Alternatively, make better/more use of lock_acquired() and track the
>>> > > acquire vs acquired information in the held_lock (1 bit) and look at it
>>> > > when printing.
>> >
>> > We could do that, but then we'd lose the ability to get information out of
>> > locks, what's the benefit of doing that?
> That's mission creep; you never stated that as a goal.
>
> One of the reasons i'm not particularly keen on it is because it creates
> a circular dependency between lock implementations and lockdep. It also
> creates asymmetry between lock types/capabilty.
Fair enough.
__lock_acquired() which looks up held_lock doesn't happen unless
CONFIG_LOCK_STAT is set, which means that if we want to use this method we'd
need to look up held_lock just for that, which would make that path heavier.
Before I go ahead and implement it, do you find it acceptable?
Thanks,
Sasha
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2015-01-13 5:19 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-01-12 14:57 [RFC 1/4] lockdep: additional lock specific information when dumping locks Sasha Levin
2015-01-12 14:57 ` [RFC 2/4] locking/mutex: additional lock " Sasha Levin
2015-01-12 14:57 ` [RFC 3/4] locking/rwsem: " Sasha Levin
2015-01-12 14:57 ` [RFC 4/4] locking/spinlock: " Sasha Levin
2015-01-12 15:06 ` [RFC 1/4] lockdep: additional lock specific " Peter Zijlstra
2015-01-12 15:12 ` Sasha Levin
2015-01-12 15:37 ` Peter Zijlstra
2015-01-12 16:06 ` Sasha Levin
2015-01-12 16:23 ` Peter Zijlstra
2015-01-13 5:18 ` Sasha Levin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox