From: Yuyang Du <duyuyang@gmail.com>
To: peterz@infradead.org, will.deacon@arm.com, mingo@kernel.org
Cc: bvanassche@acm.org, ming.lei@redhat.com, frederic@kernel.org,
tglx@linutronix.de, linux-kernel@vger.kernel.org,
longman@redhat.com, paulmck@linux.vnet.ibm.com,
boqun.feng@gmail.com, Yuyang Du <duyuyang@gmail.com>
Subject: [PATCH v3 21/30] locking/lockdep: Hash held lock's read-write type into chain key
Date: Fri, 28 Jun 2019 17:15:19 +0800 [thread overview]
Message-ID: <20190628091528.17059-22-duyuyang@gmail.com> (raw)
In-Reply-To: <20190628091528.17059-1-duyuyang@gmail.com>
When computing a chain's hash key, we need to consider a held lock's
read-write type. The additional data used by the Jenkins hash algorithm
is therefore a composite of the new held lock's lock class index (lower
16 bits) and its read-write type (upper 16 bits), as opposed to just the
class index before:
held lock type (16 bits) : lock class index (16 bits)
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
include/linux/lockdep.h | 1 +
kernel/locking/lockdep.c | 55 ++++++++++++++++++++++++++++++++++--------------
2 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fd619ac..7878481 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -186,6 +186,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
}
#define LOCK_TYPE_BITS 2
+#define LOCK_TYPE_SHIFT 16
/*
* Every lock has a list of other locks that were taken after or before
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 27eeacc..10df8eb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -370,11 +370,22 @@ struct pending_free {
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
+ *
+ * The additional u32 data to hash is a composite of the new held lock's
+ * lock class index (lower 16 bits) and its read-write type (higher 16
+ * bits):
+ *
+ * hlock type (16 bits) : lock class index (16 bits)
+ *
+ * N.B. The bits taken for lock type and index are specified by
+ * LOCK_TYPE_SHIFT.
*/
-static inline u64 iterate_chain_key(u64 key, u32 idx)
+static inline u64 iterate_chain_key(u64 key, u32 idx, int hlock_type)
{
u32 k0 = key, k1 = key >> 32;
+ idx += hlock_type << LOCK_TYPE_SHIFT;
+
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
return k0 | (u64)k1 << 32;
@@ -892,7 +903,8 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
/*
* The 'unsigned long long' casts avoid that a compiler warning
* is reported when building tools/lib/lockdep.
@@ -2623,12 +2635,13 @@ static inline int get_first_held_lock(struct task_struct *curr,
/*
* Returns the next chain_key iteration
*/
-static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key, int lock_type)
{
- u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+ u64 new_chain_key = iterate_chain_key(chain_key, class_idx, lock_type);
- printk(" class_idx:%d -> chain_key:%016Lx",
+ printk(" class_idx:%d (lock_type %d) -> chain_key:%016Lx",
class_idx,
+ lock_type,
(unsigned long long)new_chain_key);
return new_chain_key;
}
@@ -2645,12 +2658,15 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
hlock_next->irq_context);
for (; i < depth; i++) {
hlock = curr->held_locks + i;
- chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+ chain_key = print_chain_key_iteration(hlock->class_idx,
+ chain_key,
+ hlock->read);
print_lock(hlock);
}
- print_chain_key_iteration(hlock_next->class_idx, chain_key);
+ print_chain_key_iteration(hlock_next->class_idx, chain_key,
+ hlock_next->read);
print_lock(hlock_next);
}
@@ -2658,12 +2674,14 @@ static void print_chain_keys_chain(struct lock_chain *chain)
{
int i;
u64 chain_key = INITIAL_CHAIN_KEY;
- int class_id;
+ int class_id, lock_type;
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
- chain_key = print_chain_key_iteration(class_id, chain_key);
+ lock_type = chain_hlocks_type[chain->base + i];
+ chain_key = print_chain_key_iteration(class_id, chain_key,
+ lock_type);
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2703,7 +2721,7 @@ static int check_no_collision(struct task_struct *curr, struct held_lock *hlock,
struct lock_chain *chain, int depth)
{
#ifdef CONFIG_DEBUG_LOCKDEP
- int i, j, id;
+ int i, j, id, type;
i = get_first_held_lock(curr, hlock, depth);
@@ -2712,10 +2730,12 @@ static int check_no_collision(struct task_struct *curr, struct held_lock *hlock,
return 0;
}
- for (j = 0; j < chain->depth - 1; j++, i++) {
+ for (j = chain->base; j < chain->base + chain->depth - 1; j++, i++) {
id = curr->held_locks[i].class_idx;
+ type = curr->held_locks[i].read;
- if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+ if (DEBUG_LOCKS_WARN_ON((chain_hlocks[j] != id) ||
+ (chain_hlocks_type[j] != type))) {
print_collision(curr, hlock, chain, depth);
return 0;
}
@@ -3004,7 +3024,8 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
* lock_chains. If it exists the check is actually not needed.
*/
chain_key = iterate_chain_key(hlock->prev_chain_key,
- hlock_class(next) - lock_classes);
+ hlock_class(next) - lock_classes,
+ next->read);
goto chain_again;
@@ -3062,7 +3083,8 @@ static void check_chain_key(struct task_struct *curr)
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = INITIAL_CHAIN_KEY;
- chain_key = iterate_chain_key(chain_key, hlock->class_idx);
+ chain_key = iterate_chain_key(chain_key, hlock->class_idx,
+ hlock->read);
prev_hlock = hlock;
}
if (chain_key != curr->curr_chain_key) {
@@ -3972,7 +3994,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (separate_irq_context(curr, hlock))
chain_key = INITIAL_CHAIN_KEY;
- chain_key = iterate_chain_key(chain_key, class_idx);
+ chain_key = iterate_chain_key(chain_key, class_idx, read);
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
print_lock_nested_lock_not_held(curr, hlock, ip);
@@ -4833,7 +4855,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
recalc:
chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
if (chain->depth && chain->chain_key == chain_key)
return;
/* Overwrite the chain key for concurrent RCU readers. */
--
1.8.3.1
next prev parent reply other threads:[~2019-06-28 9:17 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-06-28 9:14 [PATCH v3 00/30] Support recursive-read lock deadlock detection Yuyang Du
2019-06-28 9:14 ` [PATCH v3 01/30] locking/lockdep: Rename deadlock check functions Yuyang Du
2019-06-28 9:15 ` [PATCH v3 02/30] locking/lockdep: Change return type of add_chain_cache() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 03/30] locking/lockdep: Change return type of lookup_chain_cache_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 04/30] locking/lockdep: Pass lock chain from validate_chain() to check_prev_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 05/30] locking/lockdep: Add lock chain list_head field in struct lock_list and lock_chain Yuyang Du
2019-06-28 9:15 ` [PATCH v3 06/30] locking/lockdep: Update comments in struct lock_list and held_lock Yuyang Du
2019-06-28 9:15 ` [PATCH v3 07/30] locking/lockdep: Remove indirect dependency redundancy check Yuyang Du
2019-06-28 9:15 ` [PATCH v3 08/30] locking/lockdep: Skip checks if direct dependency is already present Yuyang Du
2019-06-28 9:15 ` [PATCH v3 09/30] locking/lockdep: Remove chain_head argument in validate_chain() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 10/30] locking/lockdep: Remove useless lock type assignment Yuyang Du
2019-06-28 9:15 ` [PATCH v3 11/30] locking/lockdep: Specify the depth of current lock stack in lookup_chain_cache_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 12/30] locking/lockdep: Treat every lock dependency as in a new lock chain Yuyang Du
2019-06-28 9:15 ` [PATCH v3 13/30] locking/lockdep: Combine lock_lists in struct lock_class into an array Yuyang Du
2019-06-28 9:15 ` [PATCH v3 14/30] locking/lockdep: Consolidate forward and backward lock_lists into one Yuyang Du
2019-06-28 9:15 ` [PATCH v3 15/30] locking/lockdep: Add lock chains to direct lock dependency graph Yuyang Du
2019-06-28 9:15 ` [PATCH v3 16/30] locking/lockdep: Use lock type enum to explicitly specify read or write locks Yuyang Du
2019-06-28 9:15 ` [PATCH v3 17/30] locking/lockdep: Add read-write type for a lock dependency Yuyang Du
2019-07-10 5:18 ` Boqun Feng
2019-07-11 5:02 ` Yuyang Du
2019-06-28 9:15 ` [PATCH v3 18/30] locking/lockdep: Add helper functions to operate on the searched path Yuyang Du
2019-06-28 9:15 ` [PATCH v3 19/30] locking/lockdep: Update direct dependency's read-write type if it exists Yuyang Du
2019-06-28 9:15 ` [PATCH v3 20/30] locking/lockdep: Introduce chain_hlocks_type for held lock's read-write type Yuyang Du
2019-06-28 9:15 ` Yuyang Du [this message]
2019-06-28 9:15 ` [PATCH v3 22/30] locking/lockdep: Adjust BFS algorithm to support multiple matches Yuyang Du
2019-06-28 9:15 ` [PATCH v3 23/30] locking/lockdep: Define the two task model for lockdep checks formally Yuyang Du
2019-06-28 9:15 ` [PATCH v3 24/30] locking/lockdep: Introduce mark_lock_unaccessed() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 25/30] locking/lockdep: Add nest lock type Yuyang Du
2019-06-28 9:15 ` [PATCH v3 26/30] locking/lockdep: Add lock exclusiveness table Yuyang Du
2019-06-28 9:15 ` [PATCH v3 27/30] locking/lockdep: Support read-write lock's deadlock detection Yuyang Du
2019-06-28 9:15 ` [PATCH v3 28/30] locking/lockdep: Adjust selftest case for recursive read lock Yuyang Du
2019-06-28 9:15 ` [PATCH v3 29/30] locking/lockdep: Add more lockdep selftest cases Yuyang Du
2019-06-28 9:15 ` [PATCH v3 30/30] locking/lockdep: Remove irq-safe to irq-unsafe read check Yuyang Du
2019-07-10 5:30 ` Boqun Feng
2019-07-10 6:30 ` Yuyang Du
2019-07-10 1:54 ` [PATCH v3 00/30] Support recursive-read lock deadlock detection Yuyang Du
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190628091528.17059-22-duyuyang@gmail.com \
--to=duyuyang@gmail.com \
--cc=boqun.feng@gmail.com \
--cc=bvanassche@acm.org \
--cc=frederic@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=longman@redhat.com \
--cc=ming.lei@redhat.com \
--cc=mingo@kernel.org \
--cc=paulmck@linux.vnet.ibm.com \
--cc=peterz@infradead.org \
--cc=tglx@linutronix.de \
--cc=will.deacon@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox