From: Yuyang Du <duyuyang@gmail.com>
To: peterz@infradead.org, will.deacon@arm.com, mingo@kernel.org
Cc: bvanassche@acm.org, ming.lei@redhat.com, frederic@kernel.org,
tglx@linutronix.de, linux-kernel@vger.kernel.org,
longman@redhat.com, paulmck@linux.vnet.ibm.com,
boqun.feng@gmail.com, Yuyang Du <duyuyang@gmail.com>
Subject: [PATCH v3 11/30] locking/lockdep: Specify the depth of current lock stack in lookup_chain_cache_add()
Date: Fri, 28 Jun 2019 17:15:09 +0800 [thread overview]
Message-ID: <20190628091528.17059-12-duyuyang@gmail.com> (raw)
In-Reply-To: <20190628091528.17059-1-duyuyang@gmail.com>
When looking up and adding a chain (i.e., in lookup_chain_cache_add()
and only in it), explicitly pass the depth of the held lock stack as an
argument. Currently, the depth passed is always curr->lockdep_depth.
No functional change.
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
kernel/locking/lockdep.c | 48 ++++++++++++++++++++++++++----------------------
1 file changed, 26 insertions(+), 22 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 095e532..5d19dc6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2527,12 +2527,12 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
* Returns the index of the first held_lock of the current chain
*/
static inline int get_first_held_lock(struct task_struct *curr,
- struct held_lock *hlock)
+ struct held_lock *hlock, int depth)
{
int i;
struct held_lock *hlock_curr;
- for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+ for (i = depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
if (hlock_curr->irq_context != hlock->irq_context)
break;
@@ -2557,12 +2557,12 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
}
static void
-print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+print_chain_keys_held_locks(struct task_struct *curr,
+ struct held_lock *hlock_next, int depth)
{
struct held_lock *hlock;
u64 chain_key = INITIAL_CHAIN_KEY;
- int depth = curr->lockdep_depth;
- int i = get_first_held_lock(curr, hlock_next);
+ int i = get_first_held_lock(curr, hlock_next, depth);
printk("depth: %u (irq_context %u)\n", depth - i + 1,
hlock_next->irq_context);
@@ -2594,8 +2594,8 @@ static void print_chain_keys_chain(struct lock_chain *chain)
}
static void print_collision(struct task_struct *curr,
- struct held_lock *hlock_next,
- struct lock_chain *chain)
+ struct held_lock *hlock_next,
+ struct lock_chain *chain, int depth)
{
pr_warn("\n");
pr_warn("============================\n");
@@ -2606,7 +2606,7 @@ static void print_collision(struct task_struct *curr,
pr_warn("Hash chain already cached but the contents don't match!\n");
pr_warn("Held locks:");
- print_chain_keys_held_locks(curr, hlock_next);
+ print_chain_keys_held_locks(curr, hlock_next, depth);
pr_warn("Locks in cached chain:");
print_chain_keys_chain(chain);
@@ -2622,17 +2622,16 @@ static void print_collision(struct task_struct *curr,
* that there was a collision during the calculation of the chain_key.
* Returns: 0 not passed, 1 passed
*/
-static int check_no_collision(struct task_struct *curr,
- struct held_lock *hlock,
- struct lock_chain *chain)
+static int check_no_collision(struct task_struct *curr, struct held_lock *hlock,
+ struct lock_chain *chain, int depth)
{
#ifdef CONFIG_DEBUG_LOCKDEP
int i, j, id;
- i = get_first_held_lock(curr, hlock);
+ i = get_first_held_lock(curr, hlock, depth);
- if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
- print_collision(curr, hlock, chain);
+ if (DEBUG_LOCKS_WARN_ON(chain->depth != depth - (i - 1))) {
+ print_collision(curr, hlock, chain, depth);
return 0;
}
@@ -2640,7 +2639,7 @@ static int check_no_collision(struct task_struct *curr,
id = curr->held_locks[i].class_idx;
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
- print_collision(curr, hlock, chain);
+ print_collision(curr, hlock, chain, depth);
return 0;
}
}
@@ -2684,7 +2683,7 @@ static struct lock_chain *alloc_lock_chain(void)
*/
static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
- u64 chain_key)
+ u64 chain_key, int depth)
{
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2710,8 +2709,8 @@ static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
- i = get_first_held_lock(curr, hlock);
- chain->depth = curr->lockdep_depth + 1 - i;
+ i = get_first_held_lock(curr, hlock, depth);
+ chain->depth = depth + 1 - i;
BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
@@ -2764,17 +2763,21 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
* add it and return the chain - in this case the new dependency
* chain will be validated. If the key is already hashed, return
* NULL. (On return with the new chain graph_lock is held.)
+ *
+ * If the key is not hashed, the new chain is composed of @hlock
+ * and @depth worth of the current held lock stack, of which the
+ * held locks are in the same context as @hlock.
*/
static inline struct lock_chain *
lookup_chain_cache_add(struct task_struct *curr, struct held_lock *hlock,
- u64 chain_key)
+ u64 chain_key, int depth)
{
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
if (chain) {
cache_hit:
- if (!check_no_collision(curr, hlock, chain))
+ if (!check_no_collision(curr, hlock, chain, depth))
return NULL;
if (very_verbose(class)) {
@@ -2804,7 +2807,7 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
goto cache_hit;
}
- return add_chain_cache(curr, hlock, chain_key);
+ return add_chain_cache(curr, hlock, chain_key, depth);
}
static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
@@ -2822,7 +2825,8 @@ static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
* graph_lock for us)
*/
if (!hlock->trylock && hlock->check &&
- (chain = lookup_chain_cache_add(curr, hlock, chain_key))) {
+ (chain = lookup_chain_cache_add(curr, hlock, chain_key,
+ curr->lockdep_depth))) {
/*
* Check whether last held lock:
*
--
1.8.3.1
next prev parent reply other threads:[~2019-06-28 9:16 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-06-28 9:14 [PATCH v3 00/30] Support recursive-read lock deadlock detection Yuyang Du
2019-06-28 9:14 ` [PATCH v3 01/30] locking/lockdep: Rename deadlock check functions Yuyang Du
2019-06-28 9:15 ` [PATCH v3 02/30] locking/lockdep: Change return type of add_chain_cache() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 03/30] locking/lockdep: Change return type of lookup_chain_cache_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 04/30] locking/lockdep: Pass lock chain from validate_chain() to check_prev_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 05/30] locking/lockdep: Add lock chain list_head field in struct lock_list and lock_chain Yuyang Du
2019-06-28 9:15 ` [PATCH v3 06/30] locking/lockdep: Update comments in struct lock_list and held_lock Yuyang Du
2019-06-28 9:15 ` [PATCH v3 07/30] locking/lockdep: Remove indirect dependency redundancy check Yuyang Du
2019-06-28 9:15 ` [PATCH v3 08/30] locking/lockdep: Skip checks if direct dependency is already present Yuyang Du
2019-06-28 9:15 ` [PATCH v3 09/30] locking/lockdep: Remove chain_head argument in validate_chain() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 10/30] locking/lockdep: Remove useless lock type assignment Yuyang Du
2019-06-28 9:15 ` Yuyang Du [this message]
2019-06-28 9:15 ` [PATCH v3 12/30] locking/lockdep: Treat every lock dependency as in a new lock chain Yuyang Du
2019-06-28 9:15 ` [PATCH v3 13/30] locking/lockdep: Combine lock_lists in struct lock_class into an array Yuyang Du
2019-06-28 9:15 ` [PATCH v3 14/30] locking/lockdep: Consolidate forward and backward lock_lists into one Yuyang Du
2019-06-28 9:15 ` [PATCH v3 15/30] locking/lockdep: Add lock chains to direct lock dependency graph Yuyang Du
2019-06-28 9:15 ` [PATCH v3 16/30] locking/lockdep: Use lock type enum to explicitly specify read or write locks Yuyang Du
2019-06-28 9:15 ` [PATCH v3 17/30] locking/lockdep: Add read-write type for a lock dependency Yuyang Du
2019-07-10 5:18 ` Boqun Feng
2019-07-11 5:02 ` Yuyang Du
2019-06-28 9:15 ` [PATCH v3 18/30] locking/lockdep: Add helper functions to operate on the searched path Yuyang Du
2019-06-28 9:15 ` [PATCH v3 19/30] locking/lockdep: Update direct dependency's read-write type if it exists Yuyang Du
2019-06-28 9:15 ` [PATCH v3 20/30] locking/lockdep: Introduce chain_hlocks_type for held lock's read-write type Yuyang Du
2019-06-28 9:15 ` [PATCH v3 21/30] locking/lockdep: Hash held lock's read-write type into chain key Yuyang Du
2019-06-28 9:15 ` [PATCH v3 22/30] locking/lockdep: Adjust BFS algorithm to support multiple matches Yuyang Du
2019-06-28 9:15 ` [PATCH v3 23/30] locking/lockdep: Define the two task model for lockdep checks formally Yuyang Du
2019-06-28 9:15 ` [PATCH v3 24/30] locking/lockdep: Introduce mark_lock_unaccessed() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 25/30] locking/lockdep: Add nest lock type Yuyang Du
2019-06-28 9:15 ` [PATCH v3 26/30] locking/lockdep: Add lock exclusiveness table Yuyang Du
2019-06-28 9:15 ` [PATCH v3 27/30] locking/lockdep: Support read-write lock's deadlock detection Yuyang Du
2019-06-28 9:15 ` [PATCH v3 28/30] locking/lockdep: Adjust selftest case for recursive read lock Yuyang Du
2019-06-28 9:15 ` [PATCH v3 29/30] locking/lockdep: Add more lockdep selftest cases Yuyang Du
2019-06-28 9:15 ` [PATCH v3 30/30] locking/lockdep: Remove irq-safe to irq-unsafe read check Yuyang Du
2019-07-10 5:30 ` Boqun Feng
2019-07-10 6:30 ` Yuyang Du
2019-07-10 1:54 ` [PATCH v3 00/30] Support recursive-read lock deadlock detection Yuyang Du
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190628091528.17059-12-duyuyang@gmail.com \
--to=duyuyang@gmail.com \
--cc=boqun.feng@gmail.com \
--cc=bvanassche@acm.org \
--cc=frederic@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=longman@redhat.com \
--cc=ming.lei@redhat.com \
--cc=mingo@kernel.org \
--cc=paulmck@linux.vnet.ibm.com \
--cc=peterz@infradead.org \
--cc=tglx@linutronix.de \
--cc=will.deacon@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox