From: Yuyang Du <duyuyang@gmail.com>
To: peterz@infradead.org, will.deacon@arm.com, mingo@kernel.org
Cc: bvanassche@acm.org, ming.lei@redhat.com, frederic@kernel.org,
tglx@linutronix.de, linux-kernel@vger.kernel.org,
longman@redhat.com, paulmck@linux.vnet.ibm.com,
boqun.feng@gmail.com, Yuyang Du <duyuyang@gmail.com>
Subject: [PATCH v3 22/30] locking/lockdep: Adjust BFS algorithm to support multiple matches
Date: Fri, 28 Jun 2019 17:15:20 +0800 [thread overview]
Message-ID: <20190628091528.17059-23-duyuyang@gmail.com> (raw)
In-Reply-To: <20190628091528.17059-1-duyuyang@gmail.com>
With recursive-read locks, a circle is not a sufficient condition for a
deadlock. As a result, we need to continue the search after a match if
the match is not the one wanted. __bfs() is adjusted to that end. The
basic idea is to enqueue a node's children before matching the node.
No functional change.
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
kernel/locking/lockdep.c | 51 ++++++++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 10df8eb..7d02b94 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1551,7 +1551,7 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry, int forward)
+ struct lock_list **target_entry, int forward, bool init)
{
struct lock_list *entry;
struct lock_list *lock;
@@ -1559,19 +1559,11 @@ static int __bfs(struct lock_list *source_entry,
struct circular_queue *cq = &lock_cq;
int ret = 1;
- if (match(source_entry, data)) {
- *target_entry = source_entry;
- ret = 0;
- goto exit;
+ if (init) {
+ __cq_init(cq);
+ __cq_enqueue(cq, source_entry);
}
- head = get_dep_list(source_entry, forward);
- if (list_empty(head))
- goto exit;
-
- __cq_init(cq);
- __cq_enqueue(cq, source_entry);
-
while ((lock = __cq_dequeue(cq))) {
if (!lock->class[forward]) {
@@ -1583,25 +1575,34 @@ static int __bfs(struct lock_list *source_entry,
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ /*
+ * Enqueue a node's children before match() so that even
+ * this node matches but is not wanted, we can continue
+ * the search.
+ */
list_for_each_entry_rcu(entry, head, entry[forward]) {
if (!lock_accessed(entry, forward)) {
unsigned int cq_depth;
+
mark_lock_accessed(entry, lock, forward);
- if (match(entry, data)) {
- *target_entry = entry;
- ret = 0;
- goto exit;
- }
if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
+
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
+
+ if (match(lock, data)) {
+ *target_entry = lock;
+ ret = 0;
+ goto exit;
+ }
+
}
exit:
return ret;
@@ -1610,9 +1611,9 @@ static int __bfs(struct lock_list *source_entry,
static inline int __bfs_forwards(struct lock_list *src_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry)
+ struct lock_list **target_entry, bool init)
{
- return __bfs(src_entry, data, match, target_entry, 1);
+ return __bfs(src_entry, data, match, target_entry, 1, init);
}
static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1620,7 +1621,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry, 0);
+ return __bfs(src_entry, data, match, target_entry, 0, true);
}
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
@@ -1792,7 +1793,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
unsigned long count = 0;
struct lock_list *uninitialized_var(target_entry);
- __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+ __bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
return count;
}
@@ -1846,12 +1847,12 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
*/
static noinline int
check_path(struct lock_class *target, struct lock_list *src_entry,
- struct lock_list **target_entry)
+ struct lock_list **target_entry, bool init)
{
int ret;
ret = __bfs_forwards(src_entry, (void *)target, class_equal,
- target_entry);
+ target_entry, init);
if (unlikely(ret < 0))
print_bfs_bug(ret);
@@ -1879,7 +1880,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
debug_atomic_inc(nr_cyclic_checks);
- ret = check_path(hlock_class(target), &src_entry, &target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
if (unlikely(!ret)) {
if (!trace->nr_entries) {
@@ -1940,7 +1941,7 @@ static inline int usage_match_backward(struct lock_list *entry, void *mask)
debug_atomic_inc(nr_find_usage_forwards_checks);
result = __bfs_forwards(root, &usage_mask, usage_match_forward,
- target_entry);
+ target_entry, true);
return result;
}
--
1.8.3.1
next prev parent reply other threads:[~2019-06-28 9:17 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-06-28 9:14 [PATCH v3 00/30] Support recursive-read lock deadlock detection Yuyang Du
2019-06-28 9:14 ` [PATCH v3 01/30] locking/lockdep: Rename deadlock check functions Yuyang Du
2019-06-28 9:15 ` [PATCH v3 02/30] locking/lockdep: Change return type of add_chain_cache() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 03/30] locking/lockdep: Change return type of lookup_chain_cache_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 04/30] locking/lockdep: Pass lock chain from validate_chain() to check_prev_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 05/30] locking/lockdep: Add lock chain list_head field in struct lock_list and lock_chain Yuyang Du
2019-06-28 9:15 ` [PATCH v3 06/30] locking/lockdep: Update comments in struct lock_list and held_lock Yuyang Du
2019-06-28 9:15 ` [PATCH v3 07/30] locking/lockdep: Remove indirect dependency redundancy check Yuyang Du
2019-06-28 9:15 ` [PATCH v3 08/30] locking/lockdep: Skip checks if direct dependency is already present Yuyang Du
2019-06-28 9:15 ` [PATCH v3 09/30] locking/lockdep: Remove chain_head argument in validate_chain() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 10/30] locking/lockdep: Remove useless lock type assignment Yuyang Du
2019-06-28 9:15 ` [PATCH v3 11/30] locking/lockdep: Specify the depth of current lock stack in lookup_chain_cache_add() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 12/30] locking/lockdep: Treat every lock dependency as in a new lock chain Yuyang Du
2019-06-28 9:15 ` [PATCH v3 13/30] locking/lockdep: Combine lock_lists in struct lock_class into an array Yuyang Du
2019-06-28 9:15 ` [PATCH v3 14/30] locking/lockdep: Consolidate forward and backward lock_lists into one Yuyang Du
2019-06-28 9:15 ` [PATCH v3 15/30] locking/lockdep: Add lock chains to direct lock dependency graph Yuyang Du
2019-06-28 9:15 ` [PATCH v3 16/30] locking/lockdep: Use lock type enum to explicitly specify read or write locks Yuyang Du
2019-06-28 9:15 ` [PATCH v3 17/30] locking/lockdep: Add read-write type for a lock dependency Yuyang Du
2019-07-10 5:18 ` Boqun Feng
2019-07-11 5:02 ` Yuyang Du
2019-06-28 9:15 ` [PATCH v3 18/30] locking/lockdep: Add helper functions to operate on the searched path Yuyang Du
2019-06-28 9:15 ` [PATCH v3 19/30] locking/lockdep: Update direct dependency's read-write type if it exists Yuyang Du
2019-06-28 9:15 ` [PATCH v3 20/30] locking/lockdep: Introduce chain_hlocks_type for held lock's read-write type Yuyang Du
2019-06-28 9:15 ` [PATCH v3 21/30] locking/lockdep: Hash held lock's read-write type into chain key Yuyang Du
2019-06-28 9:15 ` Yuyang Du [this message]
2019-06-28 9:15 ` [PATCH v3 23/30] locking/lockdep: Define the two task model for lockdep checks formally Yuyang Du
2019-06-28 9:15 ` [PATCH v3 24/30] locking/lockdep: Introduce mark_lock_unaccessed() Yuyang Du
2019-06-28 9:15 ` [PATCH v3 25/30] locking/lockdep: Add nest lock type Yuyang Du
2019-06-28 9:15 ` [PATCH v3 26/30] locking/lockdep: Add lock exclusiveness table Yuyang Du
2019-06-28 9:15 ` [PATCH v3 27/30] locking/lockdep: Support read-write lock's deadlock detection Yuyang Du
2019-06-28 9:15 ` [PATCH v3 28/30] locking/lockdep: Adjust selftest case for recursive read lock Yuyang Du
2019-06-28 9:15 ` [PATCH v3 29/30] locking/lockdep: Add more lockdep selftest cases Yuyang Du
2019-06-28 9:15 ` [PATCH v3 30/30] locking/lockdep: Remove irq-safe to irq-unsafe read check Yuyang Du
2019-07-10 5:30 ` Boqun Feng
2019-07-10 6:30 ` Yuyang Du
2019-07-10 1:54 ` [PATCH v3 00/30] Support recursive-read lock deadlock detection Yuyang Du
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190628091528.17059-23-duyuyang@gmail.com \
--to=duyuyang@gmail.com \
--cc=boqun.feng@gmail.com \
--cc=bvanassche@acm.org \
--cc=frederic@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=longman@redhat.com \
--cc=ming.lei@redhat.com \
--cc=mingo@kernel.org \
--cc=paulmck@linux.vnet.ibm.com \
--cc=peterz@infradead.org \
--cc=tglx@linutronix.de \
--cc=will.deacon@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox