From: Alexander Aring <aahringo@redhat.com>
To: teigland@redhat.com
Cc: aahringo@redhat.com, gfs2@lists.linux.dev
Subject: [PATCH RESEND v7.1-rc1 1/4] dlm: use hlist_for_each_entry_srcu for SRCU protected lists
Date: Mon, 27 Apr 2026 11:59:32 -0400 [thread overview]
Message-ID: <20260427155935.2415989-2-aahringo@redhat.com> (raw)
In-Reply-To: <20260427155935.2415989-1-aahringo@redhat.com>
From: Li RongQing <lirongqing@baidu.com>
The connection and node hash tables in DLM are protected by SRCU, but
the code currently uses hlist_for_each_entry_rcu() for traversal.
While this works functionally, it is semantically incorrect and triggers
warnings when RCU lockdep debugging is enabled, because
hlist_for_each_entry_rcu() makes lockdep expect a regular RCU read-side
critical section rather than an SRCU one.
This patch replaces the incorrect macros with hlist_for_each_entry_srcu()
and adds the appropriate lockdep expressions using srcu_read_lock_held()
to ensure consistency with the underlying locking mechanism.
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Acked-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
fs/dlm/lowcomms.c | 12 ++++++++----
fs/dlm/midcomms.c | 15 ++++++++++-----
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 5b61427879195..2aff1c7c17de4 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -271,7 +271,8 @@ static struct connection *__find_con(int nodeid, int r)
{
struct connection *con;
- hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
+ hlist_for_each_entry_srcu(con, &connection_hash[r], list,
+ srcu_read_lock_held(&connections_srcu)) {
if (con->nodeid == nodeid)
return con;
}
@@ -426,7 +427,8 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
idx = srcu_read_lock(&connections_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
+ hlist_for_each_entry_srcu(con, &connection_hash[i], list,
+ srcu_read_lock_held(&connections_srcu)) {
WARN_ON_ONCE(!con->addr_count);
spin_lock(&con->addrs_lock);
@@ -1729,7 +1731,8 @@ void dlm_lowcomms_shutdown(void)
idx = srcu_read_lock(&connections_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
+ hlist_for_each_entry_srcu(con, &connection_hash[i], list,
+ srcu_read_lock_held(&connections_srcu)) {
shutdown_connection(con, true);
stop_connection_io(con);
flush_workqueue(process_workqueue);
@@ -1968,7 +1971,8 @@ void dlm_lowcomms_exit(void)
idx = srcu_read_lock(&connections_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
+ hlist_for_each_entry_srcu(con, &connection_hash[i], list,
+ srcu_read_lock_held(&connections_srcu)) {
spin_lock(&connections_lock);
hlist_del_rcu(&con->list);
spin_unlock(&connections_lock);
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index d54bdd8fc4f2e..a5b363b4785f3 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -275,7 +275,8 @@ static struct midcomms_node *__find_node(int nodeid, int r)
{
struct midcomms_node *node;
- hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
+ hlist_for_each_entry_srcu(node, &node_hash[r], hlist,
+ srcu_read_lock_held(&nodes_srcu)) {
if (node->nodeid == nodeid)
return node;
}
@@ -1165,7 +1166,8 @@ void dlm_midcomms_exit(void)
idx = srcu_read_lock(&nodes_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ hlist_for_each_entry_srcu(node, &node_hash[i], hlist,
+ srcu_read_lock_held(&nodes_srcu)) {
dlm_delete_debug_comms_file(node->debugfs);
spin_lock(&nodes_lock);
@@ -1325,7 +1327,8 @@ void dlm_midcomms_version_wait(void)
idx = srcu_read_lock(&nodes_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ hlist_for_each_entry_srcu(node, &node_hash[i], hlist,
+ srcu_read_lock_held(&nodes_srcu)) {
ret = wait_event_timeout(node->shutdown_wait,
node->version != DLM_VERSION_NOT_SET ||
node->state == DLM_CLOSED ||
@@ -1396,7 +1399,8 @@ void dlm_midcomms_shutdown(void)
mutex_lock(&close_lock);
idx = srcu_read_lock(&nodes_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ hlist_for_each_entry_srcu(node, &node_hash[i], hlist,
+ srcu_read_lock_held(&nodes_srcu)) {
midcomms_shutdown(node);
}
}
@@ -1404,7 +1408,8 @@ void dlm_midcomms_shutdown(void)
dlm_lowcomms_shutdown();
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ hlist_for_each_entry_srcu(node, &node_hash[i], hlist,
+ srcu_read_lock_held(&nodes_srcu)) {
midcomms_node_reset(node);
}
}
--
2.43.0
next prev parent reply other threads:[~2026-04-27 15:59 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-27 15:59 [PATCH RESEND v7.1-rc1 0/4] dlm: pending DLM patches Alexander Aring
2026-04-27 15:59 ` Alexander Aring [this message]
2026-04-27 15:59 ` [PATCH RESEND v7.1-rc1 2/4] dlm: add usercopy whitelist to dlm_cb cache Alexander Aring
2026-04-27 15:59 ` [PATCH RESEND v7.1-rc1 3/4] dlm: fix add msg handle in send_queue ordered Alexander Aring
2026-04-27 15:59 ` [PATCH RESEND v7.1-rc1 4/4] dlm: init per node debugfs before add to node hash Alexander Aring
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260427155935.2415989-2-aahringo@redhat.com \
--to=aahringo@redhat.com \
--cc=gfs2@lists.linux.dev \
--cc=teigland@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox