From: Alexander Aring <aahringo@redhat.com>
To: teigland@redhat.com
Cc: gfs2@lists.linux.dev, aahringo@redhat.com
Subject: [PATCHv4 dlm/next 09/15] dlm: convert ls_waiters_mutex to spinlock
Date: Tue, 2 Apr 2024 15:18:04 -0400 [thread overview]
Message-ID: <20240402191810.1932939-10-aahringo@redhat.com> (raw)
In-Reply-To: <20240402191810.1932939-1-aahringo@redhat.com>
This patch converts the per dlm lockspace waiters lock from a mutex to a
spinlock, to prepare for switching to a softirq context when parsing DLM
messages.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
fs/dlm/debug_fs.c | 4 ++--
fs/dlm/dlm_internal.h | 2 +-
fs/dlm/lock.c | 20 ++++++++++----------
fs/dlm/lockspace.c | 2 +-
4 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index c238a9308323..487dcf05d076 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -743,7 +743,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
goto out;
}
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
memset(debug_buf, 0, sizeof(debug_buf));
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
@@ -754,7 +754,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
break;
pos += ret;
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
dlm_unlock_recovery(ls);
rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index e03a379832d5..98029fd5cd2b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -595,7 +595,7 @@ struct dlm_ls {
struct dlm_rsbtable *ls_rsbtbl;
uint32_t ls_rsbtbl_size;
- struct mutex ls_waiters_mutex;
+ spinlock_t ls_waiters_lock;
struct list_head ls_waiters; /* lkbs needing a reply */
struct mutex ls_orphans_mutex;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 395b904a82f4..af677add4f5f 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1406,7 +1406,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error = 0;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
if (is_overlap_unlock(lkb) ||
(is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
@@ -1449,7 +1449,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, dlm_iflags_val(lkb), mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return error;
}
@@ -1549,9 +1549,9 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
error = _remove_from_waiters(lkb, mstype, NULL);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return error;
}
@@ -1569,13 +1569,13 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb,
int error;
if (!local)
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
else
WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) ||
!dlm_locking_stopped(ls));
error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
if (!local)
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return error;
}
@@ -4993,7 +4993,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
struct dlm_lkb *lkb = NULL, *iter;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
hold_lkb(iter);
@@ -5001,7 +5001,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
break;
}
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return lkb;
}
@@ -5101,9 +5101,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
}
/* Forcibly remove from waiters list */
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
list_del_init(&lkb->lkb_wait_reply);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
/*
* The lkb is now clear of all prior waiters state and can be
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index af7769f8e38c..945139805605 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -515,7 +515,7 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock_init(&ls->ls_lkbidr_spin);
INIT_LIST_HEAD(&ls->ls_waiters);
- mutex_init(&ls->ls_waiters_mutex);
+ spin_lock_init(&ls->ls_waiters_lock);
INIT_LIST_HEAD(&ls->ls_orphans);
mutex_init(&ls->ls_orphans_mutex);
--
2.43.0
next prev parent reply other threads:[~2024-04-02 19:18 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-02 19:17 [PATCHv4 dlm/next 00/15] dlm: bring message parsing to softirq context Alexander Aring
2024-04-02 19:17 ` [PATCHv4 dlm/next 01/15] dlm: Simplify the allocation of slab caches in dlm_lowcomms_msg_cache_create Alexander Aring
2024-04-02 19:17 ` [PATCHv4 dlm/next 02/15] dlm: remove allocation parameter in msg allocation Alexander Aring
2024-04-02 19:17 ` [PATCHv4 dlm/next 03/15] dlm: switch to GFP_ATOMIC in dlm allocations Alexander Aring
2024-04-02 19:17 ` [PATCHv4 dlm/next 04/15] dlm: move root_list functionality to recover.c Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 05/15] dlm: move master dir dump to own list Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 06/15] dlm: move root_list to ls_recover() stack Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 07/15] dlm: implement directory dump context Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 08/15] dlm: drop holding waiters mutex in waiters recovery Alexander Aring
2024-04-02 19:18 ` Alexander Aring [this message]
2024-04-02 19:18 ` [PATCHv4 dlm/next 10/15] dlm: convert res_lock to spinlock Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 11/15] dlm: make requestqueue handling non sleepable Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 12/15] dlm: ls_recv_active semaphore to rwlock Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 13/15] dlm: remove schedule in dlm receive path Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 14/15] dlm: convert message parsing locks to disable bh Alexander Aring
2024-04-02 19:18 ` [PATCHv4 dlm/next 15/15] dlm: do dlm message processing in softirq context Alexander Aring
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240402191810.1932939-10-aahringo@redhat.com \
--to=aahringo@redhat.com \
--cc=gfs2@lists.linux.dev \
--cc=teigland@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox