From: Alexander Aring <aahringo@redhat.com>
To: teigland@redhat.com
Cc: cluster-devel@redhat.com, gfs2@lists.linux.dev
Subject: [Cluster-devel] [RFC dlm/next 08/10] fs: dlm: ls_recv_active semaphore to rwlock
Date: Fri, 8 Sep 2023 16:46:09 -0400
Message-ID: <20230908204611.1910601-8-aahringo@redhat.com>
In-Reply-To: <20230908204611.1910601-1-aahringo@redhat.com>
This patch converts the ls_recv_active rw_semaphore to a rwlock so that
we no longer sleep during dlm message processing. A rwlock is a spinning
lock, so the critical sections it protects must not sleep; the previous
patches in this series made the message processing path non-sleepable.
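For reference, a minimal sketch of the reader-side pattern before and
after the conversion (illustration only, not part of this patch; the
comments stand in for the real processing code in dlm_receive_buffer()):

	/* before: rw_semaphore readers may sleep while holding the lock */
	down_read(&ls->ls_recv_active);
	/* dlm_receive_message()/dlm_receive_rcom() could block here */
	up_read(&ls->ls_recv_active);

	/* after: rwlock_t readers spin and hold the lock in atomic
	 * context, so the same section must not sleep
	 */
	read_lock(&ls->ls_recv_active);
	/* dlm_receive_message()/dlm_receive_rcom() must not block here */
	read_unlock(&ls->ls_recv_active);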
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/dlm_internal.h | 2 +-
 fs/dlm/lock.c         | 4 ++--
 fs/dlm/lockspace.c    | 2 +-
 fs/dlm/member.c       | 4 ++--
 fs/dlm/recoverd.c     | 4 ++--
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 9106e20e6c20..6a1b2c806f72 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -622,7 +622,7 @@ struct dlm_ls {
 	uint64_t		ls_recover_seq;
 	struct dlm_recover	*ls_recover_args;
 	struct rw_semaphore	ls_in_recovery;	/* block local requests */
-	struct rw_semaphore	ls_recv_active;	/* block dlm_recv */
+	rwlock_t		ls_recv_active;	/* block dlm_recv */
 	struct list_head	ls_requestqueue;/* queue remote requests */
 	rwlock_t		ls_requestqueue_lock;
 	struct dlm_rcom		*ls_recover_buf;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 1031f233a3ad..dccc0b888ca1 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -4831,7 +4831,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
 	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
 	   be inactive (in this ls) before transitioning to recovery mode */
 
-	down_read(&ls->ls_recv_active);
+	read_lock(&ls->ls_recv_active);
 	if (hd->h_cmd == DLM_MSG)
 		dlm_receive_message(ls, &p->message, nodeid);
 	else if (hd->h_cmd == DLM_RCOM)
@@ -4839,7 +4839,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
 	else
 		log_error(ls, "invalid h_cmd %d from %d lockspace %x",
 			  hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
-	up_read(&ls->ls_recv_active);
+	read_unlock(&ls->ls_recv_active);
 
 	dlm_put_lockspace(ls);
 }
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 265d69752b90..e35ea06200b5 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -552,7 +552,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	ls->ls_recover_seq = get_random_u64();
 	ls->ls_recover_args = NULL;
 	init_rwsem(&ls->ls_in_recovery);
-	init_rwsem(&ls->ls_recv_active);
+	rwlock_init(&ls->ls_recv_active);
 	INIT_LIST_HEAD(&ls->ls_requestqueue);
 	rwlock_init(&ls->ls_requestqueue_lock);
 	spin_lock_init(&ls->ls_clear_proc_locks);
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 707cebcdc533..ac1b555af9d6 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -630,7 +630,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
 	 * message to the requestqueue without races.
 	 */
 
-	down_write(&ls->ls_recv_active);
+	write_lock(&ls->ls_recv_active);
 
 	/*
 	 * Abort any recovery that's in progress (see RECOVER_STOP,
@@ -654,7 +654,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
 	 * requestqueue for later.
 	 */
 
-	up_write(&ls->ls_recv_active);
+	write_unlock(&ls->ls_recv_active);
 
 	/*
 	 * This in_recovery lock does two things:
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 4d17491dea2f..c47bcc8be398 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -32,7 +32,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
 {
 	int error = -EINTR;
 
-	down_write(&ls->ls_recv_active);
+	write_lock(&ls->ls_recv_active);
 
 	spin_lock(&ls->ls_recover_lock);
 	if (ls->ls_recover_seq == seq) {
@@ -44,7 +44,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
 	}
 	spin_unlock(&ls->ls_recover_lock);
 
-	up_write(&ls->ls_recv_active);
+	write_unlock(&ls->ls_recv_active);
 
 	return error;
 }
--
2.31.1