From: Alexander Aring <aahringo@redhat.com>
To: teigland@redhat.com
Cc: cluster-devel@redhat.com, gfs2@lists.linux.dev
Subject: [Cluster-devel] [RFC dlm/next 07/10] fs: dlm: ls_root_lock semaphore to rwlock
Date: Fri,  8 Sep 2023 16:46:08 -0400
Message-ID: <20230908204611.1910601-7-aahringo@redhat.com>
In-Reply-To: <20230908204611.1910601-1-aahringo@redhat.com>

This patch converts the ls_root_sem rw_semaphore to a rwlock, renamed to
ls_root_lock, so that taking it no longer sleeps during dlm message
processing.
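
As a sketch of the mechanical pattern (illustration only, not part of the
diff below), the read-side conversion looks like this:

	/* before: rw_semaphore readers can sleep while waiting */
	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list)
		/* walk root list */;
	up_read(&ls->ls_root_sem);

	/* after: rwlock_t readers spin instead of sleeping, so the
	 * section is usable from non-sleepable processing context */
	read_lock(&ls->ls_root_lock);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list)
		/* walk root list */;
	read_unlock(&ls->ls_root_lock);

Note that rwlock critical sections must not sleep themselves; earlier
patches in this series (e.g. switching dlm allocations to GFP_ATOMIC and
removing explicit scheduling points) prepare for that.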

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/dir.c          | 14 +++++++-------
 fs/dlm/dlm_internal.h |  2 +-
 fs/dlm/lock.c         |  4 ++--
 fs/dlm/lockspace.c    |  2 +-
 fs/dlm/recover.c      | 28 ++++++++++++++--------------
 5 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index f6acba4310a7..c70e286f3dbc 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -51,11 +51,11 @@ void dlm_recover_dir_nodeid(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r;
 
-	down_read(&ls->ls_root_sem);
+	read_lock(&ls->ls_root_lock);
 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
 		r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash);
 	}
-	up_read(&ls->ls_root_sem);
+	read_unlock(&ls->ls_root_lock);
 }
 
 int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq)
@@ -216,16 +216,16 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name,
 	if (!rv)
 		return r;
 
-	down_read(&ls->ls_root_sem);
+	read_lock(&ls->ls_root_lock);
 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
 		if (len == r->res_length && !memcmp(name, r->res_name, len)) {
-			up_read(&ls->ls_root_sem);
+			read_unlock(&ls->ls_root_lock);
 			log_debug(ls, "find_rsb_root revert to root_list %s",
 				  r->res_name);
 			return r;
 		}
 	}
-	up_read(&ls->ls_root_sem);
+	read_unlock(&ls->ls_root_lock);
 	return NULL;
 }
 
@@ -241,7 +241,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
 	int offset = 0, dir_nodeid;
 	__be16 be_namelen;
 
-	down_read(&ls->ls_root_sem);
+	read_lock(&ls->ls_root_lock);
 
 	if (inlen > 1) {
 		r = find_rsb_root(ls, inbuf, inlen);
@@ -302,6 +302,6 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
 		ls->ls_recover_dir_sent_msg++;
 	}
  out:
-	up_read(&ls->ls_root_sem);
+	read_unlock(&ls->ls_root_lock);
 }
 
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 65db6f834f04..9106e20e6c20 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -642,7 +642,7 @@ struct dlm_ls {
 	spinlock_t		ls_clear_proc_locks;
 
 	struct list_head	ls_root_list;	/* root resources */
-	struct rw_semaphore	ls_root_sem;	/* protect root_list */
+	rwlock_t		ls_root_lock;	/* protect root_list */
 
 	const struct dlm_lockspace_ops *ls_ops;
 	void			*ls_ops_arg;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 989603643c03..1031f233a3ad 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -5216,7 +5216,7 @@ void dlm_recover_purge(struct dlm_ls *ls)
 	if (!nodes_count)
 		return;
 
-	down_write(&ls->ls_root_sem);
+	write_lock(&ls->ls_root_lock);
 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
 		hold_rsb(r);
 		lock_rsb(r);
@@ -5231,7 +5231,7 @@ void dlm_recover_purge(struct dlm_ls *ls)
 		unlock_rsb(r);
 		unhold_rsb(r);
 	}
-	up_write(&ls->ls_root_sem);
+	write_unlock(&ls->ls_root_lock);
 
 	if (lkb_count)
 		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index b2cb0621031f..265d69752b90 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -579,7 +579,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	ls->ls_local_handle = ls;
 	init_waitqueue_head(&ls->ls_wait_general);
 	INIT_LIST_HEAD(&ls->ls_root_list);
-	init_rwsem(&ls->ls_root_sem);
+	rwlock_init(&ls->ls_root_lock);
 
 	spin_lock(&lslist_lock);
 	ls->ls_create_count = 1;
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 752002304ca9..0d5b0f94eb46 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -529,10 +529,10 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
 
 	log_rinfo(ls, "dlm_recover_masters");
 
-	down_read(&ls->ls_root_sem);
+	read_lock(&ls->ls_root_lock);
 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
 		if (dlm_recovery_stopped(ls)) {
-			up_read(&ls->ls_root_sem);
+			read_unlock(&ls->ls_root_lock);
 			error = -EINTR;
 			goto out;
 		}
@@ -546,11 +546,11 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
 		total++;
 
 		if (error) {
-			up_read(&ls->ls_root_sem);
+			read_unlock(&ls->ls_root_lock);
 			goto out;
 		}
 	}
-	up_read(&ls->ls_root_sem);
+	read_unlock(&ls->ls_root_lock);
 
 	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
 
@@ -660,7 +660,7 @@ int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq)
 	struct dlm_rsb *r;
 	int error, count = 0;
 
-	down_read(&ls->ls_root_sem);
+	read_lock(&ls->ls_root_lock);
 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
 		if (is_master(r)) {
 			rsb_clear_flag(r, RSB_NEW_MASTER);
@@ -672,19 +672,19 @@ int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq)
 
 		if (dlm_recovery_stopped(ls)) {
 			error = -EINTR;
-			up_read(&ls->ls_root_sem);
+			read_unlock(&ls->ls_root_lock);
 			goto out;
 		}
 
 		error = recover_locks(r, seq);
 		if (error) {
-			up_read(&ls->ls_root_sem);
+			read_unlock(&ls->ls_root_lock);
 			goto out;
 		}
 
 		count += r->res_recover_locks_count;
 	}
-	up_read(&ls->ls_root_sem);
+	read_unlock(&ls->ls_root_lock);
 
 	log_rinfo(ls, "dlm_recover_locks %d out", count);
 
@@ -858,7 +858,7 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
 	struct dlm_rsb *r;
 	unsigned int count = 0;
 
-	down_read(&ls->ls_root_sem);
+	read_lock(&ls->ls_root_lock);
 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
 		lock_rsb(r);
 		if (is_master(r)) {
@@ -880,7 +880,7 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
 		rsb_clear_flag(r, RSB_NEW_MASTER2);
 		unlock_rsb(r);
 	}
-	up_read(&ls->ls_root_sem);
+	read_unlock(&ls->ls_root_lock);
 
 	if (count)
 		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
@@ -894,7 +894,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
 	struct dlm_rsb *r;
 	int i, error = 0;
 
-	down_write(&ls->ls_root_sem);
+	write_lock(&ls->ls_root_lock);
 	if (!list_empty(&ls->ls_root_list)) {
 		log_error(ls, "root list not empty");
 		error = -EINVAL;
@@ -914,7 +914,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
 		spin_unlock(&ls->ls_rsbtbl[i].lock);
 	}
  out:
-	up_write(&ls->ls_root_sem);
+	write_unlock(&ls->ls_root_lock);
 	return error;
 }
 
@@ -922,12 +922,12 @@ void dlm_release_root_list(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r, *safe;
 
-	down_write(&ls->ls_root_sem);
+	write_lock(&ls->ls_root_lock);
 	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
 		list_del_init(&r->res_root_list);
 		dlm_put_rsb(r);
 	}
-	up_write(&ls->ls_root_sem);
+	write_unlock(&ls->ls_root_lock);
 }
 
 void dlm_clear_toss(struct dlm_ls *ls)
-- 
2.31.1



Thread overview: 10+ messages
2023-09-08 20:46 [Cluster-devel] [RFC dlm/next 01/10] fs: dlm: remove allocation parameter in msg allocation Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 02/10] fs: dlm: switch to GFP_ATOMIC in dlm allocations Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 03/10] fs: dlm: remove explicit scheduling points Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 04/10] fs: dlm: convert ls_waiters_mutex to spinlock Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 05/10] fs: dlm: convert res_lock to spinlock Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 06/10] fs: dlm: make requestqueue handling non sleepable Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 07/10] fs: dlm: ls_root_lock semaphore to rwlock Alexander Aring [this message]
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 08/10] fs: dlm: ls_recv_active semaphore to rwlock Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 09/10] fs: dlm: convert message parsing locks to disable bh Alexander Aring
2023-09-08 20:46 ` [Cluster-devel] [RFC dlm/next 10/10] fs: dlm: do dlm message processing in softirq context Alexander Aring
