cluster-devel.redhat.com archive mirror
 help / color / mirror / Atom feed
* [Cluster-devel] [PATCH dlm-next] fs: dlm: wait for recovery when acquire plock
@ 2020-06-25 19:40 Alexander Aring
  0 siblings, 0 replies; only message in thread
From: Alexander Aring @ 2020-06-25 19:40 UTC (permalink / raw)
  To: cluster-devel.redhat.com

This patch makes plock acquisition wait for recovery to complete, as
indicated by the clearing of the LSFL_RECOVER_LOCK flag of the recovery
state machine. This behaviour is disabled by default and can be
activated via the waitplock_recovery cluster attribute.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/config.c       |  6 ++++++
 fs/dlm/config.h       |  1 +
 fs/dlm/dlm_internal.h |  1 +
 fs/dlm/lockspace.c    |  1 +
 fs/dlm/plock.c        | 20 ++++++++++++++++++++
 fs/dlm/recoverd.c     |  1 +
 6 files changed, 30 insertions(+)

diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 3b21082e1b550..aee1be86adbdc 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -75,6 +75,7 @@ struct dlm_cluster {
 	unsigned int cl_protocol;
 	unsigned int cl_timewarn_cs;
 	unsigned int cl_waitwarn_us;
+	unsigned int cl_waitplock_recovery;
 	unsigned int cl_new_rsb_count;
 	unsigned int cl_recover_callbacks;
 	char cl_cluster_name[DLM_LOCKSPACE_LEN];
@@ -98,6 +99,7 @@ enum {
 	CLUSTER_ATTR_PROTOCOL,
 	CLUSTER_ATTR_TIMEWARN_CS,
 	CLUSTER_ATTR_WAITWARN_US,
+	CLUSTER_ATTR_WAITPLOCK_RECOVERY,
 	CLUSTER_ATTR_NEW_RSB_COUNT,
 	CLUSTER_ATTR_RECOVER_CALLBACKS,
 	CLUSTER_ATTR_CLUSTER_NAME,
@@ -170,6 +172,7 @@ CLUSTER_ATTR(log_info, 0);
 CLUSTER_ATTR(protocol, 0);
 CLUSTER_ATTR(timewarn_cs, 1);
 CLUSTER_ATTR(waitwarn_us, 0);
+CLUSTER_ATTR(waitplock_recovery, 0);
 CLUSTER_ATTR(new_rsb_count, 0);
 CLUSTER_ATTR(recover_callbacks, 0);
 
@@ -185,6 +188,7 @@ static struct configfs_attribute *cluster_attrs[] = {
 	[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol,
 	[CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs,
 	[CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us,
+	[CLUSTER_ATTR_WAITPLOCK_RECOVERY] = &cluster_attr_waitplock_recovery,
 	[CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count,
 	[CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks,
 	[CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name,
@@ -857,6 +861,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
 #define DEFAULT_PROTOCOL           0
 #define DEFAULT_TIMEWARN_CS      500 /* 5 sec = 500 centiseconds */
 #define DEFAULT_WAITWARN_US	   0
+#define DEFAULT_WAITPLOCK_RECOVERY 0
 #define DEFAULT_NEW_RSB_COUNT    128
 #define DEFAULT_RECOVER_CALLBACKS  0
 #define DEFAULT_CLUSTER_NAME      ""
@@ -873,6 +878,7 @@ struct dlm_config_info dlm_config = {
 	.ci_protocol = DEFAULT_PROTOCOL,
 	.ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
 	.ci_waitwarn_us = DEFAULT_WAITWARN_US,
+	.ci_waitplock_recovery = DEFAULT_WAITPLOCK_RECOVERY,
 	.ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
 	.ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
 	.ci_cluster_name = DEFAULT_CLUSTER_NAME
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 2b471aae4e61c..0cf824367668c 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -33,6 +33,7 @@ struct dlm_config_info {
 	int ci_protocol;
 	int ci_timewarn_cs;
 	int ci_waitwarn_us;
+	int ci_waitplock_recovery;
 	int ci_new_rsb_count;
 	int ci_recover_callbacks;
 	char ci_cluster_name[DLM_LOCKSPACE_LEN];
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 04fe9f525ac78..4f94171d76f17 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -615,6 +615,7 @@ struct dlm_ls {
 	spinlock_t		ls_recover_idr_lock;
 	wait_queue_head_t	ls_wait_general;
 	wait_queue_head_t	ls_recover_lock_wait;
+	wait_queue_head_t	ls_posix_lock_wait;
 	struct mutex		ls_clear_proc_locks;
 
 	struct list_head	ls_root_list;	/* root resources */
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index e93670ecfae5b..4c5ebb5b7c672 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -605,6 +605,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	}
 
 	init_waitqueue_head(&ls->ls_recover_lock_wait);
+	init_waitqueue_head(&ls->ls_posix_lock_wait);
 
 	/*
 	 * Once started, dlm_recoverd first looks for ls in lslist, then
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index c38b2b8ffd1d3..8803093a9f3c2 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -153,6 +153,26 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 			do_unlock_close(ls, number, file, fl);
 			goto out;
 		}
+
+		/* due backward compatible we do this
+		 * behaviour only if enabled
+		 */
+		if (dlm_config.ci_waitplock_recovery) {
+			/* wait for recovery if recovery is running */
+			rv = wait_event_interruptible(ls->ls_posix_lock_wait,
+						      !test_bit(LSFL_RECOVER_LOCK,
+								&ls->ls_flags));
+			if (rv == -ERESTARTSYS) {
+				log_debug(ls, "%s: wait for recovery killed %llx",
+					  __func__, (unsigned long long)number);
+				spin_lock(&ops_lock);
+				list_del(&op->list);
+				spin_unlock(&ops_lock);
+				kfree(xop);
+				do_unlock_close(ls, number, file, fl);
+				goto out;
+			}
+		}
 	} else {
 		rv = FILE_LOCK_DEFERRED;
 		goto out;
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 85e2453927153..c2fed5e78121d 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -40,6 +40,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
 		/* unblocks processes waiting to enter the dlm */
 		up_write(&ls->ls_in_recovery);
 		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
+		wake_up(&ls->ls_posix_lock_wait);
 		error = 0;
 	}
 	spin_unlock(&ls->ls_recover_lock);
-- 
2.26.2



^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2020-06-25 19:40 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2020-06-25 19:40 [Cluster-devel] [PATCH dlm-next] fs: dlm: wait for recovery when acquire plock Alexander Aring

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).