cluster-devel.redhat.com archive mirror
 help / color / mirror / Atom feed
* [Cluster-devel] [PATCHv2 dlm-tool] fs: dlm: implement DLM_PLOCK_OP_CANCEL
@ 2023-07-18 18:31 Alexander Aring
  0 siblings, 0 replies; only message in thread
From: Alexander Aring @ 2023-07-18 18:31 UTC (permalink / raw)
  To: cluster-devel.redhat.com

This patch implements DLM_PLOCK_OP_CANCEL to try to delete waiters for a
lock request that are still waiting to be granted. If the waiter can be
deleted the reply is 0. If the waiter cannot be found, -ENOENT is
returned to the kernel, meaning either there never was a waiter or we
were too late to cancel the lock request.
---
changes since v2:

- drop the no reply flag
- always send a reply on DLM_PLOCK_OP_CANCEL
- return -ENOENT instead of 1 to indicate that cancellation failed

 dlm_controld/plock.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/dlm_controld/plock.c b/dlm_controld/plock.c
index a91aecb0..1944cd59 100644
--- a/dlm_controld/plock.c
+++ b/dlm_controld/plock.c
@@ -9,6 +9,10 @@
 #include "dlm_daemon.h"
 #include <linux/dlm_plock.h>
 
+#ifndef DLM_PLOCK_OP_CANCEL
+#define DLM_PLOCK_OP_CANCEL 4
+#endif
+
 static uint32_t plock_read_count;
 static uint32_t plock_recv_count;
 static uint32_t plock_rate_delays;
@@ -132,6 +136,8 @@ static const char *op_str(int optype)
 	switch (optype) {
 	case DLM_PLOCK_OP_LOCK:
 		return "LK";
+	case DLM_PLOCK_OP_CANCEL:
+		return "CL";
 	case DLM_PLOCK_OP_UNLOCK:
 		return "UN";
 	case DLM_PLOCK_OP_GET:
@@ -741,6 +747,40 @@ static void do_lock(struct lockspace *ls, struct dlm_plock_info *in,
 	put_resource(ls, r);
 }
 
+static int remove_waiter(const struct resource *r, const struct dlm_plock_info *in)
+{
+	struct lock_waiter *w;
+
+	list_for_each_entry(w, &r->waiters, list) {
+		if (w->info.nodeid == in->nodeid &&
+		    w->info.fsid == in->fsid &&
+		    w->info.number == in->number &&
+		    w->info.owner == in->owner &&
+		    w->info.pid == in->pid &&
+		    w->info.start == in->start &&
+		    w->info.end == in->end &&
+		    w->info.ex == in->ex) {
+			list_del(&w->list);
+			free(w);
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+static void do_cancel(struct lockspace *ls, struct dlm_plock_info *in,
+		      struct resource *r)
+{
+	int rv;
+
+	rv = remove_waiter(r, in);
+	if (in->nodeid == our_nodeid)
+		write_result(in, rv);
+
+	put_resource(ls, r);
+}
+
 static void do_unlock(struct lockspace *ls, struct dlm_plock_info *in,
 		      struct resource *r)
 {
@@ -806,6 +846,10 @@ static void __receive_plock(struct lockspace *ls, struct dlm_plock_info *in,
 		ls->last_plock_time = monotime();
 		do_lock(ls, in, r);
 		break;
+	case DLM_PLOCK_OP_CANCEL:
+		ls->last_plock_time = monotime();
+		do_cancel(ls, in, r);
+		break;
 	case DLM_PLOCK_OP_UNLOCK:
 		ls->last_plock_time = monotime();
 		do_unlock(ls, in, r);
-- 
2.31.1


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2023-07-18 18:31 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-07-18 18:31 [Cluster-devel] [PATCHv2 dlm-tool] fs: dlm: implement DLM_PLOCK_OP_CANCEL Alexander Aring

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).