linux-lvm.redhat.com archive mirror
 help / color / mirror / Atom feed
From: "Xinwei Hu" <hxinwei@gmail.com>
To: LVM general discussion and development <linux-lvm@redhat.com>
Subject: Re: [linux-lvm] clvmd on openais
Date: Wed, 23 Apr 2008 16:10:33 +0800	[thread overview]
Message-ID: <1cafab770804230110n4370eb37ga0dc41ee255ecffb@mail.gmail.com> (raw)
In-Reply-To: <480EE876.6050400@redhat.com>

[-- Attachment #1: Type: text/plain, Size: 776 bytes --]

2008/4/23, Christine Caulfield <ccaulfie@redhat.com>:
> Xinwei Hu wrote:
>  > Hi all,
>  >   clvmd-openais.c uses saLckResourceLockAsync and
>  > saLckResourceUnlockAsync. But they then pthread_cond_wait the lock
>  > operation to finished.
>  >
>  >   Since we have to wait, why not switch to use saLckResourceLock and
>  > saLckResourceUnlock directly ? Are there any reasons behind this ?
>  >
>
>
> It's mainly because the code was copied and edited from the other
>  cluster systems, I think :-)
Then I propose this patch to switch over. ;)
>  Chrissie
>
>  _______________________________________________
>  linux-lvm mailing list
>  linux-lvm@redhat.com
>  https://www.redhat.com/mailman/listinfo/linux-lvm
>  read the LVM HOW-TO at http://tldp.org/HOWTO/LVM-HOWTO/
>

[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: clvmd-openais.diff --]
[-- Type: text/x-patch; name=clvmd-openais.diff, Size: 4899 bytes --]

--- clvmd-openais.c.orig	2008-04-30 01:35:27.000000000 -0400
+++ clvmd-openais.c	2008-04-30 01:57:10.000000000 -0400
@@ -50,11 +50,6 @@
 /* Timeout value for several openais calls */
 #define TIMEOUT 10
 
-static void lck_lock_callback(SaInvocationT invocation,
-			      SaLckLockStatusT lockStatus,
-			      SaAisErrorT error);
-static void lck_unlock_callback(SaInvocationT invocation,
-				SaAisErrorT error);
 static void cpg_deliver_callback (cpg_handle_t handle,
 				  struct cpg_name *groupName,
 				  uint32_t nodeid,
@@ -92,11 +87,6 @@
 	.cpg_confchg_fn =            cpg_confchg_callback,
 };
 
-SaLckCallbacksT lck_callbacks = {
-        .saLckLockGrantCallback      = lck_lock_callback,
-        .saLckResourceUnlockCallback = lck_unlock_callback
-};
-
 struct node_info
 {
 	enum {NODE_UNKNOWN, NODE_DOWN, NODE_UP, NODE_CLVMD} state;
@@ -305,40 +295,6 @@
 	num_nodes = joined_list_entries;
 }
 
-static void lck_lock_callback(SaInvocationT invocation,
-			      SaLckLockStatusT lockStatus,
-			      SaAisErrorT error)
-{
-	struct lock_wait *lwait = (struct lock_wait *)(long)invocation;
-
-	DEBUGLOG("lck_lock_callback, error = %d\n", error);
-
-	lwait->status = error;
-	pthread_mutex_lock(&lwait->mutex);
-	pthread_cond_signal(&lwait->cond);
-	pthread_mutex_unlock(&lwait->mutex);
-}
-
-static void lck_unlock_callback(SaInvocationT invocation,
-				SaAisErrorT error)
-{
-	struct lock_wait *lwait = (struct lock_wait *)(long)invocation;
-
-	DEBUGLOG("lck_unlock_callback\n");
-
-	lwait->status = SA_AIS_OK;
-	//pthread_mutex_lock(&lwait->mutex);
-	if (pthread_mutex_trylock(&lwait->mutex) == EBUSY) {
-		DEBUGLOG("lck_unlock_callback EBUSY\n");
-		pthread_mutex_lock(&lwait->mutex);
-		//return;
-	}
-	DEBUGLOG("lck_unlock_callback Lock\n");
-	pthread_cond_signal(&lwait->cond);
-	DEBUGLOG("lck_unlock_callback Signal\n");
-	pthread_mutex_unlock(&lwait->mutex);
-	DEBUGLOG("lck_unlock_callback Unlock\n");
-}
 
 static int lck_dispatch(struct local_client *client, char *buf, int len,
 			const char *csid, struct local_client **new_client)
@@ -367,7 +323,7 @@
 	}
 
 	err = saLckInitialize(&lck_handle,
-			      &lck_callbacks,
+					NULL,
 			      &ver);
 	if (err != SA_AIS_OK) {
 		cpg_initialize(&cpg_handle, &cpg_callbacks);
@@ -503,15 +459,11 @@
 /* Real locking */
 static int _lock_resource(char *resource, int mode, int flags, int *lockid)
 {
-	struct lock_wait lwait;
 	struct lock_info *linfo;
 	SaLckResourceHandleT res_handle;
 	SaAisErrorT err;
 	SaLckLockIdT lock_id;
-
-	pthread_cond_init(&lwait.cond, NULL);
-	pthread_mutex_init(&lwait.mutex, NULL);
-	pthread_mutex_lock(&lwait.mutex);
+	SaLckLockStatusT lockStatus;
 
 	/* This needs to be converted from DLM/LVM2 value for OpenAIS LCK */
 	if (flags & LCK_NONBLOCK) flags = SA_LCK_LOCK_NO_QUEUE;
@@ -534,24 +486,24 @@
 		return ais_to_errno(err);
 	}
 
-	err = saLckResourceLockAsync(res_handle,
-				     (SaInvocationT)(long)&lwait,
-				     &lock_id,
-				     mode,
-				     flags,
-				     0);
-	if (err != SA_AIS_OK)
+	err = saLckResourceLock(
+			res_handle,
+			&lock_id,
+			mode,
+			flags,
+			0,
+			SA_TIME_END,
+			&lockStatus);
+	if (err != SA_AIS_OK && lockStatus != SA_LCK_LOCK_GRANTED)
 	{
 		free(linfo);
 		saLckResourceClose(res_handle);
 		return ais_to_errno(err);
 	}
-
+			
 	/* Wait for it to complete */
-	pthread_cond_wait(&lwait.cond, &lwait.mutex);
-	pthread_mutex_unlock(&lwait.mutex);
 
-	DEBUGLOG("lock_resource returning %d, lock_id=%llx\n", lwait.status,
+	DEBUGLOG("lock_resource returning %d, lock_id=%llx\n", err,
 		 lock_id);
 
 	linfo->lock_id = lock_id;
@@ -559,20 +511,15 @@
 
 	dm_hash_insert(lock_hash, resource, linfo);
 
-	return ais_to_errno(lwait.status);
+	return ais_to_errno(err);
 }
 
 
 static int _unlock_resource(char *resource, int lockid)
 {
-	struct lock_wait lwait;
 	SaAisErrorT err;
 	struct lock_info *linfo;
 
-	pthread_cond_init(&lwait.cond, NULL);
-	pthread_mutex_init(&lwait.mutex, NULL);
-	pthread_mutex_lock(&lwait.mutex);
-
 	DEBUGLOG("unlock_resource %s\n", resource);
 	linfo = dm_hash_lookup(lock_hash, resource);
 	if (!linfo)
@@ -580,8 +527,7 @@
 
 	DEBUGLOG("unlock_resource: lockid: %llx\n", linfo->lock_id);
 	DEBUGLOG("unlock_resource: lockid: %llx\n", lockid);
-	err = saLckResourceUnlockAsync((SaInvocationT)(long)&lwait, linfo->lock_id);
+	err = saLckResourceUnlock(linfo->lock_id, SA_TIME_END);
 	if (err != SA_AIS_OK)
 	{
 		DEBUGLOG("Unlock returned %d\n", err);
@@ -590,15 +536,13 @@
 
 	DEBUGLOG("Unlock in progress\n");
 	/* Wait for it to complete */
-	pthread_cond_wait(&lwait.cond, &lwait.mutex);
-	pthread_mutex_unlock(&lwait.mutex);
 
 	/* Release the resource */
 	dm_hash_remove(lock_hash, resource);
 	saLckResourceClose(linfo->res_handle);
 	free(linfo);
 
-	return ais_to_errno(lwait.status);
+	return ais_to_errno(err);
 }
 
 static int _sync_lock(const char *resource, int mode, int flags, int *lockid)

  reply	other threads:[~2008-04-23  8:23 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-04-23  7:25 [linux-lvm] clvmd on openais Xinwei Hu
2008-04-23  7:42 ` Christine Caulfield
2008-04-23  8:10   ` Xinwei Hu [this message]
2008-04-23  9:12     ` Christine Caulfield
2008-04-23  9:25       ` Xinwei Hu
2008-04-23  9:54         ` Christine Caulfield

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1cafab770804230110n4370eb37ga0dc41ee255ecffb@mail.gmail.com \
    --to=hxinwei@gmail.com \
    --cc=linux-lvm@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).