From: "Xinwei Hu" <hxinwei@gmail.com>
To: LVM general discussion and development <linux-lvm@redhat.com>
Subject: Re: [linux-lvm] clvmd on openais
Date: Wed, 23 Apr 2008 17:25:17 +0800 [thread overview]
Message-ID: <1cafab770804230225l6dabdd65na69df87466a81ed6@mail.gmail.com> (raw)
In-Reply-To: <480EFD72.5010809@redhat.com>
[-- Attachment #1: Type: text/plain, Size: 1300 bytes --]
2008/4/23, Christine Caulfield <ccaulfie@redhat.com>:
> Xinwei Hu wrote:
> > 2008/4/23, Christine Caulfield <ccaulfie@redhat.com>:
> >> Xinwei Hu wrote:
> >> > Hi all,
> >> > clvmd-openais.c uses saLckResourceLockAsync and
> >> > saLckResourceUnlockAsync. But they then use pthread_cond_wait to wait
> >> > for the lock operation to finish.
> >> >
> >> > Since we have to wait, why not switch to use saLckResourceLock and
> >> > saLckResourceUnlock directly ? Are there any reasons behind this ?
> >> >
> >>
> >>
> >> It's mainly because the code was copied and edited from the other
> >> cluster systems, I think :-)
> > Then I propose this patch to switch over. ;)
>
>
>
> Thanks, but that patch seems to have got corrupted or something, it
> doesn't apply to head of CVS. can you send it again please ?
>
>
> Hunk #3 FAILED at 295.
> Hunk #4 succeeded at 315 (offset -8 lines).
> Hunk #6 succeeded at 478 (offset -8 lines).
> patch: **** malformed patch at line 161: @@ -590,15 +536,13 @@
>
Take 2.
Against CVS head now. Please help to review ;)
>
> Chrissie
>
> _______________________________________________
> linux-lvm mailing list
> linux-lvm@redhat.com
> https://www.redhat.com/mailman/listinfo/linux-lvm
> read the LVM HOW-TO at http://tldp.org/HOWTO/LVM-HOWTO/
>
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: clvmd-openais.patch --]
[-- Type: text/x-patch; name=clvmd-openais.patch, Size: 4540 bytes --]
--- LVM2/daemons/clvmd/clvmd-openais.c 2007-07-11 20:07:39.000000000 +0800
+++ clvmd-openais.c 2008-04-23 17:25:30.000000000 +0800
@@ -50,11 +50,6 @@
/* Timeout value for several openais calls */
#define TIMEOUT 10
-static void lck_lock_callback(SaInvocationT invocation,
- SaLckLockStatusT lockStatus,
- SaAisErrorT error);
-static void lck_unlock_callback(SaInvocationT invocation,
- SaAisErrorT error);
static void cpg_deliver_callback (cpg_handle_t handle,
struct cpg_name *groupName,
uint32_t nodeid,
@@ -92,11 +87,6 @@
.cpg_confchg_fn = cpg_confchg_callback,
};
-SaLckCallbacksT lck_callbacks = {
- .saLckLockGrantCallback = lck_lock_callback,
- .saLckResourceUnlockCallback = lck_unlock_callback
-};
-
struct node_info
{
enum {NODE_UNKNOWN, NODE_DOWN, NODE_UP, NODE_CLVMD} state;
@@ -305,32 +295,6 @@
num_nodes = joined_list_entries;
}
-static void lck_lock_callback(SaInvocationT invocation,
- SaLckLockStatusT lockStatus,
- SaAisErrorT error)
-{
- struct lock_wait *lwait = (struct lock_wait *)(long)invocation;
-
- DEBUGLOG("lck_lock_callback, error = %d\n", error);
-
- lwait->status = error;
- pthread_mutex_lock(&lwait->mutex);
- pthread_cond_signal(&lwait->cond);
- pthread_mutex_unlock(&lwait->mutex);
-}
-
-static void lck_unlock_callback(SaInvocationT invocation,
- SaAisErrorT error)
-{
- struct lock_wait *lwait = (struct lock_wait *)(long)invocation;
-
- DEBUGLOG("lck_unlock_callback\n");
-
- lwait->status = SA_AIS_OK;
- pthread_mutex_lock(&lwait->mutex);
- pthread_cond_signal(&lwait->cond);
- pthread_mutex_unlock(&lwait->mutex);
-}
static int lck_dispatch(struct local_client *client, char *buf, int len,
const char *csid, struct local_client **new_client)
@@ -359,7 +323,7 @@
}
err = saLckInitialize(&lck_handle,
- &lck_callbacks,
+ NULL,
&ver);
if (err != SA_AIS_OK) {
cpg_initialize(&cpg_handle, &cpg_callbacks);
@@ -495,15 +459,11 @@
/* Real locking */
static int _lock_resource(char *resource, int mode, int flags, int *lockid)
{
- struct lock_wait lwait;
struct lock_info *linfo;
SaLckResourceHandleT res_handle;
SaAisErrorT err;
SaLckLockIdT lock_id;
-
- pthread_cond_init(&lwait.cond, NULL);
- pthread_mutex_init(&lwait.mutex, NULL);
- pthread_mutex_lock(&lwait.mutex);
+ SaLckLockStatusT lockStatus;
/* This needs to be converted from DLM/LVM2 value for OpenAIS LCK */
if (flags & LCK_NONBLOCK) flags = SA_LCK_LOCK_NO_QUEUE;
@@ -526,24 +486,24 @@
return ais_to_errno(err);
}
- err = saLckResourceLockAsync(res_handle,
- (SaInvocationT)(long)&lwait,
- &lock_id,
- mode,
- flags,
- 0);
- if (err != SA_AIS_OK)
+ err = saLckResourceLock(
+ res_handle,
+ &lock_id,
+ mode,
+ flags,
+ 0,
+ SA_TIME_END,
+ &lockStatus);
+ if (err != SA_AIS_OK && lockStatus != SA_LCK_LOCK_GRANTED)
{
free(linfo);
saLckResourceClose(res_handle);
return ais_to_errno(err);
}
-
+
/* Wait for it to complete */
- pthread_cond_wait(&lwait.cond, &lwait.mutex);
- pthread_mutex_unlock(&lwait.mutex);
- DEBUGLOG("lock_resource returning %d, lock_id=%llx\n", lwait.status,
+ DEBUGLOG("lock_resource returning %d, lock_id=%llx\n", err,
lock_id);
linfo->lock_id = lock_id;
@@ -551,43 +511,34 @@
dm_hash_insert(lock_hash, resource, linfo);
- return ais_to_errno(lwait.status);
+ return ais_to_errno(err);
}
static int _unlock_resource(char *resource, int lockid)
{
- struct lock_wait lwait;
SaAisErrorT err;
struct lock_info *linfo;
- pthread_cond_init(&lwait.cond, NULL);
- pthread_mutex_init(&lwait.mutex, NULL);
- pthread_mutex_lock(&lwait.mutex);
-
DEBUGLOG("unlock_resource %s\n", resource);
linfo = dm_hash_lookup(lock_hash, resource);
if (!linfo)
return 0;
DEBUGLOG("unlock_resource: lockid: %llx\n", linfo->lock_id);
- err = saLckResourceUnlockAsync((SaInvocationT)(long)&lwait, linfo->lock_id);
+ err = saLckResourceUnlock(linfo->lock_id, SA_TIME_END);
if (err != SA_AIS_OK)
{
DEBUGLOG("Unlock returned %d\n", err);
return ais_to_errno(err);
}
- /* Wait for it to complete */
- pthread_cond_wait(&lwait.cond, &lwait.mutex);
- pthread_mutex_unlock(&lwait.mutex);
-
/* Release the resource */
dm_hash_remove(lock_hash, resource);
saLckResourceClose(linfo->res_handle);
free(linfo);
- return ais_to_errno(lwait.status);
+ return ais_to_errno(err);
}
static int _sync_lock(const char *resource, int mode, int flags, int *lockid)
next prev parent reply other threads:[~2008-04-23 9:36 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-04-23 7:25 [linux-lvm] clvmd on openais Xinwei Hu
2008-04-23 7:42 ` Christine Caulfield
2008-04-23 8:10 ` Xinwei Hu
2008-04-23 9:12 ` Christine Caulfield
2008-04-23 9:25 ` Xinwei Hu [this message]
2008-04-23 9:54 ` Christine Caulfield
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1cafab770804230225l6dabdd65na69df87466a81ed6@mail.gmail.com \
--to=hxinwei@gmail.com \
--cc=linux-lvm@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).