From: lhh@sourceware.org <lhh@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] cluster/rgmanager ChangeLog src/daemons/groups ...
Date: 2 Jul 2007 15:13:43 -0000
Message-ID: <20070702151343.16897.qmail@sourceware.org>
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: lhh at sourceware.org 2007-07-02 15:13:43
Modified files:
rgmanager : ChangeLog
rgmanager/src/daemons: groups.c rg_state.c
Log message:
Fix #237144 - pass 2. All test cases accounted for now.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/ChangeLog.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.31.2.17&r2=1.31.2.18
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/groups.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25.2.7&r2=1.25.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.24.2.9&r2=1.24.2.10
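
For anyone skimming the diffs below: the "exclusive" attribute test that was
open-coded in several places in groups.c is now factored into a single helper,
is_exclusive_res(), plus a locked wrapper, is_exclusive(), that looks a service
up by name. The following standalone sketch shows just that predicate; the
resource_t and res_attr_value() here are simplified stand-ins rather than the
real rgmanager definitions, and only the condition itself mirrors the patch.

/*
 * Minimal sketch of the predicate this patch factors into
 * is_exclusive_res().  resource_t and res_attr_value() below are
 * simplified stand-ins for the real rgmanager types; only the
 * "yes" / positive-integer test mirrors the diff.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	const char *exclusive;	/* value of the "exclusive" attribute, or NULL */
} resource_t;

/* Stand-in for rgmanager's res_attr_value(res, "exclusive") */
static const char *
res_attr_value(resource_t *res, const char *attr)
{
	if (!strcmp(attr, "exclusive"))
		return res->exclusive;
	return NULL;
}

/* Same test as the patched is_exclusive_res(): "yes" or any integer > 0 */
static int
is_exclusive_res(resource_t *res)
{
	const char *val = res_attr_value(res, "exclusive");

	if (val && (!strcmp(val, "yes") || atoi(val) > 0))
		return 1;
	return 0;
}

int
main(void)
{
	resource_t a = { "yes" }, b = { "0" }, c = { NULL }, d = { "1" };

	printf("yes -> %d, 0 -> %d, unset -> %d, 1 -> %d\n",
	       is_exclusive_res(&a), is_exclusive_res(&b),
	       is_exclusive_res(&c), is_exclusive_res(&d));
	return 0;
}
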
--- cluster/rgmanager/ChangeLog 2007/06/27 14:03:20 1.31.2.17
+++ cluster/rgmanager/ChangeLog 2007/07/02 15:13:42 1.31.2.18
@@ -1,3 +1,7 @@
+2007-06-29 Lon Hohberger <lhh@redhat.com>
+ * src/daemons/groups.c, rg_state.c: Make all test cases for #237144
+ work correctly
+
2007-06-27 Lon Hohberger <lhh@redhat.com>
* include/resgroup.h: Make RG_STATUS_INQUIRY forwards-compatible
with -HEAD
--- cluster/rgmanager/src/daemons/groups.c 2007/06/26 21:55:46 1.25.2.7
+++ cluster/rgmanager/src/daemons/groups.c 2007/07/02 15:13:43 1.25.2.8
@@ -144,12 +144,46 @@
}
+static inline int
+is_exclusive_res(resource_t *res)
+{
+ char *val;
+
+ val = res_attr_value(res, "exclusive");
+ if (val && ((!strcmp(val, "yes") ||
+ (atoi(val)>0))) ) {
+ return 1;
+ }
+ return 0;
+}
+
+
+/* Locked exported function */
+int
+is_exclusive(char *svcName)
+{
+ int ret = 0;
+ resource_t *res = NULL;
+
+ pthread_rwlock_rdlock(&resource_lock);
+ res = find_root_by_ref(&_resources, svcName);
+
+ if (!res)
+ ret = RG_ENOSERVICE;
+ else
+ ret = is_exclusive_res(res);
+
+ pthread_rwlock_unlock(&resource_lock);
+ return ret;
+}
+
+
int
count_resource_groups_local(cman_node_t *mp)
{
resource_t *res;
resource_node_t *node;
- char rgname[64], *val;
+ char rgname[64];
rg_state_t st;
mp->cn_svccount = 0;
@@ -176,11 +210,8 @@
++mp->cn_svccount;
- val = res_attr_value(res, "exclusive");
- if (val && ((!strcmp(val, "yes") ||
- (atoi(val)>0))) ) {
+ if (is_exclusive_res(res))
++mp->cn_svcexcl;
- }
} while (!list_done(&_tree, node));
@@ -193,14 +224,11 @@
have_exclusive_resources(void)
{
resource_t *res;
- char *val;
pthread_rwlock_rdlock(&resource_lock);
list_do(&_resources, res) {
- val = res_attr_value(res, "exclusive");
- if (val && ((!strcmp(val, "yes") ||
- (atoi(val)>0))) ) {
+ if (is_exclusive_res(res)) {
pthread_rwlock_unlock(&resource_lock);
return 1;
}
@@ -217,9 +245,8 @@
check_exclusive_resources(cluster_member_list_t *membership, char *svcName)
{
cman_node_t *mp;
- int exclusive, count;
+ int exclusive, count, excl;
resource_t *res;
- char *val;
mp = memb_id_to_p(membership, my_id());
assert(mp);
@@ -230,14 +257,13 @@
res = find_root_by_ref(&_resources, svcName);
if (!res) {
pthread_rwlock_unlock(&resource_lock);
- return RG_EFAIL;
+ return RG_ENOSERVICE;
}
- val = res_attr_value(res, "exclusive");
+
+ excl = is_exclusive_res(res);
pthread_rwlock_unlock(&resource_lock);
- if (exclusive || (count && val &&
- (!strcmp(val, "yes") || (atoi(val)>0)))) {
+ if (exclusive || (count && excl))
return RG_YES;
- }
return 0;
}
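
The rg_state.c hunks that follow move the preferred-target dependency check in
handle_relocate_req() ahead of the service stop, and extend it so a target that
already runs any service refuses an incoming exclusive service (not just a
target that runs an exclusive one). A rough standalone sketch of that control
flow; every name here is a hypothetical stand-in, not the real rgmanager API,
and only the check-before-stop ordering mirrors the patch.

/*
 * Sketch of the ordering change in handle_relocate_req(): the
 * exclusivity check against the preferred target now happens *before*
 * the service is stopped, so a request that is doomed to fail with
 * RG_EDEPEND no longer takes the service down first.
 */
#include <stdio.h>

#define RG_EDEPEND	(-6)	/* stand-in error code */

/* Hypothetical stubs standing in for rgmanager's real checks/actions */
static int target_has_exclusive_conflict(int target) { return target == 2; }
static int stop_service(const char *svc) { printf("stopping %s\n", svc); return 0; }
static int start_on(const char *svc, int target) { printf("starting %s on node %d\n", svc, target); return 0; }

static int
relocate(const char *svc, int preferred_target)
{
	/* Fail fast: refuse before stopping anything (the fix) */
	if (preferred_target > 0 &&
	    target_has_exclusive_conflict(preferred_target))
		return RG_EDEPEND;

	stop_service(svc);
	return start_on(svc, preferred_target);
}

int
main(void)
{
	printf("node 2: %d\n", relocate("service:web", 2));  /* refused, never stopped */
	printf("node 3: %d\n", relocate("service:web", 3));  /* stopped, then started */
	return 0;
}
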
--- cluster/rgmanager/src/daemons/rg_state.c 2007/06/26 21:55:46 1.24.2.9
+++ cluster/rgmanager/src/daemons/rg_state.c 2007/07/02 15:13:43 1.24.2.10
@@ -58,6 +58,7 @@
static inline int handle_started_status(char *svcName, int ret, rg_state_t *svcStatus);
static inline int handle_migrate_status(char *svcName, int ret, rg_state_t *svcStatus);
int count_resource_groups_local(cman_node_t *mp);
+int is_exclusive(char *svcName);
int
@@ -842,13 +843,13 @@
}
count_resource_groups_local(m);
- if (m->cn_svcexcl) {
+ if (m->cn_svcexcl ||
+ (m->cn_svccount && is_exclusive(svcName))) {
free_member_list(membership);
return RG_EDEPEND;
}
free_member_list(membership);
-
if (rg_lock(svcName, &lockp) < 0) {
clulog(LOG_ERR, "#45: Unable to obtain cluster lock: %s\n",
strerror(errno));
@@ -1503,12 +1504,32 @@
handle_relocate_req(char *svcName, int request, int preferred_target,
int *new_owner)
{
- cluster_member_list_t *allowed_nodes, *backup = NULL;
+ cluster_member_list_t *allowed_nodes = NULL, *backup = NULL;
cman_node_t *m;
int target = preferred_target, me = my_id();
int ret, x;
rg_state_t svcStatus;
+ if (preferred_target > 0) {
+ /* TODO: simplify this and don't keep alloc/freeing
+ member lists */
+ allowed_nodes = member_list();
+ /* Avoid even bothering the other node if we can */
+ m = memb_id_to_p(allowed_nodes, preferred_target);
+ if (!m) {
+ free_member_list(allowed_nodes);
+ return RG_EINVAL;
+ }
+
+ count_resource_groups_local(m);
+ if (m->cn_svcexcl ||
+ (m->cn_svccount && is_exclusive(svcName))) {
+ free_member_list(allowed_nodes);
+ return RG_EDEPEND;
+ }
+ free_member_list(allowed_nodes);
+ }
+
/*
* Stop the service - if we haven't already done so.
*/
@@ -1525,19 +1546,6 @@
if (preferred_target > 0) {
allowed_nodes = member_list();
- m = memb_id_to_p(allowed_nodes, preferred_target);
- if (!m) {
- free_member_list(allowed_nodes);
- return RG_EINVAL;
- }
-
- /* Avoid even bothering the other node if we can */
- count_resource_groups_local(m);
- if (m->cn_svcexcl) {
- free_member_list(allowed_nodes);
- return RG_EDEPEND;
- }
-
/*
Mark everyone except me and the preferred target DOWN for now
If we can't start it on the preferred target, then we'll try