cluster-devel.redhat.com archive mirror
* [Cluster-devel] cluster/rgmanager include/resgroup.h src/daemo ...
@ 2006-12-13 18:38 lhh
From: lhh @ 2006-12-13 18:38 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	lhh at sourceware.org	2006-12-13 18:38:42

Modified files:
	rgmanager/include: resgroup.h 
	rgmanager/src/daemons: rg_forward.c rg_state.c 
	rgmanager/src/resources/utils: ra-skelet.sh 
	rgmanager/src/utils: clustat.c clusvcadm.c 
Added files:
	rgmanager/src/resources: named.metadata named.sh 
	rgmanager/src/resources/utils: named-parse-config.pl 

Log message:
	Merge from HEAD branch

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/resgroup.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15&r2=1.15.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_forward.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.8&r2=1.8.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.24&r2=1.24.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/named.metadata.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/named.sh.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/utils/named-parse-config.pl.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/utils/ra-skelet.sh.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.5&r2=1.5.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clustat.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25&r2=1.25.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clusvcadm.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.12&r2=1.12.2.1

--- cluster/rgmanager/include/resgroup.h	2006/10/06 21:22:27	1.15
+++ cluster/rgmanager/include/resgroup.h	2006/12/13 18:38:41	1.15.2.1
@@ -125,7 +125,7 @@
 		       msgctx_t *resp_ctx,
        		       int max, uint32_t target, int arg0, int arg1);
 
-void send_response(int ret, request_t *req);
+void send_response(int ret, int node, request_t *req);
 void send_ret(msgctx_t *ctx, char *name, int ret, int req);
 
 /* do this op on all resource groups.  The handler for the request 
--- cluster/rgmanager/src/daemons/rg_forward.c	2006/10/23 22:47:01	1.8
+++ cluster/rgmanager/src/daemons/rg_forward.c	2006/12/13 18:38:41	1.8.2.1
@@ -91,7 +91,7 @@
 	msg_free_ctx(ctx);
 
 	swab_SmMessageSt(&msg);
-	send_response(msg.sm_data.d_ret, req);
+	send_response(msg.sm_data.d_ret, msg.sm_data.d_svcOwner, req);
 	rq_free(req);
 	pthread_exit(NULL);
 	
--- cluster/rgmanager/src/daemons/rg_state.c	2006/10/23 22:47:01	1.24
+++ cluster/rgmanager/src/daemons/rg_state.c	2006/12/13 18:38:41	1.24.2.1
@@ -213,7 +213,7 @@
 
 	
 void
-send_response(int ret, request_t *req)
+send_response(int ret, int nodeid, request_t *req)
 {
 	SmMessageSt msg, *msgp = &msg;
 
@@ -226,7 +226,10 @@
 	msgp->sm_data.d_action = req->rr_orig_request;
 	strncpy(msgp->sm_data.d_svcName, req->rr_group,
 		sizeof(msgp->sm_data.d_svcName));
-	msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+	if (!nodeid)
+		msgp->sm_data.d_svcOwner = my_id();
+	else 
+		msgp->sm_data.d_svcOwner = nodeid;
 	msgp->sm_data.d_ret = ret;
 
 	swab_SmMessageSt(msgp);
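
The send_response() change above is the core of the fix: a reply to a
forwarded request now reports the actual service owner instead of
unconditionally reporting the forwarding node. A standalone sketch of the
new semantics -- my_id() and the message struct are simplified stand-ins,
not the rgmanager definitions:

#include <stdio.h>

static int my_id(void) { return 1; }   /* pretend the local node id is 1 */

struct sm_data { int d_svcOwner; int d_ret; };

/* Mirrors the patched logic: nodeid 0 means "unknown, assume local". */
static void fill_owner(struct sm_data *d, int ret, int nodeid)
{
	d->d_svcOwner = nodeid ? nodeid : my_id();
	d->d_ret = ret;
}

int main(void)
{
	struct sm_data d;

	fill_owner(&d, 0, 0);              /* local request -> owner 1 */
	printf("owner=%d\n", d.d_svcOwner);

	fill_owner(&d, 0, 3);              /* forwarded reply -> owner 3 */
	printf("owner=%d\n", d.d_svcOwner);
	return 0;
}
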
/cvs/cluster/cluster/rgmanager/src/resources/named.metadata,v  -->  standard output
revision 1.1.2.1
--- cluster/rgmanager/src/resources/named.metadata
+++ -	2006-12-13 18:38:42.926637000 +0000
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<resource-agent version="rgmanager 2.0" name="named">
+    <version>1.0</version>
+
+    <longdesc lang="en">
+        This defines an instance of the named DNS server
+    </longdesc>
+    <shortdesc lang="en">
+        Defines an instance of the named server
+    </shortdesc>
+
+    <parameters>
+        <parameter name="name" primary="1">
+	    <longdesc lang="en">
+	        Specifies a service name for logging and other purposes
+	    </longdesc>
+            <shortdesc lang="en">
+                Name
+            </shortdesc>
+	    <content type="string"/>
+        </parameter>
+
+        <parameter name="config_file">
+            <longdesc lang="en">
+                Defines the absolute path to the configuration file
+            </longdesc>
+            <shortdesc lang="en">
+                Config File
+            </shortdesc>
+	    <content type="string" default="/etc/named.conf"/>
+        </parameter>
+
+        <parameter name="named_sdb">
+            <longdesc lang="en">
+		Simplified Database Backend
+            </longdesc>
+            <shortdesc lang="en">
+		Simplified Database Backend
+            </shortdesc>
+	    <content type="boolean" default="0"/>
+        </parameter>
+
+	<parameter name="named_working_dir">
+		<longdesc lang="en">
+			The working directory for the named daemon
+		</longdesc>
+		<shortdesc lang="en">
+			Working Directory
+		</shortdesc>
+		<content type="string" default="/var/named" />
+	</parameter>
+
+	<parameter name="named_options">
+		<longdesc lang="en">
+			Other command-line options for named
+		</longdesc>
+		<shortdesc lang="en">
+			Other command-line options for named
+		</shortdesc>
+		<content type="string" />
+	</parameter>
+
+	<parameter name="shutdown_wait">
+		<longdesc lang="en">
+			Wait this many seconds for the service to shut down correctly
+		</longdesc>
+		<shortdesc lang="en">
+			Shutdown wait (seconds)
+		</shortdesc>
+		<content type="number" default="5" />
+	</parameter>
+
+        <parameter name="service_name" inherit="service%name">
+            <longdesc lang="en">
+	    	Inherit the service name.  We need to know
+		the service name in order to determine file
+		systems and IPs for this service.
+            </longdesc>
+            <shortdesc lang="en">
+	    	Inherit the service name.
+            </shortdesc>
+	    <content type="string"/>
+        </parameter>
+    </parameters>
+
+    <actions>
+        <action name="start" timeout="0"/>
+	<action name="stop" timeout="0"/>
+
+	<!-- Checks to see if the named daemon is still running -->
+	<action name="status" interval="1m" timeout="10"/>
+	<action name="monitor" interval="1m" timeout="10"/>
+
+	<!-- Deeper check of the named daemon -->
+	<action name="status" depth="10" timeout="30" interval="5m"/>
+	<action name="monitor" depth="10" timeout="30" interval="5m"/>
+
+	<action name="meta-data" timeout="0"/>
+	<action name="verify-all" timeout="0"/>
+    </actions>
+
+    <special tag="rgmanager">
+    </special>
+</resource-agent>
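
For context, a resource declared by this metadata would be referenced from
cluster.conf along the following lines. This is an illustrative sketch, not
part of the commit; the service/ip structure is assumed from rgmanager
convention, and the attribute names come from the parameters declared above:

<rm>
    <service name="dns-svc" autostart="1">
        <ip address="10.0.0.10" monitor_link="1"/>
        <named name="dns-svc-named"
               config_file="/etc/named.conf"
               named_working_dir="/var/named"
               shutdown_wait="5"/>
    </service>
</rm>
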
/cvs/cluster/cluster/rgmanager/src/resources/named.sh,v  -->  standard output
revision 1.1.2.1
--- cluster/rgmanager/src/resources/named.sh
+++ -	2006-12-13 18:38:43.011549000 +0000
@@ -0,0 +1,223 @@
+#!/bin/bash
+
+#
+#  Copyright Red Hat, Inc. 2006
+#
+#  This program is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License as published by the
+#  Free Software Foundation; either version 2, or (at your option) any
+#  later version.
+#
+#  This program is distributed in the hope that it will be useful, but
+#  WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; see the file COPYING.  If not, write to the
+#  Free Software Foundation, Inc.,  675 Mass Ave, Cambridge, 
+#  MA 02139, USA.
+#
+#
+#  Author(s):
+#	Marek Grac (mgrac at redhat.com)
+#
+
+export LC_ALL=C
+export LANG=C
+export PATH=/bin:/sbin:/usr/bin:/usr/sbin
+
+. $(dirname $0)/ocf-shellfuncs
+. $(dirname $0)/utils/config-utils.sh
+. $(dirname $0)/utils/messages.sh
+. $(dirname $0)/utils/ra-skelet.sh
+
+declare NAMED_NAMED=/usr/sbin/named
+declare NAMED_pid_file="`generate_name_for_pid_file`"
+declare NAMED_conf_dir="`generate_name_for_conf_dir`"
+declare NAMED_gen_config_file="$NAMED_conf_dir/named.conf"
+declare NAMED_url_list
+declare NAMED_parse_config=$(dirname $0)/utils/named-parse-config.pl
+
+verify_all()
+{
+	clog_service_verify $CLOG_INIT
+
+	if [ -z "$OCF_RESKEY_name" ]; then
+		clog_service_verify $CLOG_FAILED "Invalid Name Of Service"
+		return $OCF_ERR_ARGS
+	fi
+
+	if [ -z "$OCF_RESKEY_service_name" ]; then
+		clog_service_verify $CLOG_FAILED_NOT_CHILD
+		return $OCF_ERR_ARGS
+	fi
+
+	if [ -z "$OCF_RESKEY_config_file" ]; then
+		clog_check_file_exist $CLOG_FAILED_INVALID "$OCF_RESKEY_config_file"
+		clog_service_verify $CLOG_FAILED
+		return $OCF_ERR_ARGS
+	fi
+
+	if [ ! -r "$OCF_RESKEY_config_file" ]; then
+		clog_check_file_exist $CLOG_FAILED_NOT_READABLE $OCF_RESKEY_config_file
+		clog_service_verify $CLOG_FAILED
+		return $OCF_ERR_ARGS
+	fi
+
+	clog_service_verify $CLOG_SUCCEED
+		
+	return 0
+}
+
+generate_config_file()
+{
+	declare original_file="$1"
+	declare generated_file="$2"
+	declare ip_address="$3"
+
+	if [ -f "$generated_file" ]; then
+		sha1_verify "$generated_file"
+		if [ $? -ne 0 ]; then
+			clog_check_sha1 $CLOG_FAILED
+			return 0
+		fi
+	fi	
+
+	clog_generate_config $CLOG_INIT "$original_file" "$generated_file"
+
+	generate_configTemplate "$generated_file" "$1"
+	cat $original_file | grep -v "^[[:space:]]*listen-on" | \
+		grep -v "^[[:space:]]*pid-file" | \
+		grep -v "^[[:space:]]*directory" >> $generated_file
+
+	declare tmp_file=`mktemp -t cluster.XXXXXXXXXX`
+	mv "$generated_file" "$tmp_file"
+
+	"$NAMED_parse_config" "$OCF_RESKEY_named_working_dir" "$NAMED_pid_file" "$ip_address" \
+		 < "$tmp_file" > "$generated_file"
+
+	rm "$tmp_file"	
+        sha1_addToFile "$generated_file"
+	clog_generate_config $CLOG_SUCCEED "$original_file" "$generated_file"
+
+	return 0;
+}
+
+start()
+{
+	declare ccs_fd;
+	declare ip_list;
+	
+	clog_service_start $CLOG_INIT
+
+	create_pid_directory
+	create_conf_directory "$NAMED_conf_dir"
+	check_pid_file "$NAMED_pid_file"
+
+	if [ $? -ne 0 ]; then
+		clog_check_pid $CLOG_FAILED "$NAMED_pid_file"
+		clog_service_start $CLOG_FAILED
+		return $OCF_ERR_GENERIC
+	fi
+	clog_looking_for $CLOG_INIT "IP Addresses"
+
+        ccs_fd=$(ccs_connect);
+        if [ $? -ne 0 ]; then
+		clog_looking_for $CLOG_FAILED_CCS
+                return $OCF_ERR_GENERIC
+        fi
+
+        get_service_ip_keys "$ccs_fd" "$OCF_RESKEY_service_name"
+        ip_addresses=`build_ip_list "$ccs_fd"`
+
+	if [ -z "$ip_addresses" ]; then
+		clog_looking_for $CLOG_FAILED_NOT_FOUND "IP Addresses"
+		return $OCF_ERR_GENERIC
+	fi
+	
+	clog_looking_for $CLOG_SUCCEED "IP Addresses"
+
+	ip_list=`echo $ip_addresses | sed 's/ /;/;s/\([[:digit:]]\)$/\1;/' `
+
+	if [ -z "$ip_list" ]; then
+		clog_looking_for $CLOG_FAILED_NOT_FOUND "IP Addresses"
+		return $OCF_ERR_GENERIC
+	fi
+
+	generate_config_file "$OCF_RESKEY_config_file" "$NAMED_gen_config_file" "$ip_list"
+
+	$NAMED_NAMED -c "$NAMED_gen_config_file" $OCF_RESKEY_named_options
+
+	if [ $? -ne 0 ]; then
+		clog_service_start $CLOG_FAILED
+		return $OCF_ERR_GENERIC
+	fi
+
+	clog_service_start $CLOG_SUCCEED
+
+	return 0;
+}
+
+stop()
+{
+	clog_service_stop $CLOG_INIT
+
+	stop_generic "$NAMED_pid_file" "$OCF_RESKEY_shutdown_wait"
+	
+	if [ $? -ne 0 ]; then
+		clog_service_stop $CLOG_FAILED
+		return $OCF_ERR_GENERIC
+	fi
+	
+	clog_service_stop $CLOG_SUCCEED
+	return 0;
+}
+
+status()
+{
+	clog_service_status $CLOG_INIT
+
+	status_check_pid "$NAMED_pid_file"
+	if [ $? -ne 0 ]; then
+		clog_service_status $CLOG_FAILED "$NAMED_pid_file"
+		return $OCF_ERR_GENERIC
+	fi
+
+	clog_service_status $CLOG_SUCCEED
+	return 0
+}
+
+case $1 in
+	meta-data)
+		cat `echo $0 | sed 's/^\(.*\)\.sh$/\1.metadata/'`
+		exit 0
+		;;
+	verify-all)
+		verify_all
+		exit $?
+		;;
+	start)
+		verify_all && start
+		exit $?
+		;;
+	stop)
+		verify_all && stop
+		exit $?
+		;;
+	status|monitor)
+		verify_all
+		status
+		exit $?
+		;;
+	restart)
+		verify_all
+		stop
+		start
+		exit $?
+		;;
+	*)
+		echo "Usage: $0 {start|stop|status|monitor|restart|meta-data|verify-all}"
+		exit $OCF_ERR_GENERIC
+		;;
+esac
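
An agent like this can be exercised by hand before wiring it into a service.
The sketch below uses made-up values, assumes the conventional
/usr/share/cluster install location, and sticks to the one action that needs
no ccs connection:

# Hypothetical manual smoke test for the named agent.
export OCF_RESKEY_name="dns-svc-named"
export OCF_RESKEY_service_name="dns-svc"
export OCF_RESKEY_config_file="/etc/named.conf"
export OCF_RESKEY_named_working_dir="/var/named"
export OCF_RESKEY_shutdown_wait=5

cd /usr/share/cluster || exit 1      # assumed agent location
./named.sh verify-all                # parameter sanity check only
echo "verify-all exit code: $?"
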
/cvs/cluster/cluster/rgmanager/src/resources/utils/named-parse-config.pl,v  -->  standard output
revision 1.1.2.1
--- cluster/rgmanager/src/resources/utils/named-parse-config.pl
+++ -	2006-12-13 18:38:43.089888000 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/perl -w
+
+##
+##  Parse named.conf (from STDIN) and add options from cluster.conf
+##  
+##  ./named-parse-config.pl "directory" "pid-file" "listen-on"
+##
+use strict;
+
+if ($#ARGV < 2) {
+	die ("Not enough arguments");
+}
+
+while (my $line = <STDIN>) {
+	chomp($line);
+	$line =~ s/(.*?)\s*$/$1/;
+	if ($line =~ /^\s*options\s+\{/) {
+		print $line, "\n";
+		print "\tdirectory \"$ARGV[0]\";\n";
+		print "\tpid-file \"$ARGV[1]\";\n";
+		print "\tlisten-on { $ARGV[2] };\n";
+	} else {
+		print $line, "\n";
+	}
+}
+
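
To see what the filter does, feed it a minimal named.conf on stdin; the
input, paths, and address below are made up for illustration:

cat > /tmp/named.conf.in <<'EOF'
options {
	recursion no;
};
EOF

./named-parse-config.pl /var/named /var/run/named.pid "10.0.0.10;" \
	< /tmp/named.conf.in
# Prints the file with the three cluster-managed options injected
# immediately after the "options {" line:
#   options {
#           directory "/var/named";
#           pid-file "/var/run/named.pid";
#           listen-on { 10.0.0.10; };
#           recursion no;
#   };
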
--- cluster/rgmanager/src/resources/utils/ra-skelet.sh	2006/10/10 13:27:02	1.5
+++ cluster/rgmanager/src/resources/utils/ra-skelet.sh	2006/12/13 18:38:41	1.5.4.1
@@ -62,10 +62,10 @@
 	read pid < "$pid_file"
 
 	# @todo: PID is not running -> error?
-	if [ ! -d "/proc/$pid_file" ]; then
+	if [ ! -d "/proc/$pid" ]; then
 		return 0;
 	fi
-                                
+
 	kill -TERM "$pid"
 
 	if [ $? -ne 0 ]; then
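
The one-character fix above matters more than it looks: the old test checked
for /proc/$pid_file -- a directory named after the pid file's path, which can
never exist -- so stop_generic() concluded the daemon was already gone and
returned success without ever sending SIGTERM. A standalone illustration with
made-up values:

pid_file=/var/run/named.pid
pid=4242                            # as if read from "$pid_file"

[ -d "/proc/$pid_file" ] && echo "old test: running"   # never true
[ -d "/proc/$pid" ] && echo "new test: running"        # true iff pid 4242 exists
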
--- cluster/rgmanager/src/utils/clustat.c	2006/10/12 22:07:57	1.25
+++ cluster/rgmanager/src/utils/clustat.c	2006/12/13 18:38:41	1.25.2.1
@@ -345,6 +345,9 @@
 	int x, y;
 	cman_node_t *m, *new;
 
+	if (!these)
+		return all;
+
 	for (x=0; x<these->cml_count; x++) {
 
 		m = NULL;
@@ -482,14 +485,17 @@
 }
 
 
-void
+int
 txt_rg_states(rg_state_list_t *rgl, cluster_member_list_t *members, 
 	      char *svcname, int flags)
 {
-	int x;
+	int x, ret = 0;
 
 	if (!rgl || !members)
-		return;
+		return -1;
+
+	if (svcname)
+		ret = -1;
 
 	if (!(flags & RG_VERBOSE)) {
 		printf("  %-20.20s %-30.30s %-14.14s\n",
@@ -506,18 +512,32 @@
 		    strcmp(rgl->rgl_states[x].rs_name, svcname))
 			continue;
 		txt_rg_state(&rgl->rgl_states[x], members, flags);
+		if (svcname) {
+			switch (rgl->rgl_states[x].rs_state) {
+			case RG_STATE_STARTING:
+			case RG_STATE_STARTED:
+			case RG_STATE_STOPPING:
+				ret = 0;
+				break;
+			default:
+				ret = rgl->rgl_states[x].rs_state;
+			}
+		}
 	}
+	
+	return ret;
 }
 
 
-void
+int
 xml_rg_states(rg_state_list_t *rgl, cluster_member_list_t *members,
 	      char *svcname)
 {
 	int x;
+	int ret = 0;
 
 	if (!rgl || !members)
-		return;
+		return -1;
 
 	printf("  <groups>\n");
 
@@ -525,14 +545,23 @@
 		if (svcname &&
 		    strcmp(rgl->rgl_states[x].rs_name, svcname))
 			continue;
-		xml_rg_state(&rgl->rgl_states[x], members, 0);
+		if (svcname) {
+			switch (rgl->rgl_states[x].rs_state) {
+			case RG_STATE_STARTING:
+			case RG_STATE_STARTED:
+			case RG_STATE_STOPPING:
+				break;
+			default:
+				ret = rgl->rgl_states[x].rs_state;
+			}
+		}
 	}
 
 	printf("  </groups>\n");
+	return ret;
 }
 
 
-
 void
 txt_quorum_state(int qs)
 {
@@ -606,10 +635,15 @@
 }
 
 
-void
+int
 txt_member_states(cluster_member_list_t *membership, char *name)
 {
-	int x;
+	int x, ret = 0;
+
+  	if (!membership) {
+  		printf("Membership information not available\n");
+ 		return -1;
+  	}
 
 	printf("  %-34.34s %-4.4s %s\n", "Member Name", "ID", "Status");
 	printf("  %-34.34s %-4.4s %s\n", "------ ----", "----", "------");
@@ -618,35 +652,45 @@
 		if (name && strcmp(membership->cml_members[x].cn_name, name))
 			continue;
 		txt_member_state(&membership->cml_members[x]);
+ 		ret = !(membership->cml_members[x].cn_member & FLAG_UP);
 	}
 
 	printf("\n");
+	return ret;
 }
 
 
-void
+int
 xml_member_states(cluster_member_list_t *membership, char *name)
 {
-	int x;
+	int x, ret = 0;
 
-	if (!membership)
-		return;
+	if (!membership) {
+		printf("  <nodes/>\n");
+		return -1;
+	}
 
 	printf("  <nodes>\n");
 	for (x = 0; x < membership->cml_count; x++) {
 		if (name && strcmp(membership->cml_members[x].cn_name, name))
 			continue;
 		xml_member_state(&membership->cml_members[x]);
+		if (name)
+			ret = !(membership->cml_members[x].cn_member & FLAG_UP);
 	}
 	printf("  </nodes>\n");
+	
+	return ret;
 }
 
 
-void
+int 
 txt_cluster_status(int qs, cluster_member_list_t *membership,
 		   rg_state_list_t *rgs, char *name, char *svcname, 
 		   int flags)
 {
+	int ret;
+	
 	if (!svcname && !name) {
 		txt_quorum_state(qs);
 		if (!membership) {
@@ -656,18 +700,22 @@
 		}
 	}
 
-	if (!svcname || (name && svcname))
-		txt_member_states(membership, name);
-	if (!name || (name && svcname))
-		txt_rg_states(rgs, membership, svcname, flags);
+  	if (!svcname || (name && svcname))
+ 		ret = txt_member_states(membership, name);
+ 	if (name && !svcname)
+ 		return ret;
+ 	if (!name || (name && svcname))
+ 		ret = txt_rg_states(rgs, membership, svcname, flags);
+ 	return ret;
 }
 
 
-void
+int
 xml_cluster_status(int qs, cluster_member_list_t *membership,
 		   rg_state_list_t *rgs, char *name, char *svcname,
 		   int flags)
 {
+ 	int ret1 = 0, ret2 = -1;
 	int x;
 
 	printf("<?xml version=\"1.0\"?>\n");
@@ -687,36 +735,24 @@
 		}
 	}
 
-	if (!svcname && !name)
-		xml_quorum_state(qs);
-	if (!svcname || (name && svcname)) 
-		xml_member_states(membership, name);
-	if (rgs &&
-	    (!name || (name && svcname)))
-		xml_rg_states(rgs, membership, svcname);
-	printf("</clustat>\n");
+  	if (!svcname && !name)
+  		xml_quorum_state(qs);
+  	if (!svcname || (name && svcname)) 
+ 		ret1 = xml_member_states(membership, name);
+ 	
+  	if (rgs &&
+  	    (!name || (name && svcname)))
+ 		ret2 = xml_rg_states(rgs, membership, svcname);
+  	printf("</clustat>\n");
+ 	
+ 	if (name && ret1)
+ 		return ret1;
+ 	if (svcname && ret2)
+ 		return ret2;
+ 	return 0;
 }
 
 
-void
-dump_node(cman_node_t *node)
-{
-	printf("Node %s state %02x\n", node->cn_name, node->cn_member);
-}
-
-
-void 
-dump_nodes(cluster_member_list_t *nodes)
-{
-	int x;
-
-	for (x=0; x<nodes->cml_count; x++) {
-		dump_node(&nodes->cml_members[x]);
-	}
-}
-
-
-
 cluster_member_list_t *
 build_member_list(cman_handle_t ch, int *lid)
 {
@@ -921,11 +957,13 @@
 		}
 
 		if (xml)
-			xml_cluster_status(qs, membership, rgs, member_name,
-					   rg_name,flags);
+			ret = xml_cluster_status(qs, membership, rgs,
+						 member_name, rg_name,
+						 flags);
 		else
-			txt_cluster_status(qs, membership, rgs, member_name,
-					   rg_name,flags);
+			ret = txt_cluster_status(qs, membership, rgs,
+						 member_name, rg_name,
+						 flags);
 
 		if (membership)
 			free_member_list(membership);
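
The net effect of the clustat.c changes is that the exit status becomes
meaningful when a single service or member is queried, so clustat can drive
scripts. A usage sketch -- the -s (service) option is assumed from clustat's
option parsing, and the service name is made up:

# Branch on the state of one service (illustrative).
if clustat -s dns-svc > /dev/null; then
	echo "dns-svc is starting, started, or stopping"
else
	echo "dns-svc is not healthy (state code $?)"
fi
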
--- cluster/rgmanager/src/utils/clusvcadm.c	2006/09/01 19:02:22	1.12
+++ cluster/rgmanager/src/utils/clusvcadm.c	2006/12/13 18:38:41	1.12.2.1
@@ -180,6 +180,46 @@
 
 
 int
+find_closest_node(cluster_member_list_t *cml, char *name, size_t maxlen)
+{
+	int x, c = 0, cl = 0, nc = 0, ncl = 0, cur = 0;
+
+	for (x=0; x<cml->cml_count; x++) {
+		cur = 0;
+
+		while (cml->cml_members[x].cn_name[cur] && name[cur] &&
+		       (cml->cml_members[x].cn_name[cur] == name[cur]))
+			cur++;
+		if (!cur)
+			continue;
+		if (cur >= cl) {
+			ncl = cl; /* Next-closest */
+			nc = c;
+			cl = cur;
+			c = x;
+		}
+	}
+
+	if (!cl) {
+		printf("No matches for '%s' found\n", name);
+		return 0;
+	}
+
+	if (ncl == cl) {
+		printf("More than one possible match for '%s' found\n",
+		       name);
+		return 0;
+	}
+
+	printf("Closest match: '%s'\n", 
+	       cml->cml_members[c].cn_name);
+
+	strncpy(name, cml->cml_members[c].cn_name, maxlen);
+	return cml->cml_members[c].cn_nodeid;
+}
+
+
+int
 main(int argc, char **argv)
 {
 	extern char *optarg;
@@ -291,9 +331,12 @@
 	if (node_specified) {
 		svctarget = memb_name_to_id(membership, nodename);
 		if (svctarget == 0) {
-			fprintf(stderr, "Member %s not in membership list\n",
-				nodename);
-			return 1;
+			printf("'%s' not in membership list\n",
+			       nodename);
+			svctarget = find_closest_node(membership, nodename,
+						      sizeof(nodename));
+			if (!svctarget)
+				return 1;
 		}
 	} else {
 		svctarget = 0;
@@ -301,9 +344,9 @@
 		clu_local_nodename(RG_SERVICE_GROUP, nodename,
 				   sizeof(nodename));
 				   */
+		strcpy(nodename,"me");
 	}
 	
-	strcpy(nodename,"me");
 	build_message(&msg, action, svcname, svctarget);
 
 	if (action != RG_RELOCATE && action != RG_MIGRATE) {
@@ -312,7 +355,11 @@
 		fflush(stdout);
 		msg_open(MSG_SOCKET, 0, RG_PORT, &ctx, 5);
 	} else {
-		printf("Trying to relocate %s to %s", svcname, nodename);
+		if (!svctarget)
+			printf("Trying to relocate %s", svcname);
+		else 
+			printf("Trying to relocate %s to %s", svcname,
+			       nodename);
 		printf("...");
 		fflush(stdout);
 		msg_open(MSG_SOCKET, 0, RG_PORT, &ctx, 5);
@@ -342,5 +389,19 @@
 
 	swab_SmMessageSt(&msg);
 	printf("%s\n", rg_strerror(msg.sm_data.d_ret));
+	switch (action) {
+	case RG_MIGRATE:
+	case RG_RELOCATE:
+	case RG_START:
+	case RG_ENABLE:
+		printf("%s%s is now running on %s\n",
+		       msg.sm_data.d_svcOwner==svctarget?"":"Warning: ",
+		       svcname, memb_id_to_name(membership,
+		       			        msg.sm_data.d_svcOwner));
+		break;
+	default:
+		break;
+	}
+	
 	return msg.sm_data.d_ret;
 }
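
find_closest_node() gives clusvcadm forgiving member matching: an unambiguous
prefix of a node name is accepted and expanded, while an ambiguous one is
rejected. A hypothetical session (node names 'alpha' and 'beta' and the
service name are made up; -r/-m are the relocate and member options, and the
messages follow the printf strings in the patch):

$ clusvcadm -r dns-svc -m alp
'alp' not in membership list
Closest match: 'alpha'
Trying to relocate dns-svc to alpha...
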




* [Cluster-devel] cluster/rgmanager include/resgroup.h src/daemo ...
@ 2006-12-13 18:39 lhh
From: lhh @ 2006-12-13 18:39 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL50
Changes by:	lhh at sourceware.org	2006-12-13 18:39:20

Modified files:
	rgmanager/include: resgroup.h 
	rgmanager/src/daemons: rg_forward.c rg_state.c 
	rgmanager/src/resources/utils: ra-skelet.sh 
	rgmanager/src/utils: clustat.c clusvcadm.c 
Added files:
	rgmanager/src/resources: named.metadata named.sh 
	rgmanager/src/resources/utils: named-parse-config.pl 

Log message:
	Merge from HEAD branch

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/resgroup.h.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.15&r2=1.15.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_forward.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.8&r2=1.8.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.24&r2=1.24.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/named.metadata.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=NONE&r2=1.1.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/named.sh.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=NONE&r2=1.1.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/utils/named-parse-config.pl.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=NONE&r2=1.1.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/utils/ra-skelet.sh.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.5&r2=1.5.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clustat.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.25&r2=1.25.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clusvcadm.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.12&r2=1.12.4.1

--- cluster/rgmanager/include/resgroup.h	2006/10/06 21:22:27	1.15
+++ cluster/rgmanager/include/resgroup.h	2006/12/13 18:39:19	1.15.4.1
@@ -125,7 +125,7 @@
 		       msgctx_t *resp_ctx,
        		       int max, uint32_t target, int arg0, int arg1);
 
-void send_response(int ret, request_t *req);
+void send_response(int ret, int node, request_t *req);
 void send_ret(msgctx_t *ctx, char *name, int ret, int req);
 
 /* do this op on all resource groups.  The handler for the request 
--- cluster/rgmanager/src/daemons/rg_forward.c	2006/10/23 22:47:01	1.8
+++ cluster/rgmanager/src/daemons/rg_forward.c	2006/12/13 18:39:19	1.8.4.1
@@ -91,7 +91,7 @@
 	msg_free_ctx(ctx);
 
 	swab_SmMessageSt(&msg);
-	send_response(msg.sm_data.d_ret, req);
+	send_response(msg.sm_data.d_ret, msg.sm_data.d_svcOwner, req);
 	rq_free(req);
 	pthread_exit(NULL);
 	
--- cluster/rgmanager/src/daemons/rg_state.c	2006/10/23 22:47:01	1.24
+++ cluster/rgmanager/src/daemons/rg_state.c	2006/12/13 18:39:19	1.24.4.1
@@ -213,7 +213,7 @@
 
 	
 void
-send_response(int ret, request_t *req)
+send_response(int ret, int nodeid, request_t *req)
 {
 	SmMessageSt msg, *msgp = &msg;
 
@@ -226,7 +226,10 @@
 	msgp->sm_data.d_action = req->rr_orig_request;
 	strncpy(msgp->sm_data.d_svcName, req->rr_group,
 		sizeof(msgp->sm_data.d_svcName));
-	msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+	if (!nodeid)
+		msgp->sm_data.d_svcOwner = my_id();
+	else 
+		msgp->sm_data.d_svcOwner = nodeid;
 	msgp->sm_data.d_ret = ret;
 
 	swab_SmMessageSt(msgp);
/cvs/cluster/cluster/rgmanager/src/resources/named.metadata,v  -->  standard output
revision 1.1.4.1
--- cluster/rgmanager/src/resources/named.metadata
+++ -	2006-12-13 18:39:22.124025000 +0000
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<resource-agent version="rgmanager 2.0" name="named">
+    <version>1.0</version>
+
+    <longdesc lang="en">
+        This defines an instance of the named DNS server
+    </longdesc>
+    <shortdesc lang="en">
+        Defines an instance of the named server
+    </shortdesc>
+
+    <parameters>
+        <parameter name="name" primary="1">
+	    <longdesc lang="en">
+	        Specifies a service name for logging and other purposes
+	    </longdesc>
+            <shortdesc lang="en">
+                Name
+            </shortdesc>
+	    <content type="string"/>
+        </parameter>
+
+        <parameter name="config_file">
+            <longdesc lang="en">
+                Defines the absolute path to the configuration file
+            </longdesc>
+            <shortdesc lang="en">
+                Config File
+            </shortdesc>
+	    <content type="string" default="/etc/named.conf"/>
+        </parameter>
+
+        <parameter name="named_sdb">
+            <longdesc lang="en">
+		Simplified Database Backend
+            </longdesc>
+            <shortdesc lang="en">
+		Simplified Database Backend
+            </shortdesc>
+	    <content type="boolean" default="0"/>
+        </parameter>
+
+	<parameter name="named_working_dir">
+		<longdesc lang="en">
+			The working directory for the named daemon
+		</longdesc>
+		<shortdesc lang="en">
+			Working Directory
+		</shortdesc>
+		<content type="string" default="/var/named" />
+	</parameter>
+
+	<parameter name="named_options">
+		<longdesc lang="en">
+			Other command-line options for named
+		</longdesc>
+		<shortdesc lang="en">
+			Other command-line options for named
+		</shortdesc>
+		<content type="string" />
+	</parameter>
+
+	<parameter name="shutdown_wait">
+		<longdesc lang="en">
+			Wait this many seconds for the service to shut down correctly
+		</longdesc>
+		<shortdesc lang="en">
+			Shutdown wait (seconds)
+		</shortdesc>
+		<content type="number" default="5" />
+	</parameter>
+
+        <parameter name="service_name" inherit="service%name">
+            <longdesc lang="en">
+	    	Inherit the service name.  We need to know
+		the service name in order to determine file
+		systems and IPs for this service.
+            </longdesc>
+            <shortdesc lang="en">
+	    	Inherit the service name.
+            </shortdesc>
+	    <content type="string"/>
+        </parameter>
+    </parameters>
+
+    <actions>
+        <action name="start" timeout="0"/>
+	<action name="stop" timeout="0"/>
+
+	<!-- Checks to see if the named daemon is still running -->
+	<action name="status" interval="1m" timeout="10"/>
+	<action name="monitor" interval="1m" timeout="10"/>
+
+	<!-- Deeper check of the named daemon -->
+	<action name="status" depth="10" timeout="30" interval="5m"/>
+	<action name="monitor" depth="10" timeout="30" interval="5m"/>
+
+	<action name="meta-data" timeout="0"/>
+	<action name="verify-all" timeout="0"/>
+    </actions>
+
+    <special tag="rgmanager">
+    </special>
+</resource-agent>
/cvs/cluster/cluster/rgmanager/src/resources/named.sh,v  -->  standard output
revision 1.1.4.1
--- cluster/rgmanager/src/resources/named.sh
+++ -	2006-12-13 18:39:22.206293000 +0000
@@ -0,0 +1,223 @@
+#!/bin/bash
+
+#
+#  Copyright Red Hat, Inc. 2006
+#
+#  This program is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License as published by the
+#  Free Software Foundation; either version 2, or (at your option) any
+#  later version.
+#
+#  This program is distributed in the hope that it will be useful, but
+#  WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; see the file COPYING.  If not, write to the
+#  Free Software Foundation, Inc.,  675 Mass Ave, Cambridge, 
+#  MA 02139, USA.
+#
+#
+#  Author(s):
+#	Marek Grac (mgrac at redhat.com)
+#
+
+export LC_ALL=C
+export LANG=C
+export PATH=/bin:/sbin:/usr/bin:/usr/sbin
+
+. $(dirname $0)/ocf-shellfuncs
+. $(dirname $0)/utils/config-utils.sh
+. $(dirname $0)/utils/messages.sh
+. $(dirname $0)/utils/ra-skelet.sh
+
+declare NAMED_NAMED=/usr/sbin/named
+declare NAMED_pid_file="`generate_name_for_pid_file`"
+declare NAMED_conf_dir="`generate_name_for_conf_dir`"
+declare NAMED_gen_config_file="$NAMED_conf_dir/named.conf"
+declare NAMED_url_list
+declare NAMED_parse_config=$(dirname $0)/utils/named-parse-config.pl
+
+verify_all()
+{
+	clog_service_verify $CLOG_INIT
+
+	if [ -z "$OCF_RESKEY_name" ]; then
+		clog_service_verify $CLOG_FAILED "Invalid Name Of Service"
+		return $OCF_ERR_ARGS
+	fi
+
+	if [ -z "$OCF_RESKEY_service_name" ]; then
+		clog_service_verify $CLOG_FAILED_NOT_CHILD
+		return $OCF_ERR_ARGS
+	fi
+
+	if [ -z "$OCF_RESKEY_config_file" ]; then
+		clog_check_file_exist $CLOG_FAILED_INVALID "$OCF_RESKEY_config_file"
+		clog_service_verify $CLOG_FAILED
+		return $OCF_ERR_ARGS
+	fi
+
+	if [ ! -r "$OCF_RESKEY_config_file" ]; then
+		clog_check_file_exist $CLOG_FAILED_NOT_READABLE $OCF_RESKEY_config_file
+		clog_service_verify $CLOG_FAILED
+		return $OCF_ERR_ARGS
+	fi
+
+	clog_service_verify $CLOG_SUCCEED
+		
+	return 0
+}
+
+generate_config_file()
+{
+	declare original_file="$1"
+	declare generated_file="$2"
+	declare ip_address="$3"
+
+	if [ -f "$generated_file" ]; then
+		sha1_verify "$generated_file"
+		if [ $? -ne 0 ]; then
+			clog_check_sha1 $CLOG_FAILED
+			return 0
+		fi
+	fi	
+
+	clog_generate_config $CLOG_INIT "$original_file" "$generated_file"
+
+	generate_configTemplate "$generated_file" "$1"
+	cat $original_file | grep -v "^[[:space:]]*listen-on" | \
+		grep -v "^[[:space:]]*pid-file" | \
+		grep -v "^[[:space:]]*directory" >> $generated_file
+
+	declare tmp_file=`mktemp -t cluster.XXXXXXXXXX`
+	mv "$generated_file" "$tmp_file"
+
+	"$NAMED_parse_config" "$OCF_RESKEY_named_working_dir" "$NAMED_pid_file" "$ip_address" \
+		 < "$tmp_file" > "$generated_file"
+
+	rm "$tmp_file"	
+        sha1_addToFile "$generated_file"
+	clog_generate_config $CLOG_SUCCEED "$original_file" "$generated_file"
+
+	return 0;
+}
+
+start()
+{
+	declare ccs_fd;
+	declare ip_list;
+	
+	clog_service_start $CLOG_INIT
+
+	create_pid_directory
+	create_conf_directory "$NAMED_conf_dir"
+	check_pid_file "$NAMED_pid_file"
+
+	if [ $? -ne 0 ]; then
+		clog_check_pid $CLOG_FAILED "$NAMED_pid_file"
+		clog_service_start $CLOG_FAILED
+		return $OCF_ERR_GENERIC
+	fi
+	clog_looking_for $CLOG_INIT "IP Addresses"
+
+        ccs_fd=$(ccs_connect);
+        if [ $? -ne 0 ]; then
+		clog_looking_for $CLOG_FAILED_CCS
+                return $OCF_ERR_GENERIC
+        fi
+
+        get_service_ip_keys "$ccs_fd" "$OCF_RESKEY_service_name"
+        ip_addresses=`build_ip_list "$ccs_fd"`
+
+	if [ -z "$ip_addresses" ]; then
+		clog_looking_for $CLOG_FAILED_NOT_FOUND "IP Addresses"
+		return $OCF_ERR_GENERIC
+	fi
+	
+	clog_looking_for $CLOG_SUCCEED "IP Addresses"
+
+	ip_list=`echo $ip_addresses | sed 's/ /;/;s/\([[:digit:]]\)$/\1;/' `
+
+	if [ -z "$ip_list" ]; then
+		clog_looking_for $CLOG_FAILED_NOT_FOUND "IP Addresses"
+		return $OCF_ERR_GENERIC
+	fi
+
+	generate_config_file "$OCF_RESKEY_config_file" "$NAMED_gen_config_file" "$ip_list"
+
+	$NAMED_NAMED -c "$NAMED_gen_config_file" $OCF_RESKEY_named_options
+
+	if [ $? -ne 0 ]; then
+		clog_service_start $CLOG_FAILED
+		return $OCF_ERR_GENERIC
+	fi
+
+	clog_service_start $CLOG_SUCCEED
+
+	return 0;
+}
+
+stop()
+{
+	clog_service_stop $CLOG_INIT
+
+	stop_generic "$NAMED_pid_file" "$OCF_RESKEY_shutdown_wait"
+	
+	if [ $? -ne 0 ]; then
+		clog_service_stop $CLOG_FAILED
+		return $OCF_ERR_GENERIC
+	fi
+	
+	clog_service_stop $CLOG_SUCCEED
+	return 0;
+}
+
+status()
+{
+	clog_service_status $CLOG_INIT
+
+	status_check_pid "$NAMED_pid_file"
+	if [ $? -ne 0 ]; then
+		clog_service_status $CLOG_FAILED "$NAMED_pid_file"
+		return $OCF_ERR_GENERIC
+	fi
+
+	clog_service_status $CLOG_SUCCEED
+	return 0
+}
+
+case $1 in
+	meta-data)
+		cat `echo $0 | sed 's/^\(.*\)\.sh$/\1.metadata/'`
+		exit 0
+		;;
+	verify-all)
+		verify_all
+		exit $?
+		;;
+	start)
+		verify_all && start
+		exit $?
+		;;
+	stop)
+		verify_all && stop
+		exit $?
+		;;
+	status|monitor)
+		verify_all
+		status
+		exit $?
+		;;
+	restart)
+		verify_all
+		stop
+		start
+		exit $?
+		;;
+	*)
+		echo "Usage: $0 {start|stop|status|monitor|restart|meta-data|verify-all}"
+		exit $OCF_ERR_GENERIC
+		;;
+esac
/cvs/cluster/cluster/rgmanager/src/resources/utils/named-parse-config.pl,v  -->  standard output
revision 1.1.4.1
--- cluster/rgmanager/src/resources/utils/named-parse-config.pl
+++ -	2006-12-13 18:39:22.286403000 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/perl -w
+
+##
+##  Parse named.conf (from STDIN) and add options from cluster.conf
+##  
+##  ./named-parse-config.pl "directory" "pid-file" "listen-on"
+##
+use strict;
+
+if ($#ARGV < 2) {
+	die ("Not enough arguments");
+}
+
+while (my $line = <STDIN>) {
+	chomp($line);
+	$line =~ s/(.*?)\s*$/$1/;
+	if ($line =~ /^\s*options\s+\{/) {
+		print $line, "\n";
+		print "\tdirectory \"$ARGV[0]\";\n";
+		print "\tpid-file \"$ARGV[1]\";\n";
+		print "\tlisten-on { $ARGV[2] };\n";
+	} else {
+		print $line, "\n";
+	}
+}
+
--- cluster/rgmanager/src/resources/utils/ra-skelet.sh	2006/10/10 13:27:02	1.5
+++ cluster/rgmanager/src/resources/utils/ra-skelet.sh	2006/12/13 18:39:20	1.5.6.1
@@ -62,10 +62,10 @@
 	read pid < "$pid_file"
 
 	# @todo: PID is not running -> error?
-	if [ ! -d "/proc/$pid_file" ]; then
+	if [ ! -d "/proc/$pid" ]; then
 		return 0;
 	fi
-                                
+
 	kill -TERM "$pid"
 
 	if [ $? -ne 0 ]; then
--- cluster/rgmanager/src/utils/clustat.c	2006/10/12 22:07:57	1.25
+++ cluster/rgmanager/src/utils/clustat.c	2006/12/13 18:39:20	1.25.4.1
@@ -345,6 +345,9 @@
 	int x, y;
 	cman_node_t *m, *new;
 
+	if (!these)
+		return all;
+
 	for (x=0; x<these->cml_count; x++) {
 
 		m = NULL;
@@ -482,14 +485,17 @@
 }
 
 
-void
+int
 txt_rg_states(rg_state_list_t *rgl, cluster_member_list_t *members, 
 	      char *svcname, int flags)
 {
-	int x;
+	int x, ret = 0;
 
 	if (!rgl || !members)
-		return;
+		return -1;
+
+	if (svcname)
+		ret = -1;
 
 	if (!(flags & RG_VERBOSE)) {
 		printf("  %-20.20s %-30.30s %-14.14s\n",
@@ -506,18 +512,32 @@
 		    strcmp(rgl->rgl_states[x].rs_name, svcname))
 			continue;
 		txt_rg_state(&rgl->rgl_states[x], members, flags);
+		if (svcname) {
+			switch (rgl->rgl_states[x].rs_state) {
+			case RG_STATE_STARTING:
+			case RG_STATE_STARTED:
+			case RG_STATE_STOPPING:
+				ret = 0;
+				break;
+			default:
+				ret = rgl->rgl_states[x].rs_state;
+			}
+		}
 	}
+	
+	return ret;
 }
 
 
-void
+int
 xml_rg_states(rg_state_list_t *rgl, cluster_member_list_t *members,
 	      char *svcname)
 {
 	int x;
+	int ret = 0;
 
 	if (!rgl || !members)
-		return;
+		return -1;
 
 	printf("  <groups>\n");
 
@@ -525,14 +545,23 @@
 		if (svcname &&
 		    strcmp(rgl->rgl_states[x].rs_name, svcname))
 			continue;
-		xml_rg_state(&rgl->rgl_states[x], members, 0);
+		if (svcname) {
+			switch (rgl->rgl_states[x].rs_state) {
+			case RG_STATE_STARTING:
+			case RG_STATE_STARTED:
+			case RG_STATE_STOPPING:
+				break;
+			default:
+				ret = rgl->rgl_states[x].rs_state;
+			}
+		}
 	}
 
 	printf("  </groups>\n");
+	return ret;
 }
 
 
-
 void
 txt_quorum_state(int qs)
 {
@@ -606,10 +635,15 @@
 }
 
 
-void
+int
 txt_member_states(cluster_member_list_t *membership, char *name)
 {
-	int x;
+	int x, ret = 0;
+
+  	if (!membership) {
+  		printf("Membership information not available\n");
+ 		return -1;
+  	}
 
 	printf("  %-34.34s %-4.4s %s\n", "Member Name", "ID", "Status");
 	printf("  %-34.34s %-4.4s %s\n", "------ ----", "----", "------");
@@ -618,35 +652,45 @@
 		if (name && strcmp(membership->cml_members[x].cn_name, name))
 			continue;
 		txt_member_state(&membership->cml_members[x]);
+ 		ret = !(membership->cml_members[x].cn_member & FLAG_UP);
 	}
 
 	printf("\n");
+	return ret;
 }
 
 
-void
+int
 xml_member_states(cluster_member_list_t *membership, char *name)
 {
-	int x;
+	int x, ret = 0;
 
-	if (!membership)
-		return;
+	if (!membership) {
+		printf("  <nodes/>\n");
+		return -1;
+	}
 
 	printf("  <nodes>\n");
 	for (x = 0; x < membership->cml_count; x++) {
 		if (name && strcmp(membership->cml_members[x].cn_name, name))
 			continue;
 		xml_member_state(&membership->cml_members[x]);
+		if (name)
+			ret = !(membership->cml_members[x].cn_member & FLAG_UP);
 	}
 	printf("  </nodes>\n");
+	
+	return ret;
 }
 
 
-void
+int 
 txt_cluster_status(int qs, cluster_member_list_t *membership,
 		   rg_state_list_t *rgs, char *name, char *svcname, 
 		   int flags)
 {
+	int ret;
+	
 	if (!svcname && !name) {
 		txt_quorum_state(qs);
 		if (!membership) {
@@ -656,18 +700,22 @@
 		}
 	}
 
-	if (!svcname || (name && svcname))
-		txt_member_states(membership, name);
-	if (!name || (name && svcname))
-		txt_rg_states(rgs, membership, svcname, flags);
+  	if (!svcname || (name && svcname))
+ 		ret = txt_member_states(membership, name);
+ 	if (name && !svcname)
+ 		return ret;
+ 	if (!name || (name && svcname))
+ 		ret = txt_rg_states(rgs, membership, svcname, flags);
+ 	return ret;
 }
 
 
-void
+int
 xml_cluster_status(int qs, cluster_member_list_t *membership,
 		   rg_state_list_t *rgs, char *name, char *svcname,
 		   int flags)
 {
+ 	int ret1 = 0, ret2 = -1;
 	int x;
 
 	printf("<?xml version=\"1.0\"?>\n");
@@ -687,36 +735,24 @@
 		}
 	}
 
-	if (!svcname && !name)
-		xml_quorum_state(qs);
-	if (!svcname || (name && svcname)) 
-		xml_member_states(membership, name);
-	if (rgs &&
-	    (!name || (name && svcname)))
-		xml_rg_states(rgs, membership, svcname);
-	printf("</clustat>\n");
+  	if (!svcname && !name)
+  		xml_quorum_state(qs);
+  	if (!svcname || (name && svcname)) 
+ 		ret1 = xml_member_states(membership, name);
+ 	
+  	if (rgs &&
+  	    (!name || (name && svcname)))
+ 		ret2 = xml_rg_states(rgs, membership, svcname);
+  	printf("</clustat>\n");
+ 	
+ 	if (name && ret1)
+ 		return ret1;
+ 	if (svcname && ret2)
+ 		return ret2;
+ 	return 0;
 }
 
 
-void
-dump_node(cman_node_t *node)
-{
-	printf("Node %s state %02x\n", node->cn_name, node->cn_member);
-}
-
-
-void 
-dump_nodes(cluster_member_list_t *nodes)
-{
-	int x;
-
-	for (x=0; x<nodes->cml_count; x++) {
-		dump_node(&nodes->cml_members[x]);
-	}
-}
-
-
-
 cluster_member_list_t *
 build_member_list(cman_handle_t ch, int *lid)
 {
@@ -921,11 +957,13 @@
 		}
 
 		if (xml)
-			xml_cluster_status(qs, membership, rgs, member_name,
-					   rg_name,flags);
+			ret = xml_cluster_status(qs, membership, rgs,
+						 member_name, rg_name,
+						 flags);
 		else
-			txt_cluster_status(qs, membership, rgs, member_name,
-					   rg_name,flags);
+			ret = txt_cluster_status(qs, membership, rgs,
+						 member_name, rg_name,
+						 flags);
 
 		if (membership)
 			free_member_list(membership);
--- cluster/rgmanager/src/utils/clusvcadm.c	2006/09/01 19:02:22	1.12
+++ cluster/rgmanager/src/utils/clusvcadm.c	2006/12/13 18:39:20	1.12.4.1
@@ -180,6 +180,46 @@
 
 
 int
+find_closest_node(cluster_member_list_t *cml, char *name, size_t maxlen)
+{
+	int x, c = 0, cl = 0, nc = 0, ncl = 0, cur = 0;
+
+	for (x=0; x<cml->cml_count; x++) {
+		cur = 0;
+
+		while (cml->cml_members[x].cn_name[cur] && name[cur] &&
+		       (cml->cml_members[x].cn_name[cur] == name[cur]))
+			cur++;
+		if (!cur)
+			continue;
+		if (cur >= cl) {
+			ncl = cl; /* Next-closest */
+			nc = c;
+			cl = cur;
+			c = x;
+		}
+	}
+
+	if (!cl) {
+		printf("No matches for '%s' found\n", name);
+		return 0;
+	}
+
+	if (ncl == cl) {
+		printf("More than one possible match for '%s' found\n",
+		       name);
+		return 0;
+	}
+
+	printf("Closest match: '%s'\n", 
+	       cml->cml_members[c].cn_name);
+
+	strncpy(name, cml->cml_members[c].cn_name, maxlen);
+	return cml->cml_members[c].cn_nodeid;
+}
+
+
+int
 main(int argc, char **argv)
 {
 	extern char *optarg;
@@ -291,9 +331,12 @@
 	if (node_specified) {
 		svctarget = memb_name_to_id(membership, nodename);
 		if (svctarget == 0) {
-			fprintf(stderr, "Member %s not in membership list\n",
-				nodename);
-			return 1;
+			printf("'%s' not in membership list\n",
+			       nodename);
+			svctarget = find_closest_node(membership, nodename,
+						      sizeof(nodename));
+			if (!svctarget)
+				return 1;
 		}
 	} else {
 		svctarget = 0;
@@ -301,9 +344,9 @@
 		clu_local_nodename(RG_SERVICE_GROUP, nodename,
 				   sizeof(nodename));
 				   */
+		strcpy(nodename,"me");
 	}
 	
-	strcpy(nodename,"me");
 	build_message(&msg, action, svcname, svctarget);
 
 	if (action != RG_RELOCATE && action != RG_MIGRATE) {
@@ -312,7 +355,11 @@
 		fflush(stdout);
 		msg_open(MSG_SOCKET, 0, RG_PORT, &ctx, 5);
 	} else {
-		printf("Trying to relocate %s to %s", svcname, nodename);
+		if (!svctarget)
+			printf("Trying to relocate %s", svcname);
+		else 
+			printf("Trying to relocate %s to %s", svcname,
+			       nodename);
 		printf("...");
 		fflush(stdout);
 		msg_open(MSG_SOCKET, 0, RG_PORT, &ctx, 5);
@@ -342,5 +389,19 @@
 
 	swab_SmMessageSt(&msg);
 	printf("%s\n", rg_strerror(msg.sm_data.d_ret));
+	switch (action) {
+	case RG_MIGRATE:
+	case RG_RELOCATE:
+	case RG_START:
+	case RG_ENABLE:
+		printf("%s%s is now running on %s\n",
+		       msg.sm_data.d_svcOwner==svctarget?"":"Warning: ",
+		       svcname, memb_id_to_name(membership,
+		       			        msg.sm_data.d_svcOwner));
+		break;
+	default:
+		break;
+	}
+	
 	return msg.sm_data.d_ret;
 }




* [Cluster-devel] cluster/rgmanager include/resgroup.h src/daemo ...
@ 2006-12-14 22:03 lhh
From: lhh @ 2006-12-14 22:03 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	lhh at sourceware.org	2006-12-14 22:03:17

Modified files:
	rgmanager/include: resgroup.h 
	rgmanager/src/daemons: rg_forward.c rg_state.c 
	rgmanager/src/utils: clusvcadm.c 

Log message:
	Fix #216774

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/resgroup.h.diff?cvsroot=cluster&r1=1.15&r2=1.16
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_forward.c.diff?cvsroot=cluster&r1=1.8&r2=1.9
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&r1=1.24&r2=1.25
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clusvcadm.c.diff?cvsroot=cluster&r1=1.12&r2=1.13

--- cluster/rgmanager/include/resgroup.h	2006/10/06 21:22:27	1.15
+++ cluster/rgmanager/include/resgroup.h	2006/12/14 22:03:17	1.16
@@ -125,7 +125,7 @@
 		       msgctx_t *resp_ctx,
        		       int max, uint32_t target, int arg0, int arg1);
 
-void send_response(int ret, request_t *req);
+void send_response(int ret, int node, request_t *req);
 void send_ret(msgctx_t *ctx, char *name, int ret, int req);
 
 /* do this op on all resource groups.  The handler for the request 
--- cluster/rgmanager/src/daemons/rg_forward.c	2006/10/23 22:47:01	1.8
+++ cluster/rgmanager/src/daemons/rg_forward.c	2006/12/14 22:03:17	1.9
@@ -91,7 +91,7 @@
 	msg_free_ctx(ctx);
 
 	swab_SmMessageSt(&msg);
-	send_response(msg.sm_data.d_ret, req);
+	send_response(msg.sm_data.d_ret, msg.sm_data.d_svcOwner, req);
 	rq_free(req);
 	pthread_exit(NULL);
 	
--- cluster/rgmanager/src/daemons/rg_state.c	2006/10/23 22:47:01	1.24
+++ cluster/rgmanager/src/daemons/rg_state.c	2006/12/14 22:03:17	1.25
@@ -213,7 +213,7 @@
 
 	
 void
-send_response(int ret, request_t *req)
+send_response(int ret, int nodeid, request_t *req)
 {
 	SmMessageSt msg, *msgp = &msg;
 
@@ -226,7 +226,10 @@
 	msgp->sm_data.d_action = req->rr_orig_request;
 	strncpy(msgp->sm_data.d_svcName, req->rr_group,
 		sizeof(msgp->sm_data.d_svcName));
-	msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+	if (!nodeid)
+		msgp->sm_data.d_svcOwner = my_id();
+	else 
+		msgp->sm_data.d_svcOwner = nodeid;
 	msgp->sm_data.d_ret = ret;
 
 	swab_SmMessageSt(msgp);
--- cluster/rgmanager/src/utils/clusvcadm.c	2006/09/01 19:02:22	1.12
+++ cluster/rgmanager/src/utils/clusvcadm.c	2006/12/14 22:03:17	1.13
@@ -180,6 +180,46 @@
 
 
 int
+find_closest_node(cluster_member_list_t *cml, char *name, size_t maxlen)
+{
+	int x, c = 0, cl = 0, nc = 0, ncl = 0, cur = 0;
+
+	for (x=0; x<cml->cml_count; x++) {
+		cur = 0;
+
+		while (cml->cml_members[x].cn_name[cur] && name[cur] &&
+		       (cml->cml_members[x].cn_name[cur] == name[cur]))
+			cur++;
+		if (!cur)
+			continue;
+		if (cur >= cl) {
+			ncl = cl; /* Next-closest */
+			nc = c;
+			cl = cur;
+			c = x;
+		}
+	}
+
+	if (!cl) {
+		printf("No matches for '%s' found\n", name);
+		return 0;
+	}
+
+	if (ncl == cl) {
+		printf("More than one possible match for '%s' found\n",
+		       name);
+		return 0;
+	}
+
+	printf("Closest match: '%s'\n", 
+	       cml->cml_members[c].cn_name);
+
+	strncpy(name, cml->cml_members[c].cn_name, maxlen);
+	return cml->cml_members[c].cn_nodeid;
+}
+
+
+int
 main(int argc, char **argv)
 {
 	extern char *optarg;
@@ -291,9 +331,12 @@
 	if (node_specified) {
 		svctarget = memb_name_to_id(membership, nodename);
 		if (svctarget == 0) {
-			fprintf(stderr, "Member %s not in membership list\n",
-				nodename);
-			return 1;
+			printf("'%s' not in membership list\n",
+			       nodename);
+			svctarget = find_closest_node(membership, nodename,
+						      sizeof(nodename));
+			if (!svctarget)
+				return 1;
 		}
 	} else {
 		svctarget = 0;
@@ -301,9 +344,9 @@
 		clu_local_nodename(RG_SERVICE_GROUP, nodename,
 				   sizeof(nodename));
 				   */
+		strcpy(nodename,"me");
 	}
 	
-	strcpy(nodename,"me");
 	build_message(&msg, action, svcname, svctarget);
 
 	if (action != RG_RELOCATE && action != RG_MIGRATE) {
@@ -312,7 +355,11 @@
 		fflush(stdout);
 		msg_open(MSG_SOCKET, 0, RG_PORT, &ctx, 5);
 	} else {
-		printf("Trying to relocate %s to %s", svcname, nodename);
+		if (!svctarget)
+			printf("Trying to relocate %s", svcname);
+		else 
+			printf("Trying to relocate %s to %s", svcname,
+			       nodename);
 		printf("...");
 		fflush(stdout);
 		msg_open(MSG_SOCKET, 0, RG_PORT, &ctx, 5);
@@ -342,5 +389,19 @@
 
 	swab_SmMessageSt(&msg);
 	printf("%s\n", rg_strerror(msg.sm_data.d_ret));
+	switch (action) {
+	case RG_MIGRATE:
+	case RG_RELOCATE:
+	case RG_START:
+	case RG_ENABLE:
+		printf("%s%s is now running on %s\n",
+		       msg.sm_data.d_svcOwner==svctarget?"":"Warning: ",
+		       svcname, memb_id_to_name(membership,
+		       			        msg.sm_data.d_svcOwner));
+		break;
+	default:
+		break;
+	}
+	
 	return msg.sm_data.d_ret;
 }




* [Cluster-devel] cluster/rgmanager include/resgroup.h src/daemo ...
@ 2007-11-14 19:03 lhh
From: lhh @ 2007-11-14 19:03 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL4
Changes by:	lhh at sourceware.org	2007-11-14 19:03:37

Modified files:
	rgmanager/include: resgroup.h 
	rgmanager/src/daemons: groups.c rg_state.c 

Log message:
	Fix #360401 - hang forever during shutdown due to a previous service boot problem

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/resgroup.h.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.3.2.9&r2=1.3.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/groups.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.8.2.21&r2=1.8.2.22
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4.2.21&r2=1.4.2.22

--- cluster/rgmanager/include/resgroup.h	2007/01/03 21:08:17	1.3.2.9
+++ cluster/rgmanager/include/resgroup.h	2007/11/14 19:03:37	1.3.2.10
@@ -156,6 +156,7 @@
 cluster_member_list_t *member_list(void);
 uint64_t my_id(void);
 
+#define RG_ERELO	-9 /* Operation cannot complete here */
 #define RG_ENODEDEATH	-8 /* Processing node died */
 #define RG_ERUN		-7 /* Service is running already */
 #define RG_EAGAIN	-6 /* Try again */
--- cluster/rgmanager/src/daemons/groups.c	2007/09/28 15:14:52	1.8.2.21
+++ cluster/rgmanager/src/daemons/groups.c	2007/11/14 19:03:37	1.8.2.22
@@ -192,7 +192,8 @@
 		}
 
 		if (st.rs_state != RG_STATE_STARTED &&
-		     st.rs_state != RG_STATE_STARTING)
+		     st.rs_state != RG_STATE_STARTING &&
+		     st.rs_state != RG_STATE_STOPPING)
 			continue;
 
 		if (mp->cm_id != st.rs_owner)
--- cluster/rgmanager/src/daemons/rg_state.c	2007/06/28 11:54:50	1.4.2.21
+++ cluster/rgmanager/src/daemons/rg_state.c	2007/11/14 19:03:37	1.4.2.22
@@ -41,10 +41,13 @@
 int set_rg_state(char *servicename, rg_state_t *svcblk);
 int get_rg_state(char *servicename, rg_state_t *svcblk);
 void get_recovery_policy(char *rg_name, char *buf, size_t buflen);
-int have_exclusive_resources();
+int have_exclusive_resources(void);
 int check_exclusive_resources(cluster_member_list_t *membership, char *svcName);
 
 
+pthread_mutex_t exclusive_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+
 uint64_t
 next_node_id(cluster_member_list_t *membership, uint64_t me)
 {
@@ -446,6 +449,7 @@
 		break;
 	
 	case RG_STATE_DISABLED:
+		ret = 2;
 	case RG_STATE_UNINITIALIZED:
 		if (req == RG_DISABLE) {
 			clulog(LOG_NOTICE,
@@ -536,7 +540,7 @@
 			}
 
 			clulog(LOG_NOTICE,
-			       "Starting stopped service%s\n",
+			       "Starting stopped service %s\n",
 			       svcName);
 			ret = 1;
 			break;
@@ -557,7 +561,7 @@
 			snprintf(query,
 				sizeof(query),
 				"/cluster/clusternodes/clusternode[@nodeid=\"%d\"]/@name",
-				svcStatus->rs_owner);
+				(int)svcStatus->rs_owner);
 			ccs_get(fd, query, &nodename);
 			ccs_disconnect(fd);
 		}
@@ -650,42 +654,61 @@
 svc_start(char *svcName, int req)
 {
 	void *lockp = NULL;
-	int ret;
+	int ret, xret;
 	rg_state_t svcStatus;
+	int need_check = have_exclusive_resources();
+	cluster_member_list_t *membership;
+
+ 	if (need_check)
+		pthread_mutex_lock(&exclusive_mutex);
+
+	ret = RG_EFAIL;
 
 	if (rg_lock(svcName, &lockp) < 0) {
 		clulog(LOG_ERR, "#45: Unable to obtain cluster lock: %s\n",
 		       strerror(errno));
-		return FAIL;
+		goto out_nolock;
 	}
 
 	if (get_rg_state(svcName, &svcStatus) != 0) {
-		rg_unlock(svcName, lockp);
 		clulog(LOG_ERR, "#46: Failed getting status for RG %s\n",
 		       svcName);
-		return FAIL;
+		goto out_unlock;
+	}
+
+	if (need_check) {
+		membership = member_list();
+		xret = check_exclusive_resources(membership, svcName);
+		cml_free(membership);
+		if (xret != 0) {
+			if (xret > 0)
+				/* Exc. service running */
+				ret = RG_ERELO;
+			else
+				/* XXX */
+				ret = RG_ENOSERVICE;
+			goto out_unlock;
+		}
 	}
 
 	/* LOCK HELD */
 	switch (svc_advise_start(&svcStatus, svcName, req)) {
 	case 0: /* Don't start service, return FAIL */
-		rg_unlock(svcName, lockp);
-		return FAIL;
+		goto out_unlock;
 	case 2: /* Don't start service, return 0 */
-		rg_unlock(svcName, lockp);
-		return 0;
+		ret = 0;
+		goto out_unlock;
 	case 3:
-		rg_unlock(svcName, lockp);
-		return RG_EAGAIN;
+		ret = RG_EAGAIN;
+		goto out_unlock;
 	case 4:
-		rg_unlock(svcName, lockp);
-		return RG_ERUN;
+		ret = RG_ERUN;
+		goto out_unlock;
 	default:
 		break;
 	}
 
 	/* LOCK HELD if we get here */
-
 	svcStatus.rs_owner = my_id();
 	svcStatus.rs_state = RG_STATE_STARTING;
 	svcStatus.rs_transition = (uint64_t)time(NULL);
@@ -699,10 +722,17 @@
 		clulog(LOG_ERR,
 		       "#47: Failed changing service status\n");
 		rg_unlock(svcName, lockp);
-		return FAIL;
+		goto out_unlock;
 	}
 	
 	rg_unlock(svcName, lockp);
+	
+	/* release excl. mutex during start */
+	if (need_check) {
+		/* Also clear need_check so we don't double-unlock */
+		pthread_mutex_unlock(&exclusive_mutex);
+		need_check = 0;
+	}
 
 	ret = group_op(svcName, RG_START);
 	ret = !!ret; /* Either it worked or it didn't.  Ignore all the
@@ -711,17 +741,17 @@
 	if (rg_lock(svcName, &lockp) < 0) {
 		clulog(LOG_ERR, "#74: Unable to obtain cluster lock: %s\n",
 		       strerror(errno));
-		return FAIL;
+		ret = RG_EFAIL;
+		goto out_nolock;
 	}
 
 	svcStatus.rs_state = RG_STATE_STARTED;
 	if (set_rg_state(svcName, &svcStatus) != 0) {
 		clulog(LOG_ERR,
 		       "#75: Failed changing service status\n");
-		rg_unlock(svcName, lockp);
-		return FAIL;
+		ret = RG_EFAIL;
+		goto out_unlock;
 	}
-	rg_unlock(svcName, lockp);
        
 	if (ret == 0)
 		clulog(LOG_NOTICE,
@@ -732,6 +762,11 @@
 		       "#68: Failed to start %s; return value: %d\n",
 		       svcName, ret);
 
+out_unlock:
+	rg_unlock(svcName, lockp);
+out_nolock:
+	if (need_check)
+		pthread_mutex_unlock(&exclusive_mutex);
 	return ret;
 }
 
@@ -1115,7 +1150,7 @@
 {
 	cluster_member_list_t *allowed_nodes, *backup = NULL;
 	uint64_t target = preferred_target, me = my_id();
-	int ret, x;
+	int ret, x, tried = 0;
 	
 	/*
 	 * Stop the service - if we haven't already done so.
@@ -1181,6 +1216,7 @@
 		 	 * It's legal to start the service on the given
 		 	 * node.  Try to do so.
 		 	 */
+			++tried;
 			if (relocate_service(svcName, request, target) == 0) {
 				*new_owner = target;
 				/*
@@ -1211,9 +1247,12 @@
 		if (target == me)
 			goto exhausted;
 
+		++tried;
+
+		/* Each node gets one try */
+		memb_mark_down(allowed_nodes, target);
 		switch (relocate_service(svcName, request, target)) {
 		case RG_EFAIL:
-			memb_mark_down(allowed_nodes, target);
 			continue;
 		case RG_EABORT:
 			svc_report_failure(svcName);
@@ -1228,9 +1267,10 @@
 			       (uint32_t)(target&0xffffffff), request);
 			return 0;
 		case 0:
-			*new_owner = target;
 			clulog(LOG_NOTICE, "Service %s is now running "
 			       "on member %d\n", svcName, (int)target);
+		case RG_ERUN:
+			*new_owner = target;
 			cml_free(allowed_nodes);
 			return 0;
 		default:
@@ -1254,9 +1294,10 @@
 	 */
 exhausted:
 	if (!rg_locked()) {
-		clulog(LOG_WARNING,
-		       "#70: Attempting to restart service %s locally.\n",
-		       svcName);
+		if (tried)
+			clulog(LOG_WARNING,
+			       "#70: Attempting to restart service %s locally.\n",
+			       svcName);
 		if (svc_start(svcName, RG_START_RECOVER) == 0) {
 			*new_owner = me;
 			return FAIL;
@@ -1275,9 +1316,9 @@
 int
 handle_fd_start_req(char *svcName, int request, uint64_t *new_owner)
 {
-	cluster_member_list_t *allowed_nodes, *backup = NULL;
+	cluster_member_list_t *allowed_nodes;
 	uint64_t target, me = my_id();
-	int ret, x;
+	int ret;
 	
 	allowed_nodes = member_list();
 
@@ -1326,7 +1367,6 @@
 }
 
 
-pthread_mutex_t exclusive_mutex = PTHREAD_MUTEX_INITIALIZER;
 /**
  * handle_start_req - Handle a generic start request from a user or during
  * service manager boot.
@@ -1342,7 +1382,6 @@
 {
 	int ret, tolerance = FOD_BEST;
 	cluster_member_list_t *membership = member_list();
-	int need_check = have_exclusive_resources();
 
 	/*
 	 * When a service request is from a user application (eg, clusvcadm),
@@ -1358,18 +1397,6 @@
 		cml_free(membership);
 		return FAIL;
 	}
-	if (need_check) {
-		pthread_mutex_lock(&exclusive_mutex);
-		ret = check_exclusive_resources(membership, svcName);
-		if (ret != 0) {
-			cml_free(membership);
-			pthread_mutex_unlock(&exclusive_mutex);
-			if (ret > 0)
-				goto relocate;
-			else
-				return FAIL;
-		}
-	}
 	cml_free(membership);
 	
 	/*
@@ -1377,25 +1404,22 @@
 	 * mask here - so that we can try all nodes if necessary.
 	 */
 	ret = svc_start(svcName, req);
-	if (need_check)
-		pthread_mutex_unlock(&exclusive_mutex);
-
-	/* 
-	   If services are locked, return the error 
-	  */
-	if (ret == RG_EAGAIN || ret == RG_ERUN)
+	switch(ret) {
+	case RG_ERELO:
+		goto relocate;
+
+	case RG_EAGAIN:
+		/* If services are locked, return the error */
+	case RG_ENOSERVICE:
+		/* service doesn't exist? */
+	case RG_ERUN:
+		/* If service is already running, return that value */
 		return ret;
 
-	/*
-	 * If we succeeded, then we're done.
-	 */
-	if (ret == SUCCESS) {
+	case SUCCESS:
+		/* If we succeeded, then we're done.  */
 		*new_owner = my_id();
-		return SUCCESS;
-	}
-
-	/* Already running? */
-	if (ret == NO) {
+	case NO: 
 		return SUCCESS;
 	}
 	
@@ -1418,13 +1442,13 @@
 		return RG_EABORT;
 	}
 	
-relocate:
 	/*
 	 * OK, it failed to start - but succeeded to stop.  Now,
 	 * we should relocate the service.
 	 */
 	clulog(LOG_WARNING, "#71: Relocating failed service %s\n",
 	       svcName);
+relocate:
 	ret = handle_relocate_req(svcName, RG_START_RECOVER, -1, new_owner);
 
 	/* If we leave the service stopped, instead of disabled, someone
@@ -1456,7 +1480,6 @@
 	int x;
 	uint64_t me = my_id();
 	cluster_member_list_t *membership = member_list();
-	int need_check = have_exclusive_resources();
 
 	/* XXX ok, so we need to say "should I start this if I was the
 	   only cluster member online */
@@ -1477,23 +1500,29 @@
 		cml_free(membership);
 		return FAIL;
 	}
- 	if (need_check) {
- 		pthread_mutex_lock(&exclusive_mutex);
- 		if (check_exclusive_resources(membership, svcName) != 0) {
- 			pthread_mutex_unlock(&exclusive_mutex);
- 			cml_free(membership);
- 			return FAIL;
- 		}
- 	}
 	cml_free(membership);
 
 	x = svc_start(svcName, req);
-	if (need_check)
- 		pthread_mutex_unlock(&exclusive_mutex);
-	if (x == 0)
-		return 0;
-	if (x == RG_ERUN)
-		return RG_ERUN;
+	switch(x) {
+	case RG_ERELO:
+		/* Don't relocate from here; it was a remote start */
+		/* Return fail so the other node can go ahead and 
+		   try the other nodes in the cluster */
+	case NO: 
+		return RG_EFAIL;
+
+	case RG_EAGAIN:
+		/* If services are locked, return the error */
+	case RG_ENOSERVICE:
+		/* service doesn't exist? */
+	case RG_ERUN:
+		/* If service is already running, return that value */
+		return x;
+
+	case SUCCESS:
+		/* If we succeeded, then we're done.  */
+		return SUCCESS;
+	}
 
 	if (svc_stop(svcName, RG_STOP_RECOVER) == 0)
 		return RG_EFAIL;
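
The svc_start() rework above replaces scattered unlock-and-return sites with
two labeled exits, and drops exclusive_mutex before the long-running
group_op() call so other starts are not serialized behind it. The cleanup
pattern in isolation -- a generic sketch with placeholder lock helpers, not
rgmanager code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t excl = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for rg_lock()/group_op() etc. (assumptions). */
static int acquire_service_lock(void) { return 0; }
static void release_service_lock(void) { }
static int state_allows_start(void) { return 1; }
static int do_long_running_start(void) { return 0; }

static int start_op(int need_excl)
{
	int ret = -1;                     /* default: failure */

	if (need_excl)
		pthread_mutex_lock(&excl);

	if (acquire_service_lock() < 0)
		goto out_nolock;          /* nothing else to undo */

	if (!state_allows_start())
		goto out_unlock;          /* one exit path, one unlock */

	if (need_excl) {
		pthread_mutex_unlock(&excl); /* drop before the slow part */
		need_excl = 0;               /* avoid double-unlock below */
	}

	ret = do_long_running_start();

out_unlock:
	release_service_lock();
out_nolock:
	if (need_excl)
		pthread_mutex_unlock(&excl);
	return ret;
}

int main(void)
{
	printf("start_op -> %d\n", start_op(1));
	return 0;
}
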



