* [Cluster-devel] conga luci/cluster/form-macros luci/cluster/in ...
@ 2007-07-27 16:49 rmccabe
From: rmccabe @ 2007-07-27 16:49 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-07-27 16:43:47

Modified files:
	luci/cluster   : form-macros index_html validate_fence.js 
	luci/homebase  : validate_cluster_add.js 
	luci/site/luci/Extensions: FenceHandler.py LuciClusterActions.py 
	                           LuciClusterInfo.py LuciDB.py 
	                           LuciZope.py PropsObject.py 
	                           RicciQueries.py StorageReport.py 
	                           cluster_adapters.py 
	                           conga_constants.py 
	ricci/common   : XML.cpp 
	ricci/test_suite/cluster: generate_xvm_key.xml 

Log message:
	Fixes from the RHEL5 branch

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.204&r2=1.205
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.36&r2=1.37
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_fence.js.diff?cvsroot=cluster&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&r1=1.5&r2=1.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/PropsObject.py.diff?cvsroot=cluster&r1=1.6&r2=1.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/StorageReport.py.diff?cvsroot=cluster&r1=1.24&r2=1.25
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.262&r2=1.263
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.41&r2=1.42
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/common/XML.cpp.diff?cvsroot=cluster&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/test_suite/cluster/generate_xvm_key.xml.diff?cvsroot=cluster&r1=1.1&r2=1.2

--- conga/luci/cluster/index_html	2007/07/12 02:42:42	1.36
+++ conga/luci/cluster/index_html	2007/07/27 16:43:47	1.37
@@ -44,7 +44,7 @@
 			<tal:block tal:condition="firsttime">
 				<tal:block tal:define="global busywaiting python:True" />
 				<meta http-equiv="refresh"
-					tal:attributes="content isBusy/refreshurl | python:'3%surl=/luci/cluster' % chr(0x3b)" />
+					tal:attributes="content isBusy/refreshurl | python:'5%surl=/luci/cluster' % chr(0x3b)" />
 			</tal:block>
 
 			<tal:block tal:define="global busy isBusy/busy | nothing" />
@@ -52,7 +52,7 @@
 			<tal:block tal:condition="busy">
 				<tal:block tal:define="global busywaiting python:True" />
 				<meta http-equiv="refresh"
-					tal:attributes="content isBusy/refreshurl | python:'3%surl=/luci/cluster' % chr(0x3b)" />
+					tal:attributes="content isBusy/refreshurl | python:'5%surl=/luci/cluster' % chr(0x3b)" />
 			</tal:block>
 		</tal:block>
     </metal:headslot>
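
chr(0x3b) is just the semicolon, presumably written that way because ';' is the
statement separator inside tal:attributes expressions. The fallback value the
Python expression builds is an ordinary meta-refresh string; checking it
interactively:

    >>> '5%surl=/luci/cluster' % chr(0x3b)
    '5;url=/luci/cluster'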
--- conga/luci/cluster/validate_fence.js	2007/07/23 18:53:08	1.10
+++ conga/luci/cluster/validate_fence.js	2007/07/27 16:43:47	1.11
@@ -51,12 +51,22 @@
 }
 
 function validate_field_passwd(form, form_elem) {
+	var errors = [];
 	if (form_elem.disabled) {
 		clr_form_err(form_elem);
 		return (null);
 	}
 
-	var errors = validate_field_str(form, form_elem);
+	if (!form_elem || !form_elem.value) {
+		if (!form_elem) {
+			errors.push('No value was given for this field.');
+		} else {
+			set_form_err(form_elem);
+			errors.push(form_elem.name + ' values must not be empty.');
+		}
+		return (errors);
+	}
+
 	if (errors && errors.length > 0 && form.passwd_script && !str_is_blank(form.passwd_script.value))
 	{
 		clr_form_err(form_elem);
--- conga/luci/homebase/validate_cluster_add.js	2007/07/11 22:47:07	1.10
+++ conga/luci/homebase/validate_cluster_add.js	2007/07/27 16:43:47	1.11
@@ -43,9 +43,13 @@
 	if (!view_certs || !view_certs.checked) {
 		var confirm_str = '';
 		if (form.addnode) {
-			confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?';
+			confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?\nEach node added will be rebooted during this process.';
 		} else {
-			confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
+			if (form.cluster_create) {
+				confirm_str = 'All nodes added to this cluster will be rebooted as part of this process.\n\nCreate cluster \"' + clustername + '\"?';
+			} else {
+				confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
+			}
 		}
 
 		if (confirm(confirm_str)) {
--- conga/luci/site/luci/Extensions/FenceHandler.py	2007/07/12 15:44:43	1.20
+++ conga/luci/site/luci/Extensions/FenceHandler.py	2007/07/27 16:43:47	1.21
@@ -772,7 +772,12 @@
 	try:
 		pwd = form['passwd'].strip()
 		if not pwd:
-			raise Exception, 'blank'
+			if form['passwd']:
+				# allow passwords consisting of nothing but whitespace
+				fencedev.addAttribute('passwd', form['passwd'])
+			else:
+				raise Exception, 'blank'
+
 		fencedev.addAttribute('passwd', pwd)
 		has_passwd = True
 	except Exception, e:
@@ -1152,6 +1157,11 @@
 	fenceinst = Device()
 	fenceinst.addAttribute('name', parent_name)
 
+	if form.has_key('option'):
+		option = form['option'].strip()
+		if option:
+			fenceinst.addAttribute('option', option)
+
 	try:
 		ret = FI_VALIDATE[fence_agent](form, fenceinst)
 		if len(ret) > 0:
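
The new passwd branch is needed because str.strip() reduces a whitespace-only
password to the empty string, which is falsy; testing the unstripped value
distinguishes an empty field from one containing only whitespace, matching the
comment in the hunk. A quick illustration (Python, hypothetical input):

    >>> bool('   '.strip()), bool('   ')
    (False, True)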
--- conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/06/25 16:11:30	1.3
+++ conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/07/27 16:43:47	1.4
@@ -245,7 +245,11 @@
 		return None
 	return True
 
-def NodeLeaveCluster(self, rc, clustername, nodename_resolved):
+def NodeLeaveCluster(	self,
+						rc,
+						clustername,
+						nodename_resolved,
+						stop_cluster=False):
 	reported_cluname = None
 	try:
 		cluster_info = rc.cluster_info()
@@ -266,7 +270,7 @@
 				% nodename_resolved)
 		return None
 
-	batch_number, result = rq.nodeLeaveCluster(rc)
+	batch_number, result = rq.nodeLeaveCluster(rc, cluster_shutdown=stop_cluster)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('NLC2: %s: batch_number or result is None' \
@@ -367,7 +371,7 @@
 	# First, delete cluster.conf from node to be deleted.
 	# next, have node leave cluster.
 
-	batch_number, result = rq.nodeLeaveCluster(rc, purge=False)
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('ND5: batch_number and/or result is None')
@@ -505,7 +509,8 @@
 					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
 				errors += 1
 		else:
-			ret = NodeLeaveCluster(self, rc, clustername, nodename_resolved)
+			ret = NodeLeaveCluster(self, rc, clustername,
+					nodename_resolved, stop_cluster=True)
 			if ret is None:
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py	2007/07/12 22:35:40	1.7
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py	2007/07/27 16:43:47	1.8
@@ -817,22 +817,19 @@
 	found = False
 	for item in status:
 		if (item['type'] == 'node') and (item['name'] == nodename):
+			if item['online'] == 'false':
+				nodestate = NODE_UNKNOWN
+			elif item['clustered'] == 'true':
+				nodestate = NODE_ACTIVE
+			else:
+				nodestate = NODE_INACTIVE
 			found = True
 			break
 
 	if found is False:
-		item = {}
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getNodeInfo1: Unable to resolve node name in cluster status')
-		return item
-
-	# Now determine state of node...
-	if item['online'] == 'false':
+			luci_log.debug_verbose('getNodeInfo1: Unable to find node "%s" in cluster status' % nodename)
 		nodestate = NODE_UNKNOWN
-	elif item['clustered'] == 'true':
-		nodestate = NODE_ACTIVE
-	else:
-		nodestate = NODE_INACTIVE
 
 	infohash['nodestate'] = nodestate
 	infohash['nodename'] = nodename
@@ -899,12 +896,10 @@
 
 	infohash['fdoms'] = fdom_dict_list
 
-	# return infohash
 	infohash['d_states'] = None
-
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
+
 	if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
-	# call service module on node and find out which daemons are running
 		try:
 			rc = RicciCommunicator(nodename_resolved)
 			if not rc:
@@ -916,6 +911,7 @@
 				% (nodename_resolved, str(e)))
 
 		if rc is not None:
+			# call service module on node and find out which daemons are running
 			dlist = list()
 			dlist.append('ccsd')
 			if not gulm_cluster:
--- conga/luci/site/luci/Extensions/LuciDB.py	2007/07/12 22:35:40	1.5
+++ conga/luci/site/luci/Extensions/LuciDB.py	2007/07/27 16:43:47	1.6
@@ -825,6 +825,7 @@
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('GRA8: cluster name is none for %s' \
 						% ricci_hostname)
+				raise Exception, '%s not in a cluster' % ricci_hostname
 
 			cur_alias = str(clu_info[1]).strip().lower()
 			if not cur_alias:
@@ -931,7 +932,7 @@
 			luci_log.debug_verbose('GCF0: cluster %s [%s] folder missing: %r %s -- returning empty map' % (cluname, path, e, str(e)))
 	return None
 
-def getClusterStatusDB(self, clustername, errmsg=None):
+def getClusterStatusDB(self, clustername):
 	results = list()
 	vals = {}
 
@@ -942,8 +943,6 @@
 	vals['quorate'] = '[unknown]'
 	vals['votes'] = '[unknown]'
 	vals['minQuorum'] = '[unknown]'
-	if errmsg:
-		vals['errmsg'] = errmsg
 	results.append(vals)
 
 	nodelist = getClusterDBNodes(self, clustername)
--- conga/luci/site/luci/Extensions/LuciZope.py	2007/06/25 16:03:38	1.2
+++ conga/luci/site/luci/Extensions/LuciZope.py	2007/07/27 16:43:47	1.3
@@ -128,11 +128,11 @@
 	ret = {}
 	for i in varlist:
 		pval = None
-		if req.has_key(i):
+		if req and req.has_key(i):
 			pval = req[i].strip()
 			if not pval:
 				pval = None
-		if pval is None:
+		if req and pval is None:
 			if req.form and req.form.has_key(i):
 				pval = req.form[i].strip()
 				if not pval:
--- conga/luci/site/luci/Extensions/PropsObject.py	2007/07/12 22:35:40	1.6
+++ conga/luci/site/luci/Extensions/PropsObject.py	2007/07/27 16:43:47	1.7
@@ -7,6 +7,7 @@
 
 from Variable import parse_variable
 from ricci_defines import PROPS_TAG
+import xml
 import xml.dom
 
 class PropsObject:
@@ -26,7 +27,7 @@
         return self.__vars
 
     def export_xml(self, doc, parent_node):
-        props = doc.createElement(PROPS_TAG)
+        props = doc.createElement(str(PROPS_TAG))
         parent_node.appendChild(props)
         for var in self.__vars:
             props.appendChild(self.__vars[var].export_xml(doc))
@@ -36,7 +37,7 @@
         props = None
         for node in parent_node.childNodes:
             if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                if node.nodeName == PROPS_TAG:
+                if node.nodeName == str(PROPS_TAG):
                     props = node
         if props is None:
             return self
@@ -47,3 +48,4 @@
             except:
                 continue
         return self
+
--- conga/luci/site/luci/Extensions/RicciQueries.py	2007/07/12 22:35:40	1.4
+++ conga/luci/site/luci/Extensions/RicciQueries.py	2007/07/27 16:43:47	1.5
@@ -701,17 +701,19 @@
 
 	try:
 		ret = rc.process_batch(batch)
+		if not ret:
+			raise Exception, 'no XML response'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GCC1: process_batch error for %s: %r %s' \
 				% (system_info, e, str(e)))
 		return None
 
-	if not ret:
-		return None
-
 	var_nodes = ret.getElementsByTagName('var')
 	for i in var_nodes:
 		if i.getAttribute('name') == 'cluster.conf':
 			return i.childNodes[0]
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GCC2: no conf node found')
 	return None
--- conga/luci/site/luci/Extensions/StorageReport.py	2007/06/25 16:03:38	1.24
+++ conga/luci/site/luci/Extensions/StorageReport.py	2007/07/27 16:43:47	1.25
@@ -2000,7 +2000,7 @@
                                            'id'   : '%s_origin' % bd['path'],
                                            'type' : 'snapshot-origin'})
                 for snap in bd['snapshots']:
-                    high_list[d['id']].append('%s_snapshot', snap['path'])
+                    high_list[d['id']].append('%s_snapshot' % snap['path'])
 
 
 
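
The StorageReport change is a one-character fix with a real behavioral
difference: with a comma, list.append() receives two arguments and raises
TypeError, while the '%' operator appends the single formatted name. For
example, with a hypothetical snapshot path:

    >>> high_list = []
    >>> high_list.append('%s_snapshot', '/dev/vg0/snap0')
    Traceback (most recent call last):
      ...
    TypeError: append() takes exactly one argument (2 given)
    >>> high_list.append('%s_snapshot' % '/dev/vg0/snap0')
    >>> high_list
    ['/dev/vg0/snap0_snapshot']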
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/07/23 18:53:08	1.262
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/07/27 16:43:47	1.263
@@ -1749,7 +1749,7 @@
 
 	ret = propagateClusterConfAsync(self, model, None, FENCEDEV_NODE_CONFIG,
 			'Updating fence configuration for node "%s"' % fvar['nodename'])
-	if ret[1] is not True:
+	if ret[0] is not True:
 		return ret
 
 	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (baseurl, NODE, clustername, nodename))
@@ -2123,10 +2123,18 @@
 		% (baseurl, SERVICES, clustername))
 
 def process_cluster_conf_editor(self, req):
-	clustername = req['clustername']
+	if req.has_key('clustername'):
+		clustername = req['clustername'].strip() or None
+	else:
+		clustername = None
+
+	if clustername is None:
+		return { 'msg': 'No cluster name was given', 'cluster_conf': '' }
+
 	msg_list = list(('\n'))
-	cc = ''
-	if 'new_cluster_conf' in req:
+
+	cc = None
+	if req.has_key('new_cluster_conf'):
 		cc = req['new_cluster_conf']
 		msg_list.append('Checking if valid XML - ')
 		cc_xml = None
@@ -2147,12 +2155,10 @@
 				msg_list.append('Fix the error and try again:\n')
 			else:
 				msg_list.append('PASSED\n')
-
-				msg_list.append('Incrementing the cluster version number - ')
 				model.setModified(True)
 				msg_list.append('DONE\n')
-
 				msg_list.append('Propagating the new cluster.conf')
+
 				rc = getRicciAgent(self, clustername)
 				if not rc:
 					if LUCI_DEBUG_MODE is True:
--- conga/luci/site/luci/Extensions/conga_constants.py	2007/06/25 16:11:30	1.41
+++ conga/luci/site/luci/Extensions/conga_constants.py	2007/07/27 16:43:47	1.42
@@ -134,7 +134,7 @@
 	'A problem occurred when starting this node: %s'
 ]
 
-REDIRECT_SEC = 3
+REDIRECT_SEC = 5
 
 # cluster/node create error status messages
 PRE_INSTALL = 'The install state is not yet complete.'
--- conga/ricci/common/XML.cpp	2007/06/25 16:03:42	1.9
+++ conga/ricci/common/XML.cpp	2007/07/27 16:43:47	1.10
@@ -119,15 +119,15 @@
     xml += " " + name + "=\"" + value + "\"";
   }
   if (children().empty())
-    xml += "/>";
+    xml += "/>\n";
   else {
-    xml += ">";
+    xml += ">\n";
     for (list<XMLObject>::const_iterator iter = children().begin();
 	 iter != children().end();
 	 iter++) {
-      iter->generate_xml(xml, indent);
+      iter->generate_xml(xml, indent + "\t");
     }
-    xml += indent + "</" + _tag + ">";
+    xml += indent + "</" + _tag + ">\n";
   }
 }
 
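
The XML.cpp hunk makes generate_xml() emit one element per line and indent
children one level deeper than their parent. A rough Python analogue of the
recursion, for illustration only (the Node type and names here are
hypothetical, not the ricci API):

    from collections import namedtuple

    Node = namedtuple('Node', 'tag attrs children')

    def generate_xml(node, indent=''):
        attrs = ''.join(' %s="%s"' % kv for kv in node.attrs.items())
        if not node.children:
            return '%s<%s%s/>\n' % (indent, node.tag, attrs)
        out = '%s<%s%s>\n' % (indent, node.tag, attrs)
        for child in node.children:
            # children are rendered one tab deeper, as in the patch
            out += generate_xml(child, indent + '\t')
        return out + '%s</%s>\n' % (indent, node.tag)

    print(generate_xml(Node('module', {'name': 'cluster'},
                            [Node('request', {'sequence': '1254'}, [])])))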
--- conga/ricci/test_suite/cluster/generate_xvm_key.xml	2007/07/23 18:47:51	1.1
+++ conga/ricci/test_suite/cluster/generate_xvm_key.xml	2007/07/27 16:43:47	1.2
@@ -4,7 +4,9 @@
 
 <module name="cluster">
 <request sequence="1254" API_version="1.0">
-<function_call name="generate_xvm_key" />
+<function_call name="generate_xvm_key">
+	<var mutable="false" name="size" type="int" value="4096" />
+</function_call>
 </request>
 </module>
 



* [Cluster-devel] conga luci/cluster/form-macros luci/cluster/in ...
@ 2007-06-25 16:11 rmccabe
From: rmccabe @ 2007-06-25 16:11 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-06-25 16:11:31

Modified files:
	luci/cluster   : form-macros index_html 
	                 resource_form_handlers.js validate_fence.js 
	luci/plone-custom: conga.js 
	luci/site/luci/Extensions: LuciClusterActions.py 
	                           LuciClusterInfo.py 
	                           cluster_adapters.py 
	                           conga_constants.py 
	ricci/modules/storage: VG.cpp parted_wrapper.cpp 

Log message:
	Fixes from the RHEL5 branch

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.199&r2=1.200
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.33&r2=1.34
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource_form_handlers.js.diff?cvsroot=cluster&r1=1.35&r2=1.36
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_fence.js.diff?cvsroot=cluster&r1=1.6&r2=1.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/conga.js.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.256&r2=1.257
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.40&r2=1.41
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/VG.cpp.diff?cvsroot=cluster&r1=1.12&r2=1.13
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/parted_wrapper.cpp.diff?cvsroot=cluster&r1=1.10&r2=1.11

--- conga/luci/cluster/form-macros	2007/06/25 16:03:37	1.199
+++ conga/luci/cluster/form-macros	2007/06/25 16:11:30	1.200
@@ -3117,7 +3117,11 @@
 					<option tal:attributes="value nodeinfo/delete_url"
 						tal:condition="python: not 'ricci_error' in nodeinfo">
 						Delete this node</option>
+					<option tal:attributes="value nodeinfo/force_delete_url"
+						tal:condition="python: 'ricci_error' in nodeinfo">
+						Force the deletion of this node</option>
 				</select>
+
 				<input type="button" value="Go"
 					onclick="if (this.form.gourl[this.form.gourl.selectedIndex].value && confirm(this.form.gourl[this.form.gourl.selectedIndex].text + '?')) return dropdown(this.form.gourl)" />
 				</form>
@@ -3129,6 +3133,7 @@
 				<select name="gourl">
 					<option value="">Choose a Task...</option>
 					<option tal:attributes="value nodeinfo/fence_url | nothing">Fence this node</option>
+					<option tal:attributes="value nodeinfo/force_delete_url | nothing">Force the deletion of this node</option>
 				</select>
 				<input type="button" value="Go"
 					onclick="if (this.form.gourl[this.form.gourl.selectedIndex].value && confirm(this.form.gourl[this.form.gourl.selectedIndex].text + '?')) return dropdown(this.form.gourl)" />
@@ -3537,6 +3542,7 @@
 						<select class="node" name="gourl">
 							<option value="">Choose a Task...</option>
 							<option tal:attributes="value nd/fence_it_url | nothing">Fence this node</option>
+							<option tal:attributes="value nd/force_delete_url| nothing">Force the deletion of this node</option>
 						</select>
 						<input type="button" value="Go"
 							onclick="if (this.form.gourl[this.form.gourl.selectedIndex].value && confirm(this.form.gourl[this.form.gourl.selectedIndex].text + '?')) return dropdown(this.form.gourl)" />
--- conga/luci/cluster/index_html	2007/06/25 16:03:37	1.33
+++ conga/luci/cluster/index_html	2007/06/25 16:11:30	1.34
@@ -38,7 +38,14 @@
 				resmap python:here.getClusterOS(ri_agent);
 				global isVirtualized resmap/isVirtualized | nothing;
 				global os_version resmap/os | nothing;
-				global isBusy python:here.isClusterBusy(request)" />
+				global isBusy python:here.isClusterBusy(request);
+				global firsttime request/busyfirst | nothing" />
+
+			<tal:block tal:condition="firsttime">
+				<tal:block tal:define="global busywaiting python:True" />
+				<meta http-equiv="refresh"
+					tal:attributes="content isBusy/refreshurl | string:." />
+			</tal:block>
 
 			<tal:block tal:define="global busy isBusy/busy | nothing" />
 
--- conga/luci/cluster/resource_form_handlers.js	2007/06/25 16:03:37	1.35
+++ conga/luci/cluster/resource_form_handlers.js	2007/06/25 16:11:30	1.36
@@ -500,12 +500,12 @@
 			if (res_type == 'hidden' || res_type == 'text' ||
 				res_type == 'password')
 			{
-				temp += '<input type="' + res_type + '" name="' + input_elem[j].name + '" value="' + input_elem[j].value + '" />';
+				temp += '<input type="' + res_type + '" name="' + escapeXML(input_elem[j].name) + '" value="' + escapeXML(input_elem[j].value) + '" />';
 			} else if (res_type == 'checkbox' || res_type == 'radio') {
 				if (input_elem[j].checked) {
-					temp += '<input type="' + res_type + '" name="' + input_elem[j].name + '" checked="checked"';
+					temp += '<input type="' + res_type + '" name="' + escapeXML(input_elem[j].name) + '" checked="checked"';
 					if (res_type == 'radio')
-						temp += ' value="' + input_elem[j].value + '"';
+						temp += ' value="' + escapeXML(input_elem[j].value) + '"';
 					temp += ' />';
 				}
 			}
@@ -513,11 +513,11 @@
 
 		var select_elem = form[i].getElementsByTagName('select');
 		for (var j = 0 ; j < select_elem.length ; j++) {
-			temp += '<input type="text" name="' + select_elem[j].name + '" value="' + select_elem[j].options[select_elem[j].options.selectedIndex].value + '" />';
+			temp += '<input type="text" name="' + escapeXML(select_elem[j].name) + '" value="' + escapeXML(select_elem[j].options[select_elem[j].options.selectedIndex].value) + '" />';
 		}
 
-		form_xml += '<form id="' + form[i].uuid.value + '" parent="' +
-					form[i].parent_uuid.value + '">' + temp + '</form>';
+		form_xml += '<form id="' + escapeXML(form[i].uuid.value) + '" parent="' +
+					escapeXML(form[i].parent_uuid.value) + '">' + temp + '</form>';
 	}
 
 	if (!svc_name)
--- conga/luci/cluster/validate_fence.js	2007/02/16 23:25:27	1.6
+++ conga/luci/cluster/validate_fence.js	2007/06/25 16:11:30	1.7
@@ -218,27 +218,27 @@
 			if (res_type == 'hidden' || res_type == 'text' ||
 				res_type == 'password')
 			{
-				temp += '<input type="' + res_type + '" name="' + input_elem[j].name + '" value="' + input_elem[j].value + '" />';
+				temp += '<input type="' + res_type + '" name="' + escapeXML(input_elem[j].name) + '" value="' + escapeXML(input_elem[j].value) + '" />';
 			} else if (res_type == 'checkbox' || res_type == 'radio') {
 				if (input_elem[j].checked) {
-					temp += '<input type="' + res_type + '" name="' + input_elem[j].name + '"';
+					temp += '<input type="' + res_type + '" name="' + escapeXML(input_elem[j].name) + '"';
 					if (res_type == 'checkbox')
 						temp += ' value="1"';
 					else if (res_type == 'radio')
-						temp += ' value="' + input_elem[j].value + '"';
+						temp += ' value="' + escapeXML(input_elem[j].value) + '"';
 					temp += ' />';
 				} else if (res_type == 'checkbox') {
-					temp += '<input type="' + res_type + '" name="' + input_elem[j].name + '" value="0" />';
+					temp += '<input type="' + res_type + '" name="' + escapeXML(input_elem[j].name) + '" value="0" />';
 				}
 			}
 		}
 
 		var select_elem = form[i].getElementsByTagName('select');
 		for (var j = 0 ; j < select_elem.length ; j++) {
-			temp += '<input type="text" name="' + select_elem[j].name + '" value="' + select_elem[j].options[select_elem[j].options.selectedIndex].value + '" />';
+			temp += '<input type="text" name="' + escapeXML(select_elem[j].name) + '" value="' + escapeXML(select_elem[j].options[select_elem[j].options.selectedIndex].value) + '" />';
 		}
 
-		form_xml += '<form id="' + form[i].getAttribute('name') + '">' + temp + '</form>';
+		form_xml += '<form id="' + escapeXML(form[i].getAttribute('name')) + '">' + temp + '</form>';
 	}
 
 	master_form.fence_xml.value = '<formlist>' + form_xml + '</formlist>';
--- conga/luci/plone-custom/conga.js	2006/11/03 21:47:27	1.4
+++ conga/luci/plone-custom/conga.js	2007/06/25 16:11:30	1.5
@@ -5,6 +5,12 @@
 	return (0);
 }
 
+function escapeXML(str) {
+	if (!str)
+		return '';
+	return str.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace('\'', '&apos;');
+}
+
 function popup_window(url, width_percent, height_percent) {
 	var width = window.innerWidth * (width_percent / 100);
 	var height = window.innerHeight * (height_percent / 100);
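
escapeXML() gives the validation scripts a way to embed user-supplied names and
values in the generated form XML without breaking it. The same five entities
can be produced with the Python standard library (an analogue for comparison,
not the Luci code itself):

    >>> from xml.sax.saxutils import escape
    >>> escape('a<b & "c"', {'"': '&quot;', "'": '&apos;'})
    'a&lt;b &amp; &quot;c&quot;'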
--- conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/06/25 16:03:38	1.2
+++ conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/06/25 16:11:30	1.3
@@ -17,7 +17,7 @@
 	CLUSTER_NODE_NEED_AUTH
 
 from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
-	NODE_DELETE, CLUSTER_DELETE, CLUSTERLIST, \
+	NODE_DELETE, NODE_FORCE_DELETE, CLUSTER_DELETE, CLUSTERLIST, \
 	NODE_FENCE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
 	RESOURCE_ADD, RESOURCE_CONFIG, RESOURCE_REMOVE, \
 	SERVICE_DELETE, SERVICE_RESTART, SERVICE_START, SERVICE_STOP
@@ -283,6 +283,64 @@
 				% (nodename_resolved, e, str(e)))
 	return True
 
+def NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved):
+	rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ], exclude_busy=True)
+
+	if rc is None:
+		rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ])
+
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC0: no agent to delete node %s "%s"' \
+				% (nodename_resolved, clustername))
+		return None
+
+	try:
+		model.deleteNodeByName(nodename.lower())
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC1: deleteNode %s: %r %s' \
+				% (nodename, e, str(e)))
+		return None
+
+	try:
+		model.setModified(True)
+		str_buf = str(model.exportModelAsString())
+		if not str_buf:
+			raise Exception, 'model string is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC2: exportModelAsString: %r %s' \
+				% (e, str(e)))
+		return None
+
+	batch_number, result = rq.setClusterConf(rc, str_buf)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC3: batch number is None')
+		return None
+
+	try:
+		ret = delClusterSystem(self, clustername, nodename_resolved)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC4: error deleting %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FORCE_DELETE,
+			'Forcing the deletion of node "%s"' % nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC5: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
 def NodeDeleteFromCluster(	self,
 							rc,
 							model,
@@ -354,7 +412,7 @@
 		batch_number, result = rq.setClusterConf(rc2, str_buf)
 		if batch_number is None:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+				luci_log.debug_verbose('ND8: batch number is None')
 			return None
 
 	try:
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py	2007/06/25 16:03:38	1.2
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py	2007/06/25 16:11:30	1.3
@@ -17,7 +17,7 @@
 
 from conga_constants import CLUSTER_CONFIG, CLUSTER_DELETE, \
 	CLUSTER_PROCESS, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
-	FDOM, FENCEDEV, NODE, NODE_ACTIVE, \
+	NODE_FORCE_DELETE, FDOM, FENCEDEV, NODE, NODE_ACTIVE, \
 	NODE_ACTIVE_STR, NODE_DELETE, NODE_FENCE, NODE_INACTIVE, \
 	NODE_INACTIVE_STR, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
 	NODE_PROCESS, NODE_REBOOT, NODE_UNKNOWN, NODE_UNKNOWN_STR, \
@@ -149,18 +149,10 @@
 	if not doc:
 		try:
 			from LuciDB import getClusterStatusDB
+			fvars = GetReqVars(request, [ 'clustername' ])
 
-			clustername = cluname
+			clustername = fvars['clustername']
 			if clustername is None:
-				try:
-					clustername = request['clustername']
-				except:
-					try:
-						clustername = request.form['clustername']
-					except:
-						pass
-
-			if not clustername:
 				raise Exception, 'unable to determine cluster name'
 
 			cinfo = getClusterStatusDB(self, clustername)
@@ -860,6 +852,8 @@
 	else:
 		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
 			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['force_delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FORCE_DELETE, nodename, clustername)
 
 	# figure out current services running on this node
 	svc_dict_list = list()
@@ -1021,6 +1015,8 @@
 		else:
 			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
 				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['force_delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FORCE_DELETE, name, clustername)
 
 		# figure out current services running on this node
 		svc_dict_list = list()
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/06/25 16:03:39	1.256
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/06/25 16:11:30	1.257
@@ -36,12 +36,12 @@
 	DISABLE_SVC_TASK, ENABLE_SVC_TASK, FDOM, FDOM_ADD, FENCEDEV, \
 	FENCEDEV_NODE_CONFIG, FENCEDEVS, FLAG_DESC, INSTALL_TASK, \
 	LAST_STATUS, LUCI_DEBUG_MODE, NODE, NODE_ADD, NODE_DELETE, \
-	NODE_FENCE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
-	NODES, POSSIBLE_REBOOT_MESSAGE, PRE_CFG, PRE_INSTALL, PRE_JOIN, \
-	REBOOT_TASK, REDIRECT_MSG, RESOURCES, RICCI_CONNECT_FAILURE, \
+	NODE_FENCE, NODE_FORCE_DELETE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
+	NODE_REBOOT, NODES, POSSIBLE_REBOOT_MESSAGE, PRE_CFG, PRE_INSTALL, \
+	PRE_JOIN, REBOOT_TASK, REDIRECT_MSG, RESOURCES, RICCI_CONNECT_FAILURE, \
 	RICCI_CONNECT_FAILURE_MSG, SEND_CONF, SERVICE_ADD, SERVICE_CONFIG, \
 	SERVICE_LIST, SERVICES, START_NODE, TASKTYPE, VM_ADD, VM_CONFIG, \
-	REDIRECT_SEC
+	REDIRECT_SEC, LUCI_CLUSTER_BASE_URL
 
 from FenceHandler import validateNewFenceDevice, \
 	validateFenceDevice, validate_fenceinstance, \
@@ -719,7 +719,7 @@
 	errors = list()
 
 	try:
-		form_xml = request['form_xml']
+		form_xml = request['form_xml'].strip()
 		if not form_xml:
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
@@ -741,7 +741,7 @@
 		doc = minidom.parseString(form_xml)
 		forms = doc.getElementsByTagName('form')
 		if len(forms) < 1:
-			raise
+			raise Exception, 'invalid XML'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('vSA1: error: %r %s' % (e, str(e)))
@@ -1682,7 +1682,7 @@
 	errors = list()
 
 	try:
-		form_xml = request['fence_xml']
+		form_xml = request['fence_xml'].strip()
 		if not form_xml:
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
@@ -2639,7 +2639,9 @@
 	return getRicciAgent(self, clustername)
 
 def clusterTaskProcess(self, model, request):
-	fvar = GetReqVars(request, [ 'task', 'clustername' ])
+	fvar = GetReqVars(request, [ 'task', 'clustername', 'URL' ])
+
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
 	task = fvar['task']
 	if task is None:
@@ -2648,7 +2650,7 @@
 		return 'No cluster task was given'
 
 	if not model:
-		cluname = fvar['cluname']
+		cluname = fvar['clustername']
 		if cluname is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('CTP1: no cluster name')
@@ -2686,7 +2688,7 @@
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], redirect_page, model.getClusterName()))
+		% (baseurl, redirect_page, model.getClusterName()))
 
 def nodeTaskProcess(self, model, request):
 	fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename', 'URL' ])
@@ -2694,6 +2696,7 @@
 	task = fvar['task']
 	clustername = fvar['clustername']
 	nodename = fvar['nodename']
+	baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
 
 	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
@@ -2713,10 +2716,9 @@
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
 	response = request.RESPONSE
 
-	if task != NODE_FENCE:
-		# Fencing is the only task for which we don't
-		# want to talk to the node on which the action is
-		# to be performed.
+	if task != NODE_FENCE and task != NODE_FORCE_DELETE:
+		# Fencing and forced deletion are the only tasks
+		# for which we don't want to talk to the target node.
 		try:
 			rc = RicciCommunicator(nodename_resolved)
 			if not rc:
@@ -2775,7 +2777,7 @@
 			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
 
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], NODES, clustername))
+			% (baseurl, NODES, clustername))
 	elif task == NODE_JOIN_CLUSTER:
 		from LuciClusterActions import NodeJoinCluster
 		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
@@ -2784,7 +2786,7 @@
 			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
 
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], NODES, clustername))
+			% (baseurl, NODES, clustername))
 	elif task == NODE_REBOOT:
 		from LuciClusterActions import NodeReboot
 		if NodeReboot(self, rc, clustername, nodename_resolved) is None:
@@ -2794,7 +2796,7 @@
 				% nodename_resolved ]})
 
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], NODES, clustername))
+			% (baseurl, NODES, clustername))
 	elif task == NODE_FENCE:
 		from LuciClusterActions import NodeFence
 		if NodeFence(self, clustername, nodename, nodename_resolved) is None:
@@ -2803,7 +2805,7 @@
 			return (False, {'errors': [ 'Fencing of node "%s" failed' \
 				% nodename_resolved]})
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], NODES, clustername))
+			% (baseurl, NODES, clustername))
 	elif task == NODE_DELETE:
 		from LuciClusterActions import NodeDeleteFromCluster
 		if NodeDeleteFromCluster(self, rc, model, clustername, nodename, nodename_resolved) is None:
@@ -2812,7 +2814,16 @@
 			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
 
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], NODES, clustername))
+			% (baseurl, NODES, clustername))
+	elif task == NODE_FORCE_DELETE:
+		from LuciClusterActions import NodeForceDeleteFromCluster
+		if NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP13: nodeForceDelete failed')
+			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (baseurl, NODES, clustername))
 
 def isClusterBusy(self, req):
 	items = None
@@ -3180,11 +3191,13 @@
 
 	fvars = GetReqVars(req,
 				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
 	ret = RestartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
@@ -3193,11 +3206,13 @@
 
 	fvars = GetReqVars(req,
 				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
 	ret = StopCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
@@ -3206,11 +3221,13 @@
 
 	fvars = GetReqVars(req,
 				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
 	ret = StartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
@@ -3219,6 +3236,8 @@
 
 	fvars = GetReqVars(req,
 				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
 	try:
 		model = LuciExtractCluModel(self, req,
 					cluster_name=fvars['clustername'])
@@ -3231,7 +3250,7 @@
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (fvars['URL'], SERVICES, fvars['clustername']))
+			% (baseurl, SERVICES, fvars['clustername']))
 	else:
 		return ret
 
@@ -3240,11 +3259,13 @@
 
 	fvars = GetReqVars(req,
 				[ 'clustername', 'servicename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
 	ret = MigrateCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
+			% (baseurl, SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
@@ -3253,6 +3274,8 @@
 
 	fvars = GetReqVars(req,
 		[ 'clustername', 'resourcename', 'nodename', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
 	try:
 		model = LuciExtractCluModel(self, req,
 					cluster_name=fvars['clustername'])
@@ -3267,12 +3290,14 @@
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (fvars['URL'], RESOURCES, fvars['clustername']))
+			% (baseurl, RESOURCES, fvars['clustername']))
 	else:
 		return ret
 
 def resourceAdd(self, req, model, res):
 	from LuciClusterActions import AddResource, EditResource
+	fvars = GetReqVars(req, [ 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
 
 	try:
 		cluname = model.getClusterName()
@@ -3293,7 +3318,7 @@
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], RESOURCES, cluname))
+			% (baseurl, RESOURCES, cluname))
 	else:
 		return ret
 
--- conga/luci/site/luci/Extensions/conga_constants.py	2007/06/25 16:03:39	1.40
+++ conga/luci/site/luci/Extensions/conga_constants.py	2007/06/25 16:11:30	1.41
@@ -59,17 +59,18 @@
 SYS_SERVICE_UPDATE		= '91'
 
 # Cluster tasks
-CLUSTER_STOP	= '1000'
-CLUSTER_START	= '1001'
-CLUSTER_RESTART	= '1002'
-CLUSTER_DELETE	= '1003'
+CLUSTER_STOP			= '1000'
+CLUSTER_START			= '1001'
+CLUSTER_RESTART			= '1002'
+CLUSTER_DELETE			= '1003'
 
 # Node tasks
-NODE_LEAVE_CLUSTER	= '100'
-NODE_JOIN_CLUSTER	= '101'
-NODE_REBOOT			= '102'
-NODE_FENCE			= '103'
-NODE_DELETE			= '104'
+NODE_LEAVE_CLUSTER		= '100'
+NODE_JOIN_CLUSTER		= '101'
+NODE_REBOOT				= '102'
+NODE_FENCE				= '103'
+NODE_DELETE				= '104'
+NODE_FORCE_DELETE		= '105'
 
 # General tasks
 BASECLUSTER	= '201'
--- conga/ricci/modules/storage/VG.cpp	2007/03/30 14:40:24	1.12
+++ conga/ricci/modules/storage/VG.cpp	2007/06/25 16:11:30	1.13
@@ -354,10 +354,15 @@
   props.set(Variable("extent_size", 
 		     4 * 1024 * 1024 /* 4 MB */,
 		     ext_sizes));
-  
+
   // clustered
-  props.set(Variable("clustered", false, true));
-  
+  bool use_clustered = false;
+  try {
+    LVM::check_locking();
+    use_clustered = true;
+  } catch ( ... ) { }
+  props.set(Variable("clustered", use_clustered, true));
+
   // new sources
   VG unused(VG_PREFIX);
   for (list<counting_auto_ptr<BD> >::iterator iter = unused.sources.begin();
--- conga/ricci/modules/storage/parted_wrapper.cpp	2007/03/20 15:31:20	1.10
+++ conga/ricci/modules/storage/parted_wrapper.cpp	2007/06/25 16:11:30	1.11
@@ -680,7 +680,7 @@
     else if (s[s.size()-2] == 'g')
       multiplier = 1000 * 1000 * 1000;
     else if (s[s.size()-2] == 't')
-      multiplier = 1000 * 1000 * 1000 * 1000;
+      multiplier = (long long) 1000 * 1000 * 1000 * 1000;
   }
   
   return (long long) utils::to_float(s) * multiplier;
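
The cast matters because 1000 * 1000 * 1000 * 1000 is otherwise evaluated in
plain int arithmetic (typically 32-bit) before the assignment, and the product
overflows; casting the first operand to long long forces the whole
multiplication into 64-bit arithmetic. The magnitudes, checked in Python:

    >>> 1000 ** 4, 2 ** 31 - 1
    (1000000000000, 2147483647)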


