Subject: [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
From: rmccabe @ 2007-05-14 18:00 UTC
To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-14 18:00:14

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py 
	                           LuciClusterInfo.py LuciDB.py 
	                           LuciSyslog.py LuciZope.py 
	                           PropsObject.py ResourceHandler.py 
	                           RicciQueries.py cluster_adapters.py 
	                           homebase_adapters.py 
	                           ricci_communicator.py 
	                           system_adapters.py 

Log message:
	Next pass of cleanup and refactor

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciSyslog.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.11&r2=1.11.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/PropsObject.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.2.8.1&r2=1.2.8.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ResourceHandler.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.3&r2=1.255.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.50.2.2&r2=1.50.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.25.2.2&r2=1.25.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/system_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.2.2.1&r2=1.2.2.2
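
The dominant idiom in this pass, repeated across nearly every file below,
is twofold: each module now obtains its logger through the shared
LuciSyslog.get_logger() accessor instead of constructing its own instance,
and every debug_verbose() call is gated on LUCI_DEBUG_MODE so the log
string is never built when debugging is off. A minimal sketch of the
resulting pattern (do_work() and the 'DW0' tag are illustrative, not part
of the commit):

from LuciSyslog import get_logger
from conga_constants import LUCI_DEBUG_MODE

luci_log = get_logger()

def do_work(rc):
	try:
		return rc.hostname()
	except Exception, e:
		# The guard skips both the string formatting and the syslog
		# round trip unless debugging is enabled.
		if LUCI_DEBUG_MODE is True:
			luci_log.debug_verbose('DW0: %r' % e)
		return None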

--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/08 22:19:35	1.1.2.1
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/14 18:00:14	1.1.2.2
@@ -1,7 +1,11 @@
 import RicciQueries as rq
-from conga_constants import CLUSTER_CONFIG
+from LuciDB import set_node_flag, getRicciAgent
+from LuciSyslog import get_logger
+from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE
 
-def propagateClusterConfAsync(model, rc=None):
+luci_log = get_logger()
+
+def propagateClusterConfAsync(self, model, rc=None):
 	errors = list()
 	messages = list()
 
@@ -10,7 +14,8 @@
 		if not clustername:
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
-		luci_log.debug_verbose('PCC0: getClusterName: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC0: getClusterName: %r' % e)
 		errors.append('Unable to determine cluster name')
 		return (False, { 'errors': errors, 'messages': messages })
 
@@ -18,19 +23,36 @@
 		rc = getRicciAgent(self, clustername)
 
 	if not rc:
-		luci_log.debug_verbose('PCC1: unable to find a ricci agent for the %s cluster' % clustername)
-		errors.append('Unable to contact a ricci agent for cluster "%s"' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC1: no ricci agent for the %s cluster' \
+				% clustername)
+		errors.append('Unable to contact a ricci agent for cluster "%s"' \
+			% clustername)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	try:
+		conf_str = str(model.exportModelAsString()).strip()
+		if not conf_str:
+			raise Exception, 'The new cluster configuration is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC2: %r' % e)
+		errors.append(repr(e))
 		return (False, { 'errors': errors, 'messages': messages })
 
-	batch_id, result = rq.setClusterConf(rc, str(conf_str))
+	batch_id, result = rq.setClusterConf(rc, conf_str)
 	if batch_id is None or result is None:
-		luci_log.debug_verbose('PCC2: setCluserConf: batchid or result is None')
-		errors.append('Unable to propagate the new cluster configuration for %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC3: batchid or result is None')
+		errors.append('Unable to propagate a new cluster configuration for %s' \
+			% clustername)
 		return (False, { 'errors': errors, 'messages': messages })
 
 	try:
 		set_node_flag(self, clustername, rc.hostname(), batch_id,
-			CLUSTER_CONFIG, 'Adding new fence device "%s"' % retobj)
+			CLUSTER_CONFIG, 'Updating the cluster configuration for "%s"' %
+				clustername)
 	except Exception, e:
-		luci_log.debug_verbose('PCC3: set_node_flag: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC4: set_node_flag: %r' % e)
 	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
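
propagateClusterConfAsync() now takes the Zope context as its first
argument and always returns a (succeeded, result_hash) pair. A hedged
caller sketch (config_update_handler() is hypothetical, not from this
commit):

def config_update_handler(self, model):
	ok, res = propagateClusterConfAsync(self, model)
	if not ok:
		# res['errors'] explains what failed; no batch was started.
		return res['errors']
	# On success, res['batchid'] can be polled via RicciQueries.checkBatch.
	return res['batchid']
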
--- conga/luci/site/luci/Extensions/Attic/LuciClusterInfo.py	2007/05/08 22:19:35	1.1.2.1
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterInfo.py	2007/05/14 18:00:14	1.1.2.2
@@ -2,6 +2,7 @@
 import RicciQueries as rq
 from ricci_communicator import RicciCommunicator
 from FenceHandler import FENCE_OPTS
+from LuciSyslog import get_logger
 
 from conga_constants import CLUNAME, CLUSTER_CONFIG, CLUSTER_DELETE, \
 	CLUSTER_PROCESS, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
@@ -14,6 +15,15 @@
 	SERVICE, SERVICE_DELETE, SERVICE_MIGRATE, SERVICE_RESTART, \
 	SERVICE_START, SERVICE_STOP, VM_CONFIG
 
+luci_log = get_logger()
+
+def getnodes(self, model):
+	try:
+		return map(lambda x: str(x.getName()), model.getNodes())
+	except Exception, e:
+		luci_log.debug_verbose('getnodes0: %s' % str(e))
+	return []
+
 def getResourceInfo(model, name, res=None):
 	if res is None:
 		try:
@@ -1469,3 +1479,14 @@
 
 	#CALL
 	return {}
+
+def getClusterName(self, model):
+	return model.getClusterName()
+
+def getClusterAlias(self, model):
+	if not model:
+		return ''
+	alias = model.getClusterAlias()
+	if not alias:
+		return model.getClusterName()
+	return alias
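
getClusterAlias() falls back to the cluster name when no alias is set and
returns an empty string when no model is available. A quick illustration
with a stand-in model (StubModel is hypothetical; the real object is a
ClusterModel.ModelBuilder):

class StubModel:
	def getClusterName(self):
		return 'cluster1'
	def getClusterAlias(self):
		return ''	# no alias configured

print getClusterAlias(None, StubModel())	# -> 'cluster1'
print getClusterAlias(None, None)		# -> ''
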
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/14 16:02:11	1.1.2.4
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/14 18:00:14	1.1.2.5
@@ -2,13 +2,14 @@
 import RicciQueries as rq
 from ricci_communicator import RicciCommunicator
 from LuciZope import isAdmin
+from LuciSyslog import get_logger
 
 from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, \
 	FLAG_DESC, CLUSTER_ADD, CLUSTER_NODE_ADDED, CLUSTER_NODE_NEED_AUTH, \
 	LAST_STATUS, PLONE_ROOT, STORAGE_FOLDER_PATH, CLUSTER_NODE_NOT_MEMBER, \
 	LUCI_DEBUG_MODE
 
-from homebase_adapters import luci_log
+luci_log = get_logger()
 
 def getClusterNode(self, nodename, clustername):
 	try:
--- conga/luci/site/luci/Extensions/LuciSyslog.py	2006/11/29 18:15:20	1.11
+++ conga/luci/site/luci/Extensions/LuciSyslog.py	2007/05/14 18:00:14	1.11.4.1
@@ -18,14 +18,20 @@
 		except:
 			raise LuciSyslogError, 'unable to setup syslog functionality.'
 
+	def __del__(self):
+		self.close()
+
+	def initialized(self):
+		return self.__init > 0
+
 	def info(self, msg):
 		if not self.__init:
 			return
 		try:
 			syslog(LOG_INFO, msg)
 		except:
-			pass
 			#raise LuciSyslogError, 'syslog info call failed'
+			pass
 
 	def warn(self, msg):
 		if not self.__init:
@@ -33,8 +39,8 @@
 		try:
 			syslog(LOG_WARNING, msg)
 		except:
-			pass
 			#raise LuciSyslogError, 'syslog warn call failed'
+			pass
 
 	def private(self, msg):
 		if not self.__init:
@@ -42,8 +48,8 @@
 		try:
 			syslog(LOG_AUTH, msg)
 		except:
-			pass
 			#raise LuciSyslogError, 'syslog private call failed'
+			pass
 
 	def debug_verbose(self, msg):
 		if not LUCI_DEBUG_MODE or LUCI_DEBUG_VERBOSITY < 2 or not self.__init:
@@ -73,8 +79,8 @@
 		try:
 			syslog(LOG_DEBUG, msg)
 		except:
-			pass
 			#raise LuciSyslogError, 'syslog debug call failed'
+			pass
 
 	def close(self):
 		try:
@@ -82,3 +88,10 @@
 			closelog()
 		except:
 			pass
+
+luci_logc = None
+def get_logger():
+	global luci_logc
+	if luci_logc is None or luci_logc.initialized() is False:
+		luci_logc = LuciSyslog()
+	return luci_logc
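
get_logger() memoizes a single LuciSyslog instance so the many modules
that call it at import time share one syslog connection; note the global
declaration above, without which the first read of luci_logc would raise
UnboundLocalError. Usage is uniform across the Extensions modules:

from LuciSyslog import get_logger

luci_log = get_logger()
# Same instance on repeated calls, provided syslog setup succeeded.
assert luci_log is get_logger()
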
--- conga/luci/site/luci/Extensions/Attic/LuciZope.py	2007/05/08 22:19:35	1.1.2.2
+++ conga/luci/site/luci/Extensions/Attic/LuciZope.py	2007/05/14 18:00:14	1.1.2.3
@@ -87,3 +87,41 @@
 	if get_units_multiplier(units) > get_units_multiplier(p_units):
 		units = p_units
 	return (convert_bytes(bytes, units), units)
+
+def getTabs(self, req):
+	if not userAuthenticated(self):
+		return []
+
+	htab = {
+		'Title': 'homebase',
+		'Description': 'Home base for this luci server',
+		'Taburl': '/luci/homebase',
+		'isSelected': False
+	}
+	ctab = {
+		'Title': 'cluster',
+		'Description': 'Cluster configuration area',
+		'Taburl': '/luci/cluster/index_html?pagetype=3',
+		'isSelected': False
+	}
+	stab = {
+		'Title': 'storage',
+		'Description': 'Storage configuration page',
+		'Taburl': '/luci/storage',
+		'isSelected': False
+	}
+
+	try:
+		baseurl = req['URL']
+		if baseurl.find('cluster') > (-1):
+			ctab['isSelected'] = True
+		elif baseurl.find('storage') > (-1):
+			stab['isSelected'] = True
+		else:
+			htab['isSelected'] = True
+	except KeyError, e:
+		pass
+	except Exception, e:
+		htab['isSelected'] = True
+
+	return [ htab, ctab, stab ]
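
getTabs() picks the highlighted tab by substring-matching the request URL,
defaulting to the homebase tab when the URL is missing or matches neither
area. A sketch of the contract (the dict stands in for Zope's REQUEST, and
ctx must be a context object that passes userAuthenticated()):

req = { 'URL': 'https://luci.example.com/luci/cluster/index_html?pagetype=3' }
tabs = getTabs(ctx, req)
# tabs[1] is ctab; 'cluster' occurs in the URL, so tabs[1]['isSelected']
# is True and the other two tabs remain unselected.
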
--- conga/luci/site/luci/Extensions/PropsObject.py	2007/05/04 19:10:24	1.2.8.1
+++ conga/luci/site/luci/Extensions/PropsObject.py	2007/05/14 18:00:14	1.2.8.2
@@ -1,7 +1,5 @@
-
-from ricci_defines import *
-
-from Variable import Variable, parse_variable
+from Variable import parse_variable
+from ricci_defines import PROPS_TAG
 import xml
 import xml.dom
 
--- conga/luci/site/luci/Extensions/Attic/ResourceHandler.py	2007/05/03 20:16:38	1.1.2.1
+++ conga/luci/site/luci/Extensions/Attic/ResourceHandler.py	2007/05/14 18:00:14	1.1.2.2
@@ -12,8 +12,10 @@
 from ClusterModel.OpenLDAP import OpenLDAP
 from ClusterModel.LVM import LVM
 from ClusterModel.MySQL import MySQL
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
 
-from homebase_adapters import luci_log
+luci_log = get_logger()
 
 def get_fsid_list(model):
 	obj_list = model.searchObjectTree('fs')
@@ -45,7 +47,8 @@
 			resPtr.removeChild(res)
 			return res
 
-	luci_log.debug_verbose('GRFE0: unable to find resource "%s"' % name)
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GRFE0: unable to find resource "%s"' % name)
 	raise KeyError, name
 
 def addIp(res, rname, form, model):
@@ -57,9 +60,10 @@
 			raise KeyError, 'No IP address was given'
 		res.addAttribute('address', addr)
 	except KeyError, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addIp4: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addIp4: %s' % err)
 
 	if 'monitorLink' in form:
 		res.addAttribute('monitor_link', '1')
@@ -77,9 +81,10 @@
 			raise Exception, 'No mount point was given for "%s"' % rname
 		res.addAttribute('mountpoint', mountpoint)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs6: %s' % err)
 
 	try:
 		device = form['device'].strip()
@@ -87,9 +92,10 @@
 			raise Exception, 'No device was given for "%s"' % rname
 		res.addAttribute('device', device)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs7: %s' % err)
 
 	try:
 		options = form['options'].strip()
@@ -102,9 +108,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs8: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs8: %s' % err)
 
 	try:
 		fstype = form['fstype'].strip()
@@ -112,9 +119,10 @@
 			raise Exception, 'No filesystem type was given for "%s"' % rname
 		res.addAttribute('fstype', fstype)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addFs9: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs9: %s' % err)
 
 	try:
 		fsid = form['fsid'].strip()
@@ -153,9 +161,10 @@
 			raise Exception, 'No mount point was given for "%s"' % rname
 		res.addAttribute('mountpoint', mountpoint)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs5: %s' % err)
 
 	try:
 		device = form['device'].strip()
@@ -163,9 +172,10 @@
 			raise Exception, 'No device was given for "%s"' % rname
 		res.addAttribute('device', device)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs6: %s' % err)
 
 	try:
 		options = form['options'].strip()
@@ -178,9 +188,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addGfs7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs7: %s' % err)
 
 	try:
 		fsid = form['fsid'].strip()
@@ -209,9 +220,10 @@
 			raise Exception, 'No mount point was given for "%s"' % rname
 		res.addAttribute('mountpoint', mountpoint)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm5: %s' % err)
 
 	try:
 		host = form['host'].strip()
@@ -219,9 +231,10 @@
 			raise Exception, 'No host server was given for "%s"' % rname
 		res.addAttribute('host', host)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm6 error: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm6 error: %s' % err)
 
 	try:
 		options = form['options'].strip()
@@ -234,9 +247,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm7: %s' % err)
 
 	try:
 		exportpath = form['exportpath'].strip()
@@ -244,9 +258,10 @@
 			raise Exception, 'No export path was given for "%s"' % rname
 		res.addAttribute('exportpath', exportpath)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm8: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm8: %s' % err)
 
 	try:
 		nfstype = form['nfstype'].strip().lower()
@@ -255,9 +270,10 @@
 								% (nfstype, rname)
 		res.addAttribute('nfstype', nfstype)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsm9: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm9: %s' % err)
 
 	if form.has_key('forceunmount'):
 		res.addAttribute('force_unmount', '1')
@@ -275,9 +291,10 @@
 			raise Exception, 'No target was given for "%s"' % rname
 		res.addAttribute('target', target)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsc5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsc5: %s' % err)
 
 	try:
 		options = form['options'].strip()
@@ -290,9 +307,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addNfsc6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsc6: %s' % err)
 
 	if form.has_key('allow_recover'):
 		res.addAttribute('allow_recover', '1')
@@ -314,9 +332,10 @@
 			raise Exception, 'No path to a script file was given for this "%s"' % rname
 		res.addAttribute('file', path)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addScr5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addScr5: %s' % err)
 
 	return errors
 
@@ -329,9 +348,10 @@
 			raise Exception, 'No workgroup was given for "%s"' % rname
 		res.addAttribute('workgroup', workgroup)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addSmb5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addSmb5: %s' % err)
 
 	return errors
 
@@ -344,9 +364,10 @@
 			raise KeyError, 'No server root was given for "%s"' % rname
 		res.addAttribute('server_root', server_root)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addApache5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache5: %s' % err)
 
 	try:
 		config_file = form['config_file'].strip()
@@ -354,9 +375,10 @@
 			raise KeyError, 'No path to the Apache configuration file was given for "%s"' % rname
 		res.addAttribute('config_file', config_file)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addApache6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache6: %s' % err)
 
 	try:
 		options = form['httpd_options'].strip()
@@ -369,9 +391,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addApache7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache7: %s' % err)
 
 	try:
 		shutdown_wait = int(form['shutdown_wait'].strip())
@@ -379,9 +402,10 @@
 	except KeyError, e:
 		res.addAttribute('shutdown_wait', '0')
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addApache7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache7: %s' % err)
 
 	return errors
 
@@ -394,9 +418,10 @@
 			raise KeyError, 'No path to the MySQL configuration file was given for "%s"' % rname
 		res.addAttribute('config_file', config_file)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addMySQL5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL5: %s' % err)
 
 	try:
 		listen_addr = form['listen_address'].strip()
@@ -404,9 +429,10 @@
 			raise KeyError, 'No address was given for "%s"' % rname
 		res.addAttribute('listen_address', listen_addr)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addMySQL6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL6: %s' % err)
 
 	try:
 		options = form['mysql_options'].strip()
@@ -419,9 +445,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addMySQL7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL7: %s' % err)
 
 	try:
 		shutdown_wait = int(form['shutdown_wait'].strip())
@@ -429,9 +456,10 @@
 	except KeyError, e:
 		res.addAttribute('shutdown_wait', '0')
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addMySQL7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL7: %s' % err)
 
 	return errors
 
@@ -444,9 +472,10 @@
 			raise KeyError, 'No URL list was given for "%s"' % rname
 		res.addAttribute('url_list', url_list)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addOpenLDAP5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP5: %s' % err)
 
 	try:
 		config_file = form['config_file'].strip()
@@ -454,9 +483,10 @@
 			raise KeyError, 'No path to the OpenLDAP configuration file was given for "%s"' % rname
 		res.addAttribute('config_file', config_file)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addOpenLDAP6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP6: %s' % err)
 
 	try:
 		options = form['slapd_options'].strip()
@@ -469,9 +499,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addOpenLDAP7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP7: %s' % err)
 
 	try:
 		shutdown_wait = int(form['shutdown_wait'].strip())
@@ -479,9 +510,10 @@
 	except KeyError, e:
 		res.addAttribute('shutdown_wait', '0')
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addOpenLDAP7: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP7: %s' % err)
 
 	return errors
 
@@ -493,9 +525,10 @@
 			raise KeyError, 'No postmaster user was given for "%s"' % rname
 		res.addAttribute('postmaster_user', user)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addPostgreSQL85: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL85: %s' % err)
 
 	try:
 		config_file = form['config_file'].strip()
@@ -503,9 +536,10 @@
 			raise KeyError, 'No path to the PostgreSQL 8 configuration file was given for "%s"' % rname
 		res.addAttribute('config_file', config_file)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addPostgreSQL86: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL86: %s' % err)
 
 	try:
 		options = form['postmaster_options'].strip()
@@ -518,9 +552,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addPostgreSQL87: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL87: %s' % err)
 
 	try:
 		shutdown_wait = int(form['shutdown_wait'].strip())
@@ -528,9 +563,10 @@
 	except KeyError, e:
 		res.addAttribute('shutdown_wait', '0')
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addPostgreSQL87: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL87: %s' % err)
 
 	return errors
 
@@ -543,9 +579,10 @@
 			raise KeyError, 'No Tomcat user was given for "%s"' % rname
 		res.addAttribute('tomcat_user', user)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addTomcat55: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat55: %s' % err)
 
 	try:
 		config_file = form['config_file'].strip()
@@ -553,9 +590,10 @@
 			raise KeyError, 'No path to the Tomcat 5 configuration file was given for "%s"' % rname
 		res.addAttribute('config_file', config_file)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addTomcat56: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat56: %s' % err)
 
 	try:
 		options = form['catalina_options'].strip()
@@ -568,9 +606,10 @@
 		except:
 			pass
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addTomcat57: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat57: %s' % err)
 
 	try:
 		catalina_base = form['catalina_base'].strip()
@@ -578,9 +617,10 @@
 			raise KeyError, 'No cataliny base directory was given for "%s"' % rname
 		res.addAttribute('catalina_base', catalina_base)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addTomcat58: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat58: %s' % err)
 
 	try:
 		shutdown_wait = int(form['shutdown_wait'].strip())
@@ -588,9 +628,10 @@
 	except KeyError, e:
 		res.addAttribute('shutdown_wait', '0')
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addTomcat59: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat59: %s' % err)
 
 	return errors
 
@@ -603,9 +644,10 @@
 			raise KeyError, 'No volume group name was given for "%s"' % rname
 		res.addAttribute('vg_name', vg_name)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addLVM5: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addLVM5: %s' % err)
 
 	try:
 		lv_name = form['lv_name'].strip()
@@ -613,9 +655,10 @@
 			raise KeyError, 'No logical volume name was given for "%s"' % rname
 		res.addAttribute('lv_name', lv_name)
 	except Exception, e:
-		err = str(e)
+		err = repr(e)
 		errors.append(err)
-		luci_log.debug_verbose('addLVM6: %s' % err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addLVM6: %s' % err)
 
 	return errors
 
--- conga/luci/site/luci/Extensions/Attic/RicciQueries.py	2007/05/03 20:16:38	1.1.2.1
+++ conga/luci/site/luci/Extensions/Attic/RicciQueries.py	2007/05/14 18:00:14	1.1.2.2
@@ -1,12 +1,10 @@
 from xml.dom import minidom
 from xml.dom.Node import ELEMENT_NODE
 from ricci_communicator import RicciCommunicator, extract_module_status
-from LuciSyslog import LuciSyslog
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
 
-try:
-	luci_log = LuciSyslog()
-except:
-	pass
+luci_log = get_logger()
 
 def checkBatch(rc, batch_id):
 	err_msg = 'An unknown Ricci error occurred on %s' % rc.hostname()
@@ -14,10 +12,14 @@
 	try:
 		batch = rc.batch_report(batch_id)
 		if batch is None:
-			luci_log.debug_verbose('checkBatch0: batch id %s was not found' % batch_id)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('checkBatch0: batch id %s not found' \
+					% batch_id)
 			return (True, 'batch id %s was not found' % batch_id)
 	except Exception, e:
-		luci_log.debug_verbose('checkBatch0a: %s: %s' % (rc.hostname(), str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('checkBatch1: %s: %r' \
+				% (rc.hostname(), e))
 		return (-1, err_msg)
 
 	try:
@@ -26,7 +28,9 @@
 			err_msg = 'A Ricci error occurred on %s: %s' \
 				% (rc.hostname(), str(new_err_msg))
 	except Exception, e:
-		luci_log.debug_verbose('checkBatch1: %s: %s' % (rc.hostname(), str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('checkBatch2: %s: %r' \
+				% (rc.hostname(), e))
 		return (-1, err_msg)
 
 	# In progress.
@@ -38,7 +42,8 @@
 		return (True, 'completed sucessfully')
 
 	# Error
-	luci_log.debug_verbose('checkBatch2: %d: %s' % (code, rc.hostname()))
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('checkBatch3: %d: %s' % (code, rc.hostname()))
 	return (-1, err_msg)
 
 def addClusterNodeBatch(cluster_name,
@@ -290,7 +295,8 @@
 		if not batch or len(batch) < 1:
 			raise Exception, 'no batch tag was found'
 	except Exception, e:
-		luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('BAR0: %r' % e)
 		return (None, None)
 
 	for i in batch:
@@ -299,13 +305,15 @@
 			result = i.getAttribute('status')
 			return (str(batch_number), str(result))
 		except Exception, e:
-			luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
-
-	try:
-		luci_log.debug_verbose('no batch with batchid and status found in \"%s\"' % doc.toxml())
-	except:
-		pass
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('BAR1: %s' % e)
 
+	if LUCI_DEBUG_MODE is True:
+		try:
+			luci_log.debug_verbose('BAR2: batchid, status not in \"%s\"' \
+				% doc.toxml())
+		except:
+			pass
 	return (None, None)
 
 def getClusterStatusBatch(rc):
@@ -315,21 +323,25 @@
 	try:
 		cluster_tags = ricci_xml.getElementsByTagName('cluster')
 	except Exception, e:
-		luci_log.debug_verbose('getClusterStatusBatch0: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB0: %r' % e)
 		return None
 
 	if len(cluster_tags) < 1:
-		luci_log.debug_verbose('getClusterStatusBatch1: %d entries - expecting 1' \
-			% len(cluster_tags))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB1: %d entries - expecting 1' \
+				% len(cluster_tags))
 	elif len(cluster_tags) > 1:
-		luci_log.debug_verbose('getClusterStatusBatch2: %d entries - expecting 1, using the first' % len(cluster_tags))
-
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB2: %d entries, expecting 1; using first' \
+				% len(cluster_tags))
 	try:
 		cluster_node = cluster_tags[0]
 		if not cluster_node:
 			raise Exception, 'element 0 is None'
 	except Exception, e:
-		luci_log.debug_verbose('getClusterStatusBatch3: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB3: %r' % e)
 		return None
 
 	try:
@@ -337,12 +349,13 @@
 		doc.appendChild(cluster_node)
 		return doc
 	except Exception, e:
-		luci_log.debug_verbose('getClusterStatusBatch4: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB4: %r' % e)
 
 	return None
 
 def setClusterConf(rc, clusterconf, propagate=True):
-	if propagate == True:
+	if propagate is True:
 		propg = 'true'
 	else:
 		propg = 'false'
@@ -372,8 +385,9 @@
 		if not log_entries or len(log_entries) < 1:
 			raise Exception, 'no log data is available.'
 	except Exception, e:
-		luci_log.debug_verbose('Error retrieving log data from %s: %s' \
-			% (rc.hostname(), str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL0: retrieving log data from %s: %r' \
+				% (rc.hostname(), e))
 		return None
 
 	time_now = time()
@@ -524,7 +538,7 @@
 		else:
 			raise Exception, op
 	except Exception, e:
-		elem.setAttribute('message', 'Unknown operation: %s' % str(e))
+		elem.setAttribute('message', 'Unknown operation: %r' % e)
 		doc.appendChild(elem)
 		return doc
 
@@ -564,13 +578,15 @@
 	batch_str = '<module name="service"><request API_version="1.0"><function_call name="list"><var mutable="false" name="description" type="boolean" value="true"/></function_call></request></module>'
 	ricci_xml = rc.batch_run(batch_str, async=False)
 	if not ricci_xml or not ricci_xml.firstChild:
-		luci_log.debug_verbose('LS0: None returned')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LS0: None returned')
 		return None
 	try:
 		service_tags = ricci_xml.getElementsByTagName('service')
 		return service_tags
 	except Exception, e:
-		luci_log.debug_verbose('LS1: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LS1: %r' % e)
 	return None
 
 def nodeIsVirtual(rc):
@@ -578,13 +594,15 @@
 
 	ricci_xml = rc.batch_run(batch_str, async=False)
 	if not ricci_xml or not ricci_xml.firstChild:
-		luci_log.debug_verbose('nodeIsVirtual0: None returned')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('nodeIsVirtual0: None returned')
 		return None
 
 	var_tags = ricci_xml.getElementsByTagName('var')
 	if not var_tags or len(var_tags) < 2:
-		luci_log.debug_verbose('nodeIsVirtual1: unexpected response %s' \
-			% ricci_xml.toxml())
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('nodeIsVirtual1: unexpected response %s' \
+				% ricci_xml.toxml())
 		return None
 
 	success = False
@@ -605,7 +623,8 @@
 			else:
 				raise Exception, 'unexpected attribute name: %s' % name
 		except Exception, e:
-			luci_log.debug_verbose('nodeIsVirtual2: error: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('nodeIsVirtual2: error: %r' % e)
 
 	if not success:
 		return None
@@ -621,7 +640,8 @@
 
 	ricci_xml = rc.batch_run(''.join(batch_list), async=False)
 	if not ricci_xml or not ricci_xml.firstChild:
-		luci_log.debug_verbose('no ricci_xml in getDaemonStates')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GDS0: no ricci_xml')
 		return None
 	result = extractDaemonInfo(ricci_xml.firstChild)
 	return result
@@ -710,16 +730,18 @@
 		if rc is None:
 			raise Exception, 'unknown error'
 	except Exception, e:
-		luci_log.debug_verbose('Ricci error while connecting to %s: %s' \
-			% (system_info, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCC0: connecting to %s: %r' \
+				% (system_info, e))
 		return None
 	# end workaround
 
 	try:
 		ret = rc.process_batch(batch)
 	except Exception, e:
-		luci_log.debug_verbose('process_batch error for %s: %s' \
-			% (system_info, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCC1: process_batch error for %s: %r' \
+				% (system_info, e))
 		return None
 
 	if not ret:
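
checkBatch() returns (True, msg) both when the batch has completed and
when ricci no longer remembers the batch id, and (-1, msg) on any error;
the value returned for a still-running batch falls outside the hunks
shown here. A hedged polling sketch:

code, msg = checkBatch(rc, batch_id)
if code is True:
	luci_log.info('batch %s finished: %s' % (batch_id, msg))
elif code == -1:
	luci_log.info('batch %s failed on %s: %s' % (batch_id, rc.hostname(), msg))
# Any other code means the batch is still in progress; poll again later.
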
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/08 22:19:35	1.255.2.3
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/14 18:00:14	1.255.2.4
@@ -1,8 +1,5 @@
 from xml.dom import minidom
 import AccessControl
-from conga_constants import *
-import RicciQueries as rq
-from ricci_communicator import RicciCommunicator, RicciError, batch_status, extract_module_status
 
 from ClusterModel.ModelBuilder import ModelBuilder
 from ClusterModel.FailoverDomain import FailoverDomain
@@ -19,22 +16,31 @@
 from ClusterModel.Method import Method
 from ClusterModel.GeneralError import GeneralError
 
+import RicciQueries as rq
 from HelperFunctions import resolveOSType
-from LuciSyslog import LuciSyslog
+from LuciSyslog import get_logger
 from ResourceHandler import create_resource
-from FenceHandler import validateNewFenceDevice, FENCE_OPTS, validateFenceDevice, validate_fenceinstance, FD_VAL_FAIL, FD_VAL_SUCCESS
-
 from system_adapters import validate_svc_update
-
 from homebase_adapters import parseHostForm
 from LuciZope import userAuthenticated
+from LuciClusterInfo import getClusterInfo
+
+from conga_constants import *
 
-from LuciDB import manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, getStorageNode, getClusterNode, delCluster, buildClusterCreateFlags, noNodeFlagsPresent, resolve_nodename, set_node_flag
+from FenceHandler import validateNewFenceDevice, FENCE_OPTS, \
+	validateFenceDevice, validate_fenceinstance, \
+	FD_VAL_FAIL, FD_VAL_SUCCESS
+
+from ricci_communicator import RicciCommunicator, RicciError, \
+	batch_status, extract_module_status
+
+from LuciDB import manageCluster, createClusterSystems, \
+	havePermCreateCluster, setNodeStatus, getStorageNode, \
+	getClusterNode, delCluster, buildClusterCreateFlags, \
+	noNodeStatussPresent, resolve_nodename, set_node_flag, \
+	getRicciAgent
 
-try:
-	luci_log = LuciSyslog()
-except:
-	pass
+luci_log = get_logger()
 
 def parseClusterNodes(self, request, cluster_os):
 	check_certs = False
@@ -58,7 +64,7 @@
 
 	if clusterName is None:
 		luci_log.debug_verbose('PCN0: no cluster name was given')
-		return (False, { 'errors': [ 'No cluster name was given.' ]})
+		return (False, { 'errors': [ 'No cluster name was given' ]})
 
 	shared_storage = False
 	try:
@@ -111,7 +117,7 @@
 
 			try:
 				if prev_auth:
-					messages.append('Host %s is already authenticated.' \
+					messages.append('Host %s is already authenticated' \
 						% cur_host)
 				else:
 					rc.auth(cur_passwd)
@@ -190,7 +196,7 @@
 	clusterName = add_cluster['name']
 
 	if len(clusterName) > 15:
-		errors.append('A cluster\'s name must be less than 16 characters long.')
+		errors.append('A cluster\'s name must be less than 16 characters long')
 
 	try:
 		cluster_os = add_cluster['cluster_os']
@@ -219,9 +225,9 @@
 				raise Exception, 'blank'
 			num_lockservers = len(lockservers)
 			if not num_lockservers in (1, 3, 5):
-				errors.append('You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers.' % num_lockservers)
+				errors.append('You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers' % num_lockservers)
 		except:
-			errors.append('No lock servers were given.')
+			errors.append('No lock servers were given')
 
 		if len(errors) > 0:
 			try:
@@ -326,7 +332,7 @@
 
 	if clusterName is None:
 		luci_log.debug_verbose('VACN0: no cluster name was given')
-		return (False, { 'errors': [ 'No cluster name was given.' ]})
+		return (False, { 'errors': [ 'No cluster name was given' ]})
 
 	if cluster_os is None:
 		cluster_folder = None
@@ -336,7 +342,7 @@
 				raise Exception, 'cluster DB object is missing'
 		except Exception, e:
 			luci_log.debug_verbose('VACN1: %s: %s' % (clusterName, str(e)))
-			return (False, { 'errors': [ 'The database object for %s is missing.' % clusterName ] })
+			return (False, { 'errors': [ 'The database object for %s is missing' % clusterName ] })
 
 		try:
 			cluster_os = cluster_folder.manage_getProperty('cluster_os')
@@ -356,7 +362,7 @@
 
 	if cluster_os is None:
 		luci_log.debug_verbose('Unable to determine cluster OS for %s' % clusterName)
-		return (False, { 'errors': [ 'Unable to determine the version of the cluster suite this cluster is running.' ] })
+		return (False, { 'errors': [ 'Unable to determine the version of the cluster suite this cluster is running' ] })
 
 	shared_storage = False
 	try:
@@ -409,7 +415,7 @@
 
 			try:
 				if prev_auth:
-					messages.append('Host %s is already authenticated.' \
+					messages.append('Host %s is already authenticated' \
 						% cur_host)
 				else:
 					rc.auth(cur_passwd)
@@ -473,7 +479,7 @@
 			raise Exception, 'Unable to find a ricci agent for %s' % clusterName
 	except Exception, e:
 		incomplete = True
-		errors.append('Unable to contact a ricci agent for %s.' % clusterName)
+		errors.append('Unable to contact a ricci agent for %s' % clusterName)
 		luci_log.debug_verbose('VACN10: %s: %s' % (clusterName, str(e)))
 
 	if incomplete or len(errors) > 0:
@@ -554,14 +560,14 @@
 		model.setModified(True)
 		conf_str = str(model.exportModelAsString())
 		if not conf_str:
-			raise Exception, 'Unable to save the new cluster model.'
+			raise Exception, 'Unable to save the new cluster model'
 
 		batch_number, result = rq.setClusterConf(cluster_ricci, conf_str)
 		if not batch_number or not result:
 			raise Exception, 'batch or result is None'
 	except Exception, e:
 		incomplete = True
-		errors.append('Unable to save the new cluster model.')
+		errors.append('Unable to save the new cluster model')
 		luci_log.debug_verbose('VACN14: %s' % str(e))
 
 	# Propagate the new cluster.conf to the existing nodes
@@ -648,7 +654,7 @@
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
 		luci_log.debug_verbose('vSA0: no form_xml: %s' % str(e))
-		return (False, {'errors': ['No resource data was supplied for this service.']})
+		return (False, {'errors': ['No resource data was supplied for this service']})
 
 	try:
 		model = request.SESSION.get('model')
@@ -656,7 +662,7 @@
 			raise Exception, 'model is None'
 	except Exception, e:
 		luci_log.debug_verbose('vSA0a: %s' % str(e))
-		return (False, {'errors': [ 'The cluster model is missing from the session object.' ]})
+		return (False, {'errors': [ 'The cluster model is missing from the session object' ]})
 
 	try:
 		doc = minidom.parseString(form_xml)
@@ -665,7 +671,7 @@
 			raise
 	except Exception, e:
 		luci_log.debug_verbose('vSA1: error: %s' % str(e))
-		return (False, {'errors': ['The resource data submitted for this service is not properly formed.']})
+		return (False, {'errors': ['The resource data submitted for this service is not properly formed']})
 
 	form_hash = {}
 	form_hash['toplevel'] = {'form': None, 'kids': [] }
@@ -711,11 +717,11 @@
 				dummy_form['resourceName'] = dummy_form['ip_address']
 		except Exception, e:
 			luci_log.debug_verbose('vSA3a: type is ip but no addr: %s' % str(e))
-			return (False, {'errors': [ 'No IP address was given.' ]})
+			return (False, {'errors': [ 'No IP address was given' ]})
 
 		try:
 			if dummy_form.has_key('immutable'):
-				newRes = getResource(model, dummy_form['resourceName'])
+				newRes = model.getResourceByName(dummy_form['resourceName'])
 				resObj = RefObject(newRes)
 				resObj.setRef(newRes.getName())
 			else:
@@ -754,7 +760,7 @@
 		service_name = request.form['svc_name'].strip()
 	except Exception, e:
 		luci_log.debug_verbose('vSA5: no service name: %s' % str(e))
-		return (False, {'errors': [ 'No service name was given.' ]})
+		return (False, {'errors': [ 'No service name was given' ]})
 
 	autostart = "1"
 	try:
@@ -785,14 +791,14 @@
 	try:
 		if request.form['action'] == 'edit':
 			if cur_service is None:
-				return (False, {'errors': [ 'The service %s could not be found for editing.' % service_name ]})
+				return (False, {'errors': [ 'The service %s could not be found for editing' % service_name ]})
 			model.deleteService(service_name)
 		elif request.form['action'] == 'add':
 			if cur_service is not None:
-				return (False, {'errors': [ 'A service with the name %s already exists.' % service_name ]})
+				return (False, {'errors': [ 'A service with the name %s already exists' % service_name ]})
 		else:
 			luci_log.debug_verbose('vSA4a: unknown action %s' % request.form['action'])
-			return (False, {'errors': [ 'An unknown action was specified.' ]})
+			return (False, {'errors': [ 'An unknown action was specified' ]})
 	except Exception, e:
 		luci_log.debug_verbose('vSA5: no action type: %s' % str(e))
 
@@ -835,7 +841,7 @@
 	except Exception, e:
 		luci_log.debug_verbose('vAS6a: exportModelAsString : %s' \
 			% str(e))
-		return (False, {'errors': [ 'An error occurred while adding this service.' ]})
+		return (False, {'errors': [ 'An error occurred while adding this service' ]})
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
@@ -851,7 +857,7 @@
 		batch_number, result = rq.setClusterConf(rc, str(conf))
 		if batch_number is None or result is None:
 			luci_log.debug_verbose('vAS8: missing batch_number or result')
-			raise Exception, 'unable to save the new cluster configuration.'
+			raise Exception, 'unable to save the new cluster configuration'
 	except Exception, e:
 		luci_log.debug_verbose('vAS9: %s' % str(e))
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
@@ -875,7 +881,7 @@
 			raise KeyError, 'type is blank'
 	except Exception, e:
 		luci_log.debug_verbose('VRA0: type is blank')
-		return (False, {'errors': ['No resource type was given.']})
+		return (False, {'errors': ['No resource type was given']})
 
 	try:
 		model = request.SESSION.get('model')
@@ -912,7 +918,7 @@
 	try:
 		gulm_ptr = model.getGULMPtr()
 		if gulm_ptr:
-			return (False, {'errors': ['Multicast cannot be used with GULM locking.']})
+			return (False, {'errors': ['Multicast cannot be used with GULM locking']})
 	except:
 		pass
 
@@ -946,7 +952,7 @@
 		addr_str = None
 
 	if (addr_str is None and mcast_manual != True) or (mcast_manual == True and addr_str == model.getMcastAddr()):
-		errors.append('No multicast configuration changes were made.')
+		errors.append('No multicast configuration changes were made')
 		return (False, {'errors': errors})
 
 	try:
@@ -1044,7 +1050,7 @@
 		if num_heuristics < 1:
 			raise ValueError, form['num_heuristics']
 	except KeyError, e:
-		errors.append('No number of heuristics was given.')
+		errors.append('No number of heuristics was given')
 	except ValueError, e:
 		errors.append('An invalid number of heuristics was given: %s' % str(e))
 
@@ -1135,20 +1141,20 @@
 		if not cluster_name:
 			raise KeyError('cluname')
 	except KeyError, e:
-		errors.append('No cluster name was given.')
+		errors.append('No cluster name was given')
 
 	if len(cluster_name) > 15:
-		errors.append('A cluster\'s name must be less than 16 characters long.')
+		errors.append('A cluster\'s name must be less than 16 characters long')
 
 	try:
 		version_num = int(form['cfgver'])
 		if version_num < old_ver:
-			raise ValueError, 'configuration version number must be %d or greater.' % old_ver
+			raise ValueError, 'configuration version number must be %d or greater' % old_ver
 		if version_num != old_ver:
 			# we'll increment the cluster version before propagating it.
 			version_num -= 1
 	except KeyError, e:
-		errors.append('No cluster configuration version was given.')
+		errors.append('No cluster configuration version was given')
 	except ValueError, e:
 		errors.append('An invalid configuration version was given: %s' % str(e))
 
@@ -1159,7 +1165,7 @@
 			cp.setConfigVersion(str(version_num))
 		except Exception, e:
 			luci_log.debug_verbose('unable to update general properties: %s' % str(e))
-			errors.append('Unable to update the cluster configuration.')
+			errors.append('Unable to update the cluster configuration')
 
 	try:
 		cluster_version = form['cluster_version'].strip()
@@ -1246,23 +1252,23 @@
 	errors = list()
 
 	if model.getGULMPtr() is not None:
-		return (False, {'errors': [ 'GULM clusters do not support fenced.' ]})
+		return (False, {'errors': [ 'GULM clusters do not support fenced' ]})
 
 	try:
 		post_fail_delay = int(form['post_fail_delay'])
 		if post_fail_delay < 0:
-			raise ValueError('post fail delay values must be 0 or greater.')
+			raise ValueError('post fail delay values must be 0 or greater')
 	except KeyError, e:
-		errors.append('No post fail delay was given.')
+		errors.append('No post fail delay was given')
 	except ValueError, e:
 		errors.append('Invalid post fail delay: %s' % str(e))
 
 	try:
 		post_join_delay = int(form['post_join_delay'])
 		if post_join_delay < 0:
-			raise ValueError('post join delay values must be 0 or greater.')
+			raise ValueError('post join delay values must be 0 or greater')
 	except KeyError, e:
-		errors.append('No post join delay was given.')
+		errors.append('No post join delay was given')
 	except ValueError, e:
 		errors.append('Invalid post join delay: %s' % str(e))
 
@@ -1284,13 +1290,13 @@
 		old_pf_delay = fd.getPostFailDelay()
 
 		if post_join_delay == old_pj_delay and post_fail_delay == old_pf_delay:
-			errors.append('No fence daemon properties were changed.')
+			errors.append('No fence daemon properties were changed')
 		else:
 			fd.setPostJoinDelay(str(post_join_delay))
 			fd.setPostFailDelay(str(post_fail_delay))
 	except Exception, e:
 		luci_log.debug_verbose('Unable to update fence daemon properties: %s' % str(e))
-		errors.append('An error occurred while attempting to update fence daemon properties.')
+		errors.append('An error occurred while attempting to update fence daemon properties')
 
 	if len(errors) > 0:
 		return (False, {'errors': errors })
@@ -1300,7 +1306,7 @@
 def validateGULMConfig(model, form):
 	gulm_ptr = model.getGULMPtr()
 	if not gulm_ptr:
-		return (False, {'errors': [ 'This cluster appears not to be using GULM locking.' ]})
+		return (False, {'errors': [ 'This cluster appears not to be using GULM locking' ]})
 
 	node_list = map(lambda x: x.getName(), gulm_ptr.getChildren())
 	for i in map(lambda x: x.getName(), model.getNodes()):
@@ -1327,7 +1333,7 @@
 
 	num_ls = len(gulm_lockservers)
 	if not num_ls in (1, 3, 5):
-		return (False, {'errors': [ 'You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers.' % num_ls ]})
+		return (False, {'errors': [ 'You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers' % num_ls ]})
 
 	model.GULM_ptr.children = gulm_lockservers
 	return (True, {})
@@ -1341,123 +1347,110 @@
 }
 
 def validateConfigCluster(self, request):
-  errors = list()
-  messages = list()
-  rc = None
+	errors = list()
+	messages = list()
+	rc = None
 
-  try:
-    model = request.SESSION.get('model')
-    if not model:
-      raise Exception, 'model is none'
-  except Exception, e:
-    model = None
-    try:
-      cluname = request.form['clustername']
-    except:
-      try:
-        cluname = request['clustername']
-      except:
-        luci_log.debug_verbose('VCC0a: no model, no cluster name')
-        return (False, {'errors': ['No cluster model was found.']})
+	try:
+		model = LuciExtractCluModel(self, request)
+	except Exception, e:
+		luci_log.debug_verbose('VCC0a: no model, no cluster name')
+		return (False, {'errors': ['No cluster model was found']})
 
-    try:
-      model = getModelForCluster(self, cluname)
-    except:
-      model = None
+	try:
+		if not request.form.has_key('configtype'):
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC2: no configtype')
+			raise Exception, 'no config type'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC2a: %r' % e)
+		return (False, {'errors': [ 'No configuration type was submitted' ]})
 
-    if model is None:
-      luci_log.debug_verbose('VCC0: unable to get model from session')
-      return (False, {'errors': ['No cluster model was found.']})
-  try:
-    if not 'configtype' in request.form:
-      luci_log.debug_verbose('VCC2: no configtype')
-      raise Exception, 'no config type'
-  except Exception, e:
-    luci_log.debug_verbose('VCC2a: %s' % str(e))
-    return (False, {'errors': ['No configuration type was submitted.']})
+	if not configFormValidators.has_key(request.form['configtype']):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC3: invalid config type: %s' \
+				% request.form['configtype'])
+		return (False, { 'errors': [ 'An invalid configuration type was submitted' ]})
 
-  if not request.form['configtype'] in configFormValidators:
-    luci_log.debug_verbose('VCC3: invalid config type: %s' % request.form['configtype'])
-    return (False, {'errors': ['An invalid configuration type was submitted.']})
+	try:
+		cp = model.getClusterPtr()
+	except:
+		luci_log.debug_verbose('VCC3a: getClusterPtr failed')
+		return (False, {'errors': ['No cluster model was found']})
 
-  try:
-    cp = model.getClusterPtr()
-  except:
-    luci_log.debug_verbose('VCC3a: getClusterPtr failed')
-    return (False, {'errors': ['No cluster model was found.']})
+	config_validator = configFormValidators[request.form['configtype']]
+	ret = config_validator(model, request.form)
 
-  config_validator = configFormValidators[request.form['configtype']]
-  ret = config_validator(model, request.form)
+	retcode = ret[0]
+	if 'errors' in ret[1]:
+		errors.extend(ret[1]['errors'])
 
-  retcode = ret[0]
-  if 'errors' in ret[1]:
-    errors.extend(ret[1]['errors'])
+	if 'messages' in ret[1]:
+		messages.extend(ret[1]['messages'])
 
-  if 'messages' in ret[1]:
-    messages.extend(ret[1]['messages'])
+	if retcode is True:
+		try:
+			config_ver = int(cp.getConfigVersion()) + 1
+			# always increment the configuration version
+			cp.setConfigVersion(str(config_ver))
+			model.setModified(True)
+			conf_str = model.exportModelAsString()
+			if not conf_str:
+				raise Exception, 'conf_str is none'
+		except Exception, e:
+			luci_log.debug_verbose('VCC4: export model as string failed: %s' \
+				% str(e))
+			errors.append('Unable to store the new cluster configuration')
 
-  if retcode == True:
-    try:
-      config_ver = int(cp.getConfigVersion()) + 1
-      # always increment the configuration version
-      cp.setConfigVersion(str(config_ver))
-      model.setModified(True)
-      conf_str = model.exportModelAsString()
-      if not conf_str:
-        raise Exception, 'conf_str is none'
-    except Exception, e:
-      luci_log.debug_verbose('VCC4: export model as string failed: %s' \
- 			% str(e))
-      errors.append('Unable to store the new cluster configuration')
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'cluster name from model.getClusterName() is blank'
+	except Exception, e:
+		luci_log.debug_verbose('VCC5: error: getClusterName: %r' % e)
+		errors.append('Unable to determine cluster name from model')
 
-  try:
-    clustername = model.getClusterName()
-    if not clustername:
-      raise Exception, 'cluster name from model.getClusterName() is blank'
-  except Exception, e:
-    luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
-    errors.append('Unable to determine cluster name from model')
+	if len(errors) > 0:
+		return (retcode, {'errors': errors, 'messages': messages})
 
-  if len(errors) > 0:
-    return (retcode, {'errors': errors, 'messages': messages})
+	if not rc:
+		rc = getRicciAgent(self, clustername)
+	if not rc:
+		luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
+		errors.append('Unable to contact a ricci agent for cluster %s' \
+			% clustername)
 
-  if not rc:
-    rc = getRicciAgent(self, clustername)
-    if not rc:
-      luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
-      errors.append('Unable to contact a ricci agent for cluster %s' \
-      % clustername)
-
-  if rc:
-    batch_id, result = rq.setClusterConf(rc, str(conf_str))
-    if batch_id is None or result is None:
-      luci_log.debug_verbose('VCC7: setCluserConf: batchid or result is None')
-      errors.append('Unable to propagate the new cluster configuration for %s' \
-      % clustername)
-    else:
-      try:
-        set_node_flag(self, clustername, rc.hostname(), batch_id, CLUSTER_CONFIG, 'Updating cluster configuration')
-      except:
-        pass
+	if rc:
+		batch_id, result = rq.setClusterConf(rc, str(conf_str))
+		if batch_id is None or result is None:
+			luci_log.debug_verbose('VCC7: setClusterConf: batchid or result is None')
+			errors.append('Unable to propagate the new cluster configuration for %s' % clustername)
+		else:
+			try:
+				set_node_flag(self, clustername, rc.hostname(), batch_id, CLUSTER_CONFIG, 'Updating cluster configuration')
+			except:
+				pass
 
-  if len(errors) < 1:
-    messages.append('The cluster properties have been updated.')
-  else:
-    return (retcode, {'errors': errors, 'messages': messages})
+	if len(errors) < 1:
+		messages.append('The cluster properties have been updated')
+	else:
+		return (retcode, {'errors': errors, 'messages': messages})
 
-  response = request.RESPONSE
-  response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-	% (request['URL'], CLUSTER_CONFIG, clustername))
+	response = request.RESPONSE
+	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+		% (request['URL'], CLUSTER_CONFIG, clustername))
 
 def LuciExtractCluName(self, request):
 	cluname = None
+
 	try:
-		cluname = request.form['clustername']
-    except Exception, e:
-		try:
-			cluname = request['clustername']
-		except Exception, e:
-			cluname = None
+		if request.form.has_key('clustername'):
+			cluname = request.form['clustername'].strip()
+		if not cluname and request.has_key('clustername'):
+			cluname = request['clustername'].strip()
+	except Exception, e:
+		pass
 
 	if not cluname:
 		luci_log.debug_verbose('LECN0: no cluster name')
@@ -1482,49 +1475,38 @@
 		if not model:
 			luci_log.debug_verbose('LECM1: empty model')
 			model = None
-    except Exception, e:
+	except Exception, e:
 		luci_log.debug_verbose('LECM2: no model: %s' % str(e))
 		model = None
 	return model
 
-def LuciFenceAddReq(self, request):
+def validateFenceAdd(self, request):
+	errors = list()
+
 	model = LuciExtractCluModel(self, request)
+
 	if not model:
 		luci_log.debug_verbose('VFE0: no model')
-		return (False, [ 'No cluster model was found.' ])
-
-	ret = validateFenceAdd(request, model)
-	if ret[0] is not True:
-		return (False, { 'errors': ret[1] })
-
-def validateFenceAdd(request, model):
-	errors = list()
-	messages = list()
+		return (False, [ 'No cluster model was found' ])
 
 	if not request.form:
 		luci_log.debug_verbose('VFE: no form was submitted')
-		return (False, [ 'No form was submitted '])
+		return (False, [ 'No form was submitted' ])
 
-	ret_code, ret_msgs = validateNewFenceDevice(form, model)
-	if error_code != FD_VAL_SUCCESS:
-		if ret_msgs:
-			errors.extend(ret_msgs)
+	ret_code, ret_obj = validateNewFenceDevice(request.form, model)
+	if ret_code != FD_VAL_SUCCESS:
+		errors.extend(ret_obj)
 		return (False, errors)
-	if ret_msgs:
-		messages.extend(ret_msgs)
-	return (True, ret_msgs)
-
-    try:
-      conf_str = model.exportModelAsString()
-      if not conf_str:
-        raise Exception, 'conf_str is none'
-    except Exception, e:
-      luci_log.debug_verbose('VFE: export model as string failed: %s' \
-        % str(e))
-      errors.append('Unable to store the new cluster configuration')
 
-    response.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, clustername, retobj))
+	try:
+		conf_str = str(model.exportModelAsString())
+		if not conf_str:
+			raise Exception, 'conf_str is none'
+	except Exception, e:
+		luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+		errors.append('Unable to store the new cluster configuration')
 
+	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, model.getClusterName(), ret_obj.getAttribute('name')))
 
 def validateFenceEdit(self, request):
   errors = list()
@@ -1544,7 +1526,7 @@
         cluname = request['clustername']
       except:
         luci_log.debug_verbose('VFE: no model, no cluster name')
-        return (False, {'errors': ['No cluster model was found.']})
+        return (False, {'errors': ['No cluster model was found']})
 
     try:
       model = getModelForCluster(self, cluname)
@@ -1553,7 +1535,7 @@
 
     if model is None:
       luci_log.debug_verbose('VFE: unable to get model from session')
-      return (False, {'errors': ['No cluster model was found.']})
+      return (False, {'errors': ['No cluster model was found']})
 
   form = None
   try:
@@ -1626,13 +1608,13 @@
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
 		luci_log.debug_verbose('vNFC0: no form_xml: %s' % str(e))
-		return (False, {'errors': ['No fence data was supplied.']})
+		return (False, {'errors': ['No fence data was supplied']})
 
 	try:
 		fence_level = int(request.form['fence_level'].strip())
 	except Exception, e:
 		luci_log.debug_verbose('vNFC1: no fence level: %s' % str(e))
-		return (False, {'errors': ['No fence level was supplied.']})
+		return (False, {'errors': ['No fence level was supplied']})
 
 	try:
 		nodename = request['nodename']
@@ -1640,7 +1622,7 @@
 			raise Exception, 'nodename is blank'
 	except Exception, e:
 		luci_log.debug_verbose('vNFC2: no nodename: %s' % str(e))
-		return (False, {'errors': ['No node name was given.']})
+		return (False, {'errors': ['No node name was given']})
 
 	try:
 		clustername = request['clustername']
@@ -1648,7 +1630,7 @@
 			raise Exception, 'clustername is blank'
 	except Exception, e:
 		luci_log.debug_verbose('vNFC3: no clustername: %s' % str(e))
-		return (False, {'errors': ['No cluster name was given.']})
+		return (False, {'errors': ['No cluster name was given']})
 
 	try:
 		model = request.SESSION.get('model')
@@ -1663,19 +1645,19 @@
 
 	if model is None:
 		luci_log.debug_verbose('vNFC4: unable to get model for cluster %s' % clustername)
-		return (False, {'errors': ['No cluster model was found.']})
+		return (False, {'errors': ['No cluster model was found']})
 
 	try:
 		doc = minidom.parseString(form_xml)
 	except Exception, e:
 		luci_log.debug_verbose('vNFC5: error: %s' % str(e))
-		return (False, {'errors': ['The fence data submitted is not properly formed.']})
+		return (False, {'errors': ['The fence data submitted is not properly formed']})
 
 	try:
 		node = model.retrieveNodeByName(nodename)
 	except GeneralError, e:
 		luci_log.debug_verbose('vNFC6: unable to find node name %s in current node list: %s' % (str(nodename), str(e)))
-		return (False, {'errors': ['Unable to find the cluster node %s in the node list.' % str(nodename) ]})
+		return (False, {'errors': ['Unable to find the cluster node %s in the node list' % str(nodename) ]})
 
 	fence_level_num = int(fence_level)
 	levels = node.getFenceLevels()
@@ -1706,7 +1688,7 @@
 				luci_log.debug_verbose('vNFC6a: %s: %s' % (method_id, str(e)))
 				return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]})
 		else:
-			return (True, {'messages': ['No changes were made.'] })
+			return (True, {'messages': ['No changes were made'] })
 
 	form_hash = {}
 	for i in forms:
@@ -1742,14 +1724,14 @@
 				parent = dummy_form['parent_fencedev']
 			except:
 				luci_log.debug_verbose('vNFC9: no parent for instance')
-				return (False, {'errors': [ 'Unable to determine what device the current instance uses.' ]})
+				return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
 
 			try:
 				form_hash[parent][1].append(dummy_form)
 				del dummy_form['fence_instance']
 			except Exception, e:
 				luci_log.debug_verbose('vNFC10: no parent for instance')
-				return (False, {'errors': [ 'Unable to determine what device the current instance uses.' ]})
+				return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
 		else:
 			form_hash[form_id] = (dummy_form, list())
 
@@ -1779,7 +1761,7 @@
 				if not fencedev_name.strip():
 					raise Exception, 'no fence name'
 			except Exception, e:
-				errors.append('You must provide a unique name for all fence devices.')
+				errors.append('You must provide a unique name for all fence devices')
 				continue
 
 			if fence_type is None:
@@ -1870,7 +1852,7 @@
 	except Exception, e:
 		luci_log.debug_verbose('vNFC17: exportModelAsString failed: %s' \
 			% str(e))
-		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration.' ]})
+		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
@@ -1881,7 +1863,7 @@
 	batch_number, result = rq.setClusterConf(rc, conf)
 	if batch_number is None or result is None:
 		luci_log.debug_verbose('vNFC19: missing batch and/or result')
-		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration.' ]})
+		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	try:
 		set_node_flag(self, clustername, ragent, str(batch_number), FENCEDEV_NODE_CONFIG, "Updating fence configuration for node \'%s\'" % nodename)
@@ -1909,7 +1891,7 @@
         cluname = request['clustername']
       except:
         luci_log.debug_verbose('VFE: no model, no cluster name')
-        return (False, {'errors': ['No cluster model was found.']})
+        return (False, {'errors': ['No cluster model was found']})
 
     try:
       model = getModelForCluster(self, cluname)
@@ -1918,11 +1900,11 @@
 
     if model is None:
       luci_log.debug_verbose('VFE: unable to get model from session')
-      return (False, {'errors': ['No cluster model was found.']})
+      return (False, {'errors': ['No cluster model was found']})
 
   form = None
   try:
-    response = request.response
+    response = request.RESPONSE
     form = request.form
     if not form:
       form = None
@@ -2110,7 +2092,7 @@
 			raise Exception, 'no model'
 	except Exception, e:
 		luci_log.debug_verbose('validateFdom0: no model: %s' % str(e))
-		return (False, {'errors': [ 'Unable to retrieve cluster information.' ]})
+		return (False, {'errors': [ 'Unable to retrieve cluster information' ]})
 
 	prioritized = False
 	try:
@@ -2138,14 +2120,14 @@
 			clustername = None
 
 	if not clustername:
-		errors.append('Unable to determine this cluster\'s name.')
+		errors.append('Unable to determine this cluster\'s name')
 
 	try:
 		name = request.form['name'].strip()
 		if not name:
 			raise Exception, 'blank'
 	except Exception, e:
-		errors.append('No name was given for this failover domain.')
+		errors.append('No name was given for this failover domain')
 		luci_log.debug_verbose('validateFdom0: %s' % str(e))
 
 	oldname = None
@@ -2158,14 +2140,14 @@
 
 	if oldname is None or oldname != name:
 		if model.getFailoverDomainByName(name) is not None:
-			errors.append('A failover domain named "%s" already exists.' % name)
+			errors.append('A failover domain named "%s" already exists' % name)
 
 	fdom = None
 	if oldname is not None:
 		fdom = model.getFailoverDomainByName(oldname)
 		if fdom is None:
 			luci_log.debug_verbose('validateFdom1: No fdom named %s exists' % oldname)
-			errors.append('No failover domain named "%s" exists.' % oldname)
+			errors.append('No failover domain named "%s" exists' % oldname)
 		else:
 			fdom.addAttribute('name', name)
 			fdom.children = list()
@@ -2211,7 +2193,7 @@
 		conf = str(model.exportModelAsString())
 	except Exception, e:
 		luci_log.debug_verbose('validateFdom2: %s' % str(e))
-		errors.append('Unable to update the cluster configuration.')
+		errors.append('Unable to update the cluster configuration')
 
 	if len(errors) > 0:
 		return (False, {'errors': errors })
@@ -2225,7 +2207,7 @@
 	batch_number, result = rq.setClusterConf(rc, conf)
 	if batch_number is None or result is None:
 		luci_log.debug_verbose('validateFdom4: missing batch and/or result')
-		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration.' ]})
+		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	try:
 		if oldname:
@@ -2250,7 +2232,7 @@
 			raise Exception, 'blank'
 	except Exception, e:
 		luci_log.debug_verbose('validateVM0: no vm name: %s' % str(e))
-		errors.append('No virtual machine name was given.')
+		errors.append('No virtual machine name was given')
 
 	try:
 		vm_path = request.form['vmpath'].strip()
@@ -2258,7 +2240,7 @@
 			raise Exception, 'blank'
 	except Exception, e:
 		luci_log.debug_verbose('validateVM1: no vm path: %s' % str(e))
-		errors.append('No path to the virtual machine configuration file was given.')
+		errors.append('No path to the virtual machine configuration file was given')
 
 	autostart = 1
 	try:
@@ -2318,7 +2300,7 @@
 			rmptr.removeChild(xvm)
 			delete_vm = True
 		except:
-			return (False, {'errors': ['No virtual machine service named "%s" exists.' % old_name ]})
+			return (False, {'errors': ['No virtual machine service named "%s" exists' % old_name ]})
 	else:
 		if isNew is True:
 			xvm = Vm()
@@ -2332,7 +2314,7 @@
 				if not xvm:
 					raise Exception, 'not found'
 			except:
-				return (False, {'errors': ['No virtual machine service named "%s" exists.' % old_name ]})
+				return (False, {'errors': ['No virtual machine service named "%s" exists' % old_name ]})
 			xvm.addAttribute('name', vm_name)
 			xvm.addAttribute('path', vm_path)
 
@@ -2369,7 +2351,7 @@
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
 		luci_log.debug_verbose('validateVM3: %s' % str(e))
-		errors.append('Unable to determine the cluster name.')
+		errors.append('Unable to determine the cluster name')
 
 	if len(errors) > 0:
 		return (False, {'errors': errors })
@@ -2377,12 +2359,12 @@
 	rc = getRicciAgent(self, clustername)
 	if not rc:
 		luci_log.debug_verbose('validateVM4: no ricci for %s' % clustername)
-		return (False, {'errors': ['Unable to contact a ricci agent for this cluster.']})
+		return (False, {'errors': ['Unable to contact a ricci agent for this cluster']})
 
 	batch_number, result = rq.setClusterConf(rc, stringbuf)
 	if batch_number is None or result is None:
 		luci_log.debug_verbose('validateVM5: missing batch and/or result')
-		return (False, {'errors': [ 'Error creating virtual machine %s.' % vm_name ]})
+		return (False, {'errors': [ 'Error creating virtual machine %s' % vm_name ]})
 
 	try:
 		if delete_vm is True:
@@ -2542,13 +2524,6 @@
 
   return dummynode
 
-def getnodes(self, model):
-	try:
-		return map(lambda x: str(x.getName()), model.getNodes())
-	except Exception, e:
-		luci_log.debug_verbose('getnodes0: %s' % str(e))
-	return []
-
 def createCluConfigTree(self, request, model):
   dummynode = {}
 
@@ -2973,17 +2948,6 @@
 
   return dummynode
 
-def getClusterName(self, model):
-	return model.getClusterName()
-
-def getClusterAlias(self, model):
-	if not model:
-		return ''
-	alias = model.getClusterAlias()
-	if alias is None:
-		return model.getClusterName()
-	return alias
-
 def getClusterURL(self, request, model):
 	try:
 		clustername = request.clustername
@@ -2996,59 +2960,10 @@
 				raise Exception, 'cluster name from model is blank'
 		except:
 			luci_log.debug_verbose('GCURL0: unable to get cluster name')
-			return '.'
+			return ''
 
 	return '/luci/cluster/index_html?pagetype=7&clustername=%s' % clustername
 
-def getTabs(self, req):
-  ###XXX Make this method return only tabs current user can use
-  portaltabs = list()
-  if not userAuthenticated(self):
-    return portaltabs
-
-  selectedtab = "homebase"
-  try:
-    baseurl = req['URL']
-    if baseurl.find("cluster") > (-1):
-      selectedtab = "cluster"
-    elif baseurl.find("storage") > (-1):
-      selectedtab = "storage"
-    else:
-      selectedtab = "homebase"
-  except KeyError, e:
-    selectedtab = None
-
-  htab = { 'Title':"homebase",
-           'Description':"Home base for this luci server",
-           'Taburl':"/luci/homebase"}
-  if selectedtab == "homebase":
-    htab['isSelected'] = True
-  else:
-    htab['isSelected'] = False
-
-
-  ctab = { 'Title':"cluster",
-           'Description':"Cluster configuration page",
-           'Taburl':"/luci/cluster/index_html?pagetype=3"}
-  if selectedtab == "cluster":
-    ctab['isSelected'] = True
-  else:
-    ctab['isSelected'] = False
-
-  stab = { 'Title':"storage",
-           'Description':"Storage configuration page",
-           'Taburl':"/luci/storage"}
-  if selectedtab == "storage":
-    stab['isSelected'] = True
-  else:
-    stab['isSelected'] = False
-
-  portaltabs.append(htab)
-  portaltabs.append(ctab)
-  portaltabs.append(stab)
-
-  return portaltabs
-
 def getRicciAgentForCluster(self, req):
 	clustername = None
 	try:
@@ -3380,7 +3295,7 @@
 					raise Exception, 'cluname is blank'
 			except:
 				luci_log.debug_verbose('CTP0: no model/no cluster name')
-				return 'Unable to determine the cluster name.'
+				return 'Unable to determine the cluster name'
 		try:
 			model = getModelForCluster(self, cluname)
 		except Exception, e:
@@ -3402,7 +3317,7 @@
 		if ret is not None:
 			redirect_page = ret
 	else:
-		return 'An unknown cluster task was requested.'
+		return 'An unknown cluster task was requested'
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -3420,7 +3335,7 @@
 		return None
 
 	objname = '%s____flag' % nodename_resolved
-	fnpresent = noNodeFlagsPresent(self, nodefolder, objname, nodename_resolved)
+	fnpresent = noNodeStatussPresent(self, nodefolder, objname, nodename_resolved)
 
 	if fnpresent is None:
 		luci_log.debug('NL1: An error occurred while checking flags for %s' \
@@ -3609,12 +3524,12 @@
 			rc = None
 			try:
 				snode = getStorageNode(self, node[1].getId())
-				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
 			except:
 				pass
 
 			try:
-				setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
 			except:
 				pass
 
@@ -3682,13 +3597,13 @@
 
 			if not rc2.authed():
 				try:
-					setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+					setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
 				except:
 					pass
 
 				try:
 					snode = getStorageNode(self, node[0])
-					setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+					setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
 				except:
 					pass
 
@@ -3784,7 +3699,7 @@
 			clustername = request.form['clustername']
 		except:
 			luci_log.debug('NTP0: missing cluster name')
-			return (False, {'errors': [ 'No cluster name was given.' ]})
+			return (False, {'errors': [ 'No cluster name was given' ]})
 
 	try:
 		nodename = request['nodename']
@@ -3793,7 +3708,7 @@
 			nodename = request.form['nodename']
 		except:
 			luci_log.debug('NTP1: missing node name')
-			return (False, {'errors': [ 'No node name was given.' ]})
+			return (False, {'errors': [ 'No node name was given' ]})
 
 	try:
 		task = request['task']
@@ -3802,7 +3717,7 @@
 			task = request.form['task']
 		except:
 			luci_log.debug('NTP2: missing task')
-			return (False, {'errors': [ 'No node task was given.' ]})
+			return (False, {'errors': [ 'No node task was given' ]})
 
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
 
@@ -3817,17 +3732,17 @@
 		except RicciError, e:
 			luci_log.debug('NTP3: ricci error from %s: %s' \
 				% (nodename_resolved, str(e)))
-			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s.' % nodename_resolved ]})
+			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 		except Exception, e:
 			luci_log.debug('NTP4: ricci error from %s: %s' \
 				% (nodename_resolved, str(e)))
-			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s.' % nodename_resolved ]})
+			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 
 		cluinfo = rc.cluster_info()
 		if not cluinfo[0] and not cluinfo[1]:
 			luci_log.debug('NTP5: node %s not in a cluster (expected %s)' \
 				% (nodename_resolved, clustername))
-			return (False, {'errors': [ 'Node "%s" reports it is not in a cluster.' % nodename_resolved ]})
+			return (False, {'errors': [ 'Node "%s" reports it is not in a cluster' % nodename_resolved ]})
 
 		cname = clustername.lower()
 		if cname != cluinfo[0].lower() and cname != cluinfo[1].lower():
@@ -3838,14 +3753,14 @@
 			rc = None
 			try:
 				snode = getStorageNode(self, nodename)
-				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
 			except:
 				# we'll hit it again, and try again then
 				pass
 
 			try:
 				cnode = getClusterNode(self, nodename, clustername)
-				setNodeFlag(cnode, CLUSTER_NODE_NEED_AUTH)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
 			except:
 				# we'll hit it again, and try again then
 				pass
@@ -3853,7 +3768,7 @@
 		if rc is None:
 			luci_log.debug('NTP7: node %s is not authenticated' \
 				% nodename_resolved)
-			return (False, {'errors': [ 'Node "%s" is not authenticated.' % nodename_resolved ]})
+			return (False, {'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
 
 	if task == NODE_LEAVE_CLUSTER:
 		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
@@ -3874,7 +3789,7 @@
 	elif task == NODE_REBOOT:
 		if forceNodeReboot(self, rc, clustername, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP10: nodeReboot failed')
-			return (False, {'errors': [ 'Node "%s" failed to reboot.' \
+			return (False, {'errors': [ 'Node "%s" failed to reboot' \
 				% nodename_resolved ]})
 
 		response = request.RESPONSE
@@ -3883,7 +3798,7 @@
 	elif task == NODE_FENCE:
 		if forceNodeFence(self, clustername, nodename, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP11: nodeFence failed')
-			return (False, {'errors': [ 'Fencing of node "%s" failed.' \
+			return (False, {'errors': [ 'Fencing of node "%s" failed' \
 				% nodename_resolved]})
 
 		response = request.RESPONSE
@@ -3892,7 +3807,7 @@
 	elif task == NODE_DELETE:
 		if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP12: nodeDelete failed')
-			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed.' % (nodename_resolved, clustername) ]})
+			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
 
 		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -3941,18 +3856,18 @@
 	if not rc.authed():
 		try:
 			snode = getStorageNode(self, nodename)
-			setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
 		except:
 			pass
 
 		if clustername:
 			try:
 				cnode = getClusterNode(self, nodename, clustername)
-				setNodeFlag(cnode, CLUSTER_NODE_NEED_AUTH)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
 			except:
 				pass
 
-		return 'Luci is not authenticated to node %s. Please reauthenticate first.' % nodename
+		return 'Luci is not authenticated to node %s. Please reauthenticate first' % nodename
 
 	return rq.getNodeLogs(rc)
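
The node task branches above share one calling shape. As a hedged sketch
(not code from this commit), the NODE_LEAVE_CLUSTER and NODE_REBOOT cases
could be expressed as a dispatch table, since nodeLeave and forceNodeReboot
take the same arguments; NODE_FENCE and NODE_DELETE take different argument
lists and would keep their explicit branches:

	# Hypothetical dispatch table; nodeLeave and forceNodeReboot are the
	# handlers called by the branches above, and the NODE_* constants come
	# from conga_constants.
	NODE_TASK_HANDLERS = {
		NODE_LEAVE_CLUSTER: nodeLeave,
		NODE_REBOOT: forceNodeReboot,
	}

	def run_node_task(self, task, rc, clustername, nodename_resolved):
		# Unknown tasks fall through to an error, as in the code above.
		handler = NODE_TASK_HANDLERS.get(task)
		if handler is None:
			return (False, {'errors': [ 'An unknown node task was given' ]})
		# Handlers signal failure by returning None.
		if handler(self, rc, clustername, nodename_resolved) is None:
			return (False, {'errors': [ 'Task "%s" failed for node "%s"' % (task, nodename_resolved) ]})
		return (True, {})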
 
@@ -4130,7 +4045,7 @@
 						node_report['statusmessage'] = '%s%s' % (RICCI_CONNECT_FAILURE_MSG, PRE_JOIN)
 					else:
 						node_report['statusindex'] = 0
-						node_report['statusmessage'] = '%s Install is in an unknown state.' % RICCI_CONNECT_FAILURE_MSG
+						node_report['statusmessage'] = '%s Install is in an unknown state' % RICCI_CONNECT_FAILURE_MSG
 					nodereports.append(node_report)
 					continue
 				elif creation_status == -(INSTALL_TASK):
@@ -4307,7 +4222,7 @@
 
 	if name is None:
 		luci_log.debug_verbose('delService1: no service name')
-		return (False, {'errors': [ '%s: no service name was provided.' % errstr ]})
+		return (False, {'errors': [ '%s: no service name was provided' % errstr ]})
 
 	clustername = None
 	try:
@@ -4320,12 +4235,12 @@
 
 	if clustername is None:
 		luci_log.debug_verbose('delService2: no cluster name for %s' % name)
-		return (False, {'errors': [ '%s: no cluster name was provided.' % errstr ]})
+		return (False, {'errors': [ '%s: no cluster name was provided' % errstr ]})
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
 		luci_log.debug_verbose('delService3: unable to get ricci agent for cluster %s' % clustername)
-		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster.' % errstr ]})
+		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
 
 	try:
 		ragent = rc.hostname()
@@ -4333,7 +4248,7 @@
 			raise Exception, 'unable to determine the hostname of the ricci agent'
 	except Exception, e:
 		luci_log.debug_verbose('delService4: %s: %s' % (errstr, str(e)))
-		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster.' % errstr ]})
+		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
 
 	try:
 		model.deleteService(name)
@@ -4385,7 +4300,7 @@
 
 	if name is None:
 		luci_log.debug_verbose('delResource1: no resource name')
-		return '%s: no resource name was provided.' % errstr
+		return '%s: no resource name was provided' % errstr
 
 	clustername = None
 	try:
@@ -4398,7 +4313,7 @@
 
 	if clustername is None:
 		luci_log.debug_verbose('delResource2: no cluster name for %s' % name)
-		return '%s: could not determine the cluster name.' % errstr
+		return '%s: could not determine the cluster name' % errstr
 
 	try:
 		ragent = rc.hostname()
@@ -4406,7 +4321,7 @@
 			raise Exception, 'unable to determine the hostname of the ricci agent'
 	except Exception, e:
 		luci_log.debug_verbose('delResource3: %s: %s' % (errstr, str(e)))
-		return '%s: could not determine the ricci agent hostname.' % errstr
+		return '%s: could not determine the ricci agent hostname' % errstr
 
 	resPtr = model.getResourcesPtr()
 	resources = resPtr.getChildren()
@@ -4420,7 +4335,7 @@
 
 	if not found:
 		luci_log.debug_verbose('delResource4: cant find res %s' % name)
-		return '%s: the specified resource was not found.' % errstr
+		return '%s: the specified resource was not found' % errstr
 
 	try:
 		model.setModified(True)
@@ -4482,7 +4397,7 @@
 		batch_number, result = rq.setClusterConf(rc, str(conf))
 		if batch_number is None or result is None:
 			luci_log.debug_verbose('addResource5: missing batch_number or result')
-			raise Exception, 'unable to save the new cluster configuration.'
+			raise Exception, 'unable to save the new cluster configuration'
 	except Exception, e:
 		luci_log.debug_verbose('addResource6: %s' % str(e))
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
@@ -4512,7 +4427,7 @@
 		request.SESSION.set('model', model)
 	except Exception, e:
 		luci_log.debug_verbose('Appending model to request failed: %r' % e)
-		return 'An error occurred while storing the cluster model.'
+		return 'An error occurred while storing the cluster model'
 
 def getModelBuilder(self, rc, isVirtualized):
 	try:
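
The cluster_adapters functions above (propagateClusterConfAsync,
validateFenceAdd, validateFdom, validateVM) repeat one save-and-propagate
flow: export the model, push the configuration through a ricci agent, and
record the pending batch so the busy-wait page can poll it. A hedged sketch
of that flow, using the calls shown in the diff (the helper name
propagate_model is illustrative and not part of this commit; rq,
getRicciAgent and set_node_flag are assumed imported as at the top of the
module):

	def propagate_model(self, model, clustername, flag_type, flag_msg):
		# Serialize the in-memory model to cluster.conf XML.
		try:
			conf_str = str(model.exportModelAsString())
			if not conf_str:
				raise Exception, 'conf_str is blank'
		except Exception, e:
			return 'Unable to serialize the cluster model: %r' % e

		# Find a reachable, authenticated ricci agent for this cluster.
		rc = getRicciAgent(self, clustername)
		if not rc:
			return 'Unable to contact a ricci agent for %s' % clustername

		# Ask the agent to distribute the new configuration.
		batch_id, result = rq.setClusterConf(rc, conf_str)
		if batch_id is None or result is None:
			return 'Unable to propagate the new configuration for %s' % clustername

		# Record the pending batch so the busy-wait page can poll it.
		try:
			set_node_flag(self, clustername, rc.hostname(), batch_id, flag_type, flag_msg)
		except:
			pass
		return None
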
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2007/05/04 19:10:24	1.50.2.2
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2007/05/14 18:00:14	1.50.2.3
@@ -2,10 +2,10 @@
 	STORAGE_FOLDER_PATH, CLUSTER_FOLDER_PATH
 
 from RicciQueries import getClusterConf
-from LuciSyslog import LuciSyslog
+from LuciSyslog import get_logger
 from HelperFunctions import resolveOSType
 
-from LuciDB import	delCluster, delNodeFlag, delSystem, getClusterNode, \
+from LuciDB import	delCluster, clearNodeStatus, delSystem, getClusterNode, \
 					getClusters, getStorage, getStorageNode, \
 					havePermAddCluster, havePermAddUser, havePermDelUser, \
 					havePermEditPerms, havePermRemCluster, havePermRemStorage, \
@@ -13,7 +13,6 @@
 
 from ricci_communicator import RicciCommunicator
 
-
 # Homebase area page types
 HOMEBASE_ADD_USER				= '1'
 HOMEBASE_ADD_SYSTEM				= '2'
@@ -24,50 +23,42 @@
 HOMEBASE_ADD_CLUSTER_INITIAL	= '7'
 HOMEBASE_AUTH					= '8'
 
-try:
-	luci_log = LuciSyslog()
-except:
-	pass
+luci_log = get_logger()
 
 def validateDelSystem(self, request):
 	errors = list()
 	messages = list()
 
 	if '__SYSTEM' in request.form:
-		sysNames = request.form['__SYSTEM']
-		for i in sysNames:
-			if i == '':
+		system_names = request.form['__SYSTEM']
+		for i in system_names:
+			if not i:
 				continue
-			dsResult = delSystem(self, i)
-			if dsResult:
-				errors.append(dsResult)
+			err_msg = delSystem(self, i)
+			if err_msg:
+				errors.append(err_msg)
 			else:
 				messages.append('Removed storage system "%s" successfully' % i)
 
 	if '__CLUSTER' in request.form:
-		cluNames = request.form['__CLUSTER']
-		for i in cluNames:
-			if i == '':
+		cluster_names = request.form['__CLUSTER']
+		for i in cluster_names:
+			if not i:
 				continue
-			dcResult = delCluster(self, i)
-			if dcResult:
-				errors.append(dcResult)
+			err_msg = delCluster(self, i)
+			if err_msg:
+				errors.append(err_msg)
 			else:
 				messages.append('Removed cluster "%s" successfully' % i)
 
-	if len(errors) > 0:
-		retCode = False
-	else:
-		retCode = True
-
-	return (retCode, {'messages': messages, 'errors': errors })
+	return (len(errors) < 1, { 'messages': messages, 'errors': errors })
 
 def validateDelUser(self, request):
 	errors = list()
 	messages = list()
 
-	if not 'deluserId' in request.form:
-		return (False, {'errors': [ 'No User Id given' ] })
+	if not request.form.has_key('deluserId'):
+		return (False, { 'errors': [ 'No User ID given' ] })
 	
 	userId = request.form['deluserId']
 
@@ -75,61 +66,66 @@
 		user = self.portal_membership.getMemberById(userId)
 		if not user:
 			raise Exception, 'user %s does not exist' % userId
-	except:
+	except Exception, e:
+		luci_log.debug_verbose('VDU0: %s: %r' % (userId, e))
 		return (False, {'errors': [ 'No such user: "%s"' % userId ] })
 
 	for i in getClusters(self):
 		try:
-			i[1].manage_delLocalRoles([userId])
-		except:
+			i[1].manage_delLocalRoles([ userId ])
+		except Exception, e:
+			luci_log.debug_verbose('VDU1: %s %s: %r' % (userId, i[0], e))
 			errors.append('Error deleting roles from cluster "%s" for user "%s"' % (i[0], userId))
 
 	for i in getStorage(self):
 		try:
-			i[1].manage_delLocalRoles([userId])
-		except:
+			i[1].manage_delLocalRoles([ userId ])
+		except Exception, e:
+			luci_log.debug_verbose('VDU2: %s %s: %r' % (userId, i[0], e))
 			errors.append('Error deleting roles from storage system "%s" for user "%s"' % (i[0], userId))
 
 	try:
-		self.acl_users.userFolderDelUsers([userId])
-	except:
+		self.acl_users.userFolderDelUsers([ userId ])
+	except Exception, e:
+		luci_log.debug_verbose('VDU3: %s %r' % (userId, e))
 		errors.append('Unable to delete user "%s"' % userId)
 		return (False, {'errors': errors })
 
 	messages.append('User "%s" has been deleted' % userId)
-	return (True, {'errors': errors, 'messages': messages })
+	return (True, { 'errors': errors, 'messages': messages })
 
 def validateAddUser(self, request):
 	messages = list()
 
-	if not 'newUserName' in request.form:
-		return (False, { 'errors': ['No user name given'] })
-	if not 'newPassword' in request.form:
-		return (False, { 'errors': ['No password given'] })
-	if not 'newPasswordConfirm' in request.form:
-		return (False, {'errors': ['You didn\'t confirm the password'] })
+	if not request.form.has_key('newUserName'):
+		return (False, { 'errors': [ 'No user name given' ] })
+	if not request.form.has_key('newPassword'):
+		return (False, { 'errors': [ 'No password given' ] })
+	if not request.form.has_key('newPasswordConfirm'):
+		return (False, { 'errors': [ 'You didn\'t confirm the password' ] })
 
-	user = request.form['newUserName']
+	user = request.form['newUserName'].strip()
 
 	if self.portal_membership.getMemberById(user):
-		return (False, {'errors': ['The user "%s" already exists' % user ]})
+		return (False, { 'errors': [ 'The user "%s" already exists' % user ]})
 
 	passwd = request.form['newPassword']
 	pwconfirm = request.form['newPasswordConfirm']
 
 	if passwd != pwconfirm:
-		return (False, {'errors': ['The passwords do not match']})
+		return (False, { 'errors': [ 'The passwords given do not match' ]})
 
 	try:
-		self.portal_registration.addMember(user, passwd, properties = { 'username': user, 'password': passwd, 'confirm': passwd, 'roles': ['Member'], 'domains':[], 'email': '%s@example.com' % user })
-	except:
-		return (False, {'errors': [ 'Unable to add new user "%s"' % user ] })
+		self.portal_registration.addMember(user, passwd, properties = { 'username': user, 'password': passwd, 'confirm': passwd, 'roles': [ 'Member' ], 'domains': [], 'email': '%s@conga.example.com' % user })
+	except Exception, e:
+		luci_log.debug_verbose('VAU0: %s: %r' % (user, e))
+		return (False, { 'errors': [ 'Unable to add new user "%s"' % user ] })
 
 	if not self.portal_membership.getMemberById(user):
-		return (False, {'errors': [ 'Unable to add new user "%s"' % user ] })
+		return (False, { 'errors': [ 'Unable to add new user "%s"' % user ] })
 
 	messages.append('Added new user "%s" successfully' % user)
-	return (True, {'messages': messages, 'params': { 'user': user }})
+	return (True, { 'messages': messages, 'params': { 'user': user }})
 
 def validateAddClusterInitial(self, request):
 	errors = list()
@@ -489,23 +485,14 @@
 		luci_log.debug_verbose('VAC1: no cluster os')
 		errors.append('Unable to determine the version of cluster %s.' % cluster_name)
 
-	check_certs = False
-	try:
-		check_certs = 'check_certs' in request.form
-	except:
-		check_certs = False
+	check_certs = request.form.has_key('check_certs')
+	same_node_passwds = request.form.has_key('allSameCheckBox')
 
 	try:
 		pass_num = int(request.form['pass'].strip()) + 1
 	except:
 		pass_num = 1
 
-	same_node_passwds = False
-	try:
-		same_node_passwds = 'allSameCheckBox' in request.form
-	except:
-		same_node_passwds = False
-
 	add_cluster = { 'name': cluster_name,
 					'pass': pass_num,
 					'cluster_os': cluster_os,
@@ -630,11 +617,7 @@
 	except:
 		pass
 
-	check_certs = False
-	try:
-		check_certs = 'check_certs' in request.form
-	except:
-		check_certs = False
+	check_certs = request.form.has_key('check_certs')
 
 	add_systems, incomplete, errors, messages = parseHostForm(request, check_certs)
 	delete_keys = list()
@@ -691,22 +674,21 @@
 	errors = list()
 
 	username = None
-	if not request.form.has_key('userList'):
-		luci_log.debug_verbose('VP0: no user given')
-		errors.append('No user name was given.')
-	else:
+	if request.form.has_key('userList'):
 		username = request.form['userList'].strip()
 
+	if not username:
+		return (False, { 'errors': [ 'No user name was given' ]})
+
 	user_id = None
-	if username is not None:
-		try:
-			user = self.portal_membership.getMemberById(username)
-			if not user:
-				raise Exception, 'no user'
-			user_id = user.getUserId()
-		except Exception, e:
-			luci_log.debug_verbose('VP1: no user "%s": %s' % (username, str(e)))
-			errors.append('An invalid user "%s" was given.' % username)
+	try:
+		user = self.portal_membership.getMemberById(username)
+		if not user:
+			raise Exception, 'no user'
+		user_id = user.getUserId()
+	except Exception, e:
+		luci_log.debug_verbose('VP1: no user "%s": %r' % (username, e))
+		errors.append('An invalid user "%s" was given' % username)
 
 	if len(errors) > 0:
 		return (False, { 'errors': errors })
@@ -724,7 +706,8 @@
 					else:
 						i[1].manage_delLocalRoles([ user_id ])
 					messages.append('Removed permission for user "%s" for cluster "%s"' % (user_id, i[0]))
-			except:
+			except Exception, e:
+				luci_log.debug_verbose('VP2: %s %s: %r' % (user_id, i[0], e))
 				errors.append('Failed to remove permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 	else:
 		for i in clusters:
@@ -735,7 +718,8 @@
 						roles.append('View')
 						i[1].manage_setLocalRoles(user_id, roles)
 						messages.append('Added permission for user "%s" for cluster "%s"' % (user_id, i[0]))
-				except:
+				except Exception, e:
+					luci_log.debug_verbose('VP3: %s %s %r' % (user_id, i[0], e))
 					errors.append('Failed to add permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 			else:
 				try:
@@ -749,9 +733,11 @@
 							i[1].manage_delLocalRoles([ user_id ])
 
 						messages.append('Removed permission for user "%s" for cluster "%s"' % (user_id, i[0]))
-				except:
+				except Exception, e:
+					luci_log.debug_verbose('VP4: %s %s %r' % (user_id, i[0], e))
 					errors.append('Failed to remove permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 
+
 	storage = self.restrictedTraverse('%s/systems/storage/objectItems' % PLONE_ROOT)('Folder')
 	if not request.form.has_key('__SYSTEM'):
 		for i in storage:
@@ -807,11 +793,7 @@
 	except:
 		pass
 
-	check_certs = False
-	try:
-		check_certs = 'check_certs' in request.form
-	except:
-		check_certs = False
+	check_certs = request.form.has_key('check_certs')
 
 	system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
 	delete_keys = list()
@@ -850,12 +832,12 @@
 						% cur_host)
 				delete_keys.append(i)
 				try:
-					delNodeFlag(self, getStorageNode(self, cur_host), CLUSTER_NODE_NEED_AUTH)
+					clearNodeStatus(self, getStorageNode(self, cur_host), CLUSTER_NODE_NEED_AUTH)
 				except:
 					pass
 
 				try:
-					delNodeFlag(self, getClusterNode(self, cur_host, rc.cluster_info()[0]), CLUSTER_NODE_NEED_AUTH)
+					clearNodeStatus(self, getClusterNode(self, cur_host, rc.cluster_info()[0]), CLUSTER_NODE_NEED_AUTH)
 				except:
 					pass
 			except Exception, e:
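
The homebase validate* adapters above all return the same (success,
status-dict) pair, and this pass collapses the old retCode if/else into
len(errors) < 1. A minimal sketch of that convention (collect_results is an
illustrative name, not part of this commit):

	def collect_results(errors, messages):
		# The operation succeeded iff no errors were accumulated.
		return (len(errors) < 1, { 'errors': errors, 'messages': messages })

Likewise, the checkbox parsing above reduces to
request.form.has_key('check_certs'), since an HTML checkbox appears in the
Zope form mapping only when it was checked.
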
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2007/05/04 19:10:24	1.25.2.2
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2007/05/14 18:00:14	1.25.2.3
@@ -1,482 +1,505 @@
 import xml
 import xml.dom
 from xml.dom import minidom
-from LuciSyslog import LuciSyslog
+from LuciSyslog import get_logger
 from conga_ssl import SSLSocket
+from conga_constants import LUCI_DEBUG_MODE
 
 CERTS_DIR_PATH = '/var/lib/luci/var/certs/'
-
-try:
-    luci_log = LuciSyslog()
-except:
-    pass
+luci_log = get_logger()
 
 class RicciError(Exception):
-    pass
+	pass
 
 class RicciCommunicator:
-    def __init__(self, hostname, enforce_trust=True, port=11111):
-        self.__hostname = hostname
-        self.__port = port
-        
-        self.__timeout_init  = 4
-        self.__timeout_auth  = 4
-        self.__timeout_short = 6
-        self.__timeout_long  = 600
-        
-        self.__privkey_file = '%sprivkey.pem' % CERTS_DIR_PATH
-        self.__cert_file = '%scacert.pem' % CERTS_DIR_PATH
-        
-        try:
-            self.ss = SSLSocket(self.__hostname,
-                                self.__port,
-                                self.__timeout_init)
-            if enforce_trust:
-                if not self.ss.trusted():
-                    raise RicciError, 'ricci\'s certificate is not trusted'
-        except Exception, e:
-            raise RicciError, 'Error setting up SSL for connection to %s: %s' \
-                % (self.__hostname, str(e))
-        except:
-            raise RicciError, 'Error setting up SSL for connection to %s' \
-                % self.__hostname
-        
-        # receive ricci header
-        hello = self.__receive(self.__timeout_init)
-        try:
-            luci_log.debug_verbose('RC:init0: Received header from %s: \"%s\"' \
-                % (self.__hostname, hello.toxml()))
-        except:
-            pass
-        
-        self.__authed = hello.firstChild.getAttribute('authenticated') == 'true'
-        self.__cluname = hello.firstChild.getAttribute('clustername')
-        self.__clualias = hello.firstChild.getAttribute('clusteralias')
-        self.__reported_hostname = hello.firstChild.getAttribute('hostname')
-        self.__os = hello.firstChild.getAttribute('os')
-        self.__dom0 = hello.firstChild.getAttribute('xen_host') == 'true'
-        
-    def hostname(self):
-        luci_log.debug_verbose('RC:hostname: [auth %d] reported hostname = %s' \
-            % (self.__authed, self.__hostname))
-        return self.__hostname
-    def authed(self):
-        luci_log.debug_verbose('RC:authed: reported authed = %d for %s' \
-            % (self.__authed, self.__hostname))
-        return self.__authed
-    def system_name(self):
-        luci_log.debug_verbose('RC:system_name: [auth %d] reported system_name = %s for %s' \
-            % (self.__authed, self.__reported_hostname, self.__hostname))
-        return self.__reported_hostname
-    def cluster_info(self):
-        luci_log.debug_verbose('RC:cluster_info: [auth %d] reported cluster_info = (%s,%s) for %s' \
-            % (self.__authed, self.__cluname, self.__clualias, self.__hostname))
-        return (self.__cluname, self.__clualias)
-    def os(self):
-        luci_log.debug_verbose('RC:os: [auth %d] reported os = %s for %s' \
-            % (self.__authed, self.__os, self.__hostname))
-        return self.__os
-    def dom0(self):
-        luci_log.debug_verbose('RC:dom0: [auth %d] reported dom0 = %s for %s' \
-            % (self.__authed, self.__dom0, self.__hostname))
-        return self.__dom0
-
-    def fingerprint(self):
-        return self.ss.peer_fingerprint()
-
-    def trust(self):
-        return self.ss.trust()
-
-    def untrust(self):
-        return self.ss.untrust()
-
-    def trusted(self):
-        return self.ss.trusted()
-
-    def auth(self, password):
-        if self.authed():
-            luci_log.debug_verbose('RC:auth0: already authenticated to %s' \
-                % self.__hostname)
-            return True
-        
-        # send request
-        doc = minidom.Document()
-        ricci = doc.createElement("ricci")
-        ricci.setAttribute("version", "1.0")
-        ricci.setAttribute("function", "authenticate")
-        ricci.setAttribute("password", password)
-        doc.appendChild(ricci)
-        self.__send(doc, self.__timeout_auth)
-        
-        # receive response
-        resp = self.__receive(self.__timeout_auth)
-        self.__authed = resp.firstChild.getAttribute('authenticated') == 'true'
-
-        if self.__authed:
-            try:
-                self.__cluname = resp.firstChild.getAttribute('clustername')
-                self.__clualias = resp.firstChild.getAttribute('clusteralias')
-                self.__reported_hostname = resp.firstChild.getAttribute('hostname')
-                self.__os = resp.firstChild.getAttribute('os')
-                self.__dom0 = resp.firstChild.getAttribute('xen_host') == 'true'
-            except:
-                pass
-        luci_log.debug_verbose('RC:auth1: auth call returning %d' \
-            % self.__authed)
-        return self.__authed
-
-    def unauth(self):
-        doc = minidom.Document()
-        ricci = doc.createElement('ricci')
-        ricci.setAttribute('version', '1.0')
-        ricci.setAttribute('function', 'unauthenticate')
-        doc.appendChild(ricci)
-        self.__send(doc, self.__timeout_auth)
-        resp = self.__receive(self.__timeout_auth)
-
-        luci_log.debug_verbose('RC:unauth0: trying to unauthenticate to %s' \
-            % self.__hostname)
-
-        try:
-            ret = resp.firstChild.getAttribute('success')
-            luci_log.debug_verbose('RC:unauth1: unauthenticate returned %s for %s' \
-                % (ret, self.__hostname))
-            if ret != '0':
-                raise Exception, 'Invalid response'
-            try:
-                self.ss.untrust()
-            except:
-                pass
-        except:
-            errstr = 'Error authenticating to host %s: %s' \
-                        % (self.__hostname, str(ret))
-            luci_log.debug_verbose('RC:unauth2: %s' % errstr)
-            raise RicciError, errstr
-        return True
-
-
-    def process_batch(self, batch_xml, async=False):
-        try:
-            luci_log.debug_verbose('RC:PB0: [auth=%d] to %s for batch %s [async=%d]' \
-                % (self.__authed, self.__hostname, batch_xml.toxml(), async))
-        except:
-            pass
-
-        if not self.authed():
-            raise RicciError, 'not authenticated to host %s' % self.__hostname
-        
-        # construct request
-        doc = minidom.Document()
-        ricci = doc.createElement("ricci")
-        ricci.setAttribute("version", "1.0")
-        ricci.setAttribute("function", "process_batch")
-        async_str = None
-        if async:
-            async_str = 'true'
-        else:
-            async_str = 'false'
-        ricci.setAttribute("async", async_str)
-        doc.appendChild(ricci)
-        ricci.appendChild(batch_xml.cloneNode(True))
-        
-        # send request
-        try:
-            self.__send(doc, self.__timeout_short)
-        except Exception, e:
-            luci_log.debug_verbose('RC:PB1: Error sending XML \"%s\" to host %s' \
-                % (doc.toxml(), self.__hostname))
-            raise RicciError, 'Error sending XML to host %s: %s' \
-                    % (self.__hostname, str(e))
-        except:
-            raise RicciError, 'Error sending XML to host %s' % self.__hostname
-        
-        # receive response
-        doc = self.__receive(self.__timeout_long)
-        try:
-            luci_log.debug_verbose('RC:PB2: received from %s XML \"%s\"' \
-                % (self.__hostname, doc.toxml()))
-        except:
-            pass
+	def __init__(self, hostname, enforce_trust=True, port=11111):
+		self.__hostname = hostname
+		self.__port = port
+		
+		self.__timeout_init  = 4
+		self.__timeout_auth  = 4
+		self.__timeout_short = 6
+		self.__timeout_long  = 600
+		
+		self.__privkey_file = '%sprivkey.pem' % CERTS_DIR_PATH
+		self.__cert_file = '%scacert.pem' % CERTS_DIR_PATH
+		
+		try:
+			self.ss = SSLSocket(self.__hostname,
+								self.__port,
+								self.__timeout_init)
+			if enforce_trust:
+				if not self.ss.trusted():
+					raise RicciError, 'ricci\'s certificate is not trusted'
+		except Exception, e:
+			raise RicciError, 'Error setting up SSL for connection to %s: %s' \
+				% (self.__hostname, str(e))
+		except:
+			raise RicciError, 'Error setting up SSL for connection to %s' \
+				% self.__hostname
+		
+		# receive ricci header
+		hello = self.__receive(self.__timeout_init)
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:init0: recv header from %s: \"%s\"' \
+					% (self.__hostname, hello.toxml()))
+		except:
+			pass
+		
+		self.__authed = hello.firstChild.getAttribute('authenticated') == 'true'
+		self.__cluname = hello.firstChild.getAttribute('clustername')
+		self.__clualias = hello.firstChild.getAttribute('clusteralias')
+		self.__reported_hostname = hello.firstChild.getAttribute('hostname')
+		self.__os = hello.firstChild.getAttribute('os')
+		self.__dom0 = hello.firstChild.getAttribute('xen_host') == 'true'
+		
+	def hostname(self):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:hostname: [auth %d] hostname = %s' \
+				% (self.__authed, self.__hostname))
+		return self.__hostname
+
+	def authed(self):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:authed: reported authed = %d for %s' \
+				% (self.__authed, self.__hostname))
+		return self.__authed
+
+	def system_name(self):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:system_name: [auth %d] system_name = %s for %s' % (self.__authed, self.__reported_hostname, self.__hostname))
+		return self.__reported_hostname
+
+	def cluster_info(self):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:cluster_info: [auth %d] reported cluster_info = (%s,%s) for %s' % (self.__authed, self.__cluname, self.__clualias, self.__hostname))
+		return (self.__cluname, self.__clualias)
+
+	def os(self):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:os: [auth %d] reported os = %s for %s' \
+				% (self.__authed, self.__os, self.__hostname))
+		return self.__os
+
+	def dom0(self):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:dom0: [auth %d] dom0 = %s for %s' \
+				% (self.__authed, self.__dom0, self.__hostname))
+		return self.__dom0
+
+	def fingerprint(self):
+		fp = self.ss.peer_fingerprint()
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:fp: [auth %d] fp for %s = %s' \
+				% (self.__authed, self.__hostname, fp))
+		return fp
+
+	def trust(self):
+		return self.ss.trust()
+
+	def untrust(self):
+		return self.ss.untrust()
+
+	def trusted(self):
+		return self.ss.trusted()
+
+	def auth(self, password):
+		if self.authed():
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:auth0: already authenticated to %s' \
+					% self.__hostname)
+			return True
+		
+		# send request
+		doc = minidom.Document()
+		ricci = doc.createElement("ricci")
+		ricci.setAttribute("version", "1.0")
+		ricci.setAttribute("function", "authenticate")
+		ricci.setAttribute("password", password)
+		doc.appendChild(ricci)
+		self.__send(doc, self.__timeout_auth)
+		
+		# receive response
+		resp = self.__receive(self.__timeout_auth)
+		self.__authed = resp.firstChild.getAttribute('authenticated') == 'true'
+
+		if self.__authed:
+			try:
+				self.__cluname = resp.firstChild.getAttribute('clustername')
+				self.__clualias = resp.firstChild.getAttribute('clusteralias')
+				self.__reported_hostname = resp.firstChild.getAttribute('hostname')
+				self.__os = resp.firstChild.getAttribute('os')
+				self.__dom0 = resp.firstChild.getAttribute('xen_host') == 'true'
+			except:
+				pass
+			
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:auth1: auth call returning %d' \
+				% self.__authed)
+		return self.__authed
+
+	def unauth(self):
+		doc = minidom.Document()
+		ricci = doc.createElement('ricci')
+		ricci.setAttribute('version', '1.0')
+		ricci.setAttribute('function', 'unauthenticate')
+		doc.appendChild(ricci)
+		self.__send(doc, self.__timeout_auth)
+		resp = self.__receive(self.__timeout_auth)
+
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:unauth0: unauthenticate %s' \
+				% self.__hostname)
+
+		try:
+			ret = resp.firstChild.getAttribute('success')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:unauth1: unauthenticate %s for %s' \
+					% (ret, self.__hostname))
+			if ret != '0':
+				raise Exception, 'Invalid response'
+
+			try:
+				self.ss.untrust()
+			except:
+				pass
+		except:
+			errstr = 'Error unauthenticating to host %s: %s' \
+						% (self.__hostname, str(ret))
+
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:unauth2: %s' % errstr)
+			raise RicciError, errstr
+		return True
+
+
+	def process_batch(self, batch_xml, async=False):
+		if LUCI_DEBUG_MODE is True:
+			try:
+				luci_log.debug_verbose('RC:PB0: [auth=%d] to %s for batch %s [async=%d]' % (self.__authed, self.__hostname, batch_xml.toxml(), async))
+			except:
+				pass
+
+		if not self.authed():
+			raise RicciError, 'not authenticated to host %s' % self.__hostname
+		
+		# construct request
+		doc = minidom.Document()
+		ricci = doc.createElement("ricci")
+		ricci.setAttribute("version", "1.0")
+		ricci.setAttribute("function", "process_batch")
+		async_str = None
+		if async:
+			async_str = 'true'
+		else:
+			async_str = 'false'
+		ricci.setAttribute("async", async_str)
+		doc.appendChild(ricci)
+		ricci.appendChild(batch_xml.cloneNode(True))
+		
+		# send request
+		try:
+			self.__send(doc, self.__timeout_short)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:PB1: Error sending XML \"%s\" to host %s' % (doc.toxml(), self.__hostname))
+			raise RicciError, 'Error sending XML to host %s: %s' \
+					% (self.__hostname, str(e))
+		except:
+			raise RicciError, 'Error sending XML to host %s' % self.__hostname
+		
+		# receive response
+		doc = self.__receive(self.__timeout_long)
+		if LUCI_DEBUG_MODE is True:
+			try:
+				luci_log.debug_verbose('RC:PB2: received from %s XML \"%s\"' \
+					% (self.__hostname, doc.toxml()))
+			except:
+				pass
  
-        if doc.firstChild.getAttribute('success') != '0':
-            luci_log.debug_verbose('RC:PB3: batch command failed')
-            raise RicciError, 'The last ricci command to host %s failed' \
-                    % self.__hostname
-        
-        batch_node = None
-        for node in doc.firstChild.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                if node.nodeName == 'batch':
-                    batch_node = node.cloneNode(True)
-        if batch_node == None:
-            luci_log.debug_verbose('RC:PB4: batch node missing <batch/>')
-            raise RicciError, 'missing <batch/> in ricci\'s response from "%s"' \
-                    % self.__hostname
-
-        return batch_node
-    
-    def batch_run(self, batch_str, async=True):
-        try:
-            batch_xml_str = '<?xml version="1.0" ?><batch>%s</batch>' % batch_str
-            luci_log.debug_verbose('RC:BRun0: attempting batch "%s" for host "%s"' % (batch_xml_str, self.__hostname))
-            batch_xml = minidom.parseString(batch_xml_str).firstChild
-        except Exception, e:
-            luci_log.debug_verbose('RC:BRun1: received invalid batch XML for %s: "%s": "%s"' % (self.__hostname, batch_xml_str, str(e)))
-            raise RicciError, 'batch XML is malformed'
-
-        try:
-            ricci_xml = self.process_batch(batch_xml, async)
-            try:
-                luci_log.debug_verbose('RC:BRun2: received XML "%s" from host %s in response to batch command.' % (ricci_xml.toxml(), self.__hostname))
-            except:
-                pass
-        except:
-            luci_log.debug_verbose('RC:BRun3: An error occurred while trying to process the batch job: "%s"' % batch_xml_str)
-            return None
-
-        doc = minidom.Document()
-        doc.appendChild(ricci_xml)
-        return doc
-
-    def batch_report(self, batch_id):
-        luci_log.debug_verbose('RC:BRep0: [auth=%d] asking for batchid# %s for host %s' % (self.__authed, batch_id, self.__hostname))
-
-        if not self.authed():
-            raise RicciError, 'Not authenticated to host %s' % self.__hostname
-        
-        # construct request
-        doc = minidom.Document()
-        ricci = doc.createElement("ricci")
-        ricci.setAttribute("version", "1.0")
-        ricci.setAttribute("function", "batch_report")
-        ricci.setAttribute("batch_id", str(batch_id))
-        doc.appendChild(ricci)
-        
-        # send request
-        self.__send(doc, self.__timeout_short)
+		if doc.firstChild.getAttribute('success') != '0':
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:PB3: batch command failed')
+			raise RicciError, 'The last ricci command to host %s failed' \
+					% self.__hostname
+		
+		batch_node = None
+		for node in doc.firstChild.childNodes:
+			if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+				if node.nodeName == 'batch':
+					batch_node = node.cloneNode(True)
+		if batch_node == None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:PB4: batch node missing <batch/>')
+			raise RicciError, 'missing <batch/> in ricci response from "%s"' \
+					% self.__hostname
+
+		return batch_node
+	
+	def batch_run(self, batch_str, async=True):
+		try:
+			batch_xml_str = '<?xml version="1.0" ?><batch>%s</batch>' \
+				% batch_str
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:BRun0: attempting batch "%s" for host "%s"' % (batch_xml_str, self.__hostname))
+			batch_xml = minidom.parseString(batch_xml_str).firstChild
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:BRun1: received invalid batch XML for %s: "%s": "%s"' % (self.__hostname, batch_xml_str, str(e)))
+			raise RicciError, 'batch XML is malformed'
+
+		try:
+			ricci_xml = self.process_batch(batch_xml, async)
+			if LUCI_DEBUG_MODE is True:
+				try:
+					luci_log.debug_verbose('RC:BRun2: received XML "%s" from host %s in response to batch command.' % (ricci_xml.toxml(), self.__hostname))
+				except:
+					pass
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:BRun3: An error occurred while trying to process the batch job: "%s"' % batch_xml_str)
+			return None
+
+		doc = minidom.Document()
+		doc.appendChild(ricci_xml)
+		return doc
+
+	def batch_report(self, batch_id):
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:BRep0: [auth=%d] asking for batchid# %s for host %s' % (self.__authed, batch_id, self.__hostname))
+
+		if not self.authed():
+			raise RicciError, 'Not authenticated to host %s' % self.__hostname
+		
+		# construct request
+		doc = minidom.Document()
+		ricci = doc.createElement("ricci")
+		ricci.setAttribute("version", "1.0")
+		ricci.setAttribute("function", "batch_report")
+		ricci.setAttribute("batch_id", str(batch_id))
+		doc.appendChild(ricci)
+		
+		# send request
+		self.__send(doc, self.__timeout_short)
  
-       
-        # receive response
-        doc = self.__receive(self.__timeout_short)
-        if doc.firstChild.getAttribute('success') == '12':
-            return None
-        if doc.firstChild.getAttribute('success') != '0':
-            raise RicciError, 'Error while retrieving batch report for batch #%d from host %s' % (batch_id, self.__hostname)
-        batch_node = None
-        for node in doc.firstChild.childNodes:
-            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-                if node.nodeName == 'batch':
-                    batch_node = node.cloneNode(True)
-        if batch_node == None:
-            raise RicciError, 'Missing <batch/> in ricci\'s response from host %s' % self.__hostname
-        return batch_node
-    
-    
-    
-    
-    
-    def __send(self, xml_doc, timeout):
-        buff = '%s\n' % xml_doc.toxml()
-        try:
-            self.ss.send(buff, timeout)
-        except Exception, e:
-            luci_log.debug_verbose('RC:send0: Error sending XML "%s" to %s: %s' % (buff, self.__hostname, str(e)))
-            raise RicciError, 'write error while sending XML to host %s' \
-                  % self.__hostname
-        except:
-            raise RicciError, 'write error while sending XML to host %s' \
-                  % self.__hostname
-        try:
-            luci_log.debug_verbose('RC:send1: Sent XML "%s" to host %s' \
-                % (xml_doc.toxml(), self.__hostname))
-        except:
-            pass
-        return
-    
-    def __receive(self, timeout):
-        doc = None
-        xml_in = ''
-        try:
-            xml_in = self.ss.recv(timeout)
-        except Exception, e:
-            luci_log.debug_verbose('RC:recv0: Error reading data from %s: %s' \
-                % (self.__hostname, str(e)))
-            raise RicciError, 'Error reading data from host %s' % self.__hostname
-        except:
-            raise RicciError, 'Error reading data from host %s' % self.__hostname
-        luci_log.debug_verbose('RC:recv1: Received XML "%s" from host %s' \
-            % (xml_in, self.__hostname))
-
-        try:
-            if doc == None:
-                doc = minidom.parseString(xml_in)
-        except Exception, e:
-            luci_log.debug_verbose('RC:recv2: Error parsing XML "%s" from %s' \
-                % (xml_in, str(e)))
-            raise RicciError, 'Error parsing XML from host %s: %s' \
-                    % (self.__hostname, str(e))
-
-        if not doc or not doc.firstChild:
-            raise RicciError, \
-                    'Error an empty response was received from host %s' \
-                    % self.__hostname
-        
-        try:        
-            if doc.firstChild.nodeName != 'ricci':
-                luci_log.debug_verbose('RC:recv3: Expecting "ricci" got XML "%s" from %s' % (xml_in, self.__hostname))
-                raise Exception, 'Expecting first XML child node to be "ricci"'
-        except Exception, e:
-            raise RicciError, 'Invalid XML ricci response from host %s' \
-                    % self.__hostname
-        
-        return doc
-    
-
-
+	   
+		# receive response
+		doc = self.__receive(self.__timeout_short)
+		if doc.firstChild.getAttribute('success') == '12':
+			return None
+		if doc.firstChild.getAttribute('success') != '0':
+			raise RicciError, 'Error while retrieving batch report for batch #%d from host %s' % (batch_id, self.__hostname)
+		batch_node = None
+		for node in doc.firstChild.childNodes:
+			if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+				if node.nodeName == 'batch':
+					batch_node = node.cloneNode(True)
+		if batch_node == None:
+			raise RicciError, 'Missing <batch/> in ricci\'s response from host %s' % self.__hostname
+		return batch_node
+
+
+	def __send(self, xml_doc, timeout):
+		buff = '%s\n' % xml_doc.toxml()
+		try:
+			self.ss.send(buff, timeout)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:send0: Error sending XML "%s" to %s: %s' % (buff, self.__hostname, str(e)))
+			raise RicciError, 'write error while sending XML to host %s' \
+				  % self.__hostname
+		except:
+			raise RicciError, 'write error while sending XML to host %s' \
+				  % self.__hostname
+		if LUCI_DEBUG_MODE is True:
+			try:
+				luci_log.debug_verbose('RC:send1: Sent XML "%s" to host %s' \
+					% (xml_doc.toxml(), self.__hostname))
+			except:
+				pass
+		return
+	
+	def __receive(self, timeout):
+		doc = None
+		xml_in = ''
+		try:
+			xml_in = self.ss.recv(timeout)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:recv0: Error reading from %s: %s' \
+					% (self.__hostname, str(e)))
+			raise RicciError, 'Error reading data from host %s' \
+				% self.__hostname
+		except:
+			raise RicciError, 'Error reading data from host %s' \
+				% self.__hostname
+
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:recv1: Received XML "%s" from host %s' \
+				% (xml_in, self.__hostname))
+
+		try:
+			if doc == None:
+				doc = minidom.parseString(xml_in)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('RC:recv2: error parsing XML "%s": %s' \
+					% (xml_in, str(e)))
+			raise RicciError, 'Error parsing XML from host %s: %s' \
+					% (self.__hostname, str(e))
+
+		if not doc or not doc.firstChild:
+			raise RicciError, 'Error: an empty response was received from host %s' % self.__hostname
+		
+		try:		
+			if doc.firstChild.nodeName != 'ricci':
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('RC:recv3: Expecting "ricci" got XML "%s" from %s' % (xml_in, self.__hostname))
+				raise Exception, 'Expecting first XML child node to be "ricci"'
+		except Exception, e:
+			raise RicciError, 'Invalid XML ricci response from host %s' \
+					% self.__hostname
+		return doc
+	
 def get_ricci_communicator(self, hostname, allowed_systems):
-    if not self.access_to_host_allowed(hostname, allowed_systems):
-        return None
-    
-    try:
-        return RicciCommunicator(hostname)
-    except Exception, e:
-        luci_log.debug_verbose('RC:GRC0: Error creating a ricci connection to %s: %s' % (hostname, str(e)))
-        return None
+	if not self.access_to_host_allowed(hostname, allowed_systems):
+		return None
+	
+	try:
+		return RicciCommunicator(hostname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:GRC0: Error creating a ricci connection to %s: %s' % (hostname, str(e)))
+		return None
 
 def ricci_get_called_hostname(self, ricci):
-    return ricci.hostname()
+	return ricci.hostname()
 def ricci_get_reported_hostname(self, ricci):
-    return ricci.system_name()
+	return ricci.system_name()
 def ricci_get_os(self, ricci):
-    return ricci.os()
+	return ricci.os()
 def ricci_get_dom0(self, ricci):
-    return ricci.dom0()
+	return ricci.dom0()
 def ricci_get_cluster_info(self, ricci):
-    return ricci.cluster_info()
+	return ricci.cluster_info()
 def ricci_get_authenticated(self, ricci):
-    return ricci.authed()
+	return ricci.authed()
 def ricci_authenticate(self, ricci, password):
-    return ricci.auth(password)
+	return ricci.auth(password)
 def ricci_unauthenticate(self, ricci):
-    return ricci.unauth()
-
-
-
-
-
-
-
+	return ricci.unauth()
 
 ########## helpers to process batch as returned by ricci #############
 
-
-
-
-
 # check the status of batch
 # returns (int num, int total)
 # * total:
-#          total number of modules in batch
+#		  total number of modules in batch
 # * num:
-#          if num == total: 
-#             all modules in the batch completed successfuly
-#          if num > 0: 
-#             last seq. number of module that successfuly finished
-#          if num < 0: 
-#             module (-num) failed (next module won't be processed)
-def batch_status(batch_xml):
-    if batch_xml.nodeName != 'batch':
-        try:
-            luci_log.debug_verbose('RC:BS0: Expecting an XML batch node. Got \"%s\"' \
-                % batch_xml.toxml())
-        except:
-            pass
-        raise RicciError, 'Not an XML batch node'
-    total = 0
-    last  = 0
-    for node in batch_xml.childNodes:
-        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-            if node.nodeName == 'module':
-                total = total + 1
-                status = node.getAttribute('status')
-                if status == '0':
-                    # success
-                    last = last + 1
-                elif status == '3' or status == '4':
-                    # failure
-                    last = last + 1
-                    last = last - 2 * last
-    try:
-        luci_log.debug_verbose('RC:BS1: Returning (%d, %d) for batch_status("%s")' % (last, total, batch_xml.toxml()))
-    except:
-        luci_log.debug_verbose('RC:BS2: Returning last, total')
-
-    return (last, total)
-
+#		  if num == total: 
+#			 all modules in the batch completed successfully
+#		  if num > 0: 
+#			 last seq. number of module that successfully finished
+#		  if num < 0: 
+#			 module (-num) failed (next module won't be processed)
 
+def batch_status(batch_xml):
+	if batch_xml.nodeName != 'batch':
+		if LUCI_DEBUG_MODE is True:
+			try:
+				luci_log.debug_verbose('RC:BS0: Expecting an XML batch node. Got \"%s\"' % batch_xml.toxml())
+			except:
+				pass
+		raise RicciError, 'Not an XML batch node'
+
+	total = 0
+	last  = 0
+	for node in batch_xml.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			if node.nodeName == 'module':
+				total = total + 1
+				status = node.getAttribute('status')
+				if status == '0':
+					# success
+					last = last + 1
+				elif status == '3' or status == '4':
+					# failure
+					last = last + 1
+					last = last - 2 * last
+	try:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:BS1: Returning (%d, %d) for batch_status("%s")' % (last, total, batch_xml.toxml()))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:BS2: error %r: (%d, %d)' % \
+				(e, last, total))
+	return (last, total)
 
 # extract error_code from module's response
 # * module_num:
-#              1-based seq. number of module to process
+#			  1-based seq. number of module to process
 #
 # returns (int error_code, string error_msg)
 # * error_code: each module defines own error codes, which are >0
-#          -101 - in progress
-#          -102 - scheduled
-#          -103 - removed from schedule
-#          -104 - failed to execute module
+#		  -101 - in progress
+#		  -102 - scheduled
+#		  -103 - removed from schedule
+#		  -104 - failed to execute module
 # 
-#          >-3  - module executed. Following codes are defined:
-#             -2   - API error
-#             -1   - undefined error occured (msg not necesarily very informative)
-#             0    - no error (msg is empty string)
-#             >0   - predefined error has occured
-#                        (check respective API, msg will be fully descriptive)
+#		  >-3  - module executed. Following codes are defined:
+#			 -2   - API error
+#			 -1   - undefined error occurred (msg not necessarily very informative)
+#			 0    - no error (msg is empty string)
+#			 >0   - predefined error has occurred
+#						(check respective API, msg will be fully descriptive)
 # * error_msg:  error message
-def extract_module_status(batch_xml, module_num=1):
-    if batch_xml.nodeName != 'batch':
-        luci_log.debug_verbose('RC:EMS0: Expecting "batch" got "%s"' % batch_xml.toxml())
-        raise RicciError, 'Invalid XML node; expecting a batch node'
-
-    c = 0
-    for node in batch_xml.childNodes:
-        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
-            if node.nodeName == 'module':
-                module_xml = node
-                c = c + 1
-                if c == module_num:
-                    status = module_xml.getAttribute('status')
-                    if status == '0' or status == '4':
-                        # module executed, dig deeper into request
-                        for node_i in module_xml.childNodes:
-                            if node_i.nodeType == xml.dom.Node.ELEMENT_NODE:
-                                if node_i.nodeName == 'API_error':
-                                    return -2, 'API error'
-                                elif node_i.nodeName == 'response':
-                                    for node_j in node_i.childNodes:
-                                        if node_j.nodeType == xml.dom.Node.ELEMENT_NODE:
-                                            if node_j.nodeName == 'function_response':
-                                                code = -11111111
-                                                msg  = 'BUG'
-                                                for var in node_j.childNodes:
-                                                    if var.nodeType == xml.dom.Node.ELEMENT_NODE:
-                                                        if var.nodeName == 'var':
-                                                            if var.getAttribute('name') == 'success' and var.getAttribute('value') == 'true':
-                                                                return 0, ''
-                                                            elif var.getAttribute('name') == 'error_code':
-                                                                code = int(var.getAttribute('value'))
-                                                            elif var.getAttribute('name') == 'error_description':
-                                                                msg = var.getAttribute('value')
-                                                return code, msg
-                                            
-                    elif status == '1':
-                        return -102, 'module scheduled for execution'
-                    elif status == '2':
-                        return -101, 'module is being executed'
-                    elif status == '3':
-                        return -104, 'failed to locate/execute module'
-                    elif status == '5':
-                        return -103, 'module removed from schedule'
-    
-    raise RicciError, 'no %dth module in the batch, or malformed response' % module_num
 
+def extract_module_status(batch_xml, module_num=1):
+	if batch_xml.nodeName != 'batch':
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RC:EMS0: Expecting "batch" got "%s"' \
+				% batch_xml.toxml())
+		raise RicciError, 'Invalid XML node; expecting a batch node'
+
+	c = 0
+	for node in batch_xml.childNodes:
+		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+			if node.nodeName == 'module':
+				module_xml = node
+				c = c + 1
+				if c == module_num:
+					status = module_xml.getAttribute('status')
+					if status == '0' or status == '4':
+						# module executed, dig deeper into request
+						for node_i in module_xml.childNodes:
+							if node_i.nodeType == xml.dom.Node.ELEMENT_NODE:
+								if node_i.nodeName == 'API_error':
+									return -2, 'API error'
+								elif node_i.nodeName == 'response':
+									for node_j in node_i.childNodes:
+										if node_j.nodeType == xml.dom.Node.ELEMENT_NODE:
+											if node_j.nodeName == 'function_response':
+												code = -11111111
+												msg  = 'BUG'
+												for var in node_j.childNodes:
+													if var.nodeType == xml.dom.Node.ELEMENT_NODE:
+														if var.nodeName == 'var':
+															if var.getAttribute('name') == 'success' and var.getAttribute('value') == 'true':
+																return 0, ''
+															elif var.getAttribute('name') == 'error_code':
+																code = int(var.getAttribute('value'))
+															elif var.getAttribute('name') == 'error_description':
+																msg = var.getAttribute('value')
+												return code, msg
+											
+					elif status == '1':
+						return -102, 'module scheduled for execution'
+					elif status == '2':
+						return -101, 'module is being executed'
+					elif status == '3':
+						return -104, 'failed to locate/execute module'
+					elif status == '5':
+						return -103, 'module removed from schedule'
+	
+	raise RicciError, 'no module #%d in the batch, or malformed response' % module_num
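
Taken together, these helpers support a simple polling loop against a running
batch. A minimal sketch of such a caller (hypothetical usage: the host name,
the batch id, and the assumption that the request built above is exposed as a
batch_report() method are mine, not part of this commit):

	import time
	from ricci_communicator import RicciCommunicator, batch_status, extract_module_status

	rc = RicciCommunicator('node1.example.com')	# hypothetical ricci host
	batch_id = '42'		# id of a previously submitted batch
	report = None
	while True:
		report = rc.batch_report(batch_id)	# assumed name of the request above
		if report is None:
			break	# ricci answered success='12': batch id not found
		last, total = batch_status(report)
		if last < 0 or last == total:
			break	# a module failed, or all modules completed
		time.sleep(2)
	if report is not None:
		# 0 means success; negative codes are the states documented above
		code, msg = extract_module_status(report, 1)
		print 'module 1: code %d, message "%s"' % (code, msg)
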
--- conga/luci/site/luci/Extensions/system_adapters.py	2007/05/03 20:16:38	1.2.2.1
+++ conga/luci/site/luci/Extensions/system_adapters.py	2007/05/14 18:00:14	1.2.2.2
@@ -1,12 +1,9 @@
 from ricci_communicator import RicciCommunicator
 from RicciQueries import list_services, updateServices, svc_manage
-from LuciSyslog import LuciSyslog
+from LuciSyslog import get_logger
 from xml.dom import minidom
 
-try: 
-	luci_log = LuciSyslog()
-except:
-	pass
+luci_log = get_logger()
 
 def get_sys_svc_list(self, request, hostname):
 	try:
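
get_logger() itself is not shown in this patch. Judging only from the call
sites, a minimal sketch of what LuciSyslog.get_logger() plausibly provides is
a shared LuciSyslog instance with a no-op fallback (an assumption, not the
actual implementation):

	# hypothetical sketch, inside LuciSyslog.py where LuciSyslog is defined
	_logger = None

	class _NullLogger:
		# swallows log calls when syslog is unavailable
		def debug_verbose(self, msg):
			pass
		debug = info = debug_verbose

	def get_logger():
		global _logger
		if _logger is None:
			try:
				_logger = LuciSyslog()
			except:
				_logger = _NullLogger()
		return _logger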




* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-05-18  5:23 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-05-18  5:23 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-18 05:23:55

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py 
	                           LuciClusterInfo.py LuciDB.py 
	                           LuciZope.py RicciQueries.py 
	                           cluster_adapters.py 
	                           homebase_adapters.py 
	                           ricci_communicator.py 
	                           system_adapters.py 

Log message:
	Better debug log messages.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.3&r2=1.1.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.5&r2=1.1.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.8&r2=1.1.2.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.6&r2=1.1.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.7&r2=1.255.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.50.2.5&r2=1.50.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.25.2.4&r2=1.25.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/system_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.2.2.3&r2=1.2.2.4

--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/15 21:42:21	1.1.2.3
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/18 05:23:55	1.1.2.4
@@ -22,7 +22,8 @@
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('PCC0: getClusterName: %r' % e)
+			luci_log.debug_verbose('PCC0: getClusterName: %r %s' \
+				% (e, str(e)))
 		errors.append('Unable to determine cluster name')
 		return (False, { 'errors': errors, 'messages': messages })
 
@@ -43,7 +44,7 @@
 			raise Exception, 'The new cluster configuration is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('PCC2: %r' % e)
+			luci_log.debug_verbose('PCC2: %r %s' % (e, str(e)))
 		errors.append(repr(e))
 		return (False, { 'errors': errors, 'messages': messages })
 
@@ -61,5 +62,6 @@
 				clustername)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('PCC4: set_node_flag: %r' % e)
+			luci_log.debug_verbose('PCC4: set_node_flag: %r %s' \
+				% (e, str(e)))
 	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
--- conga/luci/site/luci/Extensions/Attic/LuciClusterInfo.py	2007/05/18 02:36:59	1.1.2.5
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterInfo.py	2007/05/18 05:23:55	1.1.2.6
@@ -32,7 +32,7 @@
 		return map(lambda x: str(x.getName()), model.getNodes())
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getnodes0: %r' % e)
+			luci_log.debug_verbose('getnodes0: %r %s' % (e, str(e)))
 	return []
 
 def getResourceInfo(model, name, res=None):
@@ -79,14 +79,14 @@
 		results.append(vals)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSM0: %r' % e)
+			luci_log.debug_verbose('GCSM0: %r %s' % (e, str(e)))
 		return None
 
 	try:
 		nodelist = model.getNodes()
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSM1: %r' % e)
+			luci_log.debug_verbose('GCSM1: %r %s' % (e, str(e)))
 		return None
 
 	for node in nodelist:
@@ -123,7 +123,7 @@
 			raise Exception, 'doc is None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCS0: error: %r' % e)
+			luci_log.debug_verbose('GCS0: error: %r %s' % (e, str(e)))
 		doc = None
 
 	if doc is None and not cluname:
@@ -135,7 +135,7 @@
 			return cinfo
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GCS1: %r' % e)
+				luci_log.debug_verbose('GCS1: %r %s' % (e, str(e)))
 			doc = None
 
 	if not doc:
@@ -161,7 +161,7 @@
 			return cinfo
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %r' % e)
+				luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %r %s' % (e, str(e)))
 		return []
 
 	results = list()
@@ -463,7 +463,7 @@
 		fdom = model.getFailoverDomainByName(request['fdomname'])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getFdomInfo0: %r' % e)
+			luci_log.debug_verbose('getFdomInfo0: %r %s' % (e, str(e)))
 		return fhash
 
 	fhash['name'] = fdom.getName()
@@ -589,7 +589,7 @@
 			req.SESSION.set('model', model)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %r' % (cluname, e))
+				luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %r %s' % (cluname, e, str(e)))
 			return {}
 	else:
 		totem = model.getTotemPtr()
@@ -827,7 +827,7 @@
 		nodename = request['nodename']
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getNodeInfo0: %r' % e)
+			luci_log.debug_verbose('getNodeInfo0: %r %s' % (e, str(e)))
 		return {}
 
 	# extract correct node line from cluster status
@@ -1168,7 +1168,7 @@
 			nodename = request.form['nodename']
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %r' % e)
+				luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %r %s' % (e, str(e)))
 			return {}
 
 	# Here we need to get fences for a node - just the first two levels
@@ -1180,7 +1180,7 @@
 		node = model.retrieveNodeByName(nodename)
 	except GeneralError, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getFenceInfo3: unable to find node name "%s" in current node list: %r' % (str(nodename), e))
+			luci_log.debug_verbose('getFenceInfo3: unable to find node name "%s" in current node list: %r %s' % (str(nodename), e, str(e)))
 		return {}
 
 	fds = model.getFenceDevices()
@@ -1547,7 +1547,7 @@
 			raise Exception, 'getClusterConf returned None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r' % e)
+			luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r %s' % (e, str(e)))
 		return None
 
 	try:
@@ -1557,7 +1557,7 @@
 	except Exception, e:
 		try:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%r": %r' % (cluster_conf_node.toxml(), e))
+				luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%s": %r %s' % (cluster_conf_node.toxml(), e, str(e)))
 		except:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('GMB1: ModelBuilder failed')
@@ -1572,8 +1572,7 @@
 	rc = getRicciAgent(self, clustername)
 	if not rc:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GMFC0: unable to find a ricci agent for %s' \
-				% clustername)
+			luci_log.debug_verbose('GMFC0: unable to find a ricci agent for %s' % clustername)
 		return None
 
 	try:
@@ -1582,8 +1581,7 @@
 			raise Exception, 'model is none'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r' \
-				 % (clustername, e))
+			luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r %s' % (clustername, e, str(e)))
 		return None
 
 	return model
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/18 02:36:59	1.1.2.8
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/18 05:23:55	1.1.2.9
@@ -27,8 +27,8 @@
 		return cluster_node
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getClusterNode0: %s %s: %r' \
-				% (nodename, clustername, e))
+			luci_log.debug_verbose('getClusterNode0: %s %s: %r %s' \
+				% (nodename, clustername, e, str(e)))
 	return None
 
 def getStorageNode(self, nodename):
@@ -40,7 +40,8 @@
 		return storage_node
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getStorageNode0: %s: %r' % (nodename, e))
+			luci_log.debug_verbose('getStorageNode0: %s: %r %s' \
+				% (nodename, e, str(e)))
 	return None
 
 def testNodeStatus(node, flag_mask):
@@ -51,7 +52,7 @@
 		return flags & flag_mask != 0
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('testNodeStatus0: %r' % e)
+			luci_log.debug_verbose('testNodeStatus0: %r %s' % (e, str(e)))
 	return False
 
 def setNodeStatus(node, flag_mask):
@@ -65,7 +66,8 @@
 			node.manage_addProperty('flags', flag_mask, 'int')
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('setNodeStatus0: %r' % e)
+				luci_log.debug_verbose('setNodeStatus0: %r %s' \
+					% (e, str(e)))
 
 def clearNodeStatus(node, flag_mask):
 	try:
@@ -76,7 +78,8 @@
 			node.manage_changeProperties({ 'flags': flags & ~flag_mask })
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clearNodeStatus0: %r' % e)
+			luci_log.debug_verbose('clearNodeStatus0: %r %s' \
+				% (e, str(e)))
 
 def set_node_flag(self, cluname, agent, batchid, task, desc):
 	path = '%s%s' % (CLUSTER_FOLDER_PATH, cluname)
@@ -93,10 +96,10 @@
 		flag.manage_addProperty(TASKTYPE, task, 'string')
 		flag.manage_addProperty(FLAG_DESC, desc, 'string')
 	except Exception, e:
-		errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %r' \
-					% (batch_id, task, desc, objpath, e)
+		errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %s' \
+					% (batch_id, task, desc, objpath, str(e))
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose(errmsg)
+			luci_log.debug_verbose('SNF0: %r %s' % (e, errmsg))
 		raise Exception, errmsg
 
 def noNodeStatusPresent(self, nodefolder, flagname, hostname):
@@ -104,8 +107,8 @@
 		items = nodefolder.objectItems('ManagedSystem')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NNFP0: error getting flags for %s: %r' \
-				% (nodefolder[0], e))
+			luci_log.debug('NNFP0: error getting flags for %s: %r %s' \
+				% (nodefolder[0], e, str(e)))
 		return None
 
 	for item in items:
@@ -118,7 +121,8 @@
 			rc = RicciCommunicator(hostname)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.info('NNFP1: ricci error %s: %r' % (hostname, e))
+				luci_log.info('NNFP1: ricci error %s: %r %s' \
+					% (hostname, e, str(e)))
 			return None
 
 		if not rc.authed():
@@ -141,8 +145,7 @@
 				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.info('NNFP3: manage_delObjects for %s failed: %r' \
-						% (item[0], e))
+					luci_log.info('NNFP3: manage_delObjects for %s failed: %r %s' % (item[0], e, str(e)))
 				return None
 			return True
 		else:
@@ -158,8 +161,8 @@
 		objs = clusterfolder.objectItems('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('RNN0: error for %s/%s: %r' \
-				% (nodename, clustername, e))
+			luci_log.debug_verbose('RNN0: error for %s/%s: %r %s' \
+				% (nodename, clustername, e, str(e)))
 		return nodename
 
 	for obj in objs:
@@ -181,7 +184,7 @@
 			raise Exception, 'node list is empty'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r' % (clustername, e))
+			luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r %s' % (clustername, e, str(e)))
 		return 'Unable to find cluster nodes for %s' % clustername
 
 	try:
@@ -191,8 +194,8 @@
 			raise Exception, 'cluster node is none'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('RCC1: cant find cluster node for %s: %r' \
-				% (clustername, e))
+			luci_log.debug('RCC1: cant find cluster node for %s: %r %s' \
+				% (clustername, e, str(e)))
 		return 'Unable to find an entry for %s in the Luci database.' % clustername
 
 	try:
@@ -202,7 +205,7 @@
 	except Exception, e:
 		# Should we just create them all? Can this even happen?
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('RCC2: error: %r' % e)
+			luci_log.debug('RCC2: error: %r %s' % (e, str(e)))
 		return 'Unable to find database entries for any nodes in %s' % clustername
 
 	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
@@ -240,7 +243,8 @@
 				luci_log.debug_verbose('VCC3: deleted node %s' % i)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VCC4: delObjects: %s: %r' % (i, e))
+				luci_log.debug_verbose('VCC4: delObjects: %s: %r %s' \
+					% (i, e, str(e)))
 
 	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
 	for i in new_list:
@@ -252,8 +256,8 @@
 		except Exception, e:
 			messages.append('A new cluster node, "%s", is now a member of cluster "%s", but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clustername))
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r' \
-					% (clustername, i, e))
+				luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r %s' \
+					% (clustername, i, e, str(e)))
 
 	return messages
 
@@ -264,8 +268,8 @@
 		clusterfolder = self.restrictedTraverse(path)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r' \
-				% (path, e))
+			luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r %s' \
+				% (path, e, str(e)))
 		return None
 
 	for key in batch_map.keys():
@@ -286,7 +290,7 @@
 			flag.manage_addProperty(LAST_STATUS, 0, 'int')
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r' % (key, e))
+				luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
 
 def manageCluster(self, clustername, node_list, cluster_os):
 	clustername = str(clustername)
@@ -297,7 +301,7 @@
 			raise Exception, 'cannot find the cluster entry in the DB'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('MC0: %s: %r' % (clustername, e))
+			luci_log.debug_verbose('MC0: %s: %r %s' % (clustername, e, str(e)))
 		return 'Unable to create cluster %s: the cluster directory is missing.' % clustername
 
 	try:
@@ -317,28 +321,30 @@
 			raise Exception, 'unable to create the cluster DB entry for %s' % clustername
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('MC2: %s: %r' % (clustername, e))
-		return 'Unable to create cluster %s: %r' % (clustername, e)
+			luci_log.debug_verbose('MC2: %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to create cluster %s: %s' % (clustername, str(e))
 
 	try:
 		newCluster.manage_acquiredPermissions([])
 		newCluster.manage_role('View', ['Access Contents Information', 'View'])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('MC3: %s: %r' % (clustername, e))
+			luci_log.debug_verbose('MC3: %s: %r %s' % (clustername, e, str(e)))
 		try:
 			clusters.manage_delObjects([clustername])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('MC4: %s: %r' % (clustername, e))
-		return 'Unable to set permissions on new cluster: %s: %r' % (clustername, e)
+				luci_log.debug_verbose('MC4: %s: %r %s' \
+					% (clustername, e, str(e)))
+		return 'Unable to set permissions on new cluster: %s: %s' \
+			% (clustername, str(e))
 
 	try:
 		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('MC5: %s: %s: %r' \
-				% (clustername, cluster_os, e))
+			luci_log.debug_verbose('MC5: %s: %s: %r %s' \
+				% (clustername, cluster_os, e, str(e)))
 
 	for i in node_list:
 		host = node_list[i]['host']
@@ -355,14 +361,14 @@
 				clusters.manage_delObjects([clustername])
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('MC6: %s: %s: %r' \
-						% (clustername, host, e))
+					luci_log.debug_verbose('MC6: %s: %s: %r %s' \
+						% (clustername, host, e, str(e)))
 
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('MC7: %s: %s: %r' \
-					% (clustername, host, e))
-			return 'Unable to create cluster node %s for cluster %s: %r' \
-				% (host, clustername, e)
+				luci_log.debug_verbose('MC7: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+			return 'Unable to create cluster node %s for cluster %s: %s' \
+				% (host, clustername, str(e))
 
 	try:
 		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
@@ -370,8 +376,9 @@
 			raise Exception, 'The storage DB entry is missing'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('MC8: %s: %s: %r' % (clustername, host, e))
-		return 'Error adding storage node %s: %r' % (host, e)
+			luci_log.debug_verbose('MC8: %s: %s: %r %s' \
+				% (clustername, host, e, str(e)))
+		return 'Error adding storage node %s: %s' % (host, str(e))
 
 	# Only add storage systems if the cluster and cluster node DB
 	# objects were added successfully.
@@ -392,8 +399,8 @@
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('MC9: %s: %s: %r' \
-					% (clustername, host, e))
+				luci_log.debug_verbose('MC9: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
 
 def createClusterSystems(self, clustername, node_list):
 	try:
@@ -402,7 +409,8 @@
 			raise Exception, 'cluster %s DB entry is missing' % clustername
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('CCS0: %s: %r' % (clustername, e))
+			luci_log.debug_verbose('CCS0: %s: %r %s' \
+				% (clustername, e, str(e)))
 		return 'No cluster named "%s" is managed by Luci' % clustername
 
 	for x in node_list:
@@ -413,8 +421,8 @@
 			clusterObj.manage_addFolder(host)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CCS0a: %s: %s: %r' \
-					% (clustername, host, e))
+				luci_log.debug_verbose('CCS0a: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
 
 		try:
 			newSystem = self.restrictedTraverse('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, host))
@@ -424,10 +432,10 @@
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CCS1: %s: %s: %r' \
-					% (clustername, host, e))
-			return 'Unable to create cluster node %s for cluster %s: %r' \
-				% (host, clustername, e)
+				luci_log.debug_verbose('CCS1: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+			return 'Unable to create cluster node %s for cluster %s: %s' \
+				% (host, clustername, str(e))
 
 	try:
 		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
@@ -436,7 +444,8 @@
 	except Exception, e:
 		# This shouldn't fail, but if it does, it's harmless right now
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('CCS2: %s: %r' % (clustername, host, e))
+			luci_log.debug_verbose('CCS2: %s: %s: %r %s' \
+				% (clustername, host, e, str(e)))
 		return None
 
 	# Only add storage systems if the cluster and cluster node DB
@@ -459,7 +468,8 @@
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CCS3: %s: %r' % (clustername, host, e))
+				luci_log.debug_verbose('CCS3: %s: %r %s' \
+					% (clustername, host, e, str(e)))
 
 def delSystem(self, systemName):
 	try:
@@ -468,8 +478,9 @@
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delSystem0: %s: %r' % (systemName, e))
-		return 'Unable to find storage system %s: %r' % (systemName, e)
+			luci_log.debug_verbose('delSystem0: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return 'Unable to find storage system %s: %s' % (systemName, str(e))
 
 	try:
 		rc = RicciCommunicator(systemName, enforce_trust=False)
@@ -480,10 +491,12 @@
 			ssystem.manage_delObjects([ systemName ])
 		except Exception, e1:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('delSystem1: %s: %r' % (systemName, e1))
+				luci_log.debug_verbose('delSystem1: %s: %r %s' \
+					% (systemName, e1, str(e1)))
 			return 'Unable to delete the storage system %s' % systemName
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delSystem2: %s: %r' % (systemName, e))
+			luci_log.debug_verbose('delSystem2: %s: %r %s' \
+				% (systemName, e, str(e)))
 		return
 
 	# Only unauthenticate if the system isn't a member of
@@ -517,9 +530,10 @@
 		ssystem.manage_delObjects([ systemName ])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delSystem3: %s: %r' % (systemName, e))
-		return 'Unable to delete storage system %s: %r' \
-			% (systemName, e)
+			luci_log.debug_verbose('delSystem3: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return 'Unable to delete storage system %s: %s' \
+			% (systemName, str(e))
 
 def delCluster(self, clustername):
 	try:
@@ -528,7 +542,7 @@
 			raise Exception, 'clusters DB entry is missing'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delCluster0: %r' % e)
+			luci_log.debug_verbose('delCluster0: %r %s' % (e, str(e)))
 		return 'Unable to find cluster %s' % clustername
 
 	err = delClusterSystems(self, clustername)
@@ -539,7 +553,8 @@
 		clusters.manage_delObjects([ clustername ])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delCluster1: %s %r' % (clustername, e))
+			luci_log.debug_verbose('delCluster1: %s %r %s' \
+				% (clustername, e, str(e)))
 		return 'Unable to delete cluster %s' % clustername
 
 def delClusterSystem(self, cluster, systemName):
@@ -552,14 +567,14 @@
 			rc.unauth()
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r' % (systemName, e))
+				luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r %s' % (systemName, e, str(e)))
 
 	try:
 		cluster.manage_delObjects([ systemName ])
 	except Exception, e:
-		err_str = 'Error deleting cluster object %s: %r' % (systemName, e)
+		err_str = 'Error deleting cluster object %s: %s' % (systemName, str(e))
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delClusterSystem1: %s' % err_str)
+			luci_log.debug_verbose('delClusterSystem1: %r %s' % (e, err_str))
 		return err_str
 
 def delClusterSystems(self, clustername):
@@ -574,12 +589,12 @@
 				return None
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('delCluSystems0: %r' % e)
+				luci_log.debug_verbose('delCluSystems0: %r %s' % (e, str(e)))
 			return None
 	except Exception, er:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delCluSystems1: error for %s: %r' \
-				% (clustername, er))
+			luci_log.debug_verbose('delCluSystems1: error for %s: %r %s' \
+				% (clustername, er, str(er)))
 		return str(er)
 
 	error_list = list()
@@ -624,7 +639,8 @@
 		cluster_nodes = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clustername))('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSy0: %s: %r' % (clustername, e))
+			luci_log.debug_verbose('GCSy0: %s: %r %s' \
+				% (clustername, e, str(e)))
 		return None
 
 	if isAdmin(self):
@@ -635,7 +651,8 @@
 			% (CLUSTER_FOLDER_PATH, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSy1: %s: %r' % (clustername, e))
+			luci_log.debug_verbose('GCSy1: %s: %r %s' \
+				% (clustername, e, str(e)))
 		return None
 
 	if cluster_permission_check(self, cluster):
@@ -647,7 +664,7 @@
 		clusters = self.restrictedTraverse('%s/systems/cluster/objectItems' % PLONE_ROOT)('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GC0: %r' % e)
+			luci_log.debug_verbose('GC0: %r %s' % (e, str(e)))
 		return None
 
 	return check_clusters(self, clusters)
@@ -657,7 +674,7 @@
 		storage = self.restrictedTraverse('%s/systems/storage/objectItems' % PLONE_ROOT)('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GS0: %r' % e)
+			luci_log.debug_verbose('GS0: %r %s' % (e, str(e)))
 		return None
 	if isAdmin(self):
 		return storage
@@ -691,7 +708,7 @@
 			return None
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRA0: %r' % e)
+			luci_log.debug_verbose('GRA0: %r %s' % (e, str(e)))
 		return None
 
 	try:
@@ -712,15 +729,14 @@
 	except Exception, e:
 		try:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('GRA3: cluster folder %s for %s is missing: %r' \
-					% (path, clustername, e))
+				luci_log.debug('GRA3: cluster folder %s for %s is missing: %r %s' % (path, clustername, e, str(e)))
 
 			if len(clusterfolder.objectItems()) < 1:
 				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
 				clusters.manage_delObjects([clustername])
 		except Exception, ein:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GRA4: %r' % ein)
+				luci_log.debug_verbose('GRA4: %r %s' % (ein, str(ein)))
 		return None
 
 	cluname = clustername.lower()
@@ -774,7 +790,7 @@
 			setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
 		except Exception, eout:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GRA10: %r' % eout)
+				luci_log.debug_verbose('GRA10: %r %s' % (eout, str(eout)))
 
 	if LUCI_DEBUG_MODE is True:
 		luci_log.debug('GRA11: no ricci agent could be found for cluster %s' \
@@ -799,8 +815,8 @@
 		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSDB0: %s -> %s: %r' \
-				% (clustername, cluster_path, e))
+			luci_log.debug_verbose('GCSDB0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
 		return results
 
 	if len(nodelist) < 1:
@@ -811,7 +827,8 @@
 			clusters_dir.manage_delObjects([clustername])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GCSDB0b: %s: %r' % (clustername, e))
+				luci_log.debug_verbose('GCSDB0b: %s: %r %s' \
+					% (clustername, e, str(e)))
 	else:
 		for node in nodelist:
 			try:
@@ -824,6 +841,6 @@
 				results.append(node_val)
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GCSDB1: %r' % e)
+					luci_log.debug_verbose('GCSDB1: %r %s' % (e, str(e)))
 
 	return results
--- conga/luci/site/luci/Extensions/Attic/LuciZope.py	2007/05/18 03:30:44	1.1.2.6
+++ conga/luci/site/luci/Extensions/Attic/LuciZope.py	2007/05/18 05:23:55	1.1.2.7
@@ -20,7 +20,7 @@
 		return os.path.isfile('%sprivkey.pem' % CERTS_DIR_PATH) and os.path.isfile('%scacert.pem' % CERTS_DIR_PATH)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('SIS0: %r' % e)
+			luci_log.debug_verbose('SIS0: %r %s' % (e, str(e)))
 	return False
 
 def strFilter(regex, replaceChar, arg):
@@ -121,5 +121,6 @@
 		request.SESSION.set('model', model)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('Appending model to request failed: %r' % e)
+			luci_log.debug_verbose('Appending model to request failed: %r %s' \
+				% (e, str(e)))
 		return 'An error occurred while storing the cluster model'
--- conga/luci/site/luci/Extensions/Attic/RicciQueries.py	2007/05/18 02:36:59	1.1.2.4
+++ conga/luci/site/luci/Extensions/Attic/RicciQueries.py	2007/05/18 05:23:55	1.1.2.5
@@ -5,6 +5,7 @@
 # GNU General Public License as published by the
 # Free Software Foundation.
 
+import xml.dom
 from xml.dom import minidom
 from ricci_communicator import RicciCommunicator, extract_module_status
 from LuciSyslog import get_logger
@@ -24,8 +25,8 @@
 			return (True, 'batch id %s was not found' % batch_id)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('checkBatch1: %s: %r' \
-				% (rc.hostname(), e))
+			luci_log.debug_verbose('checkBatch1: %s: %r %s' \
+				% (rc.hostname(), e, str(e)))
 		return (-1, err_msg)
 
 	try:
@@ -35,8 +36,8 @@
 				% (rc.hostname(), str(new_err_msg))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('checkBatch2: %s: %r' \
-				% (rc.hostname(), e))
+			luci_log.debug_verbose('checkBatch2: %s: %r %s' \
+				% (rc.hostname(), e, str(e)))
 		return (-1, err_msg)
 
 	# In progress.
@@ -302,7 +303,7 @@
 			raise Exception, 'no batch tag was found'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('BAR0: %r' % e)
+			luci_log.debug_verbose('BAR0: %r %s' % (e, str(e)))
 		return (None, None)
 
 	for i in batch:
@@ -330,7 +331,7 @@
 		cluster_tags = ricci_xml.getElementsByTagName('cluster')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSB0: %r' % e)
+			luci_log.debug_verbose('GCSB0: %r %s' % (e, str(e)))
 		return None
 
 	if len(cluster_tags) < 1:
@@ -347,7 +348,7 @@
 			raise Exception, 'element 0 is None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSB3: %r' % e)
+			luci_log.debug_verbose('GCSB3: %r %s' % (e, str(e)))
 		return None
 
 	try:
@@ -356,7 +357,7 @@
 		return doc
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSB4: %r' % e)
+			luci_log.debug_verbose('GCSB4: %r %s' % (e, str(e)))
 
 	return None
 
@@ -392,8 +393,8 @@
 			raise Exception, 'no log data is available.'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL0: retrieving log data from %s: %r' \
-				% (rc.hostname(), e))
+			luci_log.debug_verbose('GNL0: retrieving log data from %s: %r %s' \
+				% (rc.hostname(), e, str(e)))
 		return None
 
 	time_now = time()
@@ -544,7 +545,7 @@
 		else:
 			raise Exception, op
 	except Exception, e:
-		elem.setAttribute('message', 'Unknown operation: %r' % e)
+		elem.setAttribute('message', 'Unknown operation: %s' % str(e))
 		doc.appendChild(elem)
 		return doc
 
@@ -592,7 +593,7 @@
 		return service_tags
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('LS1: %r' % e)
+			luci_log.debug_verbose('LS1: %r %s' % (e, str(e)))
 	return None
 
 def nodeIsVirtual(rc):
@@ -630,7 +631,8 @@
 				raise Exception, 'unexpected attribute name: %s' % name
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('nodeIsVirtual2: error: %r' % e)
+				luci_log.debug_verbose('nodeIsVirtual2: error: %r %s' \
+					% (e, str(e)))
 
 	if not success:
 		return None
@@ -654,28 +656,40 @@
 
 def extractDaemonInfo(bt_node):
 	if not bt_node:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EDI0: no bt_node')
 		return None
 
+	resultlist = list()
 	svc_nodes = bt_node.getElementsByTagName('service')
 	for node in svc_nodes:
 		svchash = {}
 		try:
 			name = node.getAttribute('name')
 			if not name:
-				raise
-		except:
+				raise Exception, 'No name'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI1: no service name: %r %s' \
+					% (e, str(e)))
 			name = '[unknown]'
 		svchash['name'] = name
 
 		try:
 			svc_enabled = node.getAttribute('enabled')
-		except:
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI2: no enabled: %r %s' \
+					% (e, str(e)))
 			svc_enabled = '[unknown]'
 		svchash['enabled'] = svc_enabled
 
 		try:
 			running = node.getAttribute('running')
-		except:
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI3: no running: %r %s' \
+					% (e, str(e)))
 			running = '[unknown]'
 		svchash['running'] = running
 		resultlist.append(svchash)
@@ -706,8 +720,8 @@
 			raise Exception, 'unknown error'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCC0: connecting to %s: %r' \
-				% (system_info, e))
+			luci_log.debug_verbose('GCC0: connecting to %s: %r %s' \
+				% (system_info, e, str(e)))
 		return None
 	# end workaround
 
@@ -715,8 +729,8 @@
 		ret = rc.process_batch(batch)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCC1: process_batch error for %s: %r' \
-				% (system_info, e))
+			luci_log.debug_verbose('GCC1: process_batch error for %s: %r %s' \
+				% (system_info, e, str(e)))
 		return None
 
 	if not ret:
@@ -725,7 +739,7 @@
 	cur = ret
 	while len(cur.childNodes) > 0:
 		for i in cur.childNodes:
-			if i.nodeType == ELEMENT_NODE:
+			if i.nodeType == xml.dom.Node.ELEMENT_NODE:
 				if i.nodeName == 'var' and i.getAttribute('name') == 'cluster.conf':
 					return i.childNodes[1].cloneNode(True)
 				else:
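
One functional fix above deserves a note: the ELEMENT_NODE hunk pairs with
the new 'import xml.dom' at the top of the file, since the constant lives on
xml.dom.Node. A standalone illustration of that node walk (the sample XML is
made up):

	import xml.dom
	from xml.dom import minidom

	doc = minidom.parseString('<response><var name="cluster.conf"/></response>')
	for node in doc.firstChild.childNodes:
		# only element nodes carry a nodeName and attributes worth matching
		if node.nodeType == xml.dom.Node.ELEMENT_NODE:
			print node.nodeName, node.getAttribute('name')
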
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/18 02:36:59	1.255.2.7
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/18 05:23:55	1.255.2.8
@@ -7,7 +7,6 @@
 
 from xml.dom import minidom
 
-from ClusterModel.ModelBuilder import ModelBuilder
 from ClusterModel.FailoverDomain import FailoverDomain
 from ClusterModel.FailoverDomainNode import FailoverDomainNode
 from ClusterModel.RefObject import RefObject
@@ -66,7 +65,8 @@
 		clustername = str(request.form['clustername'])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('PNC00: missing cluster name: %r' % e)
+			luci_log.debug_verbose('PNC00: missing cluster name: %r %s' \
+				% (e, str(e)))
 		clustername = None
 
 	if clustername is None:
@@ -118,7 +118,8 @@
 				errors.append('Unable to connect to %s: %s' \
 					% (cur_host, str(e)))
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('PCN1: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('PCN1: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			prev_auth = rc.authed()
@@ -139,7 +140,8 @@
 				errors.append('Error authenticating to %s: %s' \
 					% (cur_host, str(e)))
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('PCN2: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('PCN2: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			cur_cluster_info = rc.cluster_info()
@@ -158,7 +160,8 @@
 						del cur_system['trusted']
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('PCN3: %s: %r' % (cur_host, e))
+						luci_log.debug_verbose('PCN3: %s: %r %s' \
+							% (cur_host, e, str(e)))
 
 				errors.append('%s reports it is a member of cluster "%s"' \
 					% (cur_host, cur_cluster_name))
@@ -182,7 +185,8 @@
 						del cur_system['trusted']
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('PCN5: %s: %r' % (cur_host, e))
+						luci_log.debug_verbose('PCN5: %s: %r %s' \
+							% (cur_host, e, str(e)))
 
 				errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
 				if LUCI_DEBUG_MODE is True:
@@ -300,8 +304,8 @@
 			batch_id_map[i] = resultNode.getAttribute('batch_id')
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('validateCreateCluster0: %s: %r' \
-					% (i, e))
+				luci_log.debug_verbose('validateCreateCluster0: %s: %r %s' \
+					% (i, e, str(e)))
 			errors.append('An error occurred while attempting to add cluster node "%s"' % i)
 			if len(batch_id_map) == 0:
 				request.SESSION.set('create_cluster', add_cluster)
@@ -359,7 +363,8 @@
 				raise Exception, 'cluster DB object is missing'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VACN1: %s: %r' % (clustername, e))
+				luci_log.debug_verbose('VACN1: %s: %r %s' \
+					% (clustername, e, str(e)))
 			return (False, { 'errors': [ 'The database object for %s is missing' % clustername ] })
 
 		try:
@@ -368,7 +373,8 @@
 				raise Exception, 'cluster os is blank'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VACN2: %s: %r' % (clustername, e))
+				luci_log.debug_verbose('VACN2: %s: %r %s' \
+					% (clustername, e, str(e)))
 			cluster_os = None
 
 		if cluster_os is None:
@@ -377,7 +383,8 @@
 				cluster_os = resolveOSType(cluster_ricci.os())
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VACN3: %s: %r' % (clustername, e))
+					luci_log.debug_verbose('VACN3: %s: %r %s' \
+						% (clustername, e, str(e)))
 				cluster_os = None
 
 	if cluster_os is None:
@@ -429,7 +436,8 @@
 				errors.append('Unable to connect to %s: %s' \
 					% (cur_host, str(e)))
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VACN4: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('VACN4: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			prev_auth = rc.authed()
@@ -450,7 +458,8 @@
 				errors.append('Error authenticating to %s: %s' \
 					% (cur_host, str(e)))
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VACN5: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('VACN5: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			cur_cluster_info = rc.cluster_info()
@@ -469,7 +478,8 @@
 						del cur_system['trusted']
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('VACN6: %s: %r' % (cur_host, e))
+						luci_log.debug_verbose('VACN6: %s: %r %s' \
+							% (cur_host, e, str(e)))
 
 				errors.append('%s reports it is already a member of cluster "%s"' % (cur_host, cur_cluster_name))
 				if LUCI_DEBUG_MODE is True:
@@ -488,7 +498,8 @@
 						del cur_system['trusted']
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('VACN8: %s: %r' % (cur_host, e))
+						luci_log.debug_verbose('VACN8: %s: %r %s' \
+							% (cur_host, e, str(e)))
 
 				errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
 				if LUCI_DEBUG_MODE is True:
@@ -508,7 +519,8 @@
 		incomplete = True
 		errors.append('Unable to contact a ricci agent for %s' % clustername)
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VACN10: %s: %r' % (clustername, e))
+			luci_log.debug_verbose('VACN10: %s: %r %s' \
+				% (clustername, e, str(e)))
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -535,7 +547,7 @@
 		errors.append('Unable to build the cluster model for %s' \
 			% clustername)
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VACN11: %r' % e)
+			luci_log.debug_verbose('VACN11: %r %s' % (e, str(e)))
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -568,11 +580,13 @@
 						del cur_system['trusted']
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('VACN12: %s: %r' % (cur_host, e))
+						luci_log.debug_verbose('VACN12: %s: %r %s' \
+							% (cur_host, e, str(e)))
 
 				errors.append('Unable to initiate cluster join for node "%s"' % cur_host)
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VACN13: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('VACN13: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			next_node_id += 1
@@ -621,7 +635,7 @@
 		errors.append('Unable to update the cluster node list for %s' \
 			% clustername)
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VACN15: %r' % e)
+			luci_log.debug_verbose('VACN15: %r %s' % (e, str(e)))
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -654,7 +668,7 @@
 			errors.append('Unable to connect to the ricci agent on %s: %s' \
 				% (cur_host, str(e)))
 			if LUCI_DEBUG_MODE is True:
-				luci_log.info('VACN17: Unable to connect to the ricci daemon on host %s: %r' % (clunode['host'], e))
+				luci_log.info('VACN17: Unable to connect to the ricci daemon on host %s: %r %s' % (clunode['host'], e, str(e)))
 
 		if success:
 			try:
@@ -664,7 +678,8 @@
 				clunode['errors'] = True
 				success = False
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VACN18: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('VACN18: %s: %r %s' \
+						% (cur_host, e, str(e)))
 
 		if not success:
 			incomplete = True
@@ -689,8 +704,9 @@
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA0: no form_xml: %r' % e)
-		return (False, {'errors': ['No resource data was supplied for this service']})
+			luci_log.debug_verbose('vSA0: no form_xml: %r %s' \
+				% (e, str(e)))
+		return (False, { 'errors': [ 'No resource data was supplied for this service' ]})
 
 	try:
 		model = request.SESSION.get('model')
@@ -698,8 +714,8 @@
 			raise Exception, 'model is None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA0a: %r' % e)
-		return (False, {'errors': [ 'The cluster model is missing from the session object' ]})
+			luci_log.debug_verbose('vSA0a: %r %s' % (e, str(e)))
+		return (False, { 'errors': [ 'The cluster model is missing from the session object' ]})
 
 	try:
 		doc = minidom.parseString(form_xml)
@@ -708,11 +724,11 @@
 			raise
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA1: error: %r' % e)
-		return (False, {'errors': ['The resource data submitted for this service is not properly formed']})
+			luci_log.debug_verbose('vSA1: error: %r %s' % (e, str(e)))
+		return (False, { 'errors': [ 'The resource data submitted for this service is not properly formed' ]})
 
 	form_hash = {}
-	form_hash['toplevel'] = {'form': None, 'kids': [] }
+	form_hash['toplevel'] = { 'form': None, 'kids': [] }
 	for i in forms:
 		form_id = i.getAttribute('id')
 		form_parent = i.getAttribute('parent')
@@ -741,7 +757,8 @@
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vSA2: parsing XML: %r' % e)
+					luci_log.debug_verbose('vSA2: parsing XML: %r %s' \
+						% (e, str(e)))
 
 		try:
 			res_type = dummy_form['type'].strip()
@@ -749,16 +766,17 @@
 				raise Exception, 'no resource type'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA3: %r' % e)
-			return (False, {'errors': [ 'No resource type was specified' ]})
+				luci_log.debug_verbose('vSA3: %r %s' % (e, str(e)))
+			return (False, { 'errors': [ 'No resource type was specified' ]})
 
 		try:
 			if res_type == 'ip':
 				dummy_form['resourceName'] = dummy_form['ip_address']
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA3a: type is ip but no addr: %r' % e)
-			return (False, {'errors': [ 'No IP address was given' ]})
+				luci_log.debug_verbose('vSA3a: type is ip but no addr: %r %s' \
+					% (e, str(e)))
+			return (False, { 'errors': [ 'No IP address was given' ]})
 
 		try:
 			if dummy_form.has_key('immutable'):
@@ -770,10 +788,11 @@
 		except Exception, e:
 			resObj = None
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vSA4: type %s: %r' % (res_type, e))
+				luci_log.debug_verbose('vSA4: type %s: %r %s' \
+					% (res_type, e, str(e)))
 
 		if resObj is None:
-			return (False, {'errors': [ 'An error occurred while adding %s' % res_type ]})
+			return (False, { 'errors': [ 'An error occurred while adding %s' % res_type ]})
 		form_hash[form_id]['obj'] = resObj
 
 	if len(errors) > 0:
@@ -802,8 +821,9 @@
 		service_name = request.form['svc_name'].strip()
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5: no service name: %r' % e)
-		return (False, {'errors': [ 'No service name was given' ]})
+			luci_log.debug_verbose('vSA5: no service name: %r %s' \
+				% (e, str(e)))
+		return (False, { 'errors': [ 'No service name was given' ]})
 
 	autostart = '1'
 	try:
@@ -812,7 +832,8 @@
 	except Exception, e:
 		autostart = None
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5a: error getting autostart: %r' % e)
+			luci_log.debug_verbose('vSA5a: error getting autostart: %r %s' \
+				% (e, str(e)))
 
 	exclusive = '0'
 	try:
@@ -827,13 +848,13 @@
 		cur_service = model.retrieveServiceByName(service_name)
 	except GeneralError, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5b: no service named %s found: %r' \
-				% (service_name, e))
+			luci_log.debug_verbose('vSA5b: no service named %s found: %r %s' \
+				% (service_name, e, str(e)))
 		cur_service = None
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5c: no service named %s found: %r' \
-				% (service_name, e))
+			luci_log.debug_verbose('vSA5c: no service named %s found: %r %s' \
+				% (service_name, e, str(e)))
 		cur_service = None
 
 	try:
@@ -850,7 +871,8 @@
 			return (False, {'errors': [ 'An unknown action was specified' ]})
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vSA5: no action type: %r' % e)
+			luci_log.debug_verbose('vSA5: no action type: %r %s' \
+				% (e, str(e)))
 
 	def buildSvcTree(parent, child_id_list):
 		for i in child_id_list:
@@ -860,7 +882,7 @@
 					raise Exception, 'No object for %s' % i
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('bST0: %r' % e)
+					luci_log.debug_verbose('bST0: %r %s' % (e, str(e)))
 				continue
 			parent.addChild(child)
 			if 'kids' in form_hash[i]:
@@ -892,7 +914,8 @@
 			raise Exception, 'model string for %s is blank' % clustername
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vAS6a: exportModelAsString: %r' % e)
+			luci_log.debug_verbose('vAS6a: exportModelAsString: %r %s' \
+				% (e, str(e)))
 		return (False, {'errors': [ 'An error occurred while adding this service' ]})
 
 	rc = getRicciAgent(self, clustername)
@@ -915,7 +938,8 @@
 			raise Exception, 'unable to save the new cluster configuration'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vAS9: %r' % e)
+			luci_log.debug_verbose('vAS9: %r %s' \
+				% (e, str(e)))
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
 
 	try:
@@ -925,7 +949,8 @@
 			set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_ADD, 'Creating service "%s"' % service_name)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vAS10: failed to set flags: %r' % e)
+			luci_log.debug_verbose('vAS10: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -945,7 +970,7 @@
 		model = request.SESSION.get('model')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VRA1: no model: %r' % e)
+			luci_log.debug_verbose('VRA1: no model: %r %s' % (e, str(e)))
 		return None
 
 	errors = list()
@@ -963,7 +988,7 @@
 	if len(errors) > 0:
 		errors.append('An error occurred while adding this resource')
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('resource error: %r' % e)
+			luci_log.debug_verbose('resource error: %r %s' % (e, str(e)))
 		return (False, {'errors': errors})
 
 
@@ -1020,7 +1045,8 @@
 		model.mcast_address = addr_str
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('Error updating mcast properties: %r' % e)
+			luci_log.debug('Error updating mcast properties: %r %s' \
+				% (e, str(e)))
 		errors.append('Unable to update cluster multicast properties')
 
 	if len(errors) > 0:
@@ -1227,7 +1253,7 @@
 			cp.setConfigVersion(str(version_num))
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('unable to update general properties: %r' % e)
+				luci_log.debug_verbose('unable to update general properties: %r %s' % (e, str(e)))
 			errors.append('Unable to update the cluster configuration')
 
 	try:
@@ -1359,7 +1385,7 @@
 			fd.setPostFailDelay(str(post_fail_delay))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('Unable to update fence daemon properties: %r' % e)
+			luci_log.debug_verbose('Unable to update fence daemon properties: %r %s' % (e, str(e)))
 		errors.append('An error occurred while attempting to update fence daemon properties')
 
 	if len(errors) > 0:
@@ -1429,7 +1455,7 @@
 			raise Exception, 'no config type'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VCC2a: %r' % e)
+			luci_log.debug_verbose('VCC2a: %r %s' % (e, str(e)))
 		return (False, {'errors': [ 'No configuration type was submitted' ]})
 
 	if not configFormValidators.has_key(request.form['configtype']):
@@ -1466,7 +1492,7 @@
 				raise Exception, 'conf_str is none'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VCC4: export model as string failed: %r' % e)
+				luci_log.debug_verbose('VCC4: export model as string failed: %r %s' % (e, str(e)))
 			errors.append('Unable to store the new cluster configuration')
 
 	try:
@@ -1475,7 +1501,8 @@
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VCC5: error: getClusterName: %r' % e)
+			luci_log.debug_verbose('VCC5: error: getClusterName: %r %s' \
+				% (e, str(e)))
 		errors.append('Unable to determine cluster name from model')
 
 	if len(errors) > 0:
@@ -1548,7 +1575,7 @@
 			model = None
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('LECM2: no model: %r' % e)
+			luci_log.debug_verbose('LECM2: no model: %r %s' % (e, str(e)))
 		model = None
 	return model
 
@@ -1578,7 +1605,8 @@
 			raise Exception, 'conf_str is none'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+			luci_log.debug_verbose('VFE: export model as string failed: %r %s' \
+				% (e, str(e)))
 		errors.append('Unable to store the new cluster configuration')
 
 	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, model.getClusterName(), ret_obj))
@@ -1641,7 +1669,7 @@
 				raise Exception, 'conf_str is none'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+				luci_log.debug_verbose('VFE: export model as string failed: %r %s' % (e, str(e)))
 			errors.append('Unable to store the new cluster configuration')
 
 		try:
@@ -1650,7 +1678,8 @@
 				raise Exception, 'cluster name from model.getClusterName() is blank'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VFA: error: getClusterName: %r' % e)
+				luci_log.debug_verbose('VFA: error: getClusterName: %r %s' \
+					% (e, str(e)))
 			errors.append('Unable to determine cluster name from model')
 
 		if not rc:
@@ -1689,14 +1718,14 @@
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC0: no form_xml: %r' % e)
+			luci_log.debug_verbose('vNFC0: no form_xml: %r %s' % (e, str(e)))
 		return (False, {'errors': ['No fence data was supplied']})
 
 	try:
 		fence_level = int(request.form['fence_level'].strip())
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC1: no fence level: %r' % e)
+			luci_log.debug_verbose('vNFC1: no fence level: %r %s' % (e, str(e)))
 		return (False, {'errors': ['No fence level was supplied']})
 
 	try:
@@ -1705,7 +1734,7 @@
 			raise Exception, 'nodename is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC2: no nodename: %r' % e)
+			luci_log.debug_verbose('vNFC2: no nodename: %r %s' % (e, str(e)))
 		return (False, {'errors': ['No node name was given']})
 
 	try:
@@ -1714,7 +1743,7 @@
 			raise Exception, 'clustername is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC3: no clustername: %r' % e)
+			luci_log.debug_verbose('vNFC3: no clustername: %r %s' % (e, str(e)))
 		return (False, {'errors': ['No cluster name was given']})
 
 	try:
@@ -1737,14 +1766,14 @@
 		doc = minidom.parseString(form_xml)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC5: error: %r' % e)
+			luci_log.debug_verbose('vNFC5: error: %r %s' % (e, str(e)))
 		return (False, {'errors': ['The fence data submitted is not properly formed']})
 
 	try:
 		node = model.retrieveNodeByName(nodename)
 	except GeneralError, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC6: unable to find node name %s in current node list: %r' % (str(nodename), e))
+			luci_log.debug_verbose('vNFC6: unable to find node name %s in current node list: %r %s' % (str(nodename), e, str(e)))
 		return (False, {'errors': ['Unable to find the cluster node %s in the node list' % str(nodename) ]})
 
 	fence_level_num = int(fence_level)
@@ -1774,7 +1803,8 @@
 				node.getChildren()[0].removeChild(delete_target)
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC6a: %s: %r' % (method_id, e))
+					luci_log.debug_verbose('vNFC6a: %s: %r %s' \
+						% (method_id, e, str(e)))
 				return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]})
 		else:
 			return (True, {'messages': ['No changes were made'] })
@@ -1795,7 +1825,8 @@
 				input_type = str(i.getAttribute('type'))
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC7: input type: %r' % e)
+					luci_log.debug_verbose('vNFC7: input type: %r %s' \
+						% (e, str(e)))
 				continue
 
 			if not input_type or input_type == 'button':
@@ -1805,12 +1836,13 @@
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('vNFC8: parsing XML: %r' % e)
+					luci_log.debug_verbose('vNFC8: parsing XML: %r %s' \
+						% (e, str(e)))
 
 		if len(dummy_form) < 1:
 			continue
 
-		if 'fence_instance' in dummy_form:
+		if dummy_form.has_key('fence_instance'):
 			try:
 				parent = dummy_form['parent_fencedev']
 			except:
@@ -1838,7 +1870,7 @@
 			fence_form, instance_list = form_hash[i]
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vNFC11: %r' % e)
+				luci_log.debug_verbose('vNFC11: %r %s' % (e, str(e)))
 			continue
 
 		try:
@@ -1847,7 +1879,7 @@
 				raise Exception, 'fence type is blank'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vNFC12: %s: %r' % (i, e))
+				luci_log.debug_verbose('vNFC12: %s: %r %s' % (i, e, str(e)))
 			fence_type = None
 
 		if 'existing_device' in fence_form:
@@ -1947,7 +1979,8 @@
 			luci_log.debug_verbose('vNFC16: exported "%s"' % conf)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC17: exportModelAsString failed: %r' % e)
+			luci_log.debug_verbose('vNFC17: exportModelAsString failed: %r %s' \
+				% (e, str(e)))
 		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	rc = getRicciAgent(self, clustername)
@@ -1967,7 +2000,8 @@
 		set_node_flag(self, clustername, ragent, str(batch_number), FENCEDEV_NODE_CONFIG, "Updating fence configuration for node \'%s\'" % nodename)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vNFC20: failed to set flags: %r' % e)
+			luci_log.debug_verbose('vNFC20: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (request['URL'], NODE, clustername, nodename))
@@ -2064,7 +2098,7 @@
 				raise Exception, 'conf_str is none'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+				luci_log.debug_verbose('VFE: export model as string failed: %r %s' % (e, str(e)))
 			errors.append('Unable to store the new cluster configuration')
 
 		try:
@@ -2073,7 +2107,8 @@
 				raise Exception, 'cluster name from model.getClusterName() is blank'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VFA: error: getClusterName: %r' % e)
+				luci_log.debug_verbose('VFA: error: getClusterName: %r %s' \
+					% (e, str(e)))
 			errors.append('Unable to determine cluster name from model')
 
 		if not rc:
@@ -2130,7 +2165,7 @@
 	except Exception, e:
 		errors.append('Unable to determine the current node name')
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDP1: no nodename: %r' % e)
+			luci_log.debug_verbose('VDP1: no nodename: %r %s' % (e, str(e)))
 
 	try:
 		clustername = form['clustername'].strip()
@@ -2139,7 +2174,7 @@
 	except Exception, e:
 		errors.append('Unable to determine the current cluster name')
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDP2: no clustername: %r' % e)
+			luci_log.debug_verbose('VDP2: no clustername: %r %s' % (e, str(e)))
 
 	disable_list = list()
 	enable_list = list()
@@ -2170,7 +2205,8 @@
 			raise Exception, 'rc is None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDP5: RC %s: %r' % (nodename_resolved, e))
+			luci_log.debug_verbose('VDP5: RC %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
 		errors.append('Unable to connect to the ricci agent on %s to update cluster daemon properties' % nodename_resolved)
 		return (False, {'errors': errors})
 
@@ -2206,7 +2242,8 @@
 			raise Exception, 'no model'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateFdom0: no model: %r' % e)
+			luci_log.debug_verbose('validateFdom0: no model: %r %s' \
+				% (e, str(e)))
 		return (False, {'errors': [ 'Unable to retrieve cluster information' ]})
 
 	prioritized = False
@@ -2244,7 +2281,8 @@
 	except Exception, e:
 		errors.append('No name was given for this failover domain')
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateFdom0: %r' % e)
+			luci_log.debug_verbose('validateFdom0: %r %s' \
+				% (e, str(e)))
 
 	oldname = None
 	try:
@@ -2310,7 +2348,7 @@
 		conf = str(model.exportModelAsString())
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateFdom2: %r' % e)
+			luci_log.debug_verbose('validateFdom2: %r %s' % (e, str(e)))
 		errors.append('Unable to update the cluster configuration')
 
 	if len(errors) > 0:
@@ -2336,7 +2374,8 @@
 			set_node_flag(self, clustername, ragent, str(batch_number), FDOM_ADD, 'Creating failover domain "%s"' % name)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateFdom5: failed to set flags: %r' % e)
+			luci_log.debug_verbose('validateFdom5: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&fdomname=%s&busyfirst=true' \
@@ -2353,7 +2392,8 @@
 			raise Exception, 'blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM0: no vm name: %r' % e)
+			luci_log.debug_verbose('validateVM0: no vm name: %r %s' \
+				% (e, str(e)))
 		errors.append('No virtual machine name was given')
 
 	try:
@@ -2362,7 +2402,8 @@
 			raise Exception, 'blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM1: no vm path: %r' % e)
+			luci_log.debug_verbose('validateVM1: no vm path: %r %s' \
+				% (e, str(e)))
 		errors.append('No path to the virtual machine configuration file was given')
 
 	autostart = 1
@@ -2466,7 +2507,7 @@
 			raise Exception, 'model is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM2: %r' % e)
+			luci_log.debug_verbose('validateVM2: %r %s' % (e, str(e)))
 		errors.append('Unable to update the cluster model')
 
 	try:
@@ -2475,7 +2516,7 @@
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM3: %r' % e)
+			luci_log.debug_verbose('validateVM3: %r %s' % (e, str(e)))
 		errors.append('Unable to determine the cluster name')
 
 	if len(errors) > 0:
@@ -2502,7 +2543,8 @@
 			set_node_flag(self, clustername, rc.hostname(), str(batch_number), VM_CONFIG, "Configuring virtual machine service \'%s\'" % vm_name)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('validateVM6: failed to set flags: %r' % e)
+			luci_log.debug_verbose('validateVM6: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -2533,7 +2575,7 @@
 		pagetype = int(request.form['pagetype'])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VP0: error: %r' % e)
+			luci_log.debug_verbose('VP0: error: %r %s' % (e, str(e)))
 		return None
 
 	if not pagetype in formValidators:
@@ -3390,7 +3432,7 @@
 			raise Exception, 'cannot find database object at %s' % path
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NLO: node_leave_cluster err: %r' % e)
+			luci_log.debug('NLO: node_leave_cluster err: %r %s' % (e, str(e)))
 		return None
 
 	objname = '%s____flag' % nodename_resolved
@@ -3418,7 +3460,8 @@
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_LEAVE_CLUSTER, 'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL4: failed to set flags: %r' % e)
+			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
+				% (e, str(e)))
 	return True
 
 def nodeJoin(self, rc, clustername, nodename_resolved):
@@ -3432,7 +3475,8 @@
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_JOIN_CLUSTER, 'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NJ1: failed to set flags: %r' % e)
+			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
+				% (e, str(e)))
 	return True
 
 def clusterStart(self, model):
@@ -3453,8 +3497,8 @@
 			rc = RicciCommunicator(nodename_resolved)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStart: RC %s: %r' \
-					% (nodename_resolved, e))
+				luci_log.debug_verbose('CStart: RC %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
 			errors += 1
 			continue
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
@@ -3482,8 +3526,8 @@
 			rc = RicciCommunicator(nodename_resolved)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStop0: [%d] RC %s: %r' \
-					% (delete is True, str(nodename_resolved), e))
+				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
+					% (delete is True, str(nodename_resolved), e, str(e)))
 			errors += 1
 			continue
 
@@ -3531,8 +3575,8 @@
 			delCluster(self, clustername)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete1: %s: %r' \
-					% (clustername, e))
+				luci_log.debug_verbose('clusterDelete1: %s: %r %s' \
+					% (clustername, e, str(e)))
 
 		try:
 			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
@@ -3541,8 +3585,8 @@
 				clusters.manage_delObjects([clustername])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete2: %s %r' \
-					% (clustername, e))
+				luci_log.debug_verbose('clusterDelete2: %s %r %s' \
+					% (clustername, e, str(e)))
 		return CLUSTERLIST
 	else:
 		if LUCI_DEBUG_MODE is True:
@@ -3560,7 +3604,8 @@
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_REBOOT, 'Node "%s" is being rebooted' % nodename_resolved)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNR1: failed to set flags: %r' % e)
+			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
+				% (e, str(e)))
 	return True
 
 def forceNodeFence(self, clustername, nodename, nodename_resolved):
@@ -3572,8 +3617,7 @@
 			raise Exception, 'no cluster folder at %s' % path
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF0: The cluster folder %s could not be found: %r' \
-				 % (clustername, e))
+			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
 		return None
 
 	try:
@@ -3582,8 +3626,8 @@
 			raise Exception, 'no cluster nodes'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF1: No cluster nodes for %s were found: %r' \
-				% (clustername, e))
+			luci_log.debug('FNF1: No cluster nodes for %s were found: %r %s' \
+				% (clustername, e, str(e)))
 		return None
 
 	found_one = False
@@ -3597,8 +3641,8 @@
 				raise Exception, 'rc is None'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('FNF2: ricci error for host %s: %r' \
-					% (node[0], e))
+				luci_log.debug('FNF2: ricci error for host %s: %r %s' \
+					% (node[0], e, str(e)))
 			continue
 
 		if not rc.authed():
@@ -3631,7 +3675,8 @@
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_FENCE, 'Node "%s" is being fenced' % nodename_resolved)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF4: failed to set flags: %r' % e)
+			luci_log.debug_verbose('FNF4: failed to set flags: %r %s' \
+				% (e, str(e)))
 	return True
 
 def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
@@ -3652,8 +3697,7 @@
 				raise Exception, 'no cluster folder at %s' % path
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r' \
-					% (clustername, e))
+				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
 			return None
 
 		try:
@@ -3662,8 +3706,7 @@
 				raise Exception, 'no cluster nodes in DB'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r' \
-					% (clustername, e))
+				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
 
 		for node in nodes:
 			if node[1].getId().find(nodename) != (-1):
@@ -3678,7 +3721,8 @@
 					raise Exception, 'ND1a: rc2 is None'
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.info('ND2: ricci %s error: %r' % (node[0], e))
+					luci_log.info('ND2: ricci %s error: %r %s' \
+						% (node[0], e, str(e)))
 				continue
 
 			if not rc2.authed():
@@ -3724,7 +3768,8 @@
 			set_node_flag(self, clustername, rc.hostname(), str(batch_number), CLUSTER_DELETE, 'Deleting cluster "%s": Deleting node "%s"' % (clustername, nodename_resolved))
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND5a: failed to set flags: %r' % e)
+				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
+					% (e, str(e)))
 	else:
 		delete_target = None
 		nodelist = model.getNodes()
@@ -3747,8 +3792,8 @@
 			model.deleteNode(delete_target)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r' \
-					% (delete_target.getName(), e))
+				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r %s' \
+					% (delete_target.getName(), e, str(e)))
 
 		try:
 			model.setModified(True)
@@ -3757,7 +3802,8 @@
 				raise Exception, 'model string is blank'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND7: exportModelAsString: %r' % e)
+				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
+					% (e, str(e)))
 			return None
 
 		# propagate the new cluster.conf via the second node
@@ -3774,8 +3820,8 @@
 		clusterfolder.manage_delObjects([nodename_resolved])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND9: error deleting %s at %s: %r' \
-				% (nodename_resolved, path, e))
+			luci_log.debug_verbose('ND9: error deleting %s at %s: %r %s' \
+				% (nodename_resolved, path, e, str(e)))
 
 	if delete_cluster:
 		return True
@@ -3784,7 +3830,8 @@
 		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND10: failed to set flags: %r' % e)
+			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
+				% (e, str(e)))
 	return True
 
 def nodeTaskProcess(self, model, request):
@@ -3830,13 +3877,13 @@
 				raise Exception, 'rc is None'
 		except RicciError, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP3: ricci error from %s: %r' \
-					% (nodename_resolved, e))
+				luci_log.debug('NTP3: ricci error from %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
 			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP4: ricci error from %s: %r' \
-					% (nodename_resolved, e))
+				luci_log.debug('NTP4: ricci error from %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
 			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 
 		cluinfo = rc.cluster_info()
@@ -3957,13 +4004,13 @@
 		rc = RicciCommunicator(nodename_resolved)
 	except RicciError, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL2: ricci error %s: %r' \
-				% (nodename_resolved, e))
+			luci_log.debug_verbose('GNL2: ricci error %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
 		return 'Ricci error while getting logs for %s' % nodename_resolved
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r' \
-				% (nodename_resolved, e))
+			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
 		return 'Ricci error while getting logs for %s' % nodename_resolved
 
 	if not rc.authed():
@@ -3972,7 +4019,8 @@
 			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL4: %s: %r' % (nodename_resolved, e))
+				luci_log.debug_verbose('GNL4: %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
 
 		if clustername:
 			try:
@@ -3980,8 +4028,8 @@
 				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GNL5: %s: %r' \
-						% (nodename_resolved, e))
+					luci_log.debug_verbose('GNL5: %s: %r %s' \
+						% (nodename_resolved, e, str(e)))
 		return 'Luci is not authenticated to node %s. Please reauthenticate first' % nodename
 
 	return rq.getNodeLogs(rc)
@@ -4015,7 +4063,7 @@
 			raise Exception, 'clusterfolder is None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %r -- returning empty map' % (cluname, path, e))
+			luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %r %s -- returning empty map' % (cluname, path, e, str(e)))
 		return busy_map
 	except:
 		if LUCI_DEBUG_MODE is True:
@@ -4030,7 +4078,7 @@
 			return busy_map
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %r' % (cluname, path, e))
+			luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %r %s' % (cluname, path, e, str(e)))
 		return busy_map
 	except:
 		if LUCI_DEBUG_MODE is True:
@@ -4087,7 +4135,8 @@
 			except Exception, e:
 				rc = None
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('ICB7: RC: %s: %r' % (cluname, e))
+					luci_log.debug_verbose('ICB7: RC: %s: %r %s' \
+						% (cluname, e, str(e)))
 
 			batch_id = None
 			if rc is not None:
@@ -4098,7 +4147,7 @@
 							% (batch_id, item[0]))
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %r' % (item[0], e))
+						luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %r %s' % (item[0], e, str(e)))
 
 				if batch_id is not None:
 					try:
@@ -4118,7 +4167,7 @@
 								luci_log.debug_verbose('ICB9: batch_xml for cluster is None')
 					except Exception, e:
 						if LUCI_DEBUG_MODE is True:
-							luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %r' % e)
+							luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %r %s' % (e, str(e)))
 					# No contact with ricci (-1000)
 					creation_status = RICCI_CONNECT_FAILURE
 					# set to avoid next if statement
@@ -4220,8 +4269,8 @@
 						clusterfolder.manage_delObjects([item[0]])
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('ICB14: delObjects: %s: %r' \
-							% (item[0], e))
+						luci_log.debug_verbose('ICB14: delObjects: %s: %r %s' \
+							% (item[0], e, str(e)))
 
 				nodereports.append(node_report)
 				continue
@@ -4238,8 +4287,8 @@
 						clusterfolder.manage_delObjects([item[0]])
 					except Exception, e:
 						if LUCI_DEBUG_MODE is True:
-							luci_log.info('ICB15: Unable to delete %s: %r' \
-								% (item[0], e))
+							luci_log.info('ICB15: Unable to delete %s: %r %s' \
+								% (item[0], e, str(e)))
 					continue
 				else:
 					busy_map['busy'] = 'true'
@@ -4254,7 +4303,7 @@
 						item[1].manage_addProperty(LAST_STATUS, creation_status, 'int')
 					except Exception, e:
 						if LUCI_DEBUG_MODE is True:
-							luci_log.debug_verbose('ICB16: last_status err: %s %d: %r' % (item[0], creation_status, e))
+							luci_log.debug_verbose('ICB16: last_status err: %s %d: %r %s' % (item[0], creation_status, e, str(e)))
 					continue
 		else:
 			node_report = {}
@@ -4269,8 +4318,8 @@
 				finished = -1
 				err_msg = ''
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('ICB15: ricci error: %s: %r' \
-						% (ricci[0], e))
+					luci_log.debug_verbose('ICB15: ricci error: %s: %r %s' \
+						% (ricci[0], e, str(e)))
 
 			if rc is not None:
 				batch_res = rq.checkBatch(rc, item[1].getProperty(BATCH_ID))
@@ -4293,8 +4342,8 @@
 					clusterfolder.manage_delObjects([item[0]])
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.info('ICB16: Unable to delete %s: %r' \
-						% (item[0], e))
+						luci_log.info('ICB16: Unable to delete %s: %r %s' \
+						% (item[0], e, str(e)))
 			else:
 				node_report = {}
 				busy_map['busy'] = 'true'
@@ -4348,7 +4397,8 @@
 		model = request.SESSION.get('model')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService0: no model: %r' % e)
+			luci_log.debug_verbose('delService0: no model: %r %s' \
+				% (e, str(e)))
 		return (False, {'errors': [ errstr ] })
 
 	name = None
@@ -4391,14 +4441,15 @@
 			raise Exception, 'unable to determine the hostname of the ricci agent'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService4: %s: %r' % (errstr, e))
+			luci_log.debug_verbose('delService4: %s: %r %s' \
+				% (errstr, e, str(e)))
 		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
 
 	try:
 		model.deleteService(name)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s: %r' % (name, clustername, e))
+			luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s: %r %s' % (name, clustername, e, str(e)))
 		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
 
 	try:
@@ -4408,7 +4459,7 @@
 			raise Exception, 'model string is blank'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService6: exportModelAsString failed: %r' % e)
+			luci_log.debug_verbose('delService6: exportModelAsString failed: %r %s' % (e, str(e)))
 		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
 
 	batch_number, result = rq.setClusterConf(rc, str(conf))
@@ -4421,7 +4472,8 @@
 		set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_DELETE, 'Removing service "%s"' % name)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService8: failed to set flags: %r' % e)
+			luci_log.debug_verbose('delService8: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -4434,7 +4486,8 @@
 		model = request.SESSION.get('model')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource0: no model: %r' % e)
+			luci_log.debug_verbose('delResource0: no model: %r %s' \
+				% (e, str(e)))
 		return errstr
 
 	name = None
@@ -4471,7 +4524,8 @@
 			raise Exception, 'unable to determine the hostname of the ricci agent'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource3: %s: %r' % (errstr, e))
+			luci_log.debug_verbose('delResource3: %s: %r %s' \
+				% (errstr, e, str(e)))
 		return '%s: could not determine the ricci agent hostname' % errstr
 
 	resPtr = model.getResourcesPtr()
@@ -4509,7 +4563,8 @@
 		set_node_flag(self, clustername, ragent, str(batch_number), RESOURCE_REMOVE, 'Removing resource "%s"' % request['resourcename'])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource7: failed to set flags: %r' % e)
+			luci_log.debug_verbose('delResource7: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -4532,7 +4587,7 @@
 		model.getResourcesPtr().addChild(res)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource2: %r' % e)
+			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
 		return 'Unable to add the new resource'
 
 	try:
@@ -4542,7 +4597,8 @@
 			raise Exception, 'model string for %s is blank' % clustername
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource3: exportModelAsString: %r' % e)
+			luci_log.debug_verbose('addResource3: exportModelAsString: %r %s' \
+				% (e, str(e)))
 		return 'An error occurred while adding this resource'
 
 	try:
@@ -4559,7 +4615,7 @@
 			raise Exception, 'unable to save the new cluster configuration'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource6: %r' % e)
+			luci_log.debug_verbose('addResource6: %r %s' % (e, str(e)))
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
 
 	try:
@@ -4576,7 +4632,8 @@
 		set_node_flag(self, clustername, ragent, str(batch_number), action_type, action_str)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource7: failed to set flags: %r' % e)
+			luci_log.debug_verbose('addResource7: failed to set flags: %r %s' \
+				% (e, str(e)))
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
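
The bulk of this patch is one mechanical change: every exception log gains
str(e) alongside the existing %r, since for many exceptions the repr and the
message carry different information. A minimal sketch of the difference;
DummyLog is a stand-in invented here, and only the format-string idiom comes
from the hunks above:

	class DummyLog:
		# Stand-in for the LuciSyslog logger; prints instead of
		# writing to syslog.
		def debug_verbose(self, msg):
			print msg

	luci_log = DummyLog()

	try:
		raise KeyError('form_xml')
	except Exception, e:
		# Old form: repr() only -- "KeyError('form_xml',)"
		luci_log.debug_verbose('old: %r' % e)
		# New form: repr() plus str(); for KeyError, str() is just the
		# key, so this logs "new: KeyError('form_xml',) 'form_xml'"
		luci_log.debug_verbose('new: %r %s' % (e, str(e)))
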
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2007/05/18 02:36:59	1.50.2.5
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2007/05/18 05:23:55	1.50.2.6
@@ -77,7 +77,7 @@
 			raise Exception, 'user %s does not exist' % userId
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDU0: %s: %r' % (userId, e))
+			luci_log.debug_verbose('VDU0: %s: %r %s' % (userId, e, str(e)))
 		return (False, {'errors': [ 'No such user: "%s"' % userId ] })
 
 	for i in getClusters(self):
@@ -85,7 +85,8 @@
 			i[1].manage_delLocalRoles([ userId ])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VDU1: %s %s: %r' % (userId, i[0], e))
+				luci_log.debug_verbose('VDU1: %s %s: %r %s' \
+					% (userId, i[0], e, str(e)))
 			errors.append('Error deleting roles from cluster "%s" for user "%s"' \
 				% (i[0], userId))
 
@@ -94,14 +95,16 @@
 			i[1].manage_delLocalRoles([ userId ])
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VDU2: %s: %r' % (userId, i[0], e))
+				luci_log.debug_verbose('VDU2: %s %s: %r %s' \
+					% (userId, i[0], e, str(e)))
 			errors.append('Error deleting roles from storage system "%s" for user "%s"' % (i[0], userId))
 
 	try:
 		self.acl_users.userFolderDelUsers([ userId ])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VDU3: %s %r' % (userId, e))
+			luci_log.debug_verbose('VDU3: %s %r %s' \
+				% (userId, e, str(e)))
 		errors.append('Unable to delete user "%s"' % userId)
 		return (False, {'errors': errors })
 
@@ -133,7 +136,7 @@
 		self.portal_registration.addMember(user, passwd, properties = { 'username': user, 'password': passwd, 'confirm': passwd, 'roles': [ 'Member' ], 'domains': [], 'email': '%s at conga.example.com' % user })
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VAU0: %s: %r' % (user, e))
+			luci_log.debug_verbose('VAU0: %s: %r %s' % (user, e, str(e)))
 		return (False, { 'errors': [ 'Unable to add new user "%s"' % user ] })
 
 	if not self.portal_membership.getMemberById(user):
@@ -160,7 +163,7 @@
 		cur_host = sysData[0]
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vACI0: %r' % e)
+			luci_log.debug_verbose('vACI0: %r %s' % (e, str(e)))
 		return (False, { 'errors': [ 'You must provide the address of at least one node in the cluster you wish to add.' ]})
 
 	cur_entry = { 'host': cur_host }
@@ -212,7 +215,7 @@
 			cur_entry['fp'] = cur_fp[1]
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vACI2: %s: %r' % (cur_host, e))
+			luci_log.debug_verbose('vACI2: %s: %r %s' % (cur_host, e, str(e)))
 		request.SESSION.set('add_cluster_initial', cur_entry)
 		return (False, { 'errors': [ 'Unable to establish a connection to the ricci agent on %s: %s' % (cur_host, str(e)) ] })
 
@@ -231,7 +234,8 @@
 			rc.trust()
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vACI3: %s %r' % (cur_host, e))
+				luci_log.debug_verbose('vACI3: %s %r %s' \
+					% (cur_host, e, str(e)))
 			request.SESSION.set('add_cluster_initial', cur_entry)
 			return (False, { 'errors': [ 'Unable to establish trust for host %s: %s' % (cur_host, str(e)) ] })
 	elif check_certs:
@@ -260,7 +264,7 @@
 		cur_entry['trusted'] = rc.trusted()
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('vACI4: %s %r' % (cur_host, e))
+			luci_log.debug_verbose('vACI4: %s %r %s' % (cur_host, e, str(e)))
 		request.SESSION.set('add_cluster_initial', cur_entry)
 		return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % cur_host ] })
 
@@ -272,7 +276,8 @@
 				raise Exception, 'authentication failed'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('vACI5: %s: %r' % (cur_host, e))
+				luci_log.debug_verbose('vACI5: %s: %r %s' \
+					% (cur_host, e, str(e)))
 			request.SESSION.set('add_cluster_initial', cur_entry)
 			return (False, { 'errors': [ 'Unable to authenticate to the ricci agent on "%s"' % cur_host ] })
 
@@ -280,10 +285,15 @@
 
 	try:
 		cluster_info = rc.cluster_info()
-	except:
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vACI5aa: %s: %r %s' % (cur_host, e, str(e)))
 		cluster_info = None
 
 	if not cluster_info or not cluster_info[0]:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vACI5a0: no cluster info')
+
 		if not prev_auth:
 			try:
 				rc.unauth()
@@ -298,26 +308,11 @@
 
 	cluster_name = cluster_info[0]
 
-	cluster_os = resolveOSType(rc.os())
-	if LUCI_DEBUG_MODE is True:
-		luci_log.debug_verbose('vACI5a: cluster os is %s (%s)' \
-			% (cluster_os, rc.os()))
-
-	try:
-		cluster_conf = getClusterConf(rc)
-	except:
-		cluster_conf = None
-
-	if cluster_conf:
-		try:
-			node_list = getClusterConfNodes(cluster_conf)
-		except:
-			node_list = None
-
 	# Make sure a cluster with this name is not already managed before
 	# going any further.
 	try:
-		dummy = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, cluster_name))
+		dummy = self.restrictedTraverse('%s%s' \
+					% (CLUSTER_FOLDER_PATH, cluster_name))
 		if not dummy:
 			raise Exception, 'no existing cluster'
 		errors.append('A cluster named "%s" is already managed.' % cluster_name)
@@ -330,6 +325,28 @@
 	except:
 		pass
 
+	cluster_os = resolveOSType(rc.os())
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('vACI5a: cluster os is %s (%s)' \
+			% (cluster_os, rc.os()))
+
+	try:
+		cluster_conf = getClusterConf(rc)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vACI5a1: no cluster.conf: %r %s' \
+				% (e, str(e)))
+		cluster_conf = None
+
+	if cluster_conf:
+		try:
+			node_list = getClusterConfNodes(cluster_conf)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vACI5b: no node list: %r %s' \
+					% (e, str(e)))
+			node_list = None
+
 	if not cluster_conf or not node_list or len(node_list) < 1:
 		if not prev_auth:
 			try:
@@ -340,24 +357,28 @@
 
 	same_node_passwds = False
 	try:
-		same_node_passwds = 'allSameCheckBox' in request.form
+		same_node_passwds = request.form.has_key('allSameCheckBox')
 	except:
 		same_node_passwds = False
 
-	add_cluster = { 'name': cluster_name,
-					'nodes': {},
-					'cluster_os':cluster_os,
-					'pass': 0,
-					'identical_passwds': same_node_passwds,
-					'check_certs': check_certs }
+	add_cluster = {
+		'name': cluster_name,
+		'nodes': {},
+		'cluster_os': cluster_os,
+		'pass': 0,
+		'identical_passwds': same_node_passwds,
+		'check_certs': check_certs
+	}
 
 	for i in node_list:
 		cur_node = { 'host': i }
-		if same_node_passwds:
+		if same_node_passwds is True:
 			cur_node['passwd'] = cur_pass
 		add_cluster['nodes'][i] = cur_node
+
 	request.SESSION.set('add_cluster', add_cluster)
-	request.response.redirect('/luci/homebase/index_html?pagetype=%s' % HOMEBASE_ADD_CLUSTER)
+	request.response.redirect('/luci/homebase/index_html?pagetype=%s' \
+		% HOMEBASE_ADD_CLUSTER)
 
 def parseHostForm(request, check_certs):
 	errors = list()
@@ -368,7 +389,8 @@
 		num_storage = int(request.form['numStorage'].strip())
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('PHF1: numStorage field missing: %r' % e)
+			luci_log.debug_verbose('PHF1: numStorage field missing: %r %s' \
+				% (e, str(e)))
 		errors.append('The number of systems entered could not be determined.')
 
 	trust_shown = False
@@ -453,7 +475,8 @@
 					pass
 				errors.append('Unable to retrieve the SSL fingerprint for node %s: %s' % (cur_host, str(e)))
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('PHF2: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('PHF2: %s: %r %s' \
+						% (cur_host, e, str(e)))
 		else:
 			try:
 				rc = RicciCommunicator(cur_host, enforce_trust=False)
@@ -480,7 +503,8 @@
 					pass
 				errors.append('Unable to add the key for node %s to the trusted keys list.' % cur_host)
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('PHF3: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('PHF3: %s: %r %s' \
+						% (cur_host, e, str(e)))
 		system_list[cur_host] = cur_system
 		i += 1
 
@@ -548,7 +572,8 @@
 				incomplete = True
 				cur_system['errors'] = True
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VAC2: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('VAC2: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			try:
@@ -561,7 +586,8 @@
 				incomplete = True
 				cur_system['errors'] = True
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VAC3: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('VAC3: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				continue
 
 			cluster_info = rc.cluster_info()
@@ -585,7 +611,8 @@
 						rc.unauth()
 					except Exception, e:
 						if LUCI_DEBUG_MODE is True:
-							luci_log.debug_verbose('VAC4: %s: %r' % (cur_host, e))
+							luci_log.debug_verbose('VAC4: %s: %r %s' \
+								% (cur_host, e, str(e)))
 
 				errors.append(err_msg)
 				if LUCI_DEBUG_MODE is True:
@@ -605,7 +632,8 @@
 						rc.unauth()
 					except Exception, e:
 						if LUCI_DEBUG_MODE is True:
-							luci_log.debug_verbose('VAC6: %s: %r' % (cur_host, e))
+							luci_log.debug_verbose('VAC6: %s: %r %s' \
+								% (cur_host, e, str(e)))
 
 				err_msg = 'Node %s reports its cluster version is %s and we expect %s' \
 					% (cur_host, cur_os, cluster_os)
@@ -688,7 +716,7 @@
 			request.SESSION.set('add_systems', add_systems)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('validateSA2: %r' % e)
+				luci_log.debug_verbose('validateSA2: %r %s' % (e, str(e)))
 		return_code = False
 	else:
 		try:
@@ -718,7 +746,8 @@
 		user_id = user.getUserId()
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VP1: no user "%s": %r' % (username, e))
+			luci_log.debug_verbose('VP1: no user "%s": %r %s' \
+				% (username, e, str(e)))
 		errors.append('An invalid user "%s" was given' % username)
 
 	if len(errors) > 0:
@@ -739,7 +768,8 @@
 					messages.append('Removed permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('VP2: %s %s: %r' % (user_id, i[0], e))
+					luci_log.debug_verbose('VP2: %s %s: %r %s' \
+						% (user_id, i[0], e, str(e)))
 				errors.append('Failed to remove permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 	else:
 		for i in clusters:
@@ -752,7 +782,8 @@
 						messages.append('Added permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('VP3: %s %s %r' % (user_id, i[0], e))
+						luci_log.debug_verbose('VP3: %s %s %r %s' \
+							% (user_id, i[0], e, str(e)))
 					errors.append('Failed to add permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 			else:
 				try:
@@ -768,7 +799,8 @@
 						messages.append('Removed permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 				except Exception, e:
 					if LUCI_DEBUG_MODE is True:
-						luci_log.debug_verbose('VP4: %s %s %r' % (user_id, i[0], e))
+						luci_log.debug_verbose('VP4: %s %s %r %s' \
+							% (user_id, i[0], e, str(e)))
 					errors.append('Failed to remove permission for user "%s" for cluster "%s"' % (user_id, i[0]))
 
 
@@ -849,7 +881,8 @@
 					raise Exception, 'connection failed'
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('validateAuth0: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('validateAuth0: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				errors.append('Unable to communicate with the ricci agent on %s: %s' \
 					% (cur_host, str(e)))
 				incomplete = True
@@ -878,7 +911,8 @@
 			except Exception, e:
 				errors.append('Unable to authenticate to %s: %s' % (cur_host, str(e)))
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('validateAuth1: %s: %r' % (cur_host, e))
+					luci_log.debug_verbose('validateAuth1: %s: %r %s' \
+						% (cur_host, e, str(e)))
 				incomplete = True
 				cur_system['error'] = True
 
@@ -893,7 +927,8 @@
 			request.SESSION.set('auth_systems', system_list)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('validateAuthenticate2: %r' % e)
+				luci_log.debug_verbose('validateAuthenticate2: %r %s' \
+					% (e, str(e)))
 	else:
 		try:
 			request.SESSION.delete('auth_systems')
@@ -1150,7 +1185,7 @@
 		i = rc.authed()
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('CS3 %s: %r' % (host, e))
+			luci_log.debug_verbose('CS3 %s: %r %s' % (host, e, str(e)))
 		return 'Unable to authenticate to the ricci agent on %s' % host
 
 	if i is not True:
@@ -1168,7 +1203,7 @@
 		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('CS5 %s: %r' % (host, e))
+			luci_log.debug_verbose('CS5 %s: %r %s' % (host, e, str(e)))
 		return 'Unable to create storage system %s: %s' % (host, str(e))
 
 	try:
@@ -1176,7 +1211,7 @@
 		newSystem = self.restrictedTraverse('%s%s' % (STORAGE_FOLDER_PATH, host))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('CS6 %s: %r' % (host, e))
+			luci_log.debug_verbose('CS6 %s: %r %s' % (host, e, str(e)))
 		return 'Unable to create DB entry for storage system %s' % host
 
 	try:
@@ -1184,7 +1219,7 @@
 		newSystem.manage_role('View', ['Access contents information', 'View'])
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('CS7 %s: %r' % (host, e))
+			luci_log.debug_verbose('CS7 %s: %r %s' % (host, e, str(e)))
 		return 'Unable to set permissions on storage system %s' % host
 
 	return None
@@ -1203,7 +1238,8 @@
 				user = members[0].getUserName()
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('getDefaultUser0: %r' % e)
+					luci_log.debug_verbose('getDefaultUser0: %r %s' \
+						% (e, str(e)))
 				user = None
 
 	if not user:
@@ -1222,7 +1258,7 @@
 		members.sort()
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('getUserPerms0: %r' % e)
+			luci_log.debug_verbose('getUserPerms0: %r %s' % (e, str(e)))
 		return {}
 
 	for i in members:
@@ -1237,7 +1273,8 @@
 			storage = self.restrictedTraverse('%s/systems/storage/objectItems' % PLONE_ROOT)('Folder')
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('getUserPerms1: user %s: %r' % (userName, e))
+				luci_log.debug_verbose('getUserPerms1: user %s: %r %s' \
+					% (userName, e, str(e)))
 			continue
 
 		for c in clusters:
@@ -1245,18 +1282,16 @@
 				perms[userName]['cluster'][c[0]] = i.has_role('View', c[1])
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %r' \
-						% (userName, c[0], e))
+					luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %r %s' % (userName, c[0], e, str(e)))
 
 		for s in storage:
 			try:
 				perms[userName]['storage'][s[0]] = i.has_role('View', s[1])
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %r' \
-						% (userName, s[0], e))
+					luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %r %s' % (userName, s[0], e, str(e)))
 	return perms
 
 def getClusterConfNodes(conf_dom):
-	cluster_nodes = conf_dom.getElementsByTagName('clusternodes')
-	return (lambda x: str(x.getAttribute('name')), cluster_nodes)
+	cluster_nodes = conf_dom.getElementsByTagName('clusternode')
+	return map(lambda x: str(x.getAttribute('name')), cluster_nodes)
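                                                                                
The getClusterConfNodes() change just above fixes two bugs at once: the old
code looked up the <clusternodes> container element rather than the individual
<clusternode> entries, and it returned a (function, nodelist) tuple instead of
applying the function. A sketch of the fixed behavior against a hypothetical
cluster.conf fragment (hostnames invented):

	from xml.dom import minidom

	conf_xml = """<cluster name="example" config_version="1">
	  <clusternodes>
	    <clusternode name="node1.example.com" nodeid="1"/>
	    <clusternode name="node2.example.com" nodeid="2"/>
	  </clusternodes>
	</cluster>"""

	conf_dom = minidom.parseString(conf_xml)
	# Fixed version: find each <clusternode> and pull its name attribute.
	node_list = map(lambda x: str(x.getAttribute('name')),
		conf_dom.getElementsByTagName('clusternode'))
	print node_list
	# ['node1.example.com', 'node2.example.com']
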
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2007/05/15 21:42:21	1.25.2.4
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2007/05/18 05:23:55	1.25.2.5
@@ -439,8 +439,8 @@
 			luci_log.debug_verbose('RC:BS1: Returning (%d, %d) for batch_status("%s")' % (last, total, batch_xml.toxml()))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('RC:BS2: error %r: (last, total)' % \
-				(e, last, total))
+			luci_log.debug_verbose('RC:BS2: error %r %s: (%d, %d)' \
+				% (e, str(e), last, total))
 	return (last, total)
 
 # extract error_code from module's response
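
As the RC:BS1 message above shows, batch_status() hands back a (last, total)
pair even on the error path. A hedged sketch of one way a caller might read
the pair; the convention that a negative 'last' marks a failed step is an
assumption for illustration, not something established by this hunk:

	def describe_batch(last, total):
		# Assumed for illustration: 'last' is the last completed step,
		# negative on failure; 'total' is the number of steps.
		if last < 0:
			return 'failed at step %d of %d' % (-last, total)
		if last >= total:
			return 'complete (%d of %d)' % (last, total)
		return 'in progress (%d of %d)' % (last, total)

	print describe_batch(2, 5)   # in progress (2 of 5)
	print describe_batch(-3, 5)  # failed at step 3 of 5
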
--- conga/luci/site/luci/Extensions/system_adapters.py	2007/05/15 21:42:21	1.2.2.3
+++ conga/luci/site/luci/Extensions/system_adapters.py	2007/05/18 05:23:55	1.2.2.4
@@ -20,7 +20,7 @@
 			raise Exception, 'None'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GSSL0: %s: %r' % (hostname, e))
+			luci_log.debug_verbose('GSSL0: %s: %r %s' % (hostname, e, str(e)))
 		return []
 
 	service_list = list_services(rc)
@@ -84,7 +84,7 @@
 			raise Exception, 'unknown error'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VSU0: %s: %r' % (hostname, e))
+			luci_log.debug_verbose('VSU0: %s: %r %s' % (hostname, e, str(e)))
 		return (False, {'errors': [ 'Unable to connect to the ricci agent on %s: %s' % (hostname, str(e)) ]})
 
 	sys_svc_list = list()
@@ -97,7 +97,7 @@
 			sys_svc_list.append(svc_name)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('VSU1: %s: %r' % (hostname, e))
+			luci_log.debug_verbose('VSU1: %s: %r %s' % (hostname, e, str(e)))
 		return (False, {'errors': [ 'Unable to retrieve the list of services from %s' % hostname ]})
 
 	try:
@@ -148,7 +148,7 @@
 			ret = svc_manage(rc, hostname, servicename, op)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('VMS0: %r' % e)
+				luci_log.debug_verbose('VMS0: %r %s' % (e, str(e)))
 			result.setAttribute('message', str(e))
 			ret.appendChild(result)
 	else:
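
The VMS0 error path above reports failure by stamping str(e) onto an XML
result node. A self-contained sketch of that pattern; the element names
('serviceManage', 'result') are invented here, and only the
setAttribute/appendChild calls mirror the hunk:

	from xml.dom import minidom

	doc = minidom.Document()
	ret = doc.createElement('serviceManage')
	result = doc.createElement('result')

	try:
		raise RuntimeError('unable to restart service')
	except Exception, e:
		# Record the failure message as an attribute, as VMS0 does.
		result.setAttribute('message', str(e))
		ret.appendChild(result)

	print ret.toxml()
	# <serviceManage><result message="unable to restart service"/></serviceManage>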



^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-05-22 21:52 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-05-22 21:52 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-22 21:52:05

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py LuciDB.py 
	                           cluster_adapters.py 
	luci/site/luci/Extensions/ClusterModel: ModelBuilder.py 

Log message:
	More refactoring and clean-up.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.10&r2=1.1.2.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.8&r2=1.255.2.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5

--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/18 05:23:55	1.1.2.4
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/22 21:52:03	1.1.2.5
@@ -5,14 +5,691 @@
 # GNU General Public License as published by the
 # Free Software Foundation.
 
-import RicciQueries as rq
-from LuciDB import set_node_flag, getRicciAgent
 from LuciSyslog import get_logger
-from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE
+import RicciQueries as rq
+
+from ricci_communicator import RicciCommunicator, RicciError
+
+from LuciDB import set_node_flag, getRicciAgent, delCluster, \
+	getClusterNode, getStorageNode, noNodeStatusPresent, \
+	setNodeStatus, resolve_nodename
+
+from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
+	NODE_DELETE, CLUSTER_DELETE, CLUSTER_FOLDER_PATH, \
+	CLUSTERLIST, CLUSTER_NODE_NEED_AUTH, NODE_FENCE, \
+	NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
+	RESOURCE_ADD, RESOURCE_CONFIG, RESOURCE_REMOVE, \
+	SERVICE_DELETE, SERVICE_RESTART, SERVICE_START, SERVICE_STOP
 
 luci_log = get_logger()
 
-def propagateClusterConfAsync(self, model, rc=None):
+#
+# Cluster service tasks
+#
+
+def RestartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	batch_number, result = rq.restartService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while restarting cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_RESTART,
+			'Restarting service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def StartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	batch_number, result = rq.startService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'An error occurred while starting cluster service "%s"' % svcname ] })
+
+	try:
+		if nodename is not None:
+			status_msg = 'Starting cluster service "%s" on node "%s"' \
+				% (svcname, nodename)
+		else:
+			status_msg = 'Starting cluster service "%s"' % svcname
+
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START, status_msg)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+def StopCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	batch_number, result = rq.stopService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: stop %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while stopping cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_STOP,
+			'Stopping cluster service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def DeleteCluSvc(self, rc, fvars, model):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	try:
+		model.deleteService(svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc1: Unable to find a service named %s for cluster %s: %r %s' % (svcname, cluname, e, str(e)))
+		return (False, { 'errors': [ 'Error removing cluster service "%s"' % svcname ]})
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			SERVICE_DELETE, 'Removing service "%s"' % svcname)
+	if ret[0] is False:
+		return ret
+
+def MigrateCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	batch_number, result = rq.migrateService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: migrateService(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'Error migrating cluster service "%s" to node "%s"' % (svcname, nodename) ]})
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START,
+			'Migrating service "%s" to node "%s"' % (svcname, nodename))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+#
+# Cluster resource-related tasks
+#
+
+def DeleteResource(self, rc, fvars, model):
+	errstr = 'An error occurred while attempting to set the new cluster.conf'
+	resname = fvars['resourcename']
+
+	try:
+		model.deleteResource(resname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource4: cannot find resource %s: %r %s' % (resname, e, str(e)))
+		return '%s: the specified resource was not found' % errstr
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			RESOURCE_REMOVE, 'Removing resource "%s"' % resname)
+	if ret[0] is False:
+		return ret
+
+def AddResource(self, rc, fvars, model, res):
+	try:
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
+		return 'Unable to add the new resource'
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_ADD,
+			'Creating cluster resource "%s"' % res.getName())
+	if ret[0] is False:
+		return ret
+
+def EditResource(self, rc, fvars, model, res):
+	try:
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EditResource2: %r %s' % (e, str(e)))
+		return 'Unable to edit the resource'
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_CONFIG,
+				'Configuring resource "%s"' % res.getName())
+
+	if ret[0] is False:
+		return ret
+
+#
+# Cluster node membership-related tasks
+#
+
+def nodeJoin(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeJoinCluster(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_JOIN_CLUSTER,
+			'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def nodeLeave(self, rc, clustername, nodename_resolved):
+	path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
+
+	try:
+		nodefolder = self.restrictedTraverse(path)
+		if not nodefolder:
+			raise Exception, 'cannot find database object at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NLO: node_leave_cluster err: %r %s' % (e, str(e)))
+		return None
+
+	objname = '%s____flag' % nodename_resolved
+	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
+
+	if fnpresent is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL1: An error occurred while checking flags for %s' \
+				% nodename_resolved)
+		return None
+
+	if fnpresent is False:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL2: flags still present for %s -- bailing out' \
+				% nodename_resolved)
+		return None
+
+	batch_number, result = rq.nodeLeaveCluster(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_LEAVE_CLUSTER,
+			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
+	# We need to get a node name other than the node
+	# to be deleted, then delete the node from the cluster.conf
+	# and propagate it. We will need two ricci agents for this task,
+	# unless we are deleting the cluster itself.
+
+	if not delete_cluster:
+		# Make sure we can find a second node before we hose anything.
+		found_one = False
+
+		path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+
+		try:
+			clusterfolder = self.restrictedTraverse(path)
+			if not clusterfolder:
+				raise Exception, 'no cluster folder at %s' % path
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
+			return None
+
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes in DB'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
+			return None
+
+		for node in nodes:
+			if node[1].getId().find(nodename) != (-1):
+				continue
+			# here we make certain the node is up...
+			# XXX- we should also make certain this host is still
+			# in the cluster we believe it is.
+
+			try:
+				rc2 = RicciCommunicator(node[1].getId())
+				if not rc2:
+					raise Exception, 'ND1a: rc2 is None'
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('ND2: ricci %s error: %r %s' \
+						% (node[0], e, str(e)))
+				continue
+
+			if not rc2.authed():
+				try:
+					setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				try:
+					snode = getStorageNode(self, node[0])
+					setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ND3: %s is not authed' % node[0])
+				rc2 = None
+				continue
+			else:
+				found_one = True
+				break
+
+		if not found_one:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+			return None
+
+	# First, delete cluster.conf from node to be deleted.
+	# next, have node leave cluster.
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND5: batch_number and/or result is None')
+		return None
+
+	# Unless we're deleting the whole cluster, it is not worth
+	# flagging this node in DB, as we are going to delete it
+	# anyway. Now, we need to delete node from model and send out
+	# new cluster.conf
+
+	if delete_cluster:
+		try:
+			set_node_flag(self, clustername, rc.hostname(),
+				str(batch_number), CLUSTER_DELETE,
+				'Deleting cluster "%s": Deleting node "%s"' \
+					% (clustername, nodename_resolved))
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
+					% (e, str(e)))
+	else:
+		delete_target = None
+		nodelist = model.getNodes()
+		find_node = nodename.lower()
+		for n in nodelist:
+			try:
+				if n.getName().lower() == find_node:
+					delete_target = n
+					break
+			except:
+				continue
+
+		if delete_target is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' % (nodename, clustername))
+			return None
+
+		try:
+			model.deleteNode(delete_target)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r %s' \
+					% (delete_target.getName(), e, str(e)))
+
+		try:
+			model.setModified(True)
+			str_buf = model.exportModelAsString()
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
+					% (e, str(e)))
+			return None
+
+		# propagate the new cluster.conf via the second node
+		batch_number, result = rq.setClusterConf(rc2, str(str_buf))
+		if batch_number is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+			return None
+
+	# Now we need to delete the node from the DB
+	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_delObjects([nodename_resolved])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND9: error deleting %s@%s: %r %s' \
+				% (nodename_resolved, path, e, str(e)))
+
+	if delete_cluster:
+		return True
+
+	try:
+		set_node_flag(self, clustername, rc2.hostname(),
+			str(batch_number), NODE_DELETE,
+			'Deleting node "%s"' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+#
+# Cluster management-related tasks.
+#
+
+def clusterStart(self, model):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart: RC %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+			errors += 1
+			continue
+		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart1: nodeJoin %s' \
+					% nodename_resolved)
+			errors += 1
+
+	return errors
+
+def clusterStop(self, model, delete=False):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
+					% (delete is True, str(nodename_resolved), e, str(e)))
+			errors += 1
+			continue
+
+		if delete is True:
+			ret = nodeDelete(self, rc, model, clustername, nodename,
+					nodename_resolved, delete_cluster=True)
+			if ret is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
+				errors += 1
+		else:
+			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
+						% (nodename_resolved))
+				errors += 1
+	return errors
+
+def clusterRestart(self, model):
+	snum_err = clusterStop(self, model)
+	if snum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' \
+				% snum_err)
+	jnum_err = clusterStart(self, model)
+	if jnum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' \
+				% jnum_err)
+	return snum_err + jnum_err
+
+def clusterDelete(self, model):
+	# Try to stop all the cluster nodes before deleting any.
+	num_errors = clusterStop(self, model, delete=False)
+	if num_errors > 0:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluDelete: %d errors' % num_errors)
+		return None
+
+	# If the cluster is stopped, delete all of the nodes.
+	num_errors = clusterStop(self, model, delete=True)
+	try:
+		clustername = model.getClusterName()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+		return None
+
+	if num_errors < 1:
+		try:
+			delCluster(self, clustername)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('clusterDelete1: %s: %r %s' \
+					% (clustername, e, str(e)))
+
+		try:
+			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+			if len(clusterfolder.objectItems()) < 1:
+				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+				clusters.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('clusterDelete2: %s %r %s' \
+					% (clustername, e, str(e)))
+		return CLUSTERLIST
+	else:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clusterDelete3: %s: %d errors' \
+				% (clustername, num_errors))
+
+def forceNodeReboot(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeReboot(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_REBOOT,
+			'Node "%s" is being rebooted' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def forceNodeFence(self, clustername, nodename, nodename_resolved):
+	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			raise Exception, 'no cluster folder at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
+		return None
+
+	try:
+		nodes = clusterfolder.objectItems('Folder')
+		if not nodes or len(nodes) < 1:
+			raise Exception, 'no cluster nodes'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('FNF1: No cluster nodes for %s were found: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+
+	found_one = False
+	for node in nodes:
+		if node[1].getId().find(nodename) != (-1):
+			continue
+
+		try:
+			rc = RicciCommunicator(node[1].getId())
+			if not rc:
+				raise Exception, 'rc is None'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('FNF2: ricci error for host %s: %r %s' \
+					% (node[0], e, str(e)))
+			continue
+
+		if not rc.authed():
+			rc = None
+			try:
+				snode = getStorageNode(self, node[1].getId())
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+			except:
+				pass
+
+			try:
+				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+			except:
+				pass
+
+			continue
+		found_one = True
+		break
+
+	if not found_one:
+		return None
+
+	batch_number, result = rq.nodeFence(rc, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF3: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FENCE,
+			'Node "%s" is being fenced' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF4: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+#
+# Cluster-independent tasks.
+#
+
+def getLogsForNode(self, request):
+	try:
+		nodename = request['nodename']
+	except KeyError, e:
+		try:
+			nodename = request.form['nodename']
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL0: no node name')
+			return 'Unable to get node name to retrieve logging information'
+
+	clustername = None
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clustername']
+			if not clustername:
+				raise
+		except:
+			clustername = None
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL1: no cluster for %s' % nodename)
+	except:
+		pass
+
+	if clustername is None:
+		nodename_resolved = nodename
+	else:
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	try:
+		rc = RicciCommunicator(nodename_resolved)
+	except RicciError, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL2: ricci error %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+
+	if not rc.authed():
+		try:
+			snode = getStorageNode(self, nodename)
+			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL4: %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+
+		if clustername:
+			try:
+				cnode = getClusterNode(self, nodename, clustername)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GNL5: %s: %r %s' \
+						% (nodename_resolved, e, str(e)))
+		return 'Luci is not authenticated to node %s. Reauthenticate first.' \
+			% nodename
+
+	return rq.getNodeLogs(rc)
+
+def propagateClusterConfAsync(	self,
+								model,
+								rc=None,
+								action=CLUSTER_CONFIG,
+								pmsg=None):
 	errors = list()
 	messages = list()
 
@@ -39,6 +716,7 @@
 		return (False, { 'errors': errors, 'messages': messages })
 
 	try:
+		model.setModified(True)
 		conf_str = str(model.exportModelAsString()).strip()
 		if not conf_str:
 			raise Exception, 'The new cluster configuration is blank'
@@ -56,12 +734,15 @@
 			% clustername)
 		return (False, { 'errors': errors, 'messages': messages })
 
+	if pmsg is None:
+		pmsg = 'Updating the cluster configuration for "%s"' % clustername
+
 	try:
-		set_node_flag(self, clustername, rc.hostname(), batch_id,
-			CLUSTER_CONFIG, 'Updating the cluster configuration for "%s"' %
-				clustername)
+		set_node_flag(self, clustername, rc.hostname(),
+			batch_id, action, pmsg)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('PCC4: set_node_flag: %r %s' \
 				% (e, str(e)))
+
 	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
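
The added action and pmsg parameters let every task above share one
propagation path while tagging the pending batch with a task-specific flag.
A sketch of the calling pattern, with names taken from the diff (svcname,
model and rc are assumed to be valid in the caller):

ret = propagateClusterConfAsync(self, model, rc,
		SERVICE_DELETE, 'Removing service "%s"' % svcname)
if ret[0] is False:
	# (False, {'errors': [...], 'messages': [...]}) on failure
	return ret
# on success, ret[1]['batchid'] identifies the batch the node flag tracks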
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/22 02:45:54	1.1.2.10
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/22 21:52:05	1.1.2.11
@@ -634,28 +634,33 @@
 	ret[2] = need_auth_hash
 	return ret
 
-def getClusterSystems(self, clustername):
+def getCluster(self, clustername):
 	try:
-		cluster_nodes = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clustername))('Folder')
+		cluster_obj = self.restrictedTraverse('%s%s' \
+			% (CLUSTER_FOLDER_PATH, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSy0: %s: %r %s' \
+			luci_log.debug_verbose('GCobj0: %s: %r %s' \
 				% (clustername, e, str(e)))
 		return None
 
-	if isAdmin(self):
-		return cluster_nodes
+	if isAdmin(self) or cluster_permission_check(self, cluster_obj):
+		return cluster_obj
+	return None
 
+def getClusterSystems(self, clustername):
 	try:
-		cluster =  self.restrictedTraverse('%s%s/objectItems' \
-			% (CLUSTER_FOLDER_PATH, clustername))
+		cluster_nodes = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clustername))('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSy1: %s: %r %s' \
+			luci_log.debug_verbose('GCSy0: %s: %r %s' \
 				% (clustername, e, str(e)))
 		return None
 
-	if cluster_permission_check(self, cluster):
+	if isAdmin(self):
+		return cluster_nodes
+	cluster_obj = getCluster(self, clustername)
+	if cluster_permission_check(self, cluster_obj):
 		return cluster_nodes
 	return None
 
@@ -691,8 +696,9 @@
 		user = getSecurityManager().getUser()
 		if user.has_permission('View', cluster[1]):
 			return True
-	except:
-		pass
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CPC0: %s: %r %s' % (cluster, e, str(e)))
 	return False
 
 def allowed_systems(self, systems):
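
The split above keeps the permission logic in one place: getCluster returns
the cluster folder only when the caller is an admin or passes
cluster_permission_check, and getClusterSystems leans on it for non-admin
users. A short sketch of the intended call flow (the cluster name is
illustrative; self is the usual Zope context):

cluster_obj = getCluster(self, 'examplecluster')
if cluster_obj is None:
	# missing folder, traversal error, or no 'View' permission
	return None
# node folders for the cluster, or None under the same conditions
nodes = getClusterSystems(self, 'examplecluster')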
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/18 05:23:55	1.255.2.8
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/22 21:52:05	1.255.2.9
@@ -3158,218 +3158,6 @@
 		return None
 	return getRicciAgent(self, clustername)
 
-def serviceStart(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart0: no service name')
-		return None
-
-	nodename = None
-	try:
-		nodename = req['nodename']
-	except:
-		try:
-			nodename = req.form['nodename']
-		except:
-			pass
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except KeyError, e:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart2: no cluster name for svc %s' \
-				% svcname)
-		return None
-
-	batch_number, result = rq.startService(rc, svcname, nodename)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart3: SS(%s,%s,%s) call failed' \
-				% (svcname, cluname, nodename))
-		return None
-
-	try:
-		if nodename:
-			status_msg = 'Starting service "%s" on node "%s"' \
-				% (svcname, nodename)
-		else:
-			status_msg = 'Starting service "%s"' % svcname
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, status_msg)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
-def serviceMigrate(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate0: no service name')
-		return None
-
-	nodename = None
-	try:
-		nodename = req['nodename']
-	except:
-		try:
-			nodename = req.form['nodename']
-		except:
-			pass
-
-	if nodename is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate1: no target node name')
-		return None
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except KeyError, e:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate2: no cluster name for svc %s' \
-				% svcname)
-		return None
-
-	batch_number, result = rq.migrateService(rc, svcname, nodename)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate3: SS(%s,%s,%s) call failed' \
-				% (svcname, cluname, nodename))
-		return None
-
-	try:
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, "Migrating service \'%s\' to node \'%s\'" % (svcname, nodename))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
-def serviceRestart(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart0: no service name')
-		return None
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart1: no cluster for %s' % svcname)
-		return None
-
-	batch_number, result = rq.restartService(rc, svcname)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart2: %s failed' % svcname)
-		return None
-
-	try:
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_RESTART, "Restarting service \'%s\'" % svcname)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart3: error setting flags for service %s for cluster %s' % (svcname, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
-def serviceStop(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop0: no service name')
-		return None
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop1: no cluster name for %s' % svcname)
-		return None
-
-	batch_number, result = rq.stopService(rc, svcname)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop2: stop %s failed' % svcname)
-		return None
-
-	try:
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_STOP, "Stopping service \'%s\'" % svcname)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop3: error setting flags for service %s for cluster %s' % (svcname, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
 def clusterTaskProcess(self, model, request):
 	try:
 		task = request['task']
@@ -3379,460 +3167,49 @@
 		except:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('CTP1: no task specified')
-			task = None
-
-	if not model:
-		try:
-			cluname = request['clustername']
-			if not cluname:
-				raise Exception, 'cluname is blank'
-		except:
-			try:
-				cluname = request.form['clustername']
-				if not cluname:
-					raise Exception, 'cluname is blank'
-			except:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CTP0: no model/no cluster name')
-				return 'Unable to determine the cluster name'
-		try:
-			model = getModelForCluster(self, cluname)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
-			model = None
-
-	if not model:
-		return 'Unable to get the model object for %s' % cluname
-
-	redirect_page = NODES
-	if task == CLUSTER_STOP:
-		clusterStop(self, model)
-	elif task == CLUSTER_START:
-		clusterStart(self, model)
-	elif task == CLUSTER_RESTART:
-		clusterRestart(self, model)
-	elif task == CLUSTER_DELETE:
-		ret = clusterDelete(self, model)
-		if ret is not None:
-			redirect_page = ret
-	else:
-		return 'An unknown cluster task was requested'
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], redirect_page, model.getClusterName()))
-
-def nodeLeave(self, rc, clustername, nodename_resolved):
-	path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
-
-	try:
-		nodefolder = self.restrictedTraverse(path)
-		if not nodefolder:
-			raise Exception, 'cannot find database object@%s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NLO: node_leave_cluster err: %r %s' % (e, str(e)))
-		return None
-
-	objname = '%s____flag' % nodename_resolved
-	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
-
-	if fnpresent is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL1: An error occurred while checking flags for %s' \
-				% nodename_resolved)
-		return None
-
-	if fnpresent is False:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL2: flags are still present for %s -- bailing out' \
-				% nodename_resolved)
-		return None
-
-	batch_number, result = rq.nodeLeaveCluster(rc)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_LEAVE_CLUSTER, 'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def nodeJoin(self, rc, clustername, nodename_resolved):
-	batch_number, result = rq.nodeJoinCluster(rc)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_JOIN_CLUSTER, 'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def clusterStart(self, model):
-	if model is None:
-		return None
-
-	clustername = model.getClusterName()
-	nodes = model.getNodes()
-	if not nodes or len(nodes) < 1:
-		return None
-
-	errors = 0
-	for node in nodes:
-		nodename = node.getName().strip()
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-		try:
-			rc = RicciCommunicator(nodename_resolved)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStart: RC %s: %r %s' \
-					% (nodename_resolved, e, str(e)))
-			errors += 1
-			continue
-		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
-			errors += 1
-
-	return errors
-
-def clusterStop(self, model, delete=False):
-	if model is None:
-		return None
-
-	clustername = model.getClusterName()
-	nodes = model.getNodes()
-	if not nodes or len(nodes) < 1:
-		return None
-
-	errors = 0
-	for node in nodes:
-		nodename = node.getName().strip()
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-		try:
-			rc = RicciCommunicator(nodename_resolved)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
-					% (delete is True, str(nodename_resolved), e, str(e)))
-			errors += 1
-			continue
-
-		if delete is True:
-			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=True) is None:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
-				errors += 1
-		else:
-			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
-						% (nodename_resolved))
-				errors += 1
-	return errors
-
-def clusterRestart(self, model):
-	snum_err = clusterStop(self, model)
-	if snum_err:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' % snum_err)
-	jnum_err = clusterStart(self, model)
-	if jnum_err:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' % jnum_err)
-	return snum_err + jnum_err
-
-def clusterDelete(self, model):
-	# Try to stop all the cluster nodes before deleting any.
-	num_errors = clusterStop(self, model, delete=False)
-	if num_errors > 0:
-		return None
-
-	# If the cluster is stopped, delete all of the nodes.
-	num_errors = clusterStop(self, model, delete=True)
-	try:
-		clustername = model.getClusterName()
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
-		return None
-
-	if num_errors < 1:
-		try:
-			delCluster(self, clustername)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete1: %s: %r %s' \
-					% (clustername, e, str(e)))
-
-		try:
-			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
-			if len(clusterfolder.objectItems()) < 1:
-				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
-				clusters.manage_delObjects([clustername])
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete2: %s %r %s' \
-					% (clustername, e, str(e)))
-		return CLUSTERLIST
-	else:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
-				% (clustername, num_errors))
-
-def forceNodeReboot(self, rc, clustername, nodename_resolved):
-	batch_number, result = rq.nodeReboot(rc)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_REBOOT, 'Node "%s" is being rebooted' % nodename_resolved)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def forceNodeFence(self, clustername, nodename, nodename_resolved):
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		if not clusterfolder:
-			raise Exception, 'no cluster folder at %s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
-		return None
-
-	try:
-		nodes = clusterfolder.objectItems('Folder')
-		if not nodes or len(nodes) < 1:
-			raise Exception, 'no cluster nodes'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF1: No cluster nodes for %s were found: %r %s' \
-				% (clustername, e, str(e)))
-		return None
-
-	found_one = False
-	for node in nodes:
-		if node[1].getId().find(nodename) != (-1):
-			continue
-
-		try:
-			rc = RicciCommunicator(node[1].getId())
-			if not rc:
-				raise Exception, 'rc is None'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('FNF2: ricci error for host %s: %r %s' \
-					% (node[0], e, str(e)))
-			continue
-
-		if not rc.authed():
-			rc = None
-			try:
-				snode = getStorageNode(self, node[1].getId())
-				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
-			try:
-				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
-			continue
-		found_one = True
-		break
-
-	if not found_one:
-		return None
-
-	batch_number, result = rq.nodeFence(rc, nodename)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF3: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_FENCE, 'Node "%s" is being fenced' % nodename_resolved)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF4: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
-	# We need to get a node name other than the node
-	# to be deleted, then delete the node from the cluster.conf
-	# and propogate it. We will need two ricci agents for this task,
-	# unless we are deleting the cluster itself.
-
-	if not delete_cluster:
-		# Make sure we can find a second node before we hose anything.
-		found_one = False
-
-		path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
-		try:
-			clusterfolder = self.restrictedTraverse(path)
-			if not clusterfolder:
-				raise Exception, 'no cluster folder at %s' % path
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
-			return None
-
-		try:
-			nodes = clusterfolder.objectItems('Folder')
-			if not nodes or len(nodes) < 1:
-				raise Exception, 'no cluster nodes in DB'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
-
-		for node in nodes:
-			if node[1].getId().find(nodename) != (-1):
-				continue
-			# here we make certain the node is up...
-			# XXX- we should also make certain this host is still
-			# in the cluster we believe it is.
-
-			try:
-				rc2 = RicciCommunicator(node[1].getId())
-				if not rc2:
-					raise Exception, 'ND1a: rc2 is None'
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.info('ND2: ricci %s error: %r %s' \
-						% (node[0], e, str(e)))
-				continue
-
-			if not rc2.authed():
-				try:
-					setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
-				except:
-					pass
-
-				try:
-					snode = getStorageNode(self, node[0])
-					setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-				except:
-					pass
-
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('ND3: %s is not authed' % node[0])
-				rc2 = None
-				continue
-			else:
-				found_one = True
-				break
-
-		if not found_one:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
-			return None
-
-	# First, delete cluster.conf from node to be deleted.
-	# next, have node leave cluster.
-	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND5: batch_number and/or result is None')
-		return None
-
-	# Unless we're deleting the whole cluster, it is not worth
-	# flagging this node in DB, as we are going to delete it
-	# anyway. Now, we need to delete node from model and send out
-	# new cluster.conf
+			task = None
 
-	if delete_cluster:
+	if not model:
 		try:
-			set_node_flag(self, clustername, rc.hostname(), str(batch_number), CLUSTER_DELETE, 'Deleting cluster "%s": Deleting node "%s"' % (clustername, nodename_resolved))
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
-					% (e, str(e)))
-	else:
-		delete_target = None
-		nodelist = model.getNodes()
-		find_node = nodename.lower()
-		for n in nodelist:
+			cluname = request['clustername']
+			if not cluname:
+				raise Exception, 'cluname is blank'
+		except:
 			try:
-				if n.getName().lower() == find_node:
-					delete_target = n
-					break
+				cluname = request.form['clustername']
+				if not cluname:
+					raise Exception, 'cluname is blank'
 			except:
-				continue
-
-		if delete_target is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
-					% (nodename, clustername))
-			return None
-
-		try:
-			model.deleteNode(delete_target)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r %s' \
-					% (delete_target.getName(), e, str(e)))
-
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CTP0: no model/no cluster name')
+				return 'Unable to determine the cluster name'
 		try:
-			model.setModified(True)
-			str_buf = model.exportModelAsString()
-			if not str_buf:
-				raise Exception, 'model string is blank'
+			model = getModelForCluster(self, cluname)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
-					% (e, str(e)))
-				return None
-
-		# propagate the new cluster.conf via the second node
-		batch_number, result = rq.setClusterConf(rc2, str(str_buf))
-		if batch_number is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
-			return None
+				luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
+			model = None
 
-	# Now we need to delete the node from the DB
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_delObjects([nodename_resolved])
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND9: error deleting %s@%s: %r %s' \
-				% (nodename_resolved, path, e, str(e)))
+	if not model:
+		return 'Unable to get the model object for %s' % cluname
 
-	if delete_cluster:
-		return True
+	redirect_page = NODES
+	if task == CLUSTER_STOP:
+		clusterStop(self, model)
+	elif task == CLUSTER_START:
+		clusterStart(self, model)
+	elif task == CLUSTER_RESTART:
+		clusterRestart(self, model)
+	elif task == CLUSTER_DELETE:
+		ret = clusterDelete(self, model)
+		if ret is not None:
+			redirect_page = ret
+	else:
+		return 'An unknown cluster task was requested'
 
-	try:
-		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
+	response = request.RESPONSE
+	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+		% (request['URL'], redirect_page, model.getClusterName()))
 
 def nodeTaskProcess(self, model, request):
 	try:
@@ -3969,71 +3346,6 @@
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 
-def getLogsForNode(self, request):
-	try:
-		nodename = request['nodename']
-	except KeyError, e:
-		try:
-			nodename = request.form['nodename']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL0: no node name')
-			return 'Unable to get node name to retrieve logging information'
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except KeyError, e:
-		try:
-			clustername = request.form['clustername']
-			if not clustername:
-				raise
-		except:
-			clustername = None
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL1: no cluster for %s' % nodename)
-	except:
-		pass
-
-	if clustername is None:
-		nodename_resolved = nodename
-	else:
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-	try:
-		rc = RicciCommunicator(nodename_resolved)
-	except RicciError, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL2: ricci error %s: %r %s' \
-				% (nodename_resolved, e, str(e)))
-		return 'Ricci error while getting logs for %s' % nodename_resolved
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r %s' \
-				% (nodename_resolved, e, str(e)))
-		return 'Ricci error while getting logs for %s' % nodename_resolved
-
-	if not rc.authed():
-		try:
-			snode = getStorageNode(self, nodename)
-			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL4: %s: %r %s' \
-					% (nodename_resolved, e, str(e)))
-
-		if clustername:
-			try:
-				cnode = getClusterNode(self, nodename, clustername)
-				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GNL5: %s: %r %s' \
-						% (nodename_resolved, e, str(e)))
-		return 'Luci is not authenticated to node %s. Please reauthenticate first' % nodename
-
-	return rq.getNodeLogs(rc)
-
 def isClusterBusy(self, req):
 	items = None
 	busy_map = {}
@@ -4390,255 +3702,6 @@
 		clu_map['isVirtualized'] = False
 	return clu_map
 
-def delService(self, request):
-	errstr = 'An error occurred while attempting to set the new cluster.conf'
-
-	try:
-		model = request.SESSION.get('model')
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService0: no model: %r %s' \
-				% (e, str(e)))
-		return (False, {'errors': [ errstr ] })
-
-	name = None
-	try:
-		name = request['servicename']
-	except:
-		try:
-			name = request.form['servicename']
-		except:
-			pass
-
-	if name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService1: no service name')
-		return (False, {'errors': [ '%s: no service name was provided' % errstr ]})
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except:
-		try:
-			clustername = request.form['clustername']
-		except:
-			pass
-
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService2: no cluster name for %s' % name)
-		return (False, {'errors': [ '%s: no cluster name was provided' % errstr ]})
-
-	rc = getRicciAgent(self, clustername)
-	if not rc:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService3: unable to get ricci agent for cluster %s' % clustername)
-		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
-
-	try:
-		ragent = rc.hostname()
-		if not ragent:
-			raise Exception, 'unable to determine the hostname of the ricci agent'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService4: %s: %r %s' \
-				% (errstr, e, str(e)))
-		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
-
-	try:
-		model.deleteService(name)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s: %r %s' % (name, clustername, e, str(e)))
-		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
-
-	try:
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string is blank'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService6: exportModelAsString failed: %r %s' % (e, str(e)))
-		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
-
-	batch_number, result = rq.setClusterConf(rc, str(conf))
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService7: missing batch and/or result')
-		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
-
-	try:
-		set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_DELETE, 'Removing service "%s"' % name)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService8: failed to set flags: %r %s' \
-				% (e, str(e)))
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], SERVICES, clustername))
-
-def delResource(self, rc, request):
-	errstr = 'An error occurred while attempting to set the new cluster.conf'
-
-	try:
-		model = request.SESSION.get('model')
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource0: no model: %r %s' \
-				% (e, str(e)))
-		return errstr
-
-	name = None
-	try:
-		name = request['resourcename']
-	except:
-		try:
-			name = request.form['resourcename']
-		except:
-			pass
-
-	if name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource1: no resource name')
-		return '%s: no resource name was provided' % errstr
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except:
-		try:
-			clustername = request.form['clustername']
-		except:
-			pass
-
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource2: no cluster name for %s' % name)
-		return '%s: could not determine the cluster name' % errstr
-
-	try:
-		ragent = rc.hostname()
-		if not ragent:
-			raise Exception, 'unable to determine the hostname of the ricci agent'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource3: %s: %r %s' \
-				% (errstr, e, str(e)))
-		return '%s: could not determine the ricci agent hostname' % errstr
-
-	resPtr = model.getResourcesPtr()
-	resources = resPtr.getChildren()
-
-	found = 0
-	for res in resources:
-		if res.getName() == name:
-			resPtr.removeChild(res)
-			found = 1
-			break
-
-	if not found:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource4: cant find res %s' % name)
-		return '%s: the specified resource was not found' % errstr
-
-	try:
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string is blank'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource5: exportModelAsString failed: %s' % e)
-		return errstr
-
-	batch_number, result = rq.setClusterConf(rc, str(conf))
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource6: missing batch and/or result')
-		return errstr
-
-	try:
-		set_node_flag(self, clustername, ragent, str(batch_number), RESOURCE_REMOVE, 'Removing resource "%s"' % request['resourcename'])
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource7: failed to set flags: %r %s' \
-				% (e, str(e)))
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], RESOURCES, clustername))
-
-def addResource(self, request, model, res):
-	clustername = model.getClusterName()
-	if not clustername:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource0: no cluname from mb')
-		return 'Unable to determine cluster name'
-
-	rc = getRicciAgent(self, clustername)
-	if not rc:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource1: %s' % clustername)
-		return 'Unable to find a ricci agent for the %s cluster' % clustername
-
-	try:
-		model.getResourcesPtr().addChild(res)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
-		return 'Unable to add the new resource'
-
-	try:
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string for %s is blank' % clustername
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource3: exportModelAsString: %r %s' \
-				% (e, str(e)))
-		return 'An error occurred while adding this resource'
-
-	try:
-		ragent = rc.hostname()
-		if not ragent:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('addResource4: missing ricci hostname')
-			raise Exception, 'unknown ricci agent hostname'
-
-		batch_number, result = rq.setClusterConf(rc, str(conf))
-		if batch_number is None or result is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('addResource5: missing batch_number or result')
-			raise Exception, 'unable to save the new cluster configuration'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource6: %r %s' % (e, str(e)))
-		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
-
-	try:
-		try:
-			if request.form.has_key('edit'):
-				action_type = RESOURCE_CONFIG
-				action_str = 'Configuring resource "%s"' % res.getName()
-			else:
-				raise Exception, 'new'
-		except Exception, e:
-			action_type = RESOURCE_ADD
-			action_str = 'Creating new resource "%s"' % res.getName()
-
-		set_node_flag(self, clustername, ragent, str(batch_number), action_type, action_str)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource7: failed to set flags: %r %s' \
-				% (e, str(e)))
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
-		% (request['URL'], RESOURCES, clustername))
-
 def process_cluster_conf_editor(self, req):
 	clustername = req['clustername']
 	msg_list = list(('\n'))
@@ -4744,3 +3807,135 @@
 	#CALL
 	return {}
 
+def GetRequestVars(req, varlist):
+	ret = {}
+	for i in varlist:
+		pval = None
+		if req.has_key(i):
+			pval = req[i].strip()
+			if not pval:
+				pval = None
+		if pval is None:
+			if req.form and req.form.has_key(i):
+				pval = req.form[i].strip()
+				if not pval:
+					pval = None
+		ret[i] = pval
+	return ret
+
+def GetSvcReqVars(request):
+	return GetRequestVars(request,
+			['clustername', 'servicename', 'nodename', 'URL' ])
+def GetResReqVars(request):
+	return GetRequestVars(request,
+			['clustername', 'resourcename', 'nodename', 'URL' ])
+
+# These are called from external methods.
+
+def serviceRestart(self, rc, req):
+	from LuciClusterActions import RestartCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = RestartCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, cluname))
+	else:
+		return ret
+
+def serviceStop(self, rc, req):
+	from LuciClusterActions import StopCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = StopCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, cluname))
+	else:
+		return ret
+
+def serviceStart(self, rc, req):
+	from LuciClusterActions import StartCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = StartCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, cluname))
+	else:
+		return ret
+
+def serviceDelete(self, rc, req):
+	from LuciClusterActions import DeleteCluSvc
+
+	fvars = GetSvcReqVars(req)
+	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+
+	ret = DeleteCluSvc(self, rc, fvars, model)
+	if ret is None:
+		response = request.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (request['URL'], SERVICES, clustername))
+	else:
+		return ret
+
+def serviceMigrate(self, rc, req):
+	from LuciClusterActions import MigrateCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = MigrateCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, cluname))
+	else:
+		return ret
+
+def resourceDelete(self, rc, req):
+	from LuciClusterActions import DeleteResource
+
+	fvars = GetResReqVars(req)
+	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+
+	ret = DeleteResource(self, rc, fvars, model)
+	if ret is None:
+		response = request.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (request['URL'], RESOURCES, clustername))
+	else:
+		return ret
+
+def resourceAdd(self, rc, req):
+	from LuciClusterActions import AddResource
+
+	fvars = GetResReqVars(req)
+	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+
+	# XXX pass in resource
+	ret = AddResource(self, rc, fvars, model, None)
+	if ret is None:
+		response = request.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (request['URL'], RESOURCES, clustername))
+	else:
+		return ret
+
+def nodeJoin(self, rc, cluname, nodename_resolved):
+	return None
+def nodeLeave(self, rc, cluname, nodename_resolved):
+	return None
+def nodeDelete(self, rc, cluname, nodename_resolved):
+	return None
+
+
+def clusterStart(self, model):
+	return None
+def clusterStop(self, model):
+	return None
+def clusterRestart(self, model):
+	return None
+def clusterDelete(self, model):
+	return None
--- conga/luci/site/luci/Extensions/ClusterModel/Attic/ModelBuilder.py	2007/05/15 21:42:21	1.1.2.4
+++ conga/luci/site/luci/Extensions/ClusterModel/Attic/ModelBuilder.py	2007/05/22 21:52:05	1.1.2.5
@@ -771,6 +771,13 @@
       raise Exception, 'More than one resource is named "%s"' % name
     return res[0]
 
+  def deleteResource(self, name):
+    for i in self.resources_ptr.getChildren():
+      if i.getName() == name:
+        self.resources_ptr.removeChild(i)
+        return i
+    raise KeyError, name
+
   def getClusterNodesPtr(self):
     return self.clusternodes_ptr
 



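The GetRequestVars() helper added in the patch above replaces the repeated try/except lookups that were scattered through the adapters: each name is read from the Zope REQUEST object first and from its form dictionary second, with empty or whitespace-only values normalized to None. A minimal sketch of the same precedence, using a stand-in for the REQUEST object (DummyRequest and get_request_vars below are illustrative names, not part of Luci):

class DummyRequest(dict):
	# Stand-in for a Zope REQUEST: mapping access plus a
	# .form dictionary, which is all GetRequestVars() touches.
	def __init__(self, toplevel, form):
		dict.__init__(self, toplevel)
		self.form = form

def get_request_vars(req, varlist):
	# Same lookup order as GetRequestVars(): req[i] first,
	# then req.form[i]; blank strings collapse to None.
	ret = {}
	for i in varlist:
		pval = None
		if req.has_key(i) and req[i].strip():
			pval = req[i].strip()
		elif req.form and req.form.has_key(i) and req.form[i].strip():
			pval = req.form[i].strip()
		ret[i] = pval
	return ret

req = DummyRequest({ 'clustername': ' clu1 ' }, { 'nodename': 'node1' })
fvals = get_request_vars(req, [ 'clustername', 'nodename', 'URL' ])
# fvals['clustername'] == 'clu1', fvals['nodename'] == 'node1',
# and fvals['URL'] is None, since neither lookup finds it.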

* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-05-23 21:21 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-05-23 21:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-23 21:21:37

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py LuciDB.py 
	                           cluster_adapters.py 
	luci/site/luci/Extensions/ClusterModel: Ip.py 

Log message:
	more cleanup

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.5&r2=1.1.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.11&r2=1.1.2.12
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.9&r2=1.255.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/Ip.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.2&r2=1.1.2.3

--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/22 21:52:03	1.1.2.5
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/23 21:21:32	1.1.2.6
@@ -11,7 +11,7 @@
 from ricci_communicator import RicciCommunicator, RicciError
 
 from LuciDB import set_node_flag, getRicciAgent, delCluster, \
-	getClusterNode, getStorageNode, noNodeStatusPresent, \
+	getClusterNode, getStorageNode, NodeBusy, \
 	setNodeStatus, resolve_nodename
 
 from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
@@ -31,6 +31,12 @@
 	svcname = fvars['servicename']
 	cluname = fvars['clustername']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	batch_number, result = rq.restartService(rc, svcname)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -50,6 +56,12 @@
 	cluname = fvars['clustername']
 	nodename = fvars['nodename']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	batch_number, result = rq.startService(rc, svcname, nodename)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -74,6 +86,12 @@
 	svcname = fvars['servicename']
 	cluname = fvars['clustername']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	batch_number, result = rq.stopService(rc, svcname)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -92,6 +110,12 @@
 	svcname = fvars['servicename']
 	cluname = fvars['clustername']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	try:
 		model.deleteService(svcname)
 	except Exception, e:
@@ -109,6 +133,12 @@
 	cluname = fvars['clustername']
 	nodename = fvars['nodename']
 
+	if svcname is None or cluname is None or nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: svc: %s, clu: %s, nn: %s' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'A cluster service name, the cluster name, and the target node name must be given' ] })
+
 	batch_number, result = rq.migrateService(rc, svcname, nodename)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -128,49 +158,66 @@
 # Cluster resource-related tasks
 #
 
-def DeleteResource(self, rc, fvars, model):
-	errstr = 'An error occurred while attempting to set the new cluster.conf'
-	resname = fvars['resourcename']
+def DeleteResource(self, rc, model, resname):
+	errstr = 'An error occurred while attempting to delete this cluster resource'
+	if resname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource0: no res name')
+		return (False, { 'errors': [ '%s: no resource name was given' % errstr ]})
 
 	try:
 		model.deleteResource(resname)
 	except KeyError, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource4: cant find res %s' % resname)
-		return '%s: the specified resource was not found' % errstr
+			luci_log.debug_verbose('DeleteResource1: no res %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: no resource named "%s" was found' % (errstr, resname) ]})
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource4: cant find res %s' % resname)
-		return '%s: the specified resource was not found' % errstr
+			luci_log.debug_verbose('DeleteResource2: err: %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: unable to delete resource "%s"' % (errstr, resname) ]})
 
 	ret = propagateClusterConfAsync(self, model, rc,
-			RESOURCE_REMOVE, 'Removing resource "%s"' % resname)
+			RESOURCE_REMOVE, 'Removing cluster resource "%s"' % resname)
 	if ret[0] is False:
 		return ret
 
-def AddResource(self, rc, fvars, model, res):
+def AddResource(self, rc, model, res):
+	resname = None
 	try:
+		resname = res.getName()
 		model.getResourcesPtr().addChild(res)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
-		return 'Unable to add the new resource'
+			luci_log.debug_verbose('AddResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to add new resource "%s"' % resname
+		else:
+			errstr = 'Unable to add this new resource'
+		return (False, { 'errors': [ errstr ] })
 
 	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_ADD,
-			'Creating cluster resource "%s"' % res.getName())
+			'Creating new cluster resource "%s"' % resname)
 	if ret[0] is False:
 		return ret
 
-def EditResource(self, rc, fvars, model, res):
+def EditResource(self, rc, model, res):
+	resname = None
 	try:
+		resname = res.getName()
 		model.getResourcesPtr().addChild(res)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('EditResource2: %r %s' % (e, str(e)))
-		return 'Unable to the resource'
+			luci_log.debug_verbose('EditResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to edit cluster resource "%s"' % resname
+		else:
+			errstr = 'Unable to edit this cluster resource'
+		return (False, { 'errors': [ errstr ] })
 
 	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_CONFIG,
-				'Configuring resource "%s"' % res.getName())
+				'Configuring cluster resource "%s"' % resname)
 
 	if ret[0] is False:
 		return ret
@@ -194,39 +241,21 @@
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
 				% (e, str(e)))
+		return None
 	return True
 
 def nodeLeave(self, rc, clustername, nodename_resolved):
-	path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
-
-	try:
-		nodefolder = self.restrictedTraverse(path)
-		if not nodefolder:
-			raise Exception, 'cannot find database object at %s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NLO: node_leave_cluster err: %r %s' % (e, str(e)))
-		return None
-
-	objname = '%s____flag' % nodename_resolved
-	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
-
-	if fnpresent is None:
+	if NodeBusy(self, clustername, nodename_resolved, rc) is not False:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL1: An error checking flags for %s' \
-				% nodename_resolved)
-		return None
-
-	if fnpresent is False:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL2: flags still present for %s -- bailing out' \
+			luci_log.debug('NL0: flags still present for %s -- bailing out' \
 				% nodename_resolved)
 		return None
 
 	batch_number, result = rq.nodeLeaveCluster(rc)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
+			luci_log.debug_verbose('NL1: %s: batch_number or result is None' \
+				% nodename_resolved)
 		return None
 
 	try:
@@ -235,8 +264,8 @@
 			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
-				% (e, str(e)))
+			luci_log.debug_verbose('NL4: failed to set flags: %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
 	return True
 
 def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
@@ -551,7 +580,8 @@
 			raise Exception, 'no cluster folder at %s' % path
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
+			luci_log.debug('FNF0: The cluster obj %s not found: %r %s' \
+				% (clustername, e, str(e)))
 		return None
 
 	try:
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/22 21:52:05	1.1.2.11
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/23 21:21:32	1.1.2.12
@@ -102,12 +102,25 @@
 			luci_log.debug_verbose('SNF0: %r %s' % (e, errmsg))
 		raise Exception, errmsg
 
-def noNodeStatusPresent(self, nodefolder, flagname, hostname):
+def NodeBusy(self, clustername, nodename, rc=None):
+	try:
+		path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename)
+		nodefolder = self.restrictedTraverse(path)
+		if not nodefolder:
+			raise Exception, 'cannot find database object at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NodeBusy0: (%s,%s) %r %s' \
+				% (clustername, nodename, e, str(e)))
+		return None
+
+	flagname = '%s____flag' % nodename
+
 	try:
 		items = nodefolder.objectItems('ManagedSystem')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NNFP0: error getting flags for %s: %r %s' \
+			luci_log.debug('NodeBusy1: error getting flags for %s: %r %s' \
 				% (nodefolder[0], e, str(e)))
 		return None
 
@@ -115,43 +128,51 @@
 		if item[0] != flagname:
 			continue
 
-		# a flag already exists... try to delete it
-		try:
-			# hostname must be a FQDN
-			rc = RicciCommunicator(hostname)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.info('NNFP1: ricci error %s: %r %s' \
-					% (hostname, e, str(e)))
-			return None
+		# A flag already exists. Check to see whether we're done.
+		if rc is None:
+			try:
+				rc = RicciCommunicator(nodename)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NodeBusy2: ricci error %s: %r %s' \
+						% (nodename, e, str(e)))
+				# We can't know if we're done or not; err on the
+				# side of caution.
+				return True
 
 		if not rc.authed():
 			try:
-				snode = getStorageNode(self, hostname)
+				snode = getStorageNode(self, nodename)
 				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NodeBusy3: %s: %r %s' \
+						% (nodename, e, str(e)))
 			if LUCI_DEBUG_MODE is True:
-				luci_log.info('NNFP2: %s not authenticated' % item[0])
+				luci_log.info('NodeBusy4: %s not authenticated' % item[0])
+			# The comment above applies here, too.
+			return True
 
 		batch_ret = rq.checkBatch(rc, item[1].getProperty(BATCH_ID))
 		finished = batch_ret[0]
 		if finished is True or finished == -1:
 			if finished == -1:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('NNFP2: batch error: %s' \
+					luci_log.debug_verbose('NodeBusy5: batch error: %s' \
 						% batch_ret[1])
+
 			try:
 				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.info('NNFP3: manage_delObjects for %s failed: %r %s' % (item[0], e, str(e)))
-				return None
-			return True
-		else:
-			# Not finished, so don't remove the flag.
+					luci_log.info('NodeBusy6: %s: %r %s' % (item[0], e, str(e)))
 			return False
-	return True
+
+		# Not finished, so don't remove the flag.
+		return True
+
+	# If this code is ever reached, no flags exist for the node in question.
+	return False
 
 def resolve_nodename(self, clustername, nodename):
 	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
@@ -805,6 +826,17 @@
 			% cluname)
 	return None
 
+def getClusterDBNodes(self, clustername):
+	try:
+		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDBN0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+		return []
+	return nodelist
+
 def getClusterStatusDB(self, clustername):
 	results = list()
 	vals = {}
@@ -818,15 +850,7 @@
 	vals['minQuorum'] = '[unknown]'
 	results.append(vals)
 
-	try:
-		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSDB0: %s -> %s: %r %s' \
-				% (clustername, cluster_path, e, str(e)))
-		return results
-
+	nodelist = getClusterDBNodes(self, clustername)
 	if len(nodelist) < 1:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
@@ -837,18 +861,14 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('GCSDB0b: %s: %r %s' \
 					% (clustername, e, str(e)))
-	else:
-		for node in nodelist:
-			try:
-				node_val = {}
-				node_val['type'] = 'node'
-				node_val['name'] = node[0]
-				node_val['clustered'] = '[unknown]'
-				node_val['online'] = '[unknown]'
-				node_val['error'] = True
-				results.append(node_val)
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GCSDB1: %r %s' % (e, str(e)))
+		return results
 
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		node_val['name'] = node
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+		results.append(node_val)
 	return results
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/22 21:52:05	1.255.2.9
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/23 21:21:32	1.255.2.10
@@ -39,7 +39,7 @@
 	batch_status, extract_module_status
 
 from LuciDB import manageCluster, createClusterSystems, \
-	setNodeStatus, getStorageNode, noNodeStatusPresent, \
+	setNodeStatus, getStorageNode, \
 	getClusterNode, delCluster, buildClusterCreateFlags, \
 	resolve_nodename, set_node_flag, getRicciAgent
 
@@ -981,7 +981,7 @@
 
 	if len(errors) < 1:
 		try:
-			addResource(self, request, model, res)
+			resourceAdd(self, request, model, res)
 		except Exception, e:
 			errors.append('An error occurred while adding resource "%s"' \
 				% res.getName())
@@ -989,10 +989,10 @@
 		errors.append('An error occurred while adding this resource')
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('resource error: %r %s' % (e, str(e)))
-		return (False, {'errors': errors})
+		return (False, { 'errors': errors})
 
 
-	return (True, {'messages': ['Resource added successfully']})
+	return (True, { 'messages': [ 'Resource added successfully' ]})
 
 
 ## Cluster properties form validation routines
@@ -3138,20 +3138,8 @@
 	return '/luci/cluster/index_html?pagetype=7&clustername=%s' % clustername
 
 def getRicciAgentForCluster(self, req):
-	clustername = None
-	try:
-		clustername = req['clustername']
-		if not clustername:
-			clustername = None
-			raise
-	except:
-		try:
-			clustername = req.form['clustername']
-			if not clustername:
-				clustername = None
-		except:
-			pass
-
+	fvar = GetReqVars(req, [ 'clustername' ])
+	clustername = fvar['clustername']
 	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug('GRAFC0: no cluster name was found')
@@ -3159,39 +3147,30 @@
 	return getRicciAgent(self, clustername)
 
 def clusterTaskProcess(self, model, request):
-	try:
-		task = request['task']
-	except:
-		try:
-			task = request.form['task']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CTP1: no task specified')
-			task = None
+	fvar = GetReqVars(request, [ 'task', 'clustername' ])
+
+	task = fvar['task']
+	if task is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('CTP0: no cluster task')
+		return 'No cluster task was given'
 
 	if not model:
-		try:
-			cluname = request['clustername']
-			if not cluname:
-				raise Exception, 'cluname is blank'
-		except:
-			try:
-				cluname = request.form['clustername']
-				if not cluname:
-					raise Exception, 'cluname is blank'
-			except:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CTP0: no model/no cluster name')
-				return 'Unable to determine the cluster name'
+		cluname = fvar['clustername']
+		if cluname is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('CTP1: no cluster name')
+			return 'No cluster name was given'
+
 		try:
 			model = getModelForCluster(self, cluname)
+			if not model:
+				raise Exception, 'No cluster model'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
-			model = None
-
-	if not model:
-		return 'Unable to get the model object for %s' % cluname
+				luci_log.debug_verbose('CTP2: GMFC failed for %s: %r %s' \
+					% (cluname, e, str(e)))
+			return 'Unable to get the model object for %s' % cluname
 
 	redirect_page = NODES
 	if task == CLUSTER_STOP:
@@ -3212,37 +3191,29 @@
 		% (request['URL'], redirect_page, model.getClusterName()))
 
 def nodeTaskProcess(self, model, request):
-	try:
-		clustername = request['clustername']
-	except:
-		try:
-			clustername = request.form['clustername']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP0: missing cluster name')
-			return (False, {'errors': [ 'No cluster name was given' ]})
+	fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename' ])
 
-	try:
-		nodename = request['nodename']
-	except:
-		try:
-			nodename = request.form['nodename']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP1: missing node name')
-			return (False, {'errors': [ 'No node name was given' ]})
+	task = fvar['task']
+	clustername = fvar['clustername']
+	nodename = fvar['nodename']
 
-	try:
-		task = request['task']
-	except:
-		try:
-			task = request.form['task']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP2: missing task')
-			return (False, {'errors': [ 'No node task was given' ]})
+	if clustername is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NTP0: missing cluster name')
+		return (False, { 'errors': [ 'No cluster name was given' ]})
+
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NTP1: missing node name')
+		return (False, { 'errors': [ 'No node name was given' ]})
+
+	if task is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NTP2: missing task')
+		return (False, { 'errors': [ 'No node task was given' ]})
 
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
+	response = request.RESPONSE
 
 	if task != NODE_FENCE:
 		# Fencing is the only task for which we don't
@@ -3256,25 +3227,25 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP3: ricci error from %s: %r %s' \
 					% (nodename_resolved, e, str(e)))
-			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
-		except:
+			return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
+		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP4: ricci error from %s: %r %s' \
 					% (nodename_resolved, e, str(e)))
-			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
+			return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 
 		cluinfo = rc.cluster_info()
 		if not cluinfo[0] and not cluinfo[1]:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP5: node %s not in a cluster (expected %s)' \
 					% (nodename_resolved, clustername))
-			return (False, {'errors': [ 'Node "%s" reports it is not in a cluster' % nodename_resolved ]})
+			return (False, { 'errors': [ 'Node "%s" reports it is not in a cluster' % nodename_resolved ]})
 
 		cname = clustername.lower()
 		if cname != cluinfo[0].lower() and cname != cluinfo[1].lower():
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
-			return (False, {'errors': [ 'Node "%s" reports it in cluster "%s." We expect it to be a member of cluster "%s"' % (nodename_resolved, cluinfo[0], clustername) ]})
+			return (False, { 'errors': [ 'Node "%s" reports it is in cluster "%s". We expect it to be a member of cluster "%s"' % (nodename_resolved, cluinfo[0], clustername) ]})
 
 		if not rc.authed():
 			rc = None
@@ -3296,7 +3267,7 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP7: node %s is not authenticated' \
 					% nodename_resolved)
-			return (False, {'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
+			return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
 
 	if task == NODE_LEAVE_CLUSTER:
 		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
@@ -3304,7 +3275,6 @@
 				luci_log.debug_verbose('NTP8: nodeLeave failed')
 			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_JOIN_CLUSTER:
@@ -3313,7 +3283,6 @@
 				luci_log.debug_verbose('NTP9: nodeJoin failed')
 			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_REBOOT:
@@ -3323,7 +3292,6 @@
 			return (False, {'errors': [ 'Node "%s" failed to reboot' \
 				% nodename_resolved ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_FENCE:
@@ -3333,7 +3301,6 @@
 			return (False, {'errors': [ 'Fencing of node "%s" failed' \
 				% nodename_resolved]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_DELETE:
@@ -3342,7 +3309,6 @@
 				luci_log.debug_verbose('NTP12: nodeDelete failed')
 			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 
@@ -3760,54 +3726,32 @@
 	return { 'msg': ''.join(msg_list), 'cluster_conf': cc }
 
 def getResourceInfo(model, request):
+	fvars = GetReqVars(request,
+				[ 'resourcename', 'type', 'value', 'clustername', 'URL' ])
+
 	if not model:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GRI0: no model object in session')
 		return {}
 
-	name = None
-	try:
-		name = request['resourcename']
-	except:
-		try:
-			name = request.form['resourcename']
-		except:
-			pass
-
+	name = fvars['resourcename']
 	if name is None:
-		try:
-			res_type = request.form['type']
-			if res_type == 'ip':
-				name = request.form['value'].strip()
-		except:
-			pass
+		res_type = fvars['type']
+		if res_type == 'ip':
+			name = fvars['value']
 
 	if name is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GRI1: missing res name')
 		return {}
 
-	try:
-		cluname = request['clustername']
-	except:
-		try:
-			cluname = request.form['clustername']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GRI2: missing cluster name')
-			return {}
-
-	try:
-		baseurl = request['URL']
-	except:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI3: missing URL')
-		return {}
+	cluname = fvars['clustername']
+	baseurl = fvars['URL']
 
 	#CALL
 	return {}
 
-def GetRequestVars(req, varlist):
+def GetReqVars(req, varlist):
 	ret = {}
 	for i in varlist:
 		pval = None
@@ -3823,19 +3767,12 @@
 		ret[i] = pval
 	return ret
 
-def GetSvcReqVars(request):
-	return GetRequestVars(request,
-			['clustername', 'servicename', 'nodename', 'URL' ])
-def GetResReqVars(request):
-	return GetRequestVars(request,
-			['clustername', 'resourcename', 'nodename', 'URL' ])
-
 # These are called from external methods.
 
 def serviceRestart(self, rc, req):
 	from LuciClusterActions import RestartCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = RestartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3847,7 +3784,7 @@
 def serviceStop(self, rc, req):
 	from LuciClusterActions import StopCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = StopCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3859,7 +3796,7 @@
 def serviceStart(self, rc, req):
 	from LuciClusterActions import StartCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = StartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3871,21 +3808,27 @@
 def serviceDelete(self, rc, req):
 	from LuciClusterActions import DeleteCluSvc
 
-	fvars = GetSvcReqVars(req)
-	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
+	try:
+		model = LuciExtractCluModel(self, req,
+					cluster_name=fvars['clustername'])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceDelete0: %r %s' % (e, str(e)))
+		return (False, { 'errors': [ 'Unable to find the cluster model' ]})
 
 	ret = DeleteCluSvc(self, rc, fvars, model)
 	if ret is None:
-		response = request.RESPONSE
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], SERVICES, clustername))
+			% (req['URL'], SERVICES, fvars['clustername']))
 	else:
 		return ret
 
 def serviceMigrate(self, rc, req):
 	from LuciClusterActions import MigrateCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = MigrateCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3897,37 +3840,56 @@
 def resourceDelete(self, rc, req):
 	from LuciClusterActions import DeleteResource
 
-	fvars = GetResReqVars(req)
-	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+	fvars = GetReqVars(req, [ 'clustername', 'resourcename', 'nodename' ])
+	try:
+		model = LuciExtractCluModel(self, req,
+					cluster_name=fvars['clustername'])
+		if not model:
+			raise Exception, 'no model'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('resourceDelete0: %r %s' % (e, str(e)))
+		return (False, { 'errors': [ 'Unable to find the cluster model' ] })
 
-	ret = DeleteResource(self, rc, fvars, model)
+	ret = DeleteResource(self, rc, model, fvars['resourcename'])
 	if ret is None:
-		response = request.RESPONSE
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], RESOURCES, clustername))
+			% (req['URL'], RESOURCES, fvars['clustername']))
 	else:
 		return ret
 
-def resourceAdd(self, rc, req):
-	from LuciClusterActions import AddResource
+def resourceAdd(self, req, model, res):
+	from LuciClusterActions import AddResource, EditResource
 
-	fvars = GetResReqVars(req)
-	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+	try:
+		cluname = model.getClusterName()
+		rc = getRicciAgent(self, cluname)
+		if not rc:
+			raise Exception, 'no rc'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('resourceAdd0: no ricci agent: %r %s' \
+				% (e, str(e)))
+		return (False, { 'errors': [ 'Unable to find a ricci agent for cluster "%s"' % cluname ]})
+
+	if req.form.has_key('edit'):
+		ret = EditResource(self, rc, model, res)
+	else:
+		ret = AddResource(self, rc, model, res)
 
-	# XXX pass in resource
-	ret = AddResource(self, rc, fvars, model, None)
 	if ret is None:
-		response = request.RESPONSE
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], RESOURCES, clustername))
+			% (req['URL'], RESOURCES, cluname))
 	else:
 		return ret
 
-def nodeJoin(self, rc, cluname, nodename_resolved):
+def nodeJoin(self, rc, cluname, nodename):
 	return None
-def nodeLeave(self, rc, cluname, nodename_resolved):
+def nodeLeave(self, rc, cluname, nodename):
 	return None
-def nodeDelete(self, rc, cluname, nodename_resolved):
+def nodeDelete(self, rc, cluname, nodename):
 	return None
 
 
--- conga/luci/site/luci/Extensions/ClusterModel/Attic/Ip.py	2007/05/15 18:58:55	1.1.2.2
+++ conga/luci/site/luci/Extensions/ClusterModel/Attic/Ip.py	2007/05/23 21:21:35	1.1.2.3
@@ -22,4 +22,3 @@
       return self.attr_hash['address']
     except KeyError, e:
       return ''
- 



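The NodeBusy() rewrite above folds the old noNodeStatusPresent() database walk into one call with an inverted, three-valued result: True when a batch flag is still pending for the node, or when ricci cannot be reached or is unauthenticated and we must err on the side of caution; False when no flag exists or a finished flag was just reaped; and None only when the database lookup itself fails. Callers treat anything other than an explicit False as "do not proceed", as the new nodeLeave() does. A short sketch of that caller-side idiom (node_ready_for_work is a hypothetical helper name):

def node_ready_for_work(self, clustername, nodename, rc=None):
	# Only an explicit False from NodeBusy() means it is safe to
	# queue new batch work for this node. True means a batch is
	# (or may be) in flight; None means the DB lookup failed.
	busy = NodeBusy(self, clustername, nodename, rc)
	return busy is False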

* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-07-26  4:21 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-07-26  4:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-07-26 04:20:59

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py LuciDB.py 
	                           RicciQueries.py conga_constants.py 

Log message:
	Fix for bz249291

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.2&r2=1.1.4.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.3&r2=1.1.4.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.3&r2=1.1.4.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.19.2.12&r2=1.19.2.13

--- conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/06/19 15:54:10	1.1.4.2
+++ conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/07/26 04:20:59	1.1.4.3
@@ -245,7 +245,11 @@
 		return None
 	return True
 
-def NodeLeaveCluster(self, rc, clustername, nodename_resolved):
+def NodeLeaveCluster(	self,
+						rc,
+						clustername,
+						nodename_resolved,
+						stop_cluster=False):
 	reported_cluname = None
 	try:
 		cluster_info = rc.cluster_info()
@@ -266,7 +270,7 @@
 				% nodename_resolved)
 		return None
 
-	batch_number, result = rq.nodeLeaveCluster(rc)
+	batch_number, result = rq.nodeLeaveCluster(rc, cluster_shutdown=stop_cluster)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('NLC2: %s: batch_number or result is None' \
@@ -367,7 +371,7 @@
 	# First, delete cluster.conf from node to be deleted.
 	# next, have node leave cluster.
 
-	batch_number, result = rq.nodeLeaveCluster(rc, purge=False)
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('ND5: batch_number and/or result is None')
@@ -505,7 +509,8 @@
 					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
 				errors += 1
 		else:
-			ret = NodeLeaveCluster(self, rc, clustername, nodename_resolved)
+			ret = NodeLeaveCluster(self, rc, clustername,
+					nodename_resolved, stop_cluster=True)
 			if ret is None:
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
--- conga/luci/site/luci/Extensions/LuciDB.py	2007/07/16 21:35:17	1.1.4.3
+++ conga/luci/site/luci/Extensions/LuciDB.py	2007/07/26 04:20:59	1.1.4.4
@@ -825,6 +825,7 @@
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('GRA8: cluster name is none for %s' \
 						% ricci_hostname)
+				raise Exception, '%s not in a cluster' % ricci_hostname
 
 			cur_alias = str(clu_info[1]).strip().lower()
 			if not cur_alias:
--- conga/luci/site/luci/Extensions/RicciQueries.py	2007/07/16 21:35:17	1.1.4.3
+++ conga/luci/site/luci/Extensions/RicciQueries.py	2007/07/26 04:20:59	1.1.4.4
@@ -701,17 +701,19 @@
 
 	try:
 		ret = rc.process_batch(batch)
+		if not ret:
+			raise Exception, 'no XML response'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GCC1: process_batch error for %s: %r %s' \
 				% (system_info, e, str(e)))
 		return None
 
-	if not ret:
-		return None
-
 	var_nodes = ret.getElementsByTagName('var')
 	for i in var_nodes:
 		if i.getAttribute('name') == 'cluster.conf':
 			return i.childNodes[0]
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GCC2: no conf node found')
 	return None
--- conga/luci/site/luci/Extensions/conga_constants.py	2007/06/27 07:43:17	1.19.2.12
+++ conga/luci/site/luci/Extensions/conga_constants.py	2007/07/26 04:20:59	1.19.2.13
@@ -134,7 +134,7 @@
 	'A problem occurred when starting this node: %s'
 ]
 
-REDIRECT_SEC = 3
+REDIRECT_SEC = 5
 
 # cluster/node create error status messages
 PRE_INSTALL = 'The install state is not yet complete.'



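Three things change in the bz249291 fix above: NodeLeaveCluster() grows a stop_cluster keyword that ClusterStop() sets, so each node is told it is leaving as part of a full cluster shutdown; node deletion now passes purge=True to rq.nodeLeaveCluster() so cluster.conf is removed from the departing node; and getRicciAgent() raises instead of silently continuing when a host reports no cluster name. A hedged sketch of the call-site distinction (the keyword defaults are assumptions about rq.nodeLeaveCluster()):

def leave_cluster_sketch(rc, deleting_node=False, stopping_cluster=False):
	# Sketch only: pick the rq.nodeLeaveCluster() variant the
	# patch above uses for each case.
	if deleting_node:
		# Also purge cluster.conf from the node being deleted.
		return rq.nodeLeaveCluster(rc, purge=True)
	# stop_cluster=True in NodeLeaveCluster() maps to
	# cluster_shutdown=True here.
	return rq.nodeLeaveCluster(rc, cluster_shutdown=stopping_cluster)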

* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-08-09 21:35 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-08-09 21:35 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe at sourceware.org	2007-08-09 21:35:22

Added files:
	luci/site/luci/Extensions: LuciClusterActions.py 
	                           LuciClusterInfo.py LuciDB.py 
	                           LuciZope.py LuciZopeAsync.py 
	                           LuciZopeClusterPortal.py 
	                           LuciZopeExternal.py LuciZopePerm.py 
	                           ResourceHandler.py RicciQueries.py 

Log message:
	Merge in fixes from the RHEL5 branch, pass 4

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.10.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.6.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeAsync.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.1.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeClusterPortal.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeExternal.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.3.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopePerm.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ResourceHandler.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.7.2.1
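
Every resource- and service-editing helper in the file that follows funnels through the same propagateClusterConfAsync() sequence: serialize the model, push the new cluster.conf to a ricci agent as a batch, then record a flag object so the UI can poll for completion (the flag is what NodeBusy() later finds and reaps). A hedged sketch of that shape, using only names visible in these patches; the error strings and the success return value of the real function are assumptions:

def propagate_conf_sketch(self, model, rc, action_type, action_str):
	# Sketch of the common propagate-and-flag sequence, following
	# the (ok, { 'errors': [...] }) convention of the helpers below.
	try:
		model.setModified(True)
		conf = str(model.exportModelAsString())
		if not conf:
			raise Exception, 'model string is blank'
	except Exception, e:
		return (False, { 'errors': [ 'Unable to serialize cluster.conf: %s' % str(e) ]})

	batch_number, result = rq.setClusterConf(rc, conf)
	if batch_number is None or result is None:
		return (False, { 'errors': [ 'Unable to propagate cluster.conf' ]})

	set_node_flag(self, model.getClusterName(), rc.hostname(),
		str(batch_number), action_type, action_str)
	return (True, {})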

/cvs/cluster/conga/luci/site/luci/Extensions/LuciClusterActions.py,v  -->  standard output
revision 1.4.2.1
--- conga/luci/site/luci/Extensions/LuciClusterActions.py
+++ -	2007-08-09 21:35:22.549359000 +0000
@@ -0,0 +1,725 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from LuciSyslog import get_logger
+import RicciQueries as rq
+
+from ricci_communicator import RicciCommunicator
+
+from LuciDB import set_node_flag, getRicciAgent, \
+	getClusterNode, getStorageNode, NodeBusy, \
+	setNodeStatus, resolve_nodename, \
+	delCluster, delClusterSystem, \
+	CLUSTER_NODE_NEED_AUTH
+
+from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
+	NODE_DELETE, NODE_FORCE_DELETE, CLUSTER_DELETE, CLUSTERLIST, \
+	NODE_FENCE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
+	RESOURCE_ADD, RESOURCE_CONFIG, RESOURCE_REMOVE, \
+	SERVICE_DELETE, SERVICE_RESTART, SERVICE_START, SERVICE_STOP
+
+luci_log = get_logger()
+
+#
+# Cluster service tasks
+#
+
+def RestartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	batch_number, result = rq.restartService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while restarting cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_RESTART,
+			'Restarting service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def StartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	batch_number, result = rq.startService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'An error occurred while starting cluster service "%s"' % svcname ] })
+
+	try:
+		if nodename is not None:
+			status_msg = 'Starting cluster service "%s" on node "%s"' \
+				% (svcname, nodename)
+		else:
+			status_msg = 'Starting cluster service "%s"' % svcname
+
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START, status_msg)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+def StopCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	batch_number, result = rq.stopService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: stop %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while stopping cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_STOP,
+			'Stopping cluster service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def DeleteCluSvc(self, rc, fvars, model):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	try:
+		model.deleteService(svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc1: Unable to find a service named %s for cluster %s: %r %s' % (svcname, cluname, e, str(e)))
+		return (False, { 'errors': [ 'Error removing cluster service "%s"' % svcname ]})
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			SERVICE_DELETE, 'Removing service "%s"' % svcname)
+	if ret[0] is False:
+		return ret
+
+def MigrateCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	if svcname is None or cluname is None or nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: svc: %s, clu: %s, nn: %s' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'A cluster service name, the cluster name, and the target node name must be given' ] })
+
+	batch_number, result = rq.migrateService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'Error migrating cluster service "%s" to node "%s"' % (svcname, nodename) ]})
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START,
+			'Migrating service "%s" to node "%s"' % (svcname, nodename))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+#
+# Cluster resource-related tasks
+#
+
+def DeleteResource(self, rc, model, resname):
+	errstr = 'An error occurred while attempting to delete this cluster resource'
+	if resname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource0: no res name')
+		return (False, { 'errors': [ '%s: no resource name was given' % errstr ]})
+
+	try:
+		model.deleteResource(resname)
+	except KeyError, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource1: no res %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: no resource named "%s" was found' % (errstr, resname) ]})
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource2: err: %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: unable to delete resource "%s"' % (errstr, resname) ]})
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			RESOURCE_REMOVE, 'Removing cluster resource "%s"' % resname)
+	if ret[0] is False:
+		return ret
+
+def AddResource(self, rc, model, res):
+	resname = None
+	try:
+		resname = res.getName()
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('AddResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to add new resource "%s"' % resname
+		else:
+			errstr = 'Unable to add this new resource'
+		return (False, { 'errors': [ errstr ] })
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_ADD,
+			'Creating new cluster resource "%s"' % resname)
+	if ret[0] is False:
+		return ret
+
+def EditResource(self, rc, model, res):
+	resname = None
+	try:
+		resname = res.getName()
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EditResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to edit cluster resource "%s"' % resname
+		else:
+			errstr = 'Unable to edit this cluster resource'
+		return (False, { 'errors': [ errstr ] })
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_CONFIG,
+				'Configuring cluster resource "%s"' % resname)
+
+	if ret[0] is False:
+		return ret
+
+#
+# Cluster node membership-related tasks
+#
+
+def NodeJoinCluster(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeJoinCluster(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_JOIN_CLUSTER,
+			'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
+				% (e, str(e)))
+		return None
+	return True
+
+def NodeLeaveCluster(	self,
+						rc,
+						clustername,
+						nodename_resolved,
+						stop_cluster=False):
+	reported_cluname = None
+	try:
+		cluster_info = rc.cluster_info()
+		reported_cluname = cluster_info[0] or cluster_info[1]
+		if not reported_cluname:
+			raise Exception, 'not a cluster member'
+		if reported_cluname.lower() != clustername.lower():
+			raise Exception, 'cluster mismatch: expected %s, got %s' \
+								% (clustername, reported_cluname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NCL0: no cluster name: %r %s' % (e, str(e)))
+		return None
+
+	if NodeBusy(self, clustername, nodename_resolved, rc) is not False:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NLC1: %s is busy, can\'t leave cluster yet.' \
+				% nodename_resolved)
+		return None
+
+	batch_number, result = rq.nodeLeaveCluster(rc, cluster_shutdown=stop_cluster)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NLC2: %s: batch_number or result is None' \
+				% nodename_resolved)
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_LEAVE_CLUSTER,
+			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NLC3: failed to set flags: %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+	return True
+
+def NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved):
+	rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ], exclude_busy=True)
+
+	if rc is None:
+		rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ])
+
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC0: no agent to delete node %s "%s"' \
+				% (nodename_resolved, clustername))
+		return None
+
+	try:
+		model.deleteNodeByName(nodename.lower())
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC1: deleteNode %s: %r %s' \
+				% (nodename, e, str(e)))
+		return None
+
+	try:
+		model.setModified(True)
+		str_buf = str(model.exportModelAsString())
+		if not str_buf:
+			raise Exception, 'model string is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC2: exportModelAsString: %r %s' \
+				% (e, str(e)))
+		return None
+
+	batch_number, result = rq.setClusterConf(rc, str_buf)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC3: batch number is None')
+		return None
+
+	try:
+		ret = delClusterSystem(self, clustername, nodename_resolved)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC4: error deleting %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FORCE_DELETE,
+			'Forcing the deletion of node "%s"' % nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC5: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def NodeDeleteFromCluster(	self,
+							rc,
+							model,
+							clustername,
+							nodename,
+							nodename_resolved,
+							delete_cluster=False):
+
+	# We need to get a node name other than the node
+	# to be deleted, then delete the node from the cluster.conf
+	# and propagate it. We will need two ricci agents for this task,
+	# unless we are deleting the cluster itself.
+
+	if delete_cluster is False:
+		# Make sure we can find a second node before we hose anything.
+		rc2 = getRicciAgent(self, clustername,
+				exclude_names=[ nodename_resolved ], exclude_busy=True)
+
+		if rc2 is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND0: unable to find ricci agent to delete %s from %s' % (nodename_resolved, clustername))
+			return None
+
+	# First, delete cluster.conf from node to be deleted.
+	# next, have node leave cluster.
+
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND5: batch_number and/or result is None')
+		return None
+
+	# Unless we're deleting the whole cluster, it is not worth
+	# flagging this node in DB, as we are going to delete it
+	# anyway. Now, we need to delete node from model and send out
+	# new cluster.conf
+
+	if delete_cluster is True:
+		try:
+			set_node_flag(self, clustername, rc.hostname(),
+				str(batch_number), CLUSTER_DELETE,
+				'Deleting cluster "%s": Deleting node "%s"' \
+					% (clustername, nodename_resolved))
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
+					% (e, str(e)))
+	else:
+		try:
+			model.deleteNodeByName(nodename.lower())
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6: deleteNode %s: %r %s' \
+					% (nodename, e, str(e)))
+			return None
+
+		try:
+			model.setModified(True)
+			str_buf = str(model.exportModelAsString())
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
+					% (e, str(e)))
+			return None
+
+		# propagate the new cluster.conf via the second node
+		batch_number, result = rq.setClusterConf(rc2, str_buf)
+		if batch_number is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND8: batch number is None')
+			return None
+
+	try:
+		ret = delClusterSystem(self, clustername, nodename_resolved)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND9: error deleting %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+
+	if delete_cluster:
+		return True
+
+	try:
+		set_node_flag(self, clustername, rc2.hostname(),
+			str(batch_number), NODE_DELETE,
+			'Deleting node "%s"' % nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+#
+# Cluster management-related tasks.
+#
+
+def ClusterStart(self, model):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart: RC %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+			errors += 1
+			continue
+
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart1: nodeJoin %s' \
+					% nodename_resolved)
+			errors += 1
+
+	return errors
+
+def ClusterStop(self, model, delete=False):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
+					% (delete is True, str(nodename_resolved), e, str(e)))
+			errors += 1
+			continue
+
+		if delete is True:
+			ret = NodeDeleteFromCluster(self, rc, model, clustername,
+					nodename, nodename_resolved, delete_cluster=True)
+			if ret is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
+				errors += 1
+		else:
+			ret = NodeLeaveCluster(self, rc, clustername,
+					nodename_resolved, stop_cluster=True)
+			if ret is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
+						% (nodename_resolved))
+				errors += 1
+	return errors
+
+def ClusterRestart(self, model):
+	snum_err = ClusterStop(self, model)
+	if snum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart0: ClusterStop: %d errs' \
+				% snum_err)
+
+	jnum_err = ClusterStart(self, model)
+	if jnum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart1: ClusterStart: %d errs' \
+				% jnum_err)
+	return snum_err + jnum_err
+
+def ClusterDelete(self, model):
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'no cluster name found'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete0: %r %s' % (e, str(e)))
+		return None
+
+	# Try to stop all the cluster nodes before deleting any.
+	num_errors = ClusterStop(self, model, delete=False)
+	if num_errors > 0:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete1: %s: %d errors' \
+				% (clustername, num_errors))
+		return None
+
+	# If the cluster is stopped, delete all of the nodes.
+	num_errors = ClusterStop(self, model, delete=True)
+	if num_errors > 0:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete2: %s: %d errors' \
+				% (clustername, num_errors))
+		return None
+
+	try:
+		ret = delCluster(self, clustername)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete3: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+	return CLUSTERLIST
+
+def NodeReboot(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeReboot(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_REBOOT,
+			'Node "%s" is being rebooted' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
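+# When choosing a ricci agent to carry out the fence request, prefer an
+# agent that is not busy with another batch job; if none is idle, fall
+# back to any authenticated agent in the cluster.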
+def NodeFence(self, clustername, nodename, nodename_resolved):
+	rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ], exclude_busy=True)
+	if rc is None:
+		rc = getRicciAgent(self, clustername,
+				exclude_names=[ nodename_resolved, nodename ])
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF0: no ricci to fence %s for cluster %s' \
+				% (nodename_resolved, clustername))
+		return None
+
+	batch_number, result = rq.nodeFence(rc, nodename_resolved)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF1: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FENCE,
+			'Node "%s" is being fenced by node "%s"' \
+				% (nodename_resolved, rc.hostname()))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF2: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def propagateClusterConfAsync(	self,
+								model,
+								rc=None,
+								action=CLUSTER_CONFIG,
+								pmsg=None):
+	errors = list()
+	messages = list()
+
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'cluster name from model.getClusterName() is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC0: getClusterName: %r %s' \
+				% (e, str(e)))
+		errors.append('Unable to determine cluster name')
+		return (False, { 'errors': errors, 'messages': messages })
+
+	if rc is None:
+		rc = getRicciAgent(self, clustername, exclude_busy=True)
+	if rc is None:
+		rc = getRicciAgent(self, clustername)
+
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC1: no ricci agent for the %s cluster' \
+				% clustername)
+		errors.append('Unable to contact a ricci agent for cluster "%s"' \
+			% clustername)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	try:
+		model.setModified(True)
+		conf_str = str(model.exportModelAsString()).strip()
+		if not conf_str:
+			raise Exception, 'The new cluster configuration is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC2: %r %s' % (e, str(e)))
+		errors.append(repr(e))
+		return (False, { 'errors': errors, 'messages': messages })
+
+	batch_id, result = rq.setClusterConf(rc, conf_str)
+	if batch_id is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC3: batchid or result is None')
+		errors.append('Unable to propagate a new cluster configuration for %s' \
+			% clustername)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	if pmsg is None:
+		pmsg = 'Updating the cluster configuration for "%s"' % clustername
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			batch_id, action, pmsg)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC4: set_node_flag: %r %s' \
+				% (e, str(e)))
+
+	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
+
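+# A caller of propagateClusterConfAsync() would unpack its return value
+# along these lines (sketch only; error handling elided):
+#
+#   ret, res = propagateClusterConfAsync(self, model, rc)
+#   if ret is True:
+#       batch_id = res['batchid']
+#   else:
+#       errors = res['errors']
+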
+def GetSystemLogs(self, fvars):
+	nodename = fvars['nodename']
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSL0: no node name')
+		return 'No system name was given'
+
+	clustername = fvars['clustername']
+	if clustername is None:
+		nodename_resolved = nodename
+	else:
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	try:
+		rc = RicciCommunicator(nodename_resolved)
+		if not rc:
+			raise Exception, 'no rc'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSL1: unexpected exception for %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+
+	if not rc.authed():
+		try:
+			snode = getStorageNode(self, nodename_resolved)
+			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GSL2: %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+
+		if clustername is not None:
+			try:
+				cnode = getClusterNode(self, nodename_resolved, clustername)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GSL3: %s: %r %s' \
+						% (nodename_resolved, e, str(e)))
+		return 'Luci is not authenticated to %s. Reauthenticate first.' \
+			% nodename
+
+	return rq.getNodeLogs(rc)
/cvs/cluster/conga/luci/site/luci/Extensions/LuciClusterInfo.py,v  -->  standard output
revision 1.10.2.1
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py
+++ -	2007-08-09 21:35:22.667793000 +0000
@@ -0,0 +1,1620 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from Products.Archetypes.utils import make_uuid
+from ClusterModel.ModelBuilder import ModelBuilder
+import RicciQueries as rq
+from ricci_communicator import RicciCommunicator
+from FenceHandler import FENCE_OPTS
+from LuciSyslog import get_logger
+from LuciDB import resolve_nodename
+from LuciZope import GetReqVars
+
+from conga_constants import CLUSTER_CONFIG, CLUSTER_DELETE, \
+	CLUSTER_PROCESS, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
+	NODE_FORCE_DELETE, FDOM, FENCEDEV, NODE, NODE_ACTIVE, \
+	NODE_ACTIVE_STR, NODE_DELETE, NODE_FENCE, NODE_INACTIVE, \
+	NODE_INACTIVE_STR, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
+	NODE_PROCESS, NODE_REBOOT, NODE_UNKNOWN, NODE_UNKNOWN_STR, \
+	PROP_FENCE_TAB, PROP_GENERAL_TAB, PROP_GULM_TAB, PROP_MCAST_TAB, \
+	PROP_QDISK_TAB, RESOURCE, RESOURCE_CONFIG, RESOURCE_REMOVE, \
+	SERVICE, SERVICE_DELETE, SERVICE_MIGRATE, SERVICE_RESTART, \
+	SERVICE_START, SERVICE_STOP, VM_CONFIG, \
+	LUCI_DEBUG_MODE, LUCI_CLUSTER_BASE_URL
+
+luci_log = get_logger()
+
+def getnodes(self, model):
+	try:
+		return map(lambda x: str(x.getName()), model.getNodes())
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getnodes0: %r %s' % (e, str(e)))
+	return []
+
+def getResourceInfo(model, name, baseurl, res=None):
+	if res is None:
+		try:
+			res = model.getResourceByName(name)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRI0: %s: %r %s' % (name, e, str(e)))
+			return {}
+
+	res_info = {}
+	res_name = res.getName().strip()
+
+	res_info['name'] = res_name
+	res_info['attrs'] = res.attr_hash
+	res_info['type'] = res.resource_type
+	res_info['tag_name'] = res.TAG_NAME
+
+	cluname = model.getClusterName()
+	res_info['cfgurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
+		% (baseurl, cluname, res_name, RESOURCE_CONFIG)
+	res_info['url'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
+		% (baseurl, cluname, res_name, RESOURCE)
+	res_info['delurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
+		% (baseurl, cluname, res_name, RESOURCE_REMOVE)
+	return res_info
+
+def getResources(model, baseurl):
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getResources0: model is none')
+		return []
+	return map(lambda x: getResourceInfo(model, None, baseurl, x), model.getResources())
+
+def getClusterStatusModel(model):
+	results = list()
+	vals = {}
+
+	try:
+		clustername = model.getClusterName()
+		clusteralias = model.getClusterAlias()
+		vals['type'] = 'cluster'
+		vals['alias'] = clusteralias
+		vals['name'] = clustername
+		vals['error'] = True
+		vals['votes'] = '[unknown]'
+		vals['quorate'] = '[unknown]'
+		vals['minQuorum'] = '[unknown]'
+		results.append(vals)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSM0: %r %s' % (e, str(e)))
+		return None
+
+	try:
+		nodelist = model.getNodes()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSM1: %r %s' % (e, str(e)))
+		return None
+
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		try:
+			node_name = node.getName()
+			if not node_name:
+				raise Exception, 'cluster node name is unknown'
+		except:
+			node_name = '[unknown]'
+
+		node_val['name'] = node_name
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+
+		try:
+			votes = node.getVotes()
+			if not votes:
+				raise Exception, 'unknown number of votes'
+		except:
+			votes = '[unknown]'
+
+		node_val['votes'] = votes
+		results.append(node_val)
+	return results
+
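+# Cluster status is gathered with three fallbacks: live status from a
+# ricci agent, a status skeleton built from the cluster model, and,
+# failing both, the node state recorded in the Luci database.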
+def getClusterStatus(self, request, rc, cluname=None):
+	try:
+		doc = rq.getClusterStatusBatch(rc) or None
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCS0: error: %r: %r %s' \
+				% (cluname, e, str(e)))
+		doc = None
+
+	if doc is None:
+		model = LuciExtractCluModel(self, request, cluname)
+		if model is not None:
+			try:
+				cinfo = getClusterStatusModel(model)
+				if not cinfo or len(cinfo) < 1:
+					raise Exception, 'cinfo is None'
+				return cinfo
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GCS1: %r: %r %s' \
+						% (cluname, e, str(e)))
+
+	if doc is None:
+		try:
+			from LuciDB import getClusterStatusDB
+			if cluname:
+				clustername = cluname
+			else:
+				fvars = GetReqVars(request, [ 'clustername' ])
+
+				clustername = fvars['clustername']
+				if clustername is None:
+					raise Exception, 'unable to determine cluster name'
+
+			cinfo = getClusterStatusDB(self, clustername)
+			if not cinfo or len(cinfo) < 1:
+				raise Exception, 'cinfo is None'
+			return cinfo
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCS2: cluster %r info from DB: %r %s' \
+					% (cluname, e, str(e)))
+		return []
+
+	results = list()
+	vals = {}
+	vals['type'] = 'cluster'
+
+	try:
+		vals['alias'] = doc.firstChild.getAttribute('alias')
+	except AttributeError, e:
+		vals['alias'] = doc.firstChild.getAttribute('name')
+
+	vals['votes'] = doc.firstChild.getAttribute('votes')
+	vals['name'] = doc.firstChild.getAttribute('name')
+	vals['minQuorum'] = doc.firstChild.getAttribute('minQuorum')
+	vals['quorate'] = doc.firstChild.getAttribute('quorate')
+	results.append(vals)
+
+	for node in doc.firstChild.childNodes:
+		if node.nodeName == 'node':
+			vals = {}
+			vals['type'] = 'node'
+			vals['clustered'] = node.getAttribute('clustered')
+			vals['name'] = node.getAttribute('name')
+			vals['online'] = node.getAttribute('online')
+			vals['uptime'] = node.getAttribute('uptime')
+			vals['votes'] = node.getAttribute('votes')
+			results.append(vals)
+		elif node.nodeName == 'service':
+			vals = {}
+			vals['type'] = 'service'
+			vals['name'] = node.getAttribute('name')
+			vals['nodename'] = node.getAttribute('nodename')
+			vals['running'] = node.getAttribute('running')
+			try:
+				vals['is_vm'] = node.getAttribute('vm').lower() == 'true'
+			except:
+				vals['is_vm'] = False
+			vals['failed'] = node.getAttribute('failed')
+			vals['autostart'] = node.getAttribute('autostart')
+			results.append(vals)
+	return results
+
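+# The status list is flat: each entry is a dict whose 'type' key is
+# 'cluster', 'node' or 'service', schematically:
+#
+#   [ { 'type': 'cluster', 'name': ..., 'quorate': ... },
+#     { 'type': 'node', 'name': ..., 'clustered': ..., 'online': ... },
+#     { 'type': 'service', 'name': ..., 'running': ..., ... } ]
+#
+# The consumers below sort the entries into per-type lists before use.
+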
+def getServicesInfo(self, status, model, req):
+	svc_map = {}
+	maplist = list()
+	fvars = GetReqVars(req, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	cluname = fvars['clustername']
+	if cluname is None:
+		cluname = model.getClusterName()
+
+	nodes = model.getNodes()
+	for item in status:
+		if item['type'] == 'service':
+			itemmap = {}
+			itemmap['name'] = item['name']
+
+			cur_node = None
+			if item['running'] == 'true':
+				cur_node = item['nodename']
+				itemmap['running'] = 'true'
+				itemmap['nodename'] = cur_node
+				itemmap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_STOP)
+				itemmap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_RESTART)
+			else:
+				itemmap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_START)
+
+			itemmap['autostart'] = item['autostart']
+
+			try:
+				svc = model.retrieveServiceByName(item['name'])
+				itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE)
+				itemmap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_DELETE)
+			except:
+				try:
+					svc = model.retrieveVMsByName(item['name'])
+					itemmap['is_vm'] = True
+					itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], VM_CONFIG)
+					itemmap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], VM_CONFIG)
+				except:
+					continue
+
+			starturls = list()
+			for node in nodes:
+				cur_nodename = node.getName()
+				if node.getName() != cur_node:
+					starturl = {}
+					starturl['nodename'] = cur_nodename
+					starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, item['name'], SERVICE_START, cur_nodename)
+					starturls.append(starturl)
+
+					if itemmap.has_key('is_vm') and itemmap['is_vm'] is True:
+						migrate_url = { 'nodename': cur_nodename }
+						migrate_url['migrate'] = True
+						migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, item['name'], SERVICE_MIGRATE, cur_nodename)
+						starturls.append(migrate_url)
+
+			itemmap['links'] = starturls
+
+			dom = svc.getAttribute('domain')
+			if dom is not None:
+				itemmap['faildom'] = dom
+			else:
+				itemmap['faildom'] = 'No Failover Domain'
+			maplist.append(itemmap)
+
+	svc_map['services'] = maplist
+	return svc_map
+
+def recurse_resources(parent_uuid, child, resource_list, indent_ctr, parent=None):
+	# First, add the incoming child as a resource.
+	# Next, check whether it has children of its own,
+	# recurse on each of them, then return.
+
+	rc_map = {}
+	if parent is not None:
+		rc_map['parent'] = parent
+	rc_map['name'] = child.getName()
+
+	#Note: Final version needs all resource attrs
+	if child.isRefObject() is True:
+		rc_map['ref_object'] = True
+		rc_map['tag_name'] = child.getObj().TAG_NAME
+		rc_map['type'] = child.getObj().getResourceType()
+		rc_map['attrs'] = child.getObj().getAttributes()
+	else:
+		rc_map['tag_name'] = child.TAG_NAME
+		rc_map['type'] = child.getResourceType()
+		rc_map['attrs'] = child.getAttributes()
+
+	rc_map['indent_ctr'] = indent_ctr
+
+	rc_map['uuid'] = make_uuid('resource')
+	rc_map['parent_uuid'] = parent_uuid
+
+	resource_list.append(rc_map)
+	kids = child.getChildren()
+	child_depth = 0
+	new_indent_ctr = indent_ctr + 1
+	for kid in kids:
+		cdepth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
+		child_depth = max(cdepth, child_depth)
+
+	rc_map['max_depth'] = child_depth
+	return child_depth + 1
+
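+# Example: a service with one child resource that itself has one child
+# flattens to three rc_map entries with indent_ctr values 0, 1 and 2.
+# The return value is the number of levels in the subtree rooted at
+# 'child'; each entry's 'max_depth' records how many levels lie below it.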
+
+def get_fdom_names(model):
+	return map(lambda x: x.getName(), model.getFailoverDomains())
+
+
+def getServiceInfo(self, status, model, req):
+	root_uuid = 'toplevel'
+
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getServiceInfo0: no model: %r' % model)
+		return {}
+
+	#set up struct for service config page
+	hmap = {}
+
+	try:
+		cluname = fvars['clustername'] or model.getClusterName()
+		hmap['fdoms'] = get_fdom_names(model)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getServiceInfo1: %r %s' % (e, str(e)))
+		hmap['fdoms'] = list()
+
+	hmap['root_uuid'] = root_uuid
+	# uuids for the service page needed when new resources are created
+	hmap['uuid_list'] = map(lambda x: make_uuid('resource'), xrange(30))
+
+	servicename = fvars['servicename']
+	if servicename is None:
+		return hmap
+
+	if len(status) > 0:
+		nodenames = model.getNodeNames()
+
+	for item in status:
+		innermap = {}
+		if item['type'] == 'service':
+			if item['name'] == servicename:
+				hmap['name'] = servicename
+				hmap['autostart'] = item['autostart']
+
+				starturls = list()
+				if item['running'] == 'true':
+					hmap['running'] = 'true'
+					nodename = item['nodename']
+					innermap['current'] = 'Running on %s' % nodename
+
+					innermap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_STOP)
+					innermap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_RESTART)
+					innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
+
+					# In this case, determine where it can run...
+					for node in nodenames:
+						if node != nodename:
+							starturl = {}
+							starturl['nodename'] = node
+							starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_START, node)
+							starturls.append(starturl)
+
+							if item.has_key('is_vm') and item['is_vm'] is True:
+								migrate_url = { 'nodename': node }
+								migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_MIGRATE, node)
+								migrate_url['migrate'] = True
+								starturls.append(migrate_url)
+					innermap['links'] = starturls
+				else:
+					#Do not set ['running'] in this case...ZPT will detect it is missing
+					innermap['current'] = 'Stopped'
+					innermap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_START)
+					innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
+
+					starturls = list()
+					for node in nodenames:
+						starturl = {}
+
+						starturl['nodename'] = node
+						starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_START, node)
+						starturls.append(starturl)
+
+						if item.has_key('is_vm') and item['is_vm'] is True:
+							migrate_url = { 'nodename': node }
+							migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_MIGRATE, node)
+							migrate_url['migrate'] = True
+							starturls.append(migrate_url)
+					innermap['links'] = starturls
+				hmap['innermap'] = innermap
+
+	# Now build hashes for the resources under this service;
+	# first get the service by name from the model.
+
+	svc = model.getService(servicename)
+	try:
+		hmap['domain'] = svc.getAttribute('domain')
+	except:
+		hmap['domain'] = None
+
+	try:
+		hmap['recovery'] = svc.getAttribute('recovery')
+	except:
+		hmap['recovery'] = None
+
+	try:
+		if int(svc.getAttribute('exclusive')):
+			hmap['exclusive'] = 'true'
+		else:
+			hmap['exclusive'] = 'false'
+	except:
+		hmap['exclusive'] = 'false'
+
+	resource_list = list()
+	if svc is not None:
+		indent_ctr = 0
+		children = svc.getChildren()
+		for child in children:
+			recurse_resources(root_uuid, child, resource_list, indent_ctr)
+
+	hmap['resource_list'] = resource_list
+	return hmap
+
+def getFdomInfo(self, model, request):
+	fhash = {}
+	fhash['members'] = {}
+
+	try:
+		fdom = model.getFailoverDomainByName(request['fdomname'])
+		fhash['name'] = fdom.getName()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFdomInfo0: %r %s' % (e, str(e)))
+		return fhash
+
+	ordered_attr = fdom.getAttribute('ordered')
+	if ordered_attr is not None and (ordered_attr == 'true' or ordered_attr == '1'):
+		fhash['prioritized'] = '1'
+	else:
+		fhash['prioritized'] = '0'
+
+	restricted_attr = fdom.getAttribute('restricted')
+	if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
+		fhash['restricted'] = '1'
+	else:
+		fhash['restricted'] = '0'
+
+	nodes = fdom.getChildren()
+	for node in nodes:
+		try:
+			priority = node.getAttribute('priority')
+		except:
+			priority = '1'
+		fhash['members'][node.getName()] = { 'priority': priority }
+	return fhash
+
+def getFdomsInfo(self, model, request, clustatus):
+	fvars = GetReqVars(request, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername']
+	if clustername is None:
+		return {}
+
+	slist = list()
+	nlist = list()
+	for item in clustatus:
+		if item['type'] == 'node':
+			nlist.append(item)
+		elif item['type'] == 'service':
+			slist.append(item)
+
+	fdomlist = list()
+	for fdom in model.getFailoverDomains():
+		fdom_map = {}
+		fdom_name = fdom.getName()
+		fdom_map['name'] = fdom_name
+		fdom_map['cfgurl'] = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+			% (baseurl, FDOM, clustername, fdom.getName())
+
+		ordered_attr = fdom.getAttribute('ordered')
+		if ordered_attr is not None and (ordered_attr == 'true' or ordered_attr == '1'):
+			fdom_map['ordered'] = True
+		else:
+			fdom_map['ordered'] = False
+
+		restricted_attr = fdom.getAttribute('restricted')
+		if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
+			fdom_map['restricted'] = True
+		else:
+			fdom_map['restricted'] = False
+
+		nodelist = list()
+		for node in fdom.getChildren():
+			nodesmap = {}
+			ndname = node.getName()
+
+			for nitem in nlist:
+				if nitem['name'] == ndname:
+					nodesmap['nodename'] = ndname
+					nodesmap['nodecfgurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+						% (baseurl, clustername, ndname, NODE)
+					if nitem['clustered'] == 'true':
+						nodesmap['status'] = NODE_ACTIVE
+					elif nitem['online'] == 'false':
+						nodesmap['status'] = NODE_UNKNOWN
+					else:
+						nodesmap['status'] = NODE_INACTIVE
+					priority_attr = node.getAttribute('priority')
+					if priority_attr is not None:
+						nodesmap['priority'] = priority_attr
+					nodelist.append(nodesmap)
+		fdom_map['nodeslist'] = nodelist
+
+		svclist = list()
+		tmp = model.getServices()
+		tmp.extend(model.getVMs())
+		for svc in tmp:
+			svcname = svc.getName()
+			for sitem in slist:
+				if sitem['name'] == svcname:
+					domain = svc.getAttribute('domain')
+					if domain == fdom_name:
+						svcmap = {}
+						svcmap['name'] = svcname
+						svcmap['status'] = sitem['running']
+						if svc.getTagName() == 'vm':
+							svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' % (baseurl, VM_CONFIG, clustername, svcname)
+						else:
+							svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' % (baseurl, SERVICE, clustername, svcname)
+						svcmap['location'] = sitem['nodename']
+						svclist.append(svcmap)
+		fdom_map['svclist'] = svclist
+		fdomlist.append(fdom_map)
+
+	return fdomlist
+
+def getClusterInfo(self, model, req):
+	fvars = GetReqVars(req, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	cluname = fvars['clustername']
+	if cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCI0: unable to determine cluster name')
+		return {}
+
+	clumap = {}
+	if not model:
+		try:
+			model = getModelForCluster(self, cluname)
+			if not model:
+				raise Exception, 'model is none'
+			req.SESSION.set('model', model)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %r %s' % (cluname, e, str(e)))
+			return {}
+	else:
+		totem = model.getTotemPtr()
+		if totem:
+			clumap['totem'] = totem.getAttributes()
+
+	prop_baseurl = '%s?pagetype=%s&clustername=%s&' \
+		% (baseurl, CLUSTER_CONFIG, cluname)
+	basecluster_url = '%stab=%s' % (prop_baseurl, PROP_GENERAL_TAB)
+	# needed:
+	clumap['basecluster_url'] = basecluster_url
+	# name field
+	clumap['clustername'] = model.getClusterAlias()
+	# config version
+	cp = model.getClusterPtr()
+	clumap['config_version'] = cp.getConfigVersion()
+
+	# xvmd info
+	clumap['fence_xvmd'] = model.hasFenceXVM()
+
+	#-------------
+	#new cluster params - if rhel5
+	#-------------
+	gulm_ptr = model.getGULMPtr()
+	if not gulm_ptr:
+		#Fence Daemon Props
+		fencedaemon_url = '%stab=%s' % (prop_baseurl, PROP_FENCE_TAB)
+		clumap['fencedaemon_url'] = fencedaemon_url
+		fdp = model.getFenceDaemonPtr()
+		pjd = fdp.getAttribute('post_join_delay')
+		if pjd is None:
+			pjd = '6'
+		pfd = fdp.getAttribute('post_fail_delay')
+		if pfd is None:
+			pfd = '0'
+		#post join delay
+		clumap['pjd'] = pjd
+		#post fail delay
+		clumap['pfd'] = pfd
+
+		#-------------
+		#if multicast
+		multicast_url = '%stab=%s' % (prop_baseurl, PROP_MCAST_TAB)
+		clumap['multicast_url'] = multicast_url
+		#mcast addr
+		is_mcast = model.isMulticast()
+		if is_mcast:
+			clumap['mcast_addr'] = model.getMcastAddr()
+			clumap['is_mcast'] = 'True'
+		else:
+			clumap['is_mcast'] = 'False'
+			clumap['mcast_addr'] = '1.2.3.4'
+		clumap['gulm'] = False
+	else:
+		#-------------
+		# GULM params (RHEL4 only)
+		#-------------
+		lockserv_list = list()
+		clunodes = model.getNodes()
+		gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
+		lockserv_list = map(lambda x: (x, True), gulm_lockservs)
+		for node in clunodes:
+			n = node.getName()
+			if not n in gulm_lockservs:
+				lockserv_list.append((n, False))
+		clumap['gulm'] = True
+		clumap['gulm_url'] = '%stab=%s' % (prop_baseurl, PROP_GULM_TAB)
+		clumap['gulm_lockservers'] = lockserv_list
+
+	#-------------
+	# quorum disk params
+	#-------------
+	quorumd_url = '%stab=%s' % (prop_baseurl, PROP_QDISK_TAB)
+	clumap['quorumd_url'] = quorumd_url
+	is_quorumd = model.isQuorumd()
+	clumap['is_quorumd'] = is_quorumd
+	clumap['interval'] = ''
+	clumap['tko'] = ''
+	clumap['votes'] = ''
+	clumap['min_score'] = ''
+	clumap['device'] = ''
+	clumap['label'] = ''
+
+	# list struct for heuristics...
+	hlist = list()
+	if is_quorumd:
+		qdp = model.getQuorumdPtr()
+		interval = qdp.getAttribute('interval')
+		if interval is not None:
+			clumap['interval'] = interval
+
+		tko = qdp.getAttribute('tko')
+		if tko is not None:
+			clumap['tko'] = tko
+
+		votes = qdp.getAttribute('votes')
+		if votes is not None:
+			clumap['votes'] = votes
+
+		min_score = qdp.getAttribute('min_score')
+		if min_score is not None:
+			clumap['min_score'] = min_score
+
+		device = qdp.getAttribute('device')
+		if device is not None:
+			clumap['device'] = device
+
+		label = qdp.getAttribute('label')
+		if label is not None:
+			clumap['label'] = label
+
+		heuristic_kids = qdp.getChildren()
+		for kid in heuristic_kids:
+			hmap = {}
+			hprog = kid.getAttribute('program')
+			if hprog is None:
+				continue
+
+			hscore = kid.getAttribute('score')
+			hmap['hprog'] = hprog
+			if hscore is not None:
+				hmap['hscore'] = hscore
+			else:
+				hmap['hscore'] = ''
+
+			hinterval = kid.getAttribute('interval')
+			if hinterval is not None:
+				hmap['hinterval'] = hinterval
+			else:
+				hmap['hinterval'] = ''
+			hlist.append(hmap)
+	clumap['hlist'] = hlist
+
+	return clumap
+
+def getClustersInfo(self, status, req):
+	clu_map = {}
+
+	fvars = GetReqVars(req, [ 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	nodelist = list()
+	svclist = list()
+	clulist = list()
+	for item in status:
+		if item['type'] == 'node':
+			nodelist.append(item)
+		elif item['type'] == 'service':
+			svclist.append(item)
+		elif item['type'] == 'cluster':
+			clulist.append(item)
+		else:
+			continue
+
+	if len(clulist) < 1:
+		return {}
+	clu = clulist[0]
+	if clu.has_key('error'):
+		clu_map['error'] = clu['error']
+	if clu.has_key('errmsg'):
+		clu_map['errmsg'] = clu['errmsg']
+	clustername = clu['name']
+	if clu['alias']:
+		clu_map['clusteralias'] = clu['alias']
+	else:
+		clu_map['clusteralias'] = clustername
+	clu_map['clustername'] = clustername
+	if clu['quorate'] == 'true':
+		clu_map['status'] = 'Quorate'
+		clu_map['running'] = 'true'
+	else:
+		clu_map['status'] = 'Not Quorate'
+		clu_map['running'] = 'false'
+	clu_map['votes'] = clu['votes']
+	clu_map['minquorum'] = clu['minQuorum']
+
+	clu_map['clucfg'] = '%s?pagetype=%s&clustername=%s' \
+		% (baseurl, CLUSTER_CONFIG, clustername)
+
+	clu_map['restart_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_RESTART)
+	clu_map['stop_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_STOP)
+	clu_map['start_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_START)
+	clu_map['delete_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_DELETE)
+
+	svc_dict_list = list()
+	for svc in svclist:
+		svc_dict = {}
+		svcname = svc['name']
+		svc_dict['name'] = svcname
+		svc_dict['servicename'] = svcname
+		svc_dict['nodename'] = svc['nodename']
+		svc_dict['srunning'] = svc['running']
+
+		if svc.has_key('is_vm') and svc['is_vm'] is True:
+			target_page = VM_CONFIG
+		else:
+			target_page = SERVICE
+
+		svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+			% (baseurl, target_page, clustername, svcname)
+		svc_dict['svcurl'] = svcurl
+		svc_dict_list.append(svc_dict)
+
+	clu_map['currentservices'] = svc_dict_list
+	node_dict_list = list()
+
+	for item in nodelist:
+		nmap = {}
+		name = item['name']
+		nmap['nodename'] = name
+		cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
+			% (baseurl, NODE, clustername, name)
+		nmap['configurl'] = cfgurl
+		if item['clustered'] == 'true':
+			nmap['status'] = NODE_ACTIVE
+		elif item['online'] == 'false':
+			nmap['status'] = NODE_UNKNOWN
+		else:
+			nmap['status'] = NODE_INACTIVE
+		node_dict_list.append(nmap)
+
+	clu_map['currentnodes'] = node_dict_list
+	return clu_map
+
+def getNodeInfo(self, model, status, request):
+	infohash = {}
+	item = None
+	fvars = GetReqVars(request, [ 'URL', 'clustername', 'nodename' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	nodestate = NODE_ACTIVE
+	svclist = list()
+	for thing in status:
+		if thing['type'] == 'service':
+			svclist.append(thing)
+
+	clustername = fvars['clustername']
+	nodename = fvars['nodename']
+	if clustername is None or nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getNodeInfo0: %r %r' \
+				% (clustername, nodename))
+		return {}
+
+	# extract correct node line from cluster status
+	found = False
+	for item in status:
+		if (item['type'] == 'node') and (item['name'] == nodename):
+			if item['online'] == 'false':
+				nodestate = NODE_UNKNOWN
+			elif item['clustered'] == 'true':
+				nodestate = NODE_ACTIVE
+			else:
+				nodestate = NODE_INACTIVE
+			found = True
+			break
+
+	if found is False:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getNodeInfo1: Unable to find node "%s" in cluster status' % nodename)
+		nodestate = NODE_UNKNOWN
+
+	infohash['nodestate'] = nodestate
+	infohash['nodename'] = nodename
+
+	# set up drop down links
+	if nodestate == NODE_ACTIVE:
+		infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, nodename, clustername)
+		infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
+	elif nodestate == NODE_INACTIVE:
+		infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, nodename, clustername)
+		infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
+	else:
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['force_delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FORCE_DELETE, nodename, clustername)
+
+	# figure out current services running on this node
+	svc_dict_list = list()
+	for svc in svclist:
+		if svc['nodename'] == nodename:
+			svc_dict = {}
+			svcname = svc['name']
+			svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+				% (baseurl, SERVICE, clustername, svcname)
+			svc_dict['servicename'] = svcname
+			svc_dict['svcurl'] = svcurl
+			svc_dict_list.append(svc_dict)
+
+	infohash['currentservices'] = svc_dict_list
+
+	fdom_dict_list = list()
+	gulm_cluster = False
+	if model:
+		gulm_cluster = model.getGULMPtr() is not None
+		try:
+			infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
+		except:
+			infohash['gulm_lockserver'] = False
+
+		# next is faildoms
+		fdoms = model.getFailoverDomainsForNode(nodename)
+		for fdom in fdoms:
+			fdom_dict = {}
+			fdom_dict['name'] = fdom.getName()
+			fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+				% (baseurl, FDOM, clustername, fdom.getName())
+			fdom_dict['fdomurl'] = fdomurl
+			fdom_dict_list.append(fdom_dict)
+	else:
+		infohash['gulm_lockserver'] = False
+
+	infohash['fdoms'] = fdom_dict_list
+
+	infohash['d_states'] = None
+	nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
+		try:
+			rc = RicciCommunicator(nodename_resolved)
+			if not rc:
+				raise Exception, 'connection failed'
+		except Exception, e:
+			rc = None
+			infohash['ricci_error'] = True
+			luci_log.info('Error connecting to %s: %s' \
+				% (nodename_resolved, str(e)))
+
+		if rc is not None:
+			# call service module on node and find out which daemons are running
+			dlist = list()
+			dlist.append('ccsd')
+			if not gulm_cluster:
+				dlist.append('cman')
+				dlist.append('fenced')
+			else:
+				dlist.append('lock_gulmd')
+			dlist.append('rgmanager')
+			states = rq.getDaemonStates(rc, dlist)
+			infohash['d_states'] = states
+	else:
+		infohash['ricci_error'] = True
+
+	infohash['logurl'] = '/luci/logs/?nodename=%s&clustername=%s' \
+		% (nodename_resolved, clustername)
+	return infohash
+
+def getNodesInfo(self, model, status, req):
+	resultlist = list()
+	nodelist = list()
+	svclist = list()
+
+	fvars = GetReqVars(req, [ 'URL', 'clustername' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername']
+
+	#Sort into lists...
+	for item in status:
+		if item['type'] == 'node':
+			nodelist.append(item)
+		elif item['type'] == 'service':
+			svclist.append(item)
+		else:
+			continue
+
+	if clustername is None:
+		try:
+			clustername = model.getClusterName().strip()
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNI0: no cluster name: %r %s' \
+					% (e, str(e)))
+			return {}
+
+	for item in nodelist:
+		nl_map = {}
+		name = item['name']
+		nl_map['nodename'] = name
+
+		try:
+			nl_map['gulm_lockserver'] = model.isNodeLockserver(name)
+		except:
+			nl_map['gulm_lockserver'] = False
+
+		cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
+			% (baseurl, NODE, clustername, name)
+		nl_map['configurl'] = cfgurl
+		nl_map['fenceurl'] = '%s#fence' % cfgurl
+		if item['clustered'] == 'true':
+			nl_map['status'] = NODE_ACTIVE
+			nl_map['status_str'] = NODE_ACTIVE_STR
+		elif item['online'] == 'false':
+			nl_map['status'] = NODE_UNKNOWN
+			nl_map['status_str'] = NODE_UNKNOWN_STR
+		else:
+			nl_map['status'] = NODE_INACTIVE
+			nl_map['status_str'] = NODE_INACTIVE_STR
+
+		nodename_resolved = resolve_nodename(self, clustername, name)
+
+		nl_map['logurl'] = '/luci/logs?nodename=%s&clustername=%s' \
+			% (nodename_resolved, clustername)
+
+		# set up URLs for dropdown menu...
+		if nl_map['status'] == NODE_ACTIVE:
+			nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, name, clustername)
+			nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
+		elif nl_map['status'] == NODE_INACTIVE:
+			nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, name, clustername)
+			nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
+		else:
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['force_delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FORCE_DELETE, name, clustername)
+
+		# figure out current services running on this node
+		svc_dict_list = list()
+		for svc in svclist:
+			if svc['nodename'] == name:
+				svc_dict = {}
+				svcname = svc['name']
+				svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+					% (baseurl, SERVICE, clustername, svcname)
+				svc_dict['servicename'] = svcname
+				svc_dict['svcurl'] = svcurl
+				svc_dict_list.append(svc_dict)
+
+		nl_map['currentservices'] = svc_dict_list
+
+		# next is faildoms
+		if model:
+			fdoms = model.getFailoverDomainsForNode(name)
+		else:
+			nl_map['ricci_error'] = True
+			fdoms = list()
+
+		fdom_dict_list = list()
+		for fdom in fdoms:
+			fdom_dict = {}
+			fdom_dict['name'] = fdom.getName()
+			fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+				% (baseurl, FDOM, clustername, fdom.getName())
+			fdom_dict['fdomurl'] = fdomurl
+			fdom_dict_list.append(fdom_dict)
+
+		nl_map['fdoms'] = fdom_dict_list
+		resultlist.append(nl_map)
+
+	return resultlist
+
+def getFence(self, model, request):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFence0: model is None')
+		return {}
+
+	fvars = GetReqVars(request, [ 'URL', 'fencename' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	fencename = fvars['fencename']
+
+	fence_map = {}
+	nodes = model.getNodes()
+	fencedevs = model.getFenceDevices()
+	clustername = model.getClusterName()
+
+	for fencedev in fencedevs:
+		if fencedev.getName().strip() == fencename:
+			fence_map = fencedev.getAttributes()
+			try:
+				fence_map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
+			except:
+				fence_map['unknown'] = True
+				fence_map['pretty_name'] = fencedev.getAgentType()
+
+			nodes_used = list()
+			for node in nodes:
+				flevels = node.getFenceLevels()
+				for flevel in flevels:
+					# These are the method blocks...
+					kids = flevel.getChildren()
+					for kid in kids:
+						# These are actual devices in each level
+						if kid.getName().strip() == fencedev.getName().strip():
+							# See if this fd already has an entry for this node
+							found_duplicate = False
+							for item in nodes_used:
+								if item['nodename'] == node.getName().strip():
+									found_duplicate = True
+							if found_duplicate is True:
+								continue
+							node_hash = {}
+							cur_nodename = node.getName().strip()
+							node_hash['nodename'] = cur_nodename
+							node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+								% (baseurl, clustername, cur_nodename, NODE)
+							nodes_used.append(node_hash)
+
+			fence_map['nodesused'] = nodes_used
+			return fence_map
+
+	return fence_map
+
+def getFDForInstance(fds, name):
+	for fd in fds:
+		if fd.getName().strip() == name:
+			return fd
+	raise KeyError, name
+
+def getFenceInfo(self, model, request):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo0: model is None')
+		return {}
+
+	fvars = GetReqVars(request, [ 'clustername', 'URL', 'nodename' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername'] or model.getClusterName()
+
+	nodename = fvars['nodename']
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo1: no nodename')
+		return {}
+
+	fence_map = {}
+	level1 = list() #First level fence devices
+	level2 = list() #Second level fence devices
+	shared1 = list() #List of available sharable fence devs not used in level1
+	shared2 = list() #List of available sharable fence devs not used in level2
+	fence_map['level1'] = level1
+	fence_map['level2'] = level2
+	fence_map['shared1'] = shared1
+	fence_map['shared2'] = shared2
+
+	major_num = 1
+	minor_num = 100
+
+	# Here we need to get fences for a node - just the first two levels
+	# Each level has its own list of fence devs used in that level
+	# For each fence dev, a list of instance structs is appended
+	# In addition, for each level, a list of available but unused fence devs
+	# is returned.
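+	# The result is, schematically:
+	#   { 'level1': [ fencedev, ... ], 'shared1': [ unused shared devs ],
+	#     'level2': [ ... ], 'shared2': [ ... ] }
+	# where each shared fencedev entry carries an 'instance_list' of
+	# per-instance attribute dicts.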
+	try:
+		node = model.retrieveNodeByName(nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo3: unable to find node name "%s" in current node list: %r %s' % (str(nodename), e, str(e)))
+		return {}
+
+	fds = model.getFenceDevices()
+
+	levels = node.getFenceLevels()
+	len_levels = len(levels)
+
+	if len_levels == 0:
+		return fence_map
+
+	if len_levels >= 1:
+		first_level = levels[0]
+		kids = first_level.getChildren()
+
+		# This is a marker for allowing multi instances
+		# beneath a fencedev
+		last_kid_fd = None
+
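+		# e.g. two consecutive instances of the same shared device in
+		# this method block collapse into one level1 entry whose
+		# 'instance_list' holds both sets of instance attributes.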
+		for kid in kids:
+			instance_name = kid.getName().strip()
+			try:
+				fd = getFDForInstance(fds, instance_name)
+			except:
+				# Set to None in case this is the last time through the loop
+				fd = None
+				continue
+
+			if fd is not None:
+				if fd.isShared() is False:
+					# Not a shared dev... build struct and add
+					fencedev = {}
+
+					try:
+						fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+					except:
+						fencedev['unknown'] = True
+						fencedev['prettyname'] = fd.getAgentType()
+					fencedev['isShared'] = False
+					fencedev['id'] = str(major_num)
+					major_num = major_num + 1
+					devattrs = fd.getAttributes()
+					kees = devattrs.keys()
+					for kee in kees:
+						fencedev[kee] = devattrs[kee]
+					kidattrs = kid.getAttributes()
+					kees = kidattrs.keys()
+					for kee in kees:
+						if kee == 'name':
+							# Don't duplicate name attr
+							continue
+						fencedev[kee] = kidattrs[kee]
+
+					# This fencedev struct is complete, and needs
+					# to be placed on the level1 Q. Because it is
+					# non-shared, we should set last_kid_fd to none.
+					last_kid_fd = None
+					level1.append(fencedev)
+				else:
+					# This dev is shared
+					if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):
+						# just append a new instance struct to last_kid_fd
+						instance_struct = {}
+						instance_struct['id'] = str(minor_num)
+						minor_num = minor_num + 1
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+
+						for kee in kees:
+							if kee != 'name':
+								instance_struct[kee] = kidattrs[kee]
+
+						# Now just add this struct to last_kid_fd
+						# and reset last_kid_fd
+						ilist = last_kid_fd['instance_list']
+						ilist.append(instance_struct)
+						continue
+					else:
+						# Shared, but not used above...so we need
+						# a new fencedev struct
+						fencedev = {}
+						try:
+							fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+						except:
+							fencedev['unknown'] = True
+							fencedev['prettyname'] = fd.getAgentType()
+						fencedev['isShared'] = True
+						fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+							% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+						fencedev['id'] = str(major_num)
+						major_num = major_num + 1
+						inlist = list()
+						fencedev['instance_list'] = inlist
+						devattrs = fd.getAttributes()
+						kees = devattrs.keys()
+						for kee in kees:
+							fencedev[kee] = devattrs[kee]
+						instance_struct = {}
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee != 'name':
+								instance_struct[kee] = kidattrs[kee]
+
+						inlist.append(instance_struct)
+						level1.append(fencedev)
+						last_kid_fd = fencedev
+						continue
+		fence_map['level1'] = level1
+
+		# level1 list is complete now, but it is still
+		# necessary to build shared1
+		for fd in fds:
+			isUnique = True
+			if fd.isShared() is False:
+				continue
+			for fdev in level1:
+				if fd.getName().strip() == fdev['name']:
+					isUnique = False
+					break
+			if isUnique is True:
+				shared_struct = {}
+				shared_struct['name'] = fd.getName().strip()
+				agentname = fd.getAgentType()
+				shared_struct['agent'] = agentname
+				try:
+					shared_struct['prettyname'] = FENCE_OPTS[agentname]
+				except:
+					shared_struct['unknown'] = True
+					shared_struct['prettyname'] = agentname
+				shared1.append(shared_struct)
+		fence_map['shared1'] = shared1
+
+	# YUK: This next section violates the DRY rule :-(
+	if len_levels >= 2:
+		second_level = levels[1]
+		kids = second_level.getChildren()
+		# This is a marker for allowing multi instances
+		# beneath a fencedev
+		last_kid_fd = None
+		for kid in kids:
+			instance_name = kid.getName().strip()
+			try:
+				fd = getFDForInstance(fds, instance_name)
+			except:
+				# Set to None in case this is the last time through the loop
+				fd = None
+				continue
+			if fd is not None:
+				if fd.isShared() is False:	#Not a shared dev...build struct and add
+					fencedev = {}
+					try:
+						fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+					except:
+						fencedev['unknown'] = True
+						fencedev['prettyname'] = fd.getAgentType()
+					fencedev['isShared'] = False
+					fencedev['id'] = str(major_num)
+					major_num = major_num + 1
+					devattrs = fd.getAttributes()
+					kees = devattrs.keys()
+					for kee in kees:
+						fencedev[kee] = devattrs[kee]
+					kidattrs = kid.getAttributes()
+					kees = kidattrs.keys()
+					for kee in kees:
+						if kee == 'name':
+							continue #Don't duplicate name attr
+						fencedev[kee] = kidattrs[kee]
+					#This fencedev struct is complete, and needs to be placed on the
+					#level2 Q. Because it is non-shared, we should set last_kid_fd
+					#to none.
+					last_kid_fd = None
+					level2.append(fencedev)
+				else:	#This dev is shared
+					if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):	#just append a new instance struct to last_kid_fd
+						instance_struct = {}
+						instance_struct['id'] = str(minor_num)
+						minor_num = minor_num + 1
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						#Now just add this struct to last_kid_fd and reset last_kid_fd
+						ilist = last_kid_fd['instance_list']
+						ilist.append(instance_struct)
+						#last_kid_fd = fd
+						continue
+					else: #Shared, but not used above...so we need a new fencedev struct
+						fencedev = {}
+						try:
+							fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+						except:
+							fencedev['unknown'] = True
+							fencedev['prettyname'] = fd.getAgentType()
+						fencedev['isShared'] = True
+						fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+							% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+						fencedev['id'] = str(major_num)
+						major_num = major_num + 1
+						inlist = list()
+						fencedev['instance_list'] = inlist
+						devattrs = fd.getAttributes()
+						kees = devattrs.keys()
+						for kee in kees:
+							fencedev[kee] = devattrs[kee]
+						instance_struct = {}
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						inlist.append(instance_struct)
+						level2.append(fencedev)
+						last_kid_fd = fencedev
+						continue
+		fence_map['level2'] = level2
+
+		#level2 list is complete but like above, we need to build shared2
+		for fd in fds:
+			isUnique = True
+			if fd.isShared() is False:
+				continue
+			for fdev in level2:
+				if fd.getName().strip() == fdev['name']:
+					isUnique = False
+					break
+			if isUnique is True:
+				shared_struct = {}
+				shared_struct['name'] = fd.getName().strip()
+				agentname = fd.getAgentType()
+				shared_struct['agent'] = agentname
+				try:
+					shared_struct['prettyname'] = FENCE_OPTS[agentname]
+				except:
+					shared_struct['unknown'] = True
+					shared_struct['prettyname'] = agentname
+				shared2.append(shared_struct)
+		fence_map['shared2'] = shared2
+
+	return fence_map
+
+def getFencesInfo(self, model, request):
+	fences_map = {}
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFencesInfo0: model is None')
+		fences_map['fencedevs'] = list()
+		return fences_map
+
+	fvars = GetReqVars(request, [ 'clustername', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername'] or model.getClusterName()
+
+	fencedevs = list() #This is for the fencedev list page
+
+	#Get list of fence devices
+	fds = model.getFenceDevices()
+	for fd in fds:
+		#This section determines which nodes use the dev
+		#create fencedev hashmap
+		nodes_used = list()
+
+		if fd.isShared() is True:
+			fencedev = {}
+			attr_hash = fd.getAttributes()
+			kees = attr_hash.keys()
+
+			for kee in kees:
+				fencedev[kee] = attr_hash[kee] #copy attrs over
+			try:
+				fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
+			except:
+				fencedev['unknown'] = True
+				fencedev['pretty_name'] = fd.getAgentType()
+
+			fencedev['agent'] = fd.getAgentType()
+			#Add config url for this fencedev
+			fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+				% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+
+			nodes = model.getNodes()
+			for node in nodes:
+				flevels = node.getFenceLevels()
+				for flevel in flevels: #These are the method blocks...
+					kids = flevel.getChildren()
+					for kid in kids: #These are actual devices in each level
+						if kid.getName().strip() == fd.getName().strip():
+							#See if this fd already has an entry for this node
+							found_duplicate = False
+							for item in nodes_used:
+								if item['nodename'] == node.getName().strip():
+									found_duplicate = True
+							if found_duplicate is True:
+								continue
+							node_hash = {}
+							cur_nodename = node.getName().strip()
+							node_hash['nodename'] = cur_nodename
+							node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+								% (baseurl, clustername, cur_nodename, NODE)
+							nodes_used.append(node_hash)
+
+			fencedev['nodesused'] = nodes_used
+			fencedevs.append(fencedev)
+
+	fences_map['fencedevs'] = fencedevs
+	return fences_map
+
+def getVMInfo(self, model, request):
+	vm_map = {}
+	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	clustername = fvars['clustername']
+	if clustername is None:
+		clustername = model.getClusterName()
+
+	svcname = fvars['servicename']
+	if svcname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getVMInfo0: no service name')
+		return vm_map
+
+	vm_map['formurl'] = '%s?clustername=%s&pagetype=29&servicename=%s' \
+		% (baseurl, clustername, svcname)
+
+	try:
+		vm = model.retrieveVMsByName(svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('getVMInfo1: %s: %r %s' % (svcname, e, str(e)))
+		return vm_map
+
+	attrs = vm.getAttributes()
+	keys = attrs.keys()
+	for key in keys:
+		vm_map[key] = attrs[key]
+
+	return vm_map
+
+def getResourcesInfo(self, model, request):
+	fvars = GetReqVars(request, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	if fvars['clustername'] is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getResourcesInfo missing cluster name')
+		return []
+
+	return getResources(model, baseurl)
+
+def getClusterName(self, model):
+	return model.getClusterName()
+
+def getClusterAlias(self, model):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCA0: no model')
+		return ''
+	alias = model.getClusterAlias()
+	if not alias:
+		return model.getClusterName()
+	return alias
+
+def getModelBuilder(self, rc, isVirtualized):
+	try:
+		cluster_conf_node = rq.getClusterConf(rc)
+		if not cluster_conf_node:
+			raise Exception, 'getClusterConf returned None'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r %s' % (e, str(e)))
+		return None
+
+	try:
+		model = ModelBuilder(0, None, None, cluster_conf_node)
+		if not model:
+			raise Exception, 'ModelBuilder() returned None'
+		model.setIsVirtualized(isVirtualized)
+	except Exception, e:
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%s": %r %s' % (cluster_conf_node.toxml(), e, str(e)))
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GMB1: ModelBuilder failed')
+		return None
+
+	return model
+
+def getModelForCluster(self, clustername):
+	from LuciDB import getRicciAgent
+
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMFC0: no ricci agent for %s' % clustername)
+		return None
+
+	try:
+		model = getModelBuilder(None, rc, rc.dom0())
+		if not model:
+			raise Exception, 'model is none'
+		return model
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r %s' % (clustername, e, str(e)))
+	return None
+
+def LuciExtractCluModel(self, request, cluster_name=None):
+	model = None
+	try:
+		model = request.SESSION.get('model')
+		model.getClusterName()
+		return model
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM0: %r: %r %s' \
+				% (cluster_name, e, str(e)))
+		model = None
+
+	if not cluster_name:
+		fvar = GetReqVars(request, [ 'clustername' ])
+		cluster_name = fvar['clustername']
+
+	if cluster_name is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM1: no cluster name')
+		return None
+
+	try:
+		model = getModelForCluster(self, cluster_name)
+		model.getClusterName()
+		return model
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM2: no model for %s: %r %s' \
+				% (cluster_name, e, str(e)))
+	return None
+
+def getClusterOS(self, rc):
+	from HelperFunctions import resolveOSType
+
+	clu_map = {}
+
+	try:
+		os_str = resolveOSType(rc.os())
+		clu_map['os'] = os_str
+		clu_map['isVirtualized'] = rc.dom0()
+	except:
+		# default to rhel5 if something crazy happened.
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+		except:
+			# this can throw an exception if the original exception
+			# is caused by rc being None or stale.
+			pass
+		clu_map['os'] = 'rhel5'
+		clu_map['isVirtualized'] = False
+	return clu_map
+
+def getClusterConfNodes(conf_dom):
+	try:
+		cluster_nodes = conf_dom.getElementsByTagName('clusternode')
+		return map(lambda x: str(x.getAttribute('name')), cluster_nodes)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCCN0: %r %s' % (e, str(e)))
+	return None
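
getClusterConfNodes() above takes a parsed cluster.conf DOM and returns the
list of <clusternode> names. A minimal standalone sketch of the same
extraction, using an illustrative (not real) cluster.conf fragment:

from xml.dom import minidom

# Illustrative cluster.conf fragment; not taken from any real cluster.
conf_xml = """<cluster name="example" config_version="1">
  <clusternodes>
    <clusternode name="node1.example.com" nodeid="1"/>
    <clusternode name="node2.example.com" nodeid="2"/>
  </clusternodes>
</cluster>"""

conf_dom = minidom.parseString(conf_xml)
# Same extraction getClusterConfNodes() performs: every <clusternode> name.
node_names = map(lambda x: str(x.getAttribute('name')),
	conf_dom.getElementsByTagName('clusternode'))
print node_names  # ['node1.example.com', 'node2.example.com']
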
/cvs/cluster/conga/luci/site/luci/Extensions/LuciDB.py,v  -->  standard output
revision 1.6.2.1
--- conga/luci/site/luci/Extensions/LuciDB.py
+++ -	2007-08-09 21:35:22.772198000 +0000
@@ -0,0 +1,969 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from AccessControl import getSecurityManager
+from ricci_communicator import RicciCommunicator
+from LuciZopePerm import isAdmin
+from LuciSyslog import get_logger
+
+from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, \
+	FLAG_DESC, CLUSTER_ADD, LAST_STATUS, \
+	STORAGE_FOLDER_PATH, LUCI_DEBUG_MODE
+
+# Cluster node exception attribute flags
+CLUSTER_NODE_NEED_AUTH	= 0x01
+CLUSTER_NODE_NOT_MEMBER	= 0x02
+CLUSTER_NODE_ADDED		= 0x04
+
+luci_log = get_logger()
+
+def getClusterNode(self, nodename, clustername):
+	try:
+		path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename))
+		cluster_node = self.restrictedTraverse(path)
+		if not cluster_node:
+			return None
+		return cluster_node
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getClusterNode0: %s %s: %r %s' \
+				% (nodename, clustername, e, str(e)))
+	return None
+
+def getStorageNode(self, nodename):
+	try:
+		path = str('%s%s' % (STORAGE_FOLDER_PATH, nodename))
+		storage_node = self.restrictedTraverse(path)
+		if not storage_node:
+			raise Exception, 'no storage node found'
+		return storage_node
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getStorageNode0: %s: %r %s' \
+				% (nodename, e, str(e)))
+	return None
+
+def testNodeStatus(node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags is None:
+			return False
+		return flags & flag_mask != 0
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('testNodeStatus0: %r %s' % (e, str(e)))
+	return False
+
+def setNodeStatus(node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags is None:
+			flags = 0
+		node.manage_changeProperties({ 'flags': flags | flag_mask })
+	except:
+		try:
+			node.manage_addProperty('flags', flag_mask, 'int')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('setNodeStatus0: %r %s' \
+					% (e, str(e)))
+
+def clearNodeStatus(node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags is None:
+			return
+		if flags & flag_mask != 0:
+			node.manage_changeProperties({ 'flags': flags & ~flag_mask })
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clearNodeStatus0: %r %s' \
+				% (e, str(e)))
+
+def set_node_flag(self, cluname, agent, batchid, task, desc):
+	path = str('%s%s' % (CLUSTER_FOLDER_PATH, cluname))
+	batch_id = str(batchid)
+	objname = '%s____flag' % agent
+
+	objpath = ''
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		objpath = str('%s/%s' % (path, objname))
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, 'string')
+		flag.manage_addProperty(TASKTYPE, task, 'string')
+		flag.manage_addProperty(FLAG_DESC, desc, 'string')
+	except Exception, e:
+		errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %s' \
+					% (batch_id, task, desc, objpath, str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('SNF0: %r %s' % (e, errmsg))
+		raise Exception, errmsg
+
+def NodeBusy(self, clustername, nodename, rc=None):
+	try:
+		path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename))
+		nodefolder = self.restrictedTraverse(path)
+		if not nodefolder:
+			raise Exception, 'cannot find database object at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NodeBusy0: (%s,%s) %r %s' \
+				% (clustername, nodename, e, str(e)))
+		return None
+
+	flagname = '%s____flag' % nodename
+
+	try:
+		items = nodefolder.objectItems('ManagedSystem')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NodeBusy1: error getting flags for %s: %r %s' \
+				% (nodename, e, str(e)))
+		return None
+
+	for item in items:
+		if item[0] != flagname:
+			continue
+
+		# A flag already exists. Check to see whether we're done.
+		if rc is None:
+			try:
+				rc = RicciCommunicator(nodename)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NodeBusy2: ricci error %s: %r %s' \
+						% (nodename, e, str(e)))
+				# We can't know if we're done or not; err on the
+				# side of caution.
+				return True
+
+		if not rc.authed():
+			try:
+				snode = getStorageNode(self, nodename)
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NodeBusy3: %s: %r %s' \
+						% (nodename, e, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.info('%s not authenticated' % item[0])
+			# The comment above applies here, too.
+			return True
+
+		batch_id = item[1].getProperty(BATCH_ID)
+		batch_ret = rc.batch_status(batch_id)
+		finished = batch_ret[0]
+		if finished is True or finished == -1:
+			if finished == -1:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NodeBusy5: batch error: %s' \
+						% batch_ret[1])
+
+			try:
+				nodefolder.manage_delObjects([item[0]])
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NodeBusy6: %s: %r %s' % (item[0], e, str(e)))
+			return False
+
+		# Not finished, so don't remove the flag.
+		return True
+
+	# If this code is ever reached, no flags exist for the node in question.
+	return False
+
+def resolve_nodename(self, clustername, nodename):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterfolder = self.restrictedTraverse(path)
+		objs = clusterfolder.objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RNN0: error for %s/%s: %r %s' \
+				% (nodename, clustername, e, str(e)))
+		return nodename
+
+	for obj in objs:
+		try:
+			if obj[0].find(nodename) != (-1):
+				return obj[0]
+		except:
+			continue
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('RNN1: failed for %s/%s: nothing found' \
+			% (nodename, clustername))
+	return nodename
+
+def resolveClusterChanges(self, clustername, model):
+	try:
+		mb_nodes = model.getNodes()
+		if not mb_nodes or not len(mb_nodes):
+			raise Exception, 'node list is empty'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to find cluster nodes for %s' % clustername
+
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster_node = self.restrictedTraverse(path)
+		if not cluster_node:
+			raise Exception, 'cluster node is none'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('RCC1: cant find cluster node for %s: %r %s' \
+				% (clustername, e, str(e)))
+		return 'Unable to find an entry for %s in the Luci database.' % clustername
+
+	try:
+		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
+		if not db_nodes or not len(db_nodes):
+			raise Exception, 'no database nodes'
+	except Exception, e:
+		# Should we just create them all? Can this even happen?
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('RCC2: error: %r %s' % (e, str(e)))
+		return 'Unable to find database entries for any nodes in %s' % clustername
+
+	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
+
+	# Compute both one-sided differences between the model and DB node lists.
+	missing_list = list()
+	new_list = list()
+	for i in mb_nodes:
+		f = 0
+		for j in db_nodes:
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			new_list.append(i)
+
+	for i in db_nodes:
+		f = 0
+		for j in mb_nodes:
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			missing_list.append(i)
+
+	messages = list()
+	for i in missing_list:
+		try:
+			# or, alternatively, keep the node and flag it:
+			# new_node = cluster_node.restrictedTraverse(i)
+			# setNodeStatus(new_node, CLUSTER_NODE_NOT_MEMBER)
+			cluster_node.manage_delObjects([i])
+			messages.append('Node "%s" is no longer a member of cluster "%s." It has been deleted from the management interface for this cluster.' % (i, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC3: deleted node %s' % i)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC4: delObjects: %s: %r %s' \
+					% (i, e, str(e)))
+
+	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
+	for i in new_list:
+		try:
+			cluster_node.manage_addFolder(i)
+			new_node = cluster_node.restrictedTraverse(str(i))
+			setNodeStatus(new_node, new_flags)
+			messages.append('A new cluster node, "%s," is now a member of cluster "%s." It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clustername))
+		except Exception, e:
+			messages.append('A new cluster node, "%s," is now a member of cluster "%s," but it has not been added to the management interface for this cluster because an error occurred while creating its database entry.' % (i, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r %s' \
+					% (clustername, i, e, str(e)))
+
+	return messages
+
+def buildClusterCreateFlags(self, batch_map, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterfolder = self.restrictedTraverse(path)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r %s' \
+				% (path, e, str(e)))
+		return None
+
+	for key in batch_map.keys():
+		try:
+			key = str(key)
+			batch_id = str(batch_map[key])
+			# This suffix is needed to avoid name collisions
+			objname = '%s____flag' % key
+
+			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			# now designate this new object properly
+			objpath = str('%s/%s' % (path, objname))
+			flag = self.restrictedTraverse(objpath)
+
+			flag.manage_addProperty(BATCH_ID, batch_id, 'string')
+			flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
+			flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clustername), 'string')
+			flag.manage_addProperty(LAST_STATUS, 0, 'int')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
+
+def manageCluster(self, clustername, node_list, cluster_os):
+	try:
+		clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+		if not clusters:
+			raise Exception, 'cannot find the cluster entry in the DB'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC0: %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to create cluster %s: the cluster directory is missing.' % clustername
+
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		newCluster = self.restrictedTraverse(path)
+		if newCluster:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC1: cluster %s: already exists' \
+					% clustername)
+			return 'A cluster named %s is already managed by Luci' % clustername
+	except:
+		pass
+
+	try:
+		clusters.manage_addFolder(clustername)
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		newCluster = self.restrictedTraverse(path)
+		if not newCluster:
+			raise Exception, 'unable to create the cluster DB entry for %s' % clustername
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC2: %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to create cluster %s: %s' % (clustername, str(e))
+
+	try:
+		newCluster.manage_acquiredPermissions([])
+		newCluster.manage_role('View', ['Access contents information', 'View'])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC3: %s: %r %s' % (clustername, e, str(e)))
+		try:
+			clusters.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC4: %s: %r %s' \
+					% (clustername, e, str(e)))
+		return 'Unable to set permissions on new cluster: %s: %s' \
+			% (clustername, str(e))
+
+	try:
+		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC5: %s: %s: %r %s' \
+				% (clustername, cluster_os, e, str(e)))
+
+	for i in node_list:
+		host = node_list[i]['host']
+
+		try:
+			newCluster.manage_addFolder(host)
+			path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, host))
+			newSystem = self.restrictedTraverse(path)
+			if not newSystem:
+				raise Exception, 'unable to create cluster system DB entry for node %s' % host
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			try:
+				clusters.manage_delObjects([clustername])
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('MC6: %s: %s: %r %s' \
+						% (clustername, host, e, str(e)))
+
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC7: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+			return 'Unable to create cluster node %s for cluster %s: %s' \
+				% (host, clustername, str(e))
+
+	try:
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
+		if not ssystem:
+			raise Exception, 'The storage DB entry is missing'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC8: %s: %s: %r %s' \
+				% (clustername, host, e, str(e)))
+		return 'Error adding storage node %s: %s' % (host, str(e))
+
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for i in node_list:
+		host = node_list[i]['host']
+
+		try:
+			# It's already there, as a storage system, no problem.
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			dummy = self.restrictedTraverse(path)
+			continue
+		except:
+			pass
+
+		try:
+			ssystem.manage_addFolder(host)
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			newSystem = self.restrictedTraverse(path)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC9: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+
+def createClusterSystems(self, clustername, node_list):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterObj = self.restrictedTraverse(path)
+		if not clusterObj:
+			raise Exception, 'cluster %s DB entry is missing' % clustername
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CCS0: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return 'No cluster named "%s" is managed by Luci' % clustername
+
+	for x in node_list:
+		i = node_list[x]
+		host = str(i['host'])
+
+		try:
+			clusterObj.manage_addFolder(host)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS0a: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+
+		try:
+			path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, host))
+			newSystem = self.restrictedTraverse(path)
+			if not newSystem:
+				raise Exception, 'cluster node DB entry for %s disappeared from under us' % host
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS1: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+			return 'Unable to create cluster node %s for cluster %s: %s' \
+				% (host, clustername, str(e))
+
+	try:
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
+		if not ssystem:
+			raise Exception, 'storage DB entry is missing'
+	except Exception, e:
+		# This shouldn't fail, but if it does, it's harmless right now
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CCS2: %s: %s: %r %s' \
+				% (clustername, host, e, str(e)))
+		return None
+
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for x in node_list:
+		i = node_list[x]
+		host = str(i['host'])
+
+		try:
+			# It's already there, as a storage system, no problem.
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			dummy = self.restrictedTraverse(path)
+			continue
+		except:
+			pass
+
+		try:
+			ssystem.manage_addFolder(host)
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			newSystem = self.restrictedTraverse(path)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS3: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+
+def delSystem(self, systemName):
+	try:
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
+		if not ssystem:
+			raise Exception, 'storage DB entry is missing'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem0: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return 'Unable to find storage system %s: %s' % (systemName, str(e))
+
+	try:
+		rc = RicciCommunicator(systemName, enforce_trust=False)
+		if rc is None:
+			raise Exception, 'rc is None'
+	except Exception, e:
+		try:
+			ssystem.manage_delObjects([ systemName ])
+		except Exception, e1:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delSystem1: %s: %r %s' \
+					% (systemName, e1, str(e1)))
+			return 'Unable to delete the storage system %s' % systemName
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem2: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return
+
+	# Only unauthenticate if the system isn't a member of
+	# a managed cluster.
+	cluster_info = rc.cluster_info()
+	if not cluster_info:
+		cluster_name = None
+	elif not cluster_info[0]:
+		cluster_name = cluster_info[1]
+	else:
+		cluster_name = cluster_info[0]
+
+	unauth = False
+	if not cluster_name:
+		# If it's a member of no cluster, unauthenticate
+		unauth = True
+	else:
+		try:
+			path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, cluster_name, systemName))
+			dummy = self.restrictedTraverse(path).objectItems()
+		except Exception, e:
+			# It's not a member of a managed cluster, so unauthenticate.
+			unauth = True
+
+	if unauth is True:
+		try:
+			rc.unauth()
+		except:
+			pass
+
+	try:
+		ssystem.manage_delObjects([ systemName ])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem3: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return 'Unable to delete storage system %s: %s' \
+			% (systemName, str(e))
+
+def delCluster(self, clustername):
+	try:
+		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+		if not clusters:
+			raise Exception, 'clusters DB entry is missing'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluster0: %r %s' % (e, str(e)))
+		return 'Unable to find cluster %s' % clustername
+
+	err = delClusterSystems(self, clustername)
+	if err:
+		return err
+
+	try:
+		clusters.manage_delObjects([ clustername ])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluster1: %s %r %s' \
+				% (clustername, e, str(e)))
+		return 'Unable to delete cluster %s' % clustername
+
+def delClusterSystem(self, cluster, systemName):
+	try:
+		path = str('%s%s' % (STORAGE_FOLDER_PATH, systemName))
+		dummy = self.restrictedTraverse(path).objectItems()
+	except:
+		# It's not a storage system, so unauthenticate.
+		try:
+			rc = RicciCommunicator(systemName, enforce_trust=False)
+			rc.unauth()
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r %s' % (systemName, e, str(e)))
+
+	try:
+		cluster.manage_delObjects([ systemName ])
+	except Exception, e:
+		err_str = 'Error deleting cluster object %s: %s' % (systemName, str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delClusterSystem1: %r %s' % (e, err_str))
+		return err_str
+
+def delClusterSystems(self, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster = self.restrictedTraverse(path)
+		if not cluster:
+			raise Exception, 'cluster DB entry is missing'
+
+		try:
+			csystems = getClusterSystems(self, clustername)
+			if not csystems or len(csystems) < 1:
+				return None
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delCluSystems0: %r %s' % (e, str(e)))
+			return None
+	except Exception, er:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluSystems1: error for %s: %r %s' \
+				% (clustername, er, str(er)))
+		return str(er)
+
+	error_list = list()
+	for i in csystems:
+		err = delClusterSystem(self, cluster, i[0])
+		if err:
+			error_list.append('Unable to delete the cluster system %s: %s\n' \
+				% (i[0], err))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delCluSystems2: %s' % err)
+	return ''.join(error_list)
+
+def getSystems(self):
+	storage = getStorage(self)
+	clusters = getClusters(self)
+	storageList = list()
+	ret = [{}, [], {}]
+
+	need_auth_hash = {}
+	for i in storage:
+		storageList.append(i[0])
+		if testNodeStatus(i[1], CLUSTER_NODE_NEED_AUTH) is not False:
+			need_auth_hash[i[0]] = i[1]
+
+	chash = {}
+	for i in clusters:
+		csystems = getClusterSystems(self, i[0])
+		cslist = list()
+		for c in csystems:
+			if testNodeStatus(c[1], CLUSTER_NODE_NEED_AUTH) is not False:
+				need_auth_hash[c[0]] = c[1]
+			cslist.append(c[0])
+		chash[i[0]] = cslist
+
+	ret[0] = chash
+	ret[1] = storageList
+	ret[2] = need_auth_hash
+	return ret
+
+def getCluster(self, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster_obj = self.restrictedTraverse(path)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCobj0: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+
+	if cluster_permission_check(self, cluster_obj):
+		return cluster_obj
+	return None
+
+def getClusterSystems(self, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster_nodes = self.restrictedTraverse(path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSy0: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+
+	if isAdmin(self):
+		return cluster_nodes
+	cluster_obj = getCluster(self, clustername)
+	if cluster_permission_check(self, cluster_obj):
+		return cluster_nodes
+	return None
+
+def getClusters(self):
+	try:
+		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GC0: %r %s' % (e, str(e)))
+		return None
+
+	if isAdmin(self):
+		return clusters
+	return check_clusters(self, clusters)
+
+def getStorage(self):
+	try:
+		storage = self.restrictedTraverse(STORAGE_FOLDER_PATH).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GS0: %r %s' % (e, str(e)))
+		return None
+
+	if isAdmin(self):
+		return storage
+	return allowed_systems(self, storage)
+
+def check_clusters(self, clusters):
+	ret = []
+	try:
+		user = getSecurityManager().getUser()
+		ret = filter(lambda x: user.has_permission('View', x[1]), clusters)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CC0: %r %s' % (e, str(e)))
+	return ret
+
+def cluster_permission_check(self, cluster):
+	if isAdmin(self):
+		return True
+
+	try:
+		user = getSecurityManager().getUser()
+		# 'cluster' is the cluster folder object itself, not an
+		# (id, object) tuple, so check the permission on it directly.
+		if user.has_permission('View', cluster):
+			return True
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CPC0: %s: %r %s' % (cluster, e, str(e)))
+	return False
+
+def allowed_systems(self, systems):
+	user = getSecurityManager().getUser()
+	return filter(lambda x: user.has_permission('View', x[1]), systems)
+
+def access_to_host_allowed(self, hostname, allowed_systems_list):
+	allowed = dict(map(lambda x: [ x[0], None ], allowed_systems_list))
+	return allowed.has_key(hostname)
+
+def getRicciAgent(self, clustername, exclude_names=None, exclude_busy=False):
+	try:
+		# cluster_permission_check() operates on a cluster object,
+		# not a cluster name; getCluster() resolves the name and
+		# performs the permission check itself.
+		if not getCluster(self, clustername):
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA0: no permission for %s' \
+					% clustername)
+			return None
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRA0: %r %s' % (e, str(e)))
+		return None
+
+	clusterfolder = None
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA1: cluster folder %s for %s is missing' \
+					% (path, clustername))
+			return None
+
+		nodes = clusterfolder.objectItems('Folder')
+		if len(nodes) < 1:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA2: no cluster nodes for %s found' \
+					% clustername)
+			raise Exception, 'No cluster nodes were found at %s' % path
+	except Exception, e:
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA3: cluster folder %s for %s is missing: %r %s' % (path, clustername, e, str(e)))
+
+			if clusterfolder is not None and len(clusterfolder.objectItems()) < 1:
+				clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+				clusters.manage_delObjects([clustername])
+		except Exception, ein:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA4: %r %s' % (ein, str(ein)))
+		return None
+
+	cluname = clustername.lower()
+	for node in nodes:
+		hostname = node[0]
+
+		if exclude_names is not None and hostname in exclude_names:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA5: %s is in the excluded names list, excluding it' % hostname)
+			continue
+
+		try:
+			rc = RicciCommunicator(hostname)
+			if not rc:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA6: rc is None')
+				continue
+
+			ricci_hostname = rc.hostname()
+			if not ricci_hostname:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA7: ricci_hostname is blank')
+				continue
+
+			clu_info = rc.cluster_info()
+
+			cur_name = str(clu_info[0]).strip().lower()
+			if not cur_name:
+				cur_name = None
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA8: cluster name is none for %s' \
+						% ricci_hostname)
+				raise Exception, '%s not in a cluster' % ricci_hostname
+
+			cur_alias = str(clu_info[1]).strip().lower()
+			if not cur_alias:
+				cur_alias = None
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA9: no cluster alias for %s' \
+						% ricci_hostname)
+
+			# cur_name is guaranteed non-blank here (a blank name raises
+			# above), so flag the node as a non-member when neither the
+			# cluster name nor the alias matches.
+			if cluname != cur_name and (cur_alias is None or cluname != cur_alias):
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug('GRA10: node %s reports it is in cluster [%s:%s]; expected %s' % (ricci_hostname, clu_info[0], clu_info[1], cluname))
+				setNodeStatus(node[1], CLUSTER_NODE_NOT_MEMBER)
+				continue
+
+			if not rc.authed():
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA11: %s is not authenticated' \
+						% ricci_hostname)
+				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+				continue
+		except Exception, eout:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA12: %r %s' % (eout, str(eout)))
+			continue
+
+		if exclude_busy is True:
+			if NodeBusy(self, cluname, ricci_hostname, rc) is not False:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA13: %s is busy, excluding' \
+						% ricci_hostname)
+				continue
+		return rc
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug('GRA14: no ricci agent could be found for cluster %s' \
+			% cluname)
+	return None
+
+def getClusterDBObj(self, clustername):
+	try:
+		cluster_path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		nodelist = self.restrictedTraverse(cluster_path)
+		if not nodelist:
+			raise Exception, 'no nodelist'
+		return nodelist
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDB0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+	return None
+
+def getClusterDBNodes(self, clustername):
+	try:
+		cluster_path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDBN0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+		return []
+	return nodelist
+
+def getClusterDBVersion(cluster_folder):
+	try:
+		cluster_os = str(cluster_folder.getProperty('cluster_os'))
+		if not cluster_os:
+			raise Exception, 'cluster os is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDBV0: %s: %r %s' \
+				% (cluster_folder.getId(), e, str(e)))
+		cluster_os = None
+	return cluster_os
+
+def setClusterDBVersion(cluster_folder, version_str):
+	if cluster_folder.getProperty('cluster_os') is None:
+		try:
+			cluster_folder.manage_addProperty('cluster_os',
+				version_str, 'string')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('SCDBV0: %s: %r %s' \
+					% (cluster_folder.getId(), e, str(e)))
+			return None
+	else:
+		try:
+			cluster_folder.manage_changeProperties({'cluster_os': version_str })
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('SCDBV1: %s: %r %s' \
+					% (cluster_folder.getId(), e, str(e)))
+			return None
+	return True
+
+def getClusterFlags(self, cluname):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, cluname))
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			raise Exception, 'clusterfolder is None'
+		return clusterfolder.objectItems('ManagedSystem')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCF0: cluster %s [%s] folder missing: %r %s -- returning None' % (cluname, path, e, str(e)))
+	return None
+
+def getClusterStatusDB(self, clustername):
+	results = list()
+	vals = {}
+
+	vals['type'] = 'cluster'
+	vals['alias'] = clustername
+	vals['name'] = clustername
+	vals['error'] = True
+	vals['quorate'] = '[unknown]'
+	vals['votes'] = '[unknown]'
+	vals['minQuorum'] = '[unknown]'
+	results.append(vals)
+
+	nodelist = getClusterDBNodes(self, clustername)
+	if len(nodelist) < 1:
+		luci_log.info('Removing cluster %s because it has no nodes' \
+			% clustername)
+		try:
+			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+			clusters_dir.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCSDB0b: %s: %r %s' \
+					% (clustername, e, str(e)))
+		return results
+
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		node_val['name'] = node[0]
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+		results.append(node_val)
+	return results
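
The CLUSTER_NODE_* values defined near the top of LuciDB.py are single-bit
masks OR-ed into an integer 'flags' property, so one node can carry several
states at once; testNodeStatus(), setNodeStatus() and clearNodeStatus() are
thin wrappers around the usual bit arithmetic. A minimal sketch of that
arithmetic outside Zope (the plain dict below stands in for a node object's
property sheet):

CLUSTER_NODE_NEED_AUTH  = 0x01
CLUSTER_NODE_NOT_MEMBER = 0x02
CLUSTER_NODE_ADDED      = 0x04

node = { 'flags': 0 }  # stand-in for a Zope object's 'flags' property

# setNodeStatus: OR the mask bits in
node['flags'] |= CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED

# testNodeStatus: a non-zero AND means the masked bit is set
print (node['flags'] & CLUSTER_NODE_NEED_AUTH) != 0    # True
print (node['flags'] & CLUSTER_NODE_NOT_MEMBER) != 0   # False

# clearNodeStatus: AND with the complement clears the masked bits
node['flags'] &= ~CLUSTER_NODE_NEED_AUTH
print (node['flags'] & CLUSTER_NODE_NEED_AUTH) != 0    # False
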
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZope.py,v  -->  standard output
revision 1.4.2.1
--- conga/luci/site/luci/Extensions/LuciZope.py
+++ -	2007-08-09 21:35:22.887659000 +0000
@@ -0,0 +1,147 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from LuciZopePerm import userAuthenticated
+from LuciDB import allowed_systems
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
+def siteIsSetup(self):
+	import os
+	from ricci_communicator import CERTS_DIR_PATH
+
+	try:
+		return os.path.isfile('%sprivkey.pem' % CERTS_DIR_PATH) and os.path.isfile('%scacert.pem' % CERTS_DIR_PATH)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('SIS0: %r %s' % (e, str(e)))
+	return False
+
+def strFilter(regex, replaceChar, arg):
+	import re
+	return re.sub(regex, replaceChar, arg)
+
+# removes systems that user is not authorized access to
+def get_systems_statuses(self, systems, from_cache=False):
+	from HelperFunctions import get_system_info
+
+	CACHED_INDEX = '_get_systems_statuses()_cached_result_'
+	session = self.REQUEST.SESSION
+	if session.has_key(CACHED_INDEX):
+		res = session[CACHED_INDEX]
+		if res is not None:
+			session.set(CACHED_INDEX, None)
+			if from_cache:
+				return res
+
+	allowed_sys_list = allowed_systems(self, systems)
+	ss_list = get_system_info(self, allowed_sys_list)
+	session.set(CACHED_INDEX, ss_list)
+	return ss_list
+
+def set_persistent_var(self, var_name, default_value):
+	request = self.REQUEST
+	response = request.RESPONSE
+	session = request.SESSION
+
+	# TODO: add username into cookie_prefix, so users don't overwrite each other
+	cookie_prefix = '__luci_storage_cookie_'
+
+	value = default_value
+	if request.has_key(var_name):
+		value = request[var_name]
+	elif session.has_key(var_name):
+		value = session[var_name]
+	elif request.cookies.has_key(cookie_prefix + var_name):
+		value = request.cookies[cookie_prefix + var_name]
+
+	session.set(var_name, value)
+	response.setCookie(cookie_prefix + var_name, value,
+		expires='Tue, 30 Jun 2060 12:00:00 GMT')
+	return value
+
+# returns (str(float), units) that fits best,
+# takes preferred units into account
+
+def bytes_to_value_prefunits(self, bytes):
+	from HelperFunctions import bytes_to_value_units, convert_bytes, get_units_multiplier
+
+	p_units = self.REQUEST.SESSION.get('preferred_size_units')
+	dummy, units = bytes_to_value_units(bytes)
+	if get_units_multiplier(units) > get_units_multiplier(p_units):
+		units = p_units
+	return (convert_bytes(bytes, units), units)
+
+def getTabs(self, req):
+	if not userAuthenticated(self):
+		return []
+
+	htab = {
+		'Title': 'homebase',
+		'Description': 'Home base for this luci server',
+		'Taburl': '/luci/homebase',
+		'isSelected': False
+	}
+	ctab = {
+		'Title': 'cluster',
+		'Description': 'Cluster configuration area',
+		'Taburl': '/luci/cluster/index_html?pagetype=3',
+		'isSelected': False
+	}
+	stab = {
+		'Title': 'storage',
+		'Description': 'Storage configuration page',
+		'Taburl': '/luci/storage',
+		'isSelected': False
+	}
+
+	try:
+		baseurl = req['URL']
+		if baseurl.find('cluster') > (-1):
+			ctab['isSelected'] = True
+		elif baseurl.find('storage') > (-1):
+			stab['isSelected'] = True
+		else:
+			htab['isSelected'] = True
+	except KeyError, e:
+		pass
+	except Exception, e:
+		htab['isSelected'] = True
+
+	return [ htab, ctab, stab ]
+
+def appendModel(request, model):
+	try:
+		request.SESSION.set('model', model)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('Appending model to request failed: %r %s' \
+				% (e, str(e)))
+
+def GetReqVars(req, varlist):
+	from types import ListType
+	ret = {}
+
+	for i in varlist:
+		pval = None
+		if req and req.has_key(i):
+			pval = req[i]
+			if type(req[i]) is not ListType:
+				pval = req[i].strip()
+			if not pval:
+				pval = None
+		if req and pval is None:
+			if req.form and req.form.has_key(i):
+				pval = req.form[i]
+				if type(req.form[i]) is not ListType:
+					pval = req.form[i].strip()
+				if not pval:
+					pval = None
+		ret[i] = pval
+	return ret
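
GetReqVars() above returns one key per requested variable, falling back from
the request proper to its form data and mapping blank or missing values to
None; that is what lets callers write defaults with "or", as in
fvars['URL'] or LUCI_CLUSTER_BASE_URL. A minimal usage sketch (FakeRequest is
a hypothetical stand-in for a Zope REQUEST; only the mapping interface and
the .form attribute matter here):

class FakeRequest(dict):
	# Hypothetical stand-in for a Zope REQUEST object: mapping
	# access plus a .form dict of submitted form values.
	def __init__(self, form=None, **kw):
		dict.__init__(self, **kw)
		self.form = form or {}

req = FakeRequest(form={ 'clustername': ' clu1 ' }, URL='   ')
fvars = GetReqVars(req, [ 'clustername', 'URL', 'pagetype' ])

print fvars['clustername']   # 'clu1' -- stripped, found in req.form
print fvars['URL']           # None -- blank after stripping
print fvars['pagetype']      # None -- absent everywhere
baseurl = fvars['URL'] or '/luci/cluster/index_html'
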
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeAsync.py,v  -->  standard output
revision 1.1.4.1
--- conga/luci/site/luci/Extensions/LuciZopeAsync.py
+++ -	2007-08-09 21:35:23.007073000 +0000
@@ -0,0 +1,182 @@
+# Copyright (C) 2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from xml.dom import minidom
+
+from LuciSyslog import get_logger
+from LuciZope import GetReqVars
+from ricci_communicator import RicciCommunicator
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
+def write_xml_resp(request, xml_obj):
+	request.RESPONSE.setHeader('Content-Type', 'text/xml; charset=UTF-8')
+	request.RESPONSE.setHeader('Cache-Control', 'no-cache, no-store, private')
+	request.RESPONSE.write(str(xml_obj.toprettyxml()))
+
+def result_to_xml(result):
+	import types
+
+	numeric_types = [
+		types.IntType, types.BooleanType, types.LongType, types.FloatType
+	]
+
+	root = minidom.Document()
+
+	def pyobj_to_xml(element_name, element, parent_node):
+		if type(element) is types.DictType:
+			if len(element) > 0:
+				xml_elem = root.createElement('dict')
+				xml_elem.setAttribute('name', str(element_name))
+
+				for i in element.iterkeys():
+					pyobj_to_xml(i, element[i], xml_elem)
+			else:
+				xml_elem = None
+		elif type(element) in [ types.ListType, types.TupleType ]:
+			if len(element) > 0:
+				xml_elem = root.createElement('list')
+				xml_elem.setAttribute('name', str(element_name))
+				for i in element:
+					pyobj_to_xml(element_name, i, xml_elem)
+			else:
+				xml_elem = None
+		else:
+			cur_tagname = None
+			try:
+				if parent_node.tagName == 'list':
+					cur_tagname = parent_node.getAttribute('name')
+			except:
+				cur_tagname = None
+
+			if not cur_tagname:
+				xml_elem = root.createElement('var')
+			else:
+				xml_elem = root.createElement(cur_tagname)
+
+			if type(element) in types.StringTypes:
+				cur_type = 'str'
+			elif type(element) in numeric_types:
+				cur_type = 'num'
+			else:
+				cur_type = None
+
+			if cur_type:
+				try:
+					if parent_node.tagName == 'dict':
+						xml_elem.setAttribute('name', str(element_name))
+				except:
+					pass
+
+				xml_elem.setAttribute('type', cur_type)
+				xml_elem.setAttribute('value', str(element))
+			else:
+				xml_elem = None
+
+		if xml_elem is not None:
+			parent_node.appendChild(xml_elem)
+
+	pyobj_to_xml('result', result[1], root)
+	res_elem = root.createElement('result')
+	res_elem.setAttribute('name', 'success')
+	res_elem.setAttribute('value', str(result[0]).lower())
+	root.firstChild.appendChild(res_elem)
+	return root
+
+def write_err_async(request, err_msg):
+	xml_obj = result_to_xml((False, { 'errors': err_msg }))
+	write_xml_resp(request, xml_obj)
+
+def get_cluster_nodes_async(self, request):
+	from LuciClusterInfo import getClusterConfNodes
+	from RicciQueries import getClusterConf
+
+	fvars = GetReqVars(request, [ 'QUERY_STRING' ])
+	if fvars['QUERY_STRING'] is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCNA: No query string was given')
+		write_err_async(request, 'No node names were given')
+		return None
+
+	try:
+		nodes = fvars['QUERY_STRING'].split('&')
+		node_list = map(lambda x: x[1], filter(lambda x: x[0][:4] == 'node', map(lambda x: x.split('='), nodes)))
+		if not node_list or len(node_list) < 1:
+			raise Exception, 'No node list'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCNA: %r %s' % (e, str(e)))
+		write_err_async(request, 'No node names were given')
+		return None
+
+	errors = list()
+	ret = {}
+	for node_host in node_list:
+		try:
+			rc = RicciCommunicator(node_host)
+			cluster_name = rc.cluster_info()[0]
+			if not cluster_name:
+				errors.append('%s is not a member of a cluster' \
+					% node_host)
+				continue
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCNA0: ricci: %s: %r %s' \
+					% (node_host, e, str(e)))
+			errors.append('Unable to communicate with the ricci agent on %s' \
+				% node_host)
+			continue
+
+		try:
+			conf = getClusterConf(rc)
+			node_names = getClusterConfNodes(conf)
+			if not node_names or len(node_names) < 1:
+				raise Exception, 'no nodes'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCNA1: ricci: %s: %r %s' \
+					% (node_host, e, str(e)))
+			errors.append('Unable to retrieve a list of cluster nodes from %s' \
+				% node_host)
+			continue
+		ret[cluster_name] = {
+			'cluster': cluster_name,
+			'num_nodes': len(node_names),
+			'clusternode': node_names
+		}
+
+	ret['errors'] = errors
+	xml_obj = result_to_xml((len(errors) < len(node_list), ret))
+	write_xml_resp(request, xml_obj)
+
+def get_sysinfo_async(self, request):
+	from HelperFunctions import get_system_info
+
+	fvars = GetReqVars(request, [ 'QUERY_STRING' ])
+	try:
+		nodes = fvars['QUERY_STRING'].split('&')
+		node_list = map(lambda x: x[1], filter(lambda x: x[0][:4] == 'node', map(lambda x: x.split('='), nodes)))
+		if not node_list or len(node_list) < 1:
+			raise Exception, 'No node list'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSIA: %r %s' % (e, str(e)))
+		write_err_async(request, 'No node names were given')
+		return None
+
+	ret = {}
+	try:
+		ret = get_system_info(self, node_list)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSIA0: %r: %r %s' \
+				% (node_list, e, str(e)))
+		write_err_async(request, 'Error retrieving information')
+		return None
+	xml_obj = result_to_xml((True, { 'result': ret }))
+	write_xml_resp(request, xml_obj)
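
result_to_xml() above serializes a (success, payload) pair: dicts become
<dict> elements, lists become <list> elements whose children are named after
the list, scalars become typed leaf elements, and a <result name="success">
leaf is appended at the top level. A small illustration of the shape it
produces (element order within a dict may vary, and the XML below is
abbreviated by hand):

doc = result_to_xml((True, {
	'cluster': 'clu1',
	'num_nodes': 2,
	'clusternode': [ 'node1', 'node2' ]
}))
print doc.toprettyxml()
# Roughly:
# <dict name="result">
#   <var name="cluster" type="str" value="clu1"/>
#   <var name="num_nodes" type="num" value="2"/>
#   <list name="clusternode">
#     <clusternode type="str" value="node1"/>
#     <clusternode type="str" value="node2"/>
#   </list>
#   <result name="success" value="true"/>
# </dict>
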
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeClusterPortal.py,v  -->  standard output
revision 1.2.2.1
--- conga/luci/site/luci/Extensions/LuciZopeClusterPortal.py
+++ -	2007-08-09 21:35:23.144029000 +0000
@@ -0,0 +1,551 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from LuciZopePerm import havePermCreateCluster
+from conga_constants import CLUNAME, CLUSTER, \
+	CLUSTER_ADD, CLUSTER_CONFIG, CLUSTERLIST, CLUSTERS, \
+	FDOM, FDOM_ADD, FDOM_CONFIG, FDOMS, \
+	FENCEDEV, FENCEDEV_ADD, FENCEDEV_CONFIG, FENCEDEVS, \
+	NODE, NODE_ADD, NODE_CONFIG, NODE_GRID, NODE_LIST, NODES, PAGETYPE, \
+	RESOURCE, RESOURCE_ADD, RESOURCE_CONFIG, RESOURCES, \
+	SERVICE, SERVICE_ADD, SERVICE_CONFIG, SERVICE_LIST, SERVICES, \
+	VM_ADD, VM_CONFIG
+
+# Policy for showing the cluster chooser menu:
+# 1) If there are no clusters in the ManagedClusterSystems
+# folder, then only the admin user may see this menu, and
+# the configure option should not be displayed.
+# 2)If there are clusters in the ManagedClusterSystems,
+# then only display chooser if the current user has
+# permissions on at least one. If the user is admin, show ALL clusters
+
+def createCluChooser(self, request, systems):
+	from cluster_adapters import validatePost
+	dummynode = {}
+
+	if request.REQUEST_METHOD == 'POST':
+		ret = validatePost(self, request)
+		try:
+			request.SESSION.set('checkRet', ret[1])
+		except:
+			request.SESSION.set('checkRet', {})
+	else:
+		try:
+			request.SESSION.set('checkRet', {})
+		except:
+			pass
+
+	# First, see if a cluster is chosen, then
+	# check that the current user can access that system
+	cname = None
+	try:
+		cname = request[CLUNAME]
+	except:
+		cname = ''
+
+	try:
+		url = request['URL']
+	except:
+		url = '/luci/cluster/index_html'
+
+	try:
+		pagetype = request[PAGETYPE]
+	except:
+		pagetype = '3'
+
+	cldata = {}
+	cldata['Title'] = 'Cluster List'
+	cldata['cfg_type'] = 'clusters'
+	cldata['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERLIST)
+	cldata['Description'] = 'Clusters available for configuration'
+	if pagetype == CLUSTERLIST:
+		cldata['currentItem'] = True
+	else:
+		cldata['currentItem'] = False
+
+	UserHasPerms = havePermCreateCluster(self)
+	if UserHasPerms:
+		cladd = {}
+		cladd['Title'] = 'Create a New Cluster'
+		cladd['cfg_type'] = 'clusteradd'
+		cladd['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTER_ADD)
+		cladd['Description'] = 'Create a Cluster'
+		if pagetype == CLUSTER_ADD:
+			cladd['currentItem'] = True
+		else:
+			cladd['currentItem'] = False
+
+	clcfg = {}
+	clcfg['Title'] = 'Configure'
+	clcfg['cfg_type'] = 'clustercfg'
+	clcfg['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERS)
+	clcfg['Description'] = 'Configure a cluster'
+	if pagetype == CLUSTERS:
+		clcfg['currentItem'] = True
+	else:
+		clcfg['currentItem'] = False
+
+	# Add all cluster-type pages here:
+	if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
+		clcfg['show_children'] = True
+	else:
+		clcfg['show_children'] = False
+
+	#loop through all clusters
+	syslist = list()
+	for system in systems:
+		clsys = {}
+		clsys['Title'] = system[0]
+		clsys['cfg_type'] = 'cluster'
+		clsys['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, CLUSTER, system[0])
+		clsys['Description'] = 'Configure this cluster'
+
+		if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
+			if cname == system[0]:
+				clsys['currentItem'] = True
+			else:
+				clsys['currentItem'] = False
+		else:
+			clsys['currentItem'] = False
+		syslist.append(clsys)
+
+	clcfg['children'] = syslist
+
+	mylist = list()
+	mylist.append(cldata)
+	if UserHasPerms:
+		mylist.append(cladd)
+	mylist.append(clcfg)
+	dummynode['children'] = mylist
+
+	return dummynode
+
+def createCluConfigTree(self, request, model):
+	dummynode = {}
+
+	if not model:
+		return {}
+
+	# There should be a positive page type
+	try:
+		pagetype = request[PAGETYPE]
+	except:
+		pagetype = '3'
+
+	try:
+		url = request['URL']
+	except:
+		url = '/luci/cluster/index_html'
+
+	# The only way this method can run is if there exists
+	# a clustername query var
+	cluname = request['clustername']
+
+	nd = {}
+	nd['Title'] = 'Nodes'
+	nd['cfg_type'] = 'nodes'
+	nd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODES, cluname)
+	nd['Description'] = 'Node configuration for this cluster'
+	if pagetype in (NODES, NODE_GRID, NODE_LIST, NODE_CONFIG, NODE_ADD, NODE):
+		nd['show_children'] = True
+	else:
+		nd['show_children'] = False
+	if pagetype == '0':
+		nd['show_children'] = False
+
+	if pagetype == NODES:
+		nd['currentItem'] = True
+	else:
+		nd['currentItem'] = False
+
+
+	ndadd = {}
+	ndadd['Title'] = 'Add a Node'
+	ndadd['cfg_type'] = 'nodeadd'
+	ndadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_ADD, cluname)
+	ndadd['Description'] = 'Add a node to this cluster'
+	if pagetype == NODE_ADD:
+		ndadd['currentItem'] = True
+	else:
+		ndadd['currentItem'] = False
+
+	ndcfg = {}
+	ndcfg['Title'] = 'Configure'
+	ndcfg['cfg_type'] = 'nodecfg'
+	ndcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_CONFIG, cluname)
+	ndcfg['Description'] = 'Configure cluster nodes'
+	if pagetype in (NODE_CONFIG, NODE, NODES, NODE_LIST, NODE_GRID, NODE_ADD):
+		ndcfg['show_children'] = True
+	else:
+		ndcfg['show_children'] = False
+	if pagetype == NODE_CONFIG:
+		ndcfg['currentItem'] = True
+	else:
+		ndcfg['currentItem'] = False
+
+	nodes = model.getNodes()
+	nodenames = list()
+	for node in nodes:
+		nodenames.append(node.getName())
+
+	cfgablenodes = list()
+	for nodename in nodenames:
+		cfg = {}
+		cfg['Title'] = nodename
+		cfg['cfg_type'] = 'node'
+		cfg['absolute_url'] = '%s?pagetype=%s&nodename=%s&clustername=%s' % (url, NODE, nodename, cluname)
+		cfg['Description'] = 'Configure this cluster node'
+		if pagetype == NODE:
+			try:
+				nname = request['nodename']
+			except KeyError, e:
+				nname = ''
+			if nodename == nname:
+				cfg['currentItem'] = True
+			else:
+				cfg['currentItem'] = False
+		else:
+			cfg['currentItem'] = False
+
+		cfgablenodes.append(cfg)
+
+	#Now add nodename structs as children of the config element
+	ndcfg['children'] = cfgablenodes
+
+	ndkids = list()
+	ndkids.append(ndadd)
+	ndkids.append(ndcfg)
+
+	nd['children'] = ndkids
+
+	##################################################################
+	sv = {}
+	sv['Title'] = 'Services'
+	sv['cfg_type'] = 'services'
+	sv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICES, cluname)
+	sv['Description'] = 'Service configuration for this cluster'
+	if pagetype in (SERVICES, SERVICE_CONFIG, SERVICE_ADD, SERVICE, SERVICE_LIST, VM_ADD, VM_CONFIG):
+		sv['show_children'] = True
+	else:
+		sv['show_children'] = False
+	if pagetype == SERVICES or pagetype == SERVICE_LIST:
+		sv['currentItem'] = True
+	else:
+		sv['currentItem'] = False
+
+	svadd = {}
+	svadd['Title'] = 'Add a Service'
+	svadd['cfg_type'] = 'serviceadd'
+	svadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_ADD, cluname)
+	svadd['Description'] = 'Add a Service to this cluster'
+	if pagetype == SERVICE_ADD:
+		svadd['currentItem'] = True
+	else:
+		svadd['currentItem'] = False
+
+	if model.getIsVirtualized() is True:
+		vmadd = {}
+		vmadd['Title'] = 'Add a Virtual Service'
+		vmadd['cfg_type'] = 'vmadd'
+		vmadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, VM_ADD, cluname)
+		vmadd['Description'] = 'Add a Virtual Service to this cluster'
+		if pagetype == VM_ADD:
+			vmadd['currentItem'] = True
+		else:
+			vmadd['currentItem'] = False
+
+	svcfg = {}
+	svcfg['Title'] = 'Configure a Service'
+	svcfg['cfg_type'] = 'servicecfg'
+	svcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_CONFIG, cluname)
+	svcfg['Description'] = 'Configure a Service for this cluster'
+	if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
+		svcfg['show_children'] = True
+	else:
+		svcfg['show_children'] = False
+	if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
+		svcfg['currentItem'] = True
+	else:
+		svcfg['currentItem'] = False
+
+	services = model.getServices()
+	serviceable = list()
+
+	for service in services:
+		servicename = service.getName()
+		svc = {}
+		svc['Title'] = servicename
+		svc['cfg_type'] = 'service'
+		svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, SERVICE, servicename, cluname)
+		svc['Description'] = 'Configure this service'
+		if pagetype == SERVICE:
+			try:
+				sname = request['servicename']
+			except KeyError, e:
+				sname = ''
+			if servicename == sname:
+				svc['currentItem'] = True
+			else:
+				svc['currentItem'] = False
+		else:
+			svc['currentItem'] = False
+
+		serviceable.append(svc)
+
+	vms = model.getVMs()
+	for vm in vms:
+		name = vm.getName()
+		svc = {}
+		svc['Title'] = name
+		svc['cfg_type'] = 'vm'
+		svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, VM_CONFIG, name, cluname)
+		svc['Description'] = 'Configure this Virtual Service'
+		if pagetype == VM_CONFIG:
+			try:
+				xname = request['servicename']
+			except KeyError, e:
+				xname = ''
+			if name == xname:
+				svc['currentItem'] = True
+			else:
+				svc['currentItem'] = False
+		else:
+			svc['currentItem'] = False
+
+		serviceable.append(svc)
+
+	svcfg['children'] = serviceable
+
+
+
+	kids = list()
+	kids.append(svadd)
+	if model.getIsVirtualized() is True:
+		kids.append(vmadd)
+	kids.append(svcfg)
+	sv['children'] = kids
+#############################################################
+	rv = {}
+	rv['Title'] = 'Resources'
+	rv['cfg_type'] = 'resources'
+	rv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCES, cluname)
+	rv['Description'] = 'Resource configuration for this cluster'
+	if pagetype in (RESOURCES, RESOURCE_CONFIG, RESOURCE_ADD, RESOURCE):
+		rv['show_children'] = True
+	else:
+		rv['show_children'] = False
+	if pagetype == RESOURCES:
+		rv['currentItem'] = True
+	else:
+		rv['currentItem'] = False
+
+	rvadd = {}
+	rvadd['Title'] = 'Add a Resource'
+	rvadd['cfg_type'] = 'resourceadd'
+	rvadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_ADD, cluname)
+	rvadd['Description'] = 'Add a Resource to this cluster'
+	if pagetype == RESOURCE_ADD:
+		rvadd['currentItem'] = True
+	else:
+		rvadd['currentItem'] = False
+
+	rvcfg = {}
+	rvcfg['Title'] = 'Configure a Resource'
+	rvcfg['cfg_type'] = 'resourcecfg'
+	rvcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_CONFIG, cluname)
+	rvcfg['Description'] = 'Configure a Resource for this cluster'
+	if pagetype == RESOURCE_CONFIG or pagetype == RESOURCE:
+		rvcfg['show_children'] = True
+	else:
+		rvcfg['show_children'] = False
+	if pagetype == RESOURCE_CONFIG:
+		rvcfg['currentItem'] = True
+	else:
+		rvcfg['currentItem'] = False
+
+	resources = model.getResources()
+	resourceable = list()
+	for resource in resources:
+		resourcename = resource.getName()
+		rvc = {}
+		rvc['Title'] = resourcename
+		rvc['cfg_type'] = 'resource'
+		rvc['absolute_url'] = '%s?pagetype=%s&resourcename=%s&clustername=%s' % (url, RESOURCES, resourcename, cluname)
+		rvc['Description'] = 'Configure this resource'
+		if pagetype == RESOURCE:
+			try:
+				rname = request['resourcename']
+			except KeyError, e:
+				rname = ''
+			if resourcename == rname:
+				rvc['currentItem'] = True
+			else:
+				rvc['currentItem'] = False
+		else:
+			rvc['currentItem'] = False
+
+		resourceable.append(rvc)
+	rvcfg['children'] = resourceable
+
+
+
+	kids = list()
+	kids.append(rvadd)
+	kids.append(rvcfg)
+	rv['children'] = kids
+ ################################################################
+	fd = {}
+	fd['Title'] = 'Failover Domains'
+	fd['cfg_type'] = 'failoverdomains'
+	fd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOMS, cluname)
+	fd['Description'] = 'Failover domain configuration for this cluster'
+	if pagetype == FDOMS or pagetype == FDOM_CONFIG or pagetype == FDOM_ADD or pagetype == FDOM:
+		fd['show_children'] = True
+	else:
+		fd['show_children'] = False
+	if pagetype == FDOMS:
+		fd['currentItem'] = True
+	else:
+		fd['currentItem'] = False
+
+	fdadd = {}
+	fdadd['Title'] = 'Add a Failover Domain'
+	fdadd['cfg_type'] = 'failoverdomainadd'
+	fdadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_ADD, cluname)
+	fdadd['Description'] = 'Add a Failover Domain to this cluster'
+	if pagetype == FDOM_ADD:
+		fdadd['currentItem'] = True
+	else:
+		fdadd['currentItem'] = False
+
+	fdcfg = {}
+	fdcfg['Title'] = 'Configure a Failover Domain'
+	fdcfg['cfg_type'] = 'failoverdomaincfg'
+	fdcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_CONFIG, cluname)
+	fdcfg['Description'] = 'Configure a Failover Domain for this cluster'
+	if pagetype == FDOM_CONFIG or pagetype == FDOM:
+		fdcfg['show_children'] = True
+	else:
+		fdcfg['show_children'] = False
+	if pagetype == FDOM_CONFIG:
+		fdcfg['currentItem'] = True
+	else:
+		fdcfg['currentItem'] = False
+
+	fdoms = model.getFailoverDomains()
+	fdomable = list()
+	for fdom in fdoms:
+		fdomname = fdom.getName()
+		fdc = {}
+		fdc['Title'] = fdomname
+		fdc['cfg_type'] = 'fdom'
+		fdc['absolute_url'] = '%s?pagetype=%s&fdomname=%s&clustername=%s' % (url, FDOM, fdomname, cluname)
+		fdc['Description'] = 'Configure this Failover Domain'
+		if pagetype == FDOM:
+			try:
+				fname = request['fdomname']
+			except KeyError, e:
+				fname = ''
+			if fdomname == fname:
+				fdc['currentItem'] = True
+			else:
+				fdc['currentItem'] = False
+		else:
+			fdc['currentItem'] = False
+
+		fdomable.append(fdc)
+	fdcfg['children'] = fdomable
+
+
+
+	kids = list()
+	kids.append(fdadd)
+	kids.append(fdcfg)
+	fd['children'] = kids
+#############################################################
+	fen = {}
+	fen['Title'] = 'Shared Fence Devices'
+	fen['cfg_type'] = 'fencedevicess'
+	fen['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEVS, cluname)
+	fen['Description'] = 'Fence Device configuration for this cluster'
+	if pagetype == FENCEDEVS or pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV_ADD or pagetype == FENCEDEV:
+		fen['show_children'] = True
+	else:
+		fen['show_children'] = False
+	if pagetype == FENCEDEVS:
+		fen['currentItem'] = True
+	else:
+		fen['currentItem'] = False
+
+	fenadd = {}
+	fenadd['Title'] = 'Add a Fence Device'
+	fenadd['cfg_type'] = 'fencedeviceadd'
+	fenadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_ADD, cluname)
+	fenadd['Description'] = 'Add a Fence Device to this cluster'
+	if pagetype == FENCEDEV_ADD:
+		fenadd['currentItem'] = True
+	else:
+		fenadd['currentItem'] = False
+
+	fencfg = {}
+	fencfg['Title'] = 'Configure a Fence Device'
+	fencfg['cfg_type'] = 'fencedevicecfg'
+	fencfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_CONFIG, cluname)
+	fencfg['Description'] = 'Configure a Fence Device for this cluster'
+	if pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV:
+		fencfg['show_children'] = True
+	else:
+		fencfg['show_children'] = False
+	if pagetype == FENCEDEV_CONFIG:
+		fencfg['currentItem'] = True
+	else:
+		fencfg['currentItem'] = False
+
+	fences = model.getFenceDevices()
+	fenceable = list()
+	for fence in fences:
+		fencename = fence.getName()
+		fenc = {}
+		fenc['Title'] = fencename
+		fenc['cfg_type'] = 'fencedevice'
+		fenc['absolute_url'] = '%s?pagetype=%s&fencename=%s&clustername=%s' % (url, FENCEDEV, fencename, cluname)
+		fenc['Description'] = 'Configure this Fence Device'
+		if pagetype == FENCEDEV:
+			try:
+				fenname = request['fencename']
+			except KeyError, e:
+				fenname = ''
+			if fencename == fenname:
+				fenc['currentItem'] = True
+			else:
+				fenc['currentItem'] = False
+		else:
+			fenc['currentItem'] = False
+
+		fenceable.append(fenc)
+	fencfg['children'] = fenceable
+
+
+
+	kids = list()
+	kids.append(fenadd)
+	kids.append(fencfg)
+	fen['children'] = kids
+#############################################################
+
+	mylist = list()
+	mylist.append(nd)
+	mylist.append(sv)
+	mylist.append(rv)
+	mylist.append(fd)
+	mylist.append(fen)
+
+	dummynode['children'] = mylist
+
+	return dummynode
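
The hunk above assembles the portal's configuration tree as plain nested
dicts: each entry carries Title, cfg_type, absolute_url and Description,
plus the show_children/currentItem flags the UI keys off of, with
sub-entries stored under 'children'. A minimal sketch of walking the
structure returned here (the walker itself is illustrative, not part of
Luci):

	def walk_config_tree(node, depth=0):
		# Nodes are the dicts built above; 'children' exists only on
		# container entries, and 'show_children' gates expansion.
		print '%s%s -> %s' % ('  ' * depth,
			node.get('Title'), node.get('absolute_url'))
		if node.get('show_children'):
			for child in node.get('children', []):
				walk_config_tree(child, depth + 1)
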
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeExternal.py,v  -->  standard output
revision 1.3.2.1
--- conga/luci/site/luci/Extensions/LuciZopeExternal.py
+++ -	2007-08-09 21:35:23.301305000 +0000
@@ -0,0 +1,53 @@
+# Copyright (C) 2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+#
+# The only purpose of this file is to aggregate all the functions
+# called by Zope External Methods.
+#
+
+from homebase_adapters import getUserPerms, homebaseControl, \
+	getDefaultUser
+
+from cluster_adapters import clusterTaskProcess, \
+	resourceAdd, resourceDelete, serviceDelete, \
+	getClusterURL, getSystemLogs, getRicciAgentForCluster, \
+	isClusterBusy, nodeTaskProcess, process_cluster_conf_editor, \
+	serviceMigrate, serviceRestart, serviceStart, serviceStop, \
+	getResourceInfo
+
+from HelperFunctions import add_commas, bytes_to_value_units, convert_bytes
+
+from LuciClusterInfo import getClusterAlias, getClusterInfo, getClusterName, \
+	getClustersInfo, getClusterStatus, getFdomInfo, get_fdom_names, \
+	getFdomsInfo, getFence, getFenceInfo, getFencesInfo, getModelBuilder, \
+	getNodeInfo, getnodes, getNodesInfo, getResourcesInfo, \
+	getServiceInfo, getServicesInfo, getVMInfo, getClusterOS
+
+from LuciDB import access_to_host_allowed, allowed_systems, \
+	check_clusters, getRicciAgent, getSystems, getClusters, \
+	getStorage
+
+from LuciZope import appendModel, bytes_to_value_prefunits, \
+	set_persistent_var, strFilter, getTabs, siteIsSetup
+
+from LuciZopeClusterPortal import createCluChooser, createCluConfigTree
+
+from LuciZopePerm import isAdmin, userAuthenticated
+
+from ricci_communicator import get_ricci_communicator
+
+from storage_adapters import createStorageChooser, \
+	createStorageConfigTree, getStorageURL
+
+from StorageReport import apply_storage_changes, cache_storage_report, \
+	get_bd_data, get_mapper_data, get_mappers_data, get_mapper_template_data, \
+	get_mappings_info, get_storage_batch_result, get_storage_report, \
+	group_systems_by_cluster, is_storage_report_cached, validate, \
+	get_content_data
+
+from LuciZopeAsync import get_cluster_nodes_async, get_sysinfo_async
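
LuciZopeExternal.py gives every Zope External Method a single module to
point at. In Zope 2, an External Method names a module in the instance's
Extensions directory plus a function inside it; a rough sketch of binding
one of the imports above (the id and title strings are made up, and this
commit does not show how Luci actually registers them):

	from Products.ExternalMethod.ExternalMethod import ExternalMethod

	# ExternalMethod(id, title, module, function): Zope resolves the
	# module name against the Extensions directory and binds the function.
	em = ExternalMethod('getClusterInfo', 'Cluster info page data',
			'LuciZopeExternal', 'getClusterInfo')
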
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopePerm.py,v  -->  standard output
revision 1.2.2.1
--- conga/luci/site/luci/Extensions/LuciZopePerm.py
+++ -	2007-08-09 21:35:23.488645000 +0000
@@ -0,0 +1,50 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from AccessControl import getSecurityManager
+from conga_constants import PLONE_ROOT
+
+def userAuthenticated(self):
+	try:
+		if (isAdmin(self) or getSecurityManager().getUser().has_role('Authenticated', self.restrictedTraverse(PLONE_ROOT))):
+			return True
+	except Exception, e:
+		pass
+	return False
+
+def isAdmin(self):
+	try:
+		return getSecurityManager().getUser().has_role('Owner', self.restrictedTraverse(PLONE_ROOT))
+	except Exception, e:
+		pass
+	return False
+
+# In case we want to give access to non-admin users in the future
+
+def havePermCreateCluster(self):
+	return isAdmin(self)
+
+def havePermAddStorage(self):
+	return isAdmin(self)
+
+def havePermAddCluster(self):
+	return isAdmin(self)
+
+def havePermAddUser(self):
+	return isAdmin(self)
+
+def havePermDelUser(self):
+	return isAdmin(self)
+
+def havePermRemStorage(self):
+	return isAdmin(self)
+
+def havePermRemCluster(self):
+	return isAdmin(self)
+
+def havePermEditPerms(self):
+	return isAdmin(self)
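
Every havePerm* helper above collapses to isAdmin for now, but keeping one
helper per action lets call sites name the operation being attempted. A
hypothetical caller (the function name and error text are illustrative):

	def processAddCluster(self, request):
		# Gate on the action-specific helper rather than isAdmin directly,
		# so relaxing a single permission later is a one-line change here.
		if not havePermAddCluster(self):
			return { 'errors': [ 'You are not authorized to add clusters' ] }
		# ... validate the request and perform the add ...
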
/cvs/cluster/conga/luci/site/luci/Extensions/ResourceHandler.py,v  -->  standard output
revision 1.2.2.1
--- conga/luci/site/luci/Extensions/ResourceHandler.py
+++ -	2007-08-09 21:35:23.613137000 +0000
@@ -0,0 +1,793 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from ClusterModel.Ip import Ip
+from ClusterModel.Fs import Fs
+from ClusterModel.Clusterfs import Clusterfs
+from ClusterModel.Netfs import Netfs
+from ClusterModel.NFSExport import NFSExport
+from ClusterModel.NFSClient import NFSClient
+from ClusterModel.Script import Script
+from ClusterModel.Samba import Samba
+from ClusterModel.Tomcat5 import Tomcat5
+from ClusterModel.Postgres8 import Postgres8
+from ClusterModel.Apache import Apache
+from ClusterModel.OpenLDAP import OpenLDAP
+from ClusterModel.LVM import LVM
+from ClusterModel.MySQL import MySQL
+from ClusterModel.SAPDatabase import SAPDatabase
+from ClusterModel.SAPInstance import SAPInstance
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
+def get_fsid_list(model):
+	obj_list = model.searchObjectTree('fs')
+	obj_list.extend(model.searchObjectTree('clusterfs'))
+	return map(lambda x: x.getAttribute('fsid') and int(x.getAttribute('fsid')) or 0, obj_list)
+
+def fsid_is_unique(model, fsid):
+	fsid_list = get_fsid_list(model)
+	return fsid not in fsid_list
+
+def generate_fsid(model, name):
+	import binascii
+	from random import randrange
+	fsid_list = get_fsid_list(model)
+
+	fsid = binascii.crc32(name) & 0xffff
+	dupe = fsid in fsid_list
+	while dupe is True:
+		fsid = (fsid + randrange(1, 0xfffe)) & 0xffff
+		dupe = fsid in fsid_list
+	return fsid
+
+def getResourceForEdit(model, name):
+	resPtr = model.getResourcesPtr()
+	resources = resPtr.getChildren()
+
+	for res in resources:
+		if res.getName() == name:
+			resPtr.removeChild(res)
+			return res
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GRFE0: unable to find resource "%s"' % name)
+	raise KeyError, name
+
+def addIp(res, rname, form, model):
+	errors = list()
+
+	try:
+		addr = form['ip_address'].strip()
+		if not addr:
+			raise KeyError, 'No IP address was given'
+		res.addAttribute('address', addr)
+	except KeyError, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addIp4: %s' % err)
+
+	if form.has_key('monitorLink'):
+		res.addAttribute('monitor_link', '1')
+	else:
+		res.addAttribute('monitor_link', '0')
+
+	return errors
+
+def addFs(res, rname, form, model):
+	errors = list()
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		if not mountpoint:
+			raise Exception, 'No mount point was given for "%s"' % rname
+		res.addAttribute('mountpoint', mountpoint)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs6: %s' % err)
+
+	try:
+		device = form['device'].strip()
+		if not device:
+			raise Exception, 'No device was given for "%s"' % rname
+		res.addAttribute('device', device)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs7: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs8: %s' % err)
+
+	try:
+		fstype = form['fstype'].strip()
+		if not fstype:
+			raise Exception, 'No filesystem type was given for "%s"' % rname
+		res.addAttribute('fstype', fstype)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs9: %s' % err)
+
+	try:
+		fsid = form['fsid'].strip()
+		if not fsid:
+			raise Exception, 'no fsid'
+		fsid_int = int(fsid)
+		if not fsid_is_unique(model, fsid_int):
+			raise Exception, 'fsid is not unique'
+	except Exception, e:
+		fsid = str(generate_fsid(model, rname))
+	res.addAttribute('fsid', fsid)
+
+	if form.has_key('forceunmount'):
+		res.addAttribute('force_unmount', '1')
+	else:
+		res.addAttribute('force_unmount', '0')
+
+	if form.has_key('selffence'):
+		res.addAttribute('self_fence', '1')
+	else:
+		res.addAttribute('self_fence', '0')
+
+	if form.has_key('checkfs'):
+		res.addAttribute('force_fsck', '1')
+	else:
+		res.addAttribute('force_fsck', '0')
+
+	return errors
+
+def addClusterfs(res, rname, form, model):
+	errors = list()
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		if not mountpoint:
+			raise Exception, 'No mount point was given for "%s"' % rname
+		res.addAttribute('mountpoint', mountpoint)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs5: %s' % err)
+
+	try:
+		device = form['device'].strip()
+		if not device:
+			raise Exception, 'No device was given for "%s"' % rname
+		res.addAttribute('device', device)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs6: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs7: %s' % err)
+
+	try:
+		fsid = form['fsid'].strip()
+		if not fsid:
+			raise Exception, 'no fsid'
+		fsid_int = int(fsid)
+		if not fsid_is_unique(model, fsid_int):
+			raise Exception, 'fsid is not unique'
+	except Exception, e:
+		fsid = str(generate_fsid(model, rname))
+	res.addAttribute('fsid', fsid)
+
+	if form.has_key('forceunmount'):
+		res.addAttribute('force_unmount', '1')
+	else:
+		res.addAttribute('force_unmount', '0')
+
+	return errors
+
+def addNetfs(res, rname, form, model):
+	errors = list()
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		if not mountpoint:
+			raise Exception, 'No mount point was given for "%s"' % rname
+		res.addAttribute('mountpoint', mountpoint)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm5: %s' % err)
+
+	try:
+		host = form['host'].strip()
+		if not host:
+			raise Exception, 'No host server was given for "%s"' % rname
+		res.addAttribute('host', host)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm6 error: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm7: %s' % err)
+
+	try:
+		exportpath = form['exportpath'].strip()
+		if not exportpath:
+			raise Exception, 'No export path was given for "%s"' % rname
+		res.addAttribute('exportpath', exportpath)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm8: %s' % err)
+
+	try:
+		nfstype = form['nfstype'].strip().lower()
+		if nfstype != 'nfs' and nfstype != 'nfs4':
+			raise Exception, 'An invalid NFS version "%s" was given for "%s"' \
+								% (nfstype, rname)
+		res.addAttribute('nfstype', nfstype)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm9: %s' % err)
+
+	if form.has_key('forceunmount'):
+		res.addAttribute('force_unmount', '1')
+	else:
+		res.addAttribute('force_unmount', '0')
+
+	return errors
+
+def addNFSClient(res, rname, form, model):
+	errors = list()
+
+	try:
+		target = form['target'].strip()
+		if not target:
+			raise Exception, 'No target was given for "%s"' % rname
+		res.addAttribute('target', target)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsc5: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsc6: %s' % err)
+
+	if form.has_key('allow_recover'):
+		res.addAttribute('allow_recover', '1')
+	else:
+		res.addAttribute('allow_recover', '0')
+
+	return errors
+
+def addNFSExport(res, rname, form, model):
+	errors = list()
+	return errors
+
+def addScript(res, rname, form, model):
+	errors = list()
+
+	try:
+		path = form['file'].strip()
+		if not path:
+			raise Exception, 'No path to a script file was given for "%s"' % rname
+		res.addAttribute('file', path)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addScr5: %s' % err)
+
+	return errors
+
+def addSamba(res, rname, form, model):
+	errors = list()
+
+	try:
+		workgroup = form['workgroup'].strip()
+		if not workgroup:
+			raise Exception, 'No workgroup was given for "%s"' % rname
+		res.addAttribute('workgroup', workgroup)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addSmb5: %s' % err)
+
+	return errors
+
+def addApache(res, rname, form, model):
+	errors = list()
+
+	try:
+		server_root = form['server_root'].strip()
+		if not server_root:
+			raise KeyError, 'No server root was given for "%s"' % rname
+		res.addAttribute('server_root', server_root)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache5: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the Apache configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache6: %s' % err)
+
+	try:
+		options = form['httpd_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('httpd_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('httpd_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache8: %s' % err)
+
+	return errors
+
+def addMySQL(res, rname, form, model):
+	errors = list()
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the MySQL configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL5: %s' % err)
+
+	try:
+		listen_addr = form['listen_address'].strip()
+		if not listen_addr:
+			raise KeyError, 'No address was given for "%s"' % rname
+		res.addAttribute('listen_address', listen_addr)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL6: %s' % err)
+
+	try:
+		options = form['mysql_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('mysql_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('mysql_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL8: %s' % err)
+
+	return errors
+
+def addOpenLDAP(res, rname, form, model):
+	errors = list()
+
+	try:
+		url_list = form['url_list'].strip()
+		if not url_list:
+			raise KeyError, 'No URL list was given for "%s"' % rname
+		res.addAttribute('url_list', url_list)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP5: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the OpenLDAP configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP6: %s' % err)
+
+	try:
+		options = form['slapd_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('slapd_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('slapd_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP8: %s' % err)
+
+	return errors
+
+def addPostgres8(res, rname, form, model):
+	errors = list()
+	try:
+		user = form['postmaster_user'].strip()
+		if not user:
+			raise KeyError, 'No postmaster user was given for "%s"' % rname
+		res.addAttribute('postmaster_user', user)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL85: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the PostgreSQL 8 configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL86: %s' % err)
+
+	try:
+		options = form['postmaster_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('postmaster_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('postmaster_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL87: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL88: %s' % err)
+
+	return errors
+
+def addTomcat5(res, rname, form, model):
+	errors = list()
+
+	try:
+		user = form['tomcat_user'].strip()
+		if not user:
+			raise KeyError, 'No Tomcat user was given for "%s"' % rname
+		res.addAttribute('tomcat_user', user)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat55: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the Tomcat 5 configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat56: %s' % err)
+
+	try:
+		options = form['catalina_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('catalina_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('catalina_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat57: %s' % err)
+
+	try:
+		catalina_base = form['catalina_base'].strip()
+		if not catalina_base:
+			raise KeyError, 'No catalina base directory was given for "%s"' % rname
+		res.addAttribute('catalina_base', catalina_base)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat58: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat59: %s' % err)
+
+	return errors
+
+def addLVM(res, rname, form, model):
+	errors = list()
+
+	try:
+		vg_name = form['vg_name'].strip()
+		if not vg_name:
+			raise KeyError, 'No volume group name was given for "%s"' % rname
+		res.addAttribute('vg_name', vg_name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addLVM5: %s' % err)
+
+	try:
+		lv_name = form['lv_name'].strip()
+		if not lv_name:
+			raise KeyError, 'No logical volume name was given for "%s"' % rname
+		res.addAttribute('lv_name', lv_name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addLVM6: %s' % err)
+
+	return errors
+
+def addSAPDatabase(res, rname, form, model):
+	errors = list()
+
+	res.removeAttribute('name')
+	res.addAttribute('SID', rname)
+
+	try:
+		dbtype = form['DBTYPE'].strip()
+		if not dbtype in [ 'ORA', 'DB6', 'ADA' ]:
+			raise Exception, 'You gave an invalid database type: %s' % dbtype
+		res.addAttribute('DBTYPE', dbtype)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addSAPD1: %s' % err)
+
+	if form.has_key('DBJ2EE_ONLY'):
+		res.addAttribute('DBJ2EE_ONLY', 'TRUE')
+	else:
+		res.removeAttribute('DBJ2EE_ONLY')
+
+	# Optional string parameters
+	for param in [ 'DIR_EXECUTABLE', 'NETSERVICENAME', 'DIR_BOOTSTRAP', 'DIR_SECSTORE' ]:
+		try:
+			pval = form[param].strip()
+			if not pval:
+				raise KeyError, 'blank'
+			res.addAttribute(param, pval)
+		except KeyError, e:
+			res.removeAttribute(param)
+		except Exception, e:
+			err = str(e)
+			errors.append(err)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('addSAPD2: %s %s' % (param, err))
+	return errors
+
+def addSAPInstance(res, rname, form, model):
+	errors = list()
+
+	res.removeAttribute('name')
+	res.addAttribute('InstanceName', rname)
+
+	# Optional string parameters
+	for param in [ 'DIR_EXECUTABLE', 'DIR_PROFILE', 'START_PROFILE' ]:
+		try:
+			pval = form[param].strip()
+			if not pval:
+				raise KeyError, 'blank'
+			res.addAttribute(param, pval)
+		except KeyError, e:
+			res.removeAttribute(param)
+		except Exception, e:
+			err = str(e)
+			errors.append(err)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('addSAPI1: %s %s' % (param, err))
+	return errors
+
+resource_table = {
+	'ip':			[ addIp,			Ip			],
+	'fs':			[ addFs,			Fs			],
+	'gfs':			[ addClusterfs,		Clusterfs	],
+	'nfsm':			[ addNetfs,			Netfs		],
+	'nfsx':			[ addNFSExport,		NFSExport	],
+	'nfsc':			[ addNFSClient,		NFSClient	],
+	'scr':			[ addScript,		Script		],
+	'smb':			[ addSamba,			Samba		],
+	'tomcat-5':		[ addTomcat5,		Tomcat5		],
+	'postgres-8':	[ addPostgres8,		Postgres8	],
+	'apache':		[ addApache,		Apache		],
+	'openldap':		[ addOpenLDAP,		OpenLDAP	],
+	'lvm':			[ addLVM,			LVM			],
+	'mysql':		[ addMySQL,			MySQL		],
+	'SAPDatabase':	[ addSAPDatabase,	SAPDatabase	],
+	'SAPInstance':	[ addSAPInstance,	SAPInstance	]
+}
+
+def create_resource(res_type, form, model):
+	if not resource_table.has_key(res_type):
+		raise Exception, [ 'Unknown resource type: "%s"' % res_type ]
+
+	res = None
+	if form.has_key('edit'):
+		if not form.has_key('oldname'):
+			raise Exception, [ 'Cannot find this resource\'s original name.' ]
+
+		oldname = form['oldname'].strip()
+		if not oldname:
+			raise Exception, [ 'Cannot find this resource\'s original name.' ]
+
+		try:
+			res = getResourceForEdit(model, oldname)
+			if not res:
+				raise Exception, 'not found'
+		except Exception, e:
+			raise Exception, [ 'No Resource named "%s" exists.' % oldname ]
+	else:
+		res = resource_table[res_type][1]()
+
+	if res_type != 'ip':
+		if not form.has_key('resourceName') or not form['resourceName'].strip():
+			raise Exception, [ 'All resources must have a unique name.' ]
+		rname = form['resourceName'].strip()
+		res.addAttribute('name', rname)
+	else:
+		rname = form['ip_address'].strip()
+
+	errors = resource_table[res_type][0](res, rname, form, model)
+	try:
+		dummy = getResourceForEdit(model, rname)
+		if dummy:
+			errors.append('A resource named "%s" already exists.' % rname)
+	except:
+		pass
+
+	if len(errors) > 0:
+		raise Exception, errors
+	return res
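
create_resource dispatches through resource_table: index 0 is the
per-type form handler and index 1 the ClusterModel class to instantiate.
A short sketch of calling it for a script resource, assuming an existing
ModelBuilder instance named model (the form values are invented; a Zope
REQUEST.form works the same way, since only has_key and item access are
used):

	form = {
		'resourceName': 'httpd-init',	# invented example values
		'file': '/etc/init.d/httpd',
	}
	try:
		res = create_resource('scr', form, model)
	except Exception, e:
		# create_resource raises with a list of error strings as payload
		for err in e.args[0]:
			print 'error: %s' % err
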
/cvs/cluster/conga/luci/site/luci/Extensions/RicciQueries.py,v  -->  standard output
revision 1.7.2.1
--- conga/luci/site/luci/Extensions/RicciQueries.py
+++ -	2007-08-09 21:35:23.742263000 +0000
@@ -0,0 +1,728 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from xml.dom import minidom
+from ricci_communicator import RicciCommunicator
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
+def addClusterNodeBatch(cluster_name,
+						install_base,
+						install_services,
+						install_shared_storage,
+						install_LVS,
+						upgrade_rpms,
+						gulm):
+	batch = list()
+
+	batch.append('<?xml version="1.0" ?>')
+	batch.append('<batch>')
+	batch.append('<module name="rpm">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="install">')
+	if upgrade_rpms:
+		batch.append('<var name="upgrade" type="boolean" value="true"/>')
+	else:
+		batch.append('<var name="upgrade" type="boolean" value="false"/>')
+	batch.append('<var name="sets" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	batch.append('<module name="service">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="disable">')
+	batch.append('<var mutable="false" name="services" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	need_reboot = install_base or install_services or install_shared_storage or install_LVS
+	if need_reboot:
+		batch.append('<module name="reboot">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="reboot_now"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="set_cluster.conf">')
+	batch.append('<var mutable="false" name="propagate" type="boolean" value="false"/>')
+	batch.append('<var mutable="false" name="cluster.conf" type="xml">')
+	batch.append('<cluster config_version="1" name="%s">' % cluster_name)
+	batch.append('<fence_daemon post_fail_delay="0" post_join_delay="3"/>')
+	batch.append('<clusternodes/>')
+	batch.append('<cman/>')
+	batch.append('<fencedevices/>')
+	batch.append('<rm/>')
+	batch.append('</cluster>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	if install_shared_storage:
+		batch.append('<module name="storage">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="enable_clustered_lvm"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="start_node"/>')
+	batch.append('</request>')
+	batch.append('</module>')
+	batch.append('</batch>')
+
+	return minidom.parseString(''.join(batch)).firstChild
+
+def createClusterBatch( os_str,
+						cluster_name,
+						cluster_alias,
+						nodeList,
+						install_base,
+						install_services,
+						install_shared_storage,
+						install_LVS,
+						upgrade_rpms,
+						gulm_lockservers):
+
+	batch = list()
+	batch.append('<?xml version="1.0" ?>')
+	batch.append('<batch>')
+
+	batch.append('<module name="rpm">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="install">')
+	if upgrade_rpms:
+		batch.append('<var name="upgrade" type="boolean" value="true"/>')
+	else:
+		batch.append('<var name="upgrade" type="boolean" value="false"/>')
+
+	batch.append('<var name="sets" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm_lockservers:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	batch.append('<module name="service">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="disable">')
+	batch.append('<var mutable="false" name="services" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm_lockservers:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	need_reboot = install_base or install_services or install_shared_storage or install_LVS
+	if need_reboot:
+		batch.append('<module name="reboot">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="reboot_now"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="set_cluster.conf">')
+	batch.append('<var mutable="false" name="propagate" type="boolean" value="false"/>')
+	batch.append('<var mutable="false" name="cluster.conf" type="xml">')
+	batch.append('<cluster config_version="1" name="%s" alias="%s">' % (cluster_name, cluster_alias))
+	batch.append('<fence_daemon post_fail_delay="0" post_join_delay="3"/>')
+	batch.append('<clusternodes>')
+	x = 1
+	for i in nodeList:
+		if os_str == 'rhel4':
+			batch.append('<clusternode name="%s" votes="1"/>' % i)
+		else:
+			batch.append('<clusternode name="%s" votes="1" nodeid="%d"/>' % (i, x))
+		x += 1
+	batch.append('</clusternodes>')
+
+	if not gulm_lockservers:
+		if len(nodeList) == 2:
+			batch.append('<cman expected_votes="1" two_node="1"/>')
+		else:
+			batch.append('<cman/>')
+	batch.append('<fencedevices/>')
+	batch.append('<rm/>')
+	if gulm_lockservers:
+		batch.append('<gulm>')
+		for i in gulm_lockservers:
+			batch.append('<lockserver name="%s" />' % i)
+		batch.append('</gulm>')
+	batch.append('</cluster>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	if install_shared_storage:
+		batch.append('<module name="storage">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="enable_clustered_lvm"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="start_node">')
+	batch.append('<var mutable="false" name="cluster_startup" type="boolean" value="true"/>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+	batch.append('</batch>')
+
+	return minidom.parseString(''.join(batch)).firstChild
+
+def batchAttemptResult(doc):
+	if not doc:
+		return (None, None)
+
+	try:
+		batch = doc.getElementsByTagName('batch')
+		if not batch or len(batch) < 1:
+			raise Exception, 'no batch tag was found'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('BAR0: %r %s' % (e, str(e)))
+		return (None, None)
+
+	for i in batch:
+		try:
+			batch_number = str(i.getAttribute('batch_id'))
+			result = str(i.getAttribute('status'))
+			return (batch_number, result)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('BAR1: %s' % e)
+
+	if LUCI_DEBUG_MODE is True:
+		try:
+			luci_log.debug_verbose('BAR2: batchid, status not in \"%s\"' \
+				% doc.toxml())
+		except:
+			pass
+	return (None, None)
+
+def getClusterStatusBatch(rc):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+	ricci_xml = rc.batch_run(batch_str, async=False)
+
+	if not ricci_xml:
+		return None
+
+	try:
+		cluster_tags = ricci_xml.getElementsByTagName('cluster')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB0: %r %s' % (e, str(e)))
+		return None
+
+	if len(cluster_tags) < 1:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB1: %d entries - expecting 1' \
+				% len(cluster_tags))
+		return None
+	elif len(cluster_tags) > 1:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB2: %d entries, expecting 1; using the first' \
+				% len(cluster_tags))
+
+	try:
+		cluster_node = cluster_tags[0]
+		if not cluster_node:
+			raise Exception, 'element 0 is None'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB3: %r %s' % (e, str(e)))
+		return None
+
+	try:
+		doc = minidom.Document()
+		doc.appendChild(cluster_node)
+		return doc
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB4: %r %s' % (e, str(e)))
+
+	return None
+
+def setClusterConf(rc, clusterconf, propagate=True):
+	if propagate is True:
+		propg = 'true'
+	else:
+		propg = 'false'
+
+	conf = str(clusterconf).replace('<?xml version="1.0"?>', '')
+	conf = conf.replace('<?xml version="1.0" ?>', '')
+	conf = conf.replace('<? xml version="1.0"?>', '')
+	conf = conf.replace('<? xml version="1.0" ?>', '')
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="%s"/><var type="xml" mutable="false" name="cluster.conf">%s</var></function_call></request></module>' % (propg, conf)
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def getNodeLogs(rc):
+	from time import time, ctime
+
+	errstr = 'log not accessible'
+
+	batch_str = '<module name="log"><request API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="18000"/><var mutable="false" name="tags" type="list_str"></var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml:
+		return errstr
+	try:
+		log_entries = ricci_xml.getElementsByTagName('logentry')
+		if not log_entries or len(log_entries) < 1:
+			raise Exception, 'no log data is available.'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL0: retrieving log data from %s: %r %s' \
+				% (rc.hostname(), e, str(e)))
+		return None
+
+	time_now = time()
+	entry_list = list()
+
+	try:
+		# Show older entries first.
+		log_entries.sort(lambda x, y: int(y.getAttribute('age')) - int(x.getAttribute('age')))
+	except:
+		pass
+
+	for i in log_entries:
+		try:
+			log_msg = i.getAttribute('msg')
+		except:
+			log_msg = ''
+
+		if not log_msg:
+			continue
+
+		try:
+			log_age = int(i.getAttribute('age'))
+		except:
+			log_age = 0
+
+		try:
+			log_domain = i.getAttribute('domain')
+		except:
+			log_domain = ''
+
+		try:
+			log_pid = i.getAttribute('pid')
+		except:
+			log_pid = ''
+
+		if log_age:
+			entry_list.append('%s ' % ctime(time_now - log_age))
+		if log_domain:
+			entry_list.append(log_domain)
+		if log_pid:
+			entry_list.append('[%s]' % log_pid)
+		entry_list.append(': %s<br/>' % log_msg)
+	return ''.join(entry_list)
+
+def nodeReboot(rc):
+	batch_str = '<module name="reboot"><request API_version="1.0"><function_call name="reboot_now"/></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def nodeLeaveCluster(rc, cluster_shutdown=False, purge=False):
+	cshutdown = 'false'
+	if cluster_shutdown is True:
+		cshutdown = 'true'
+
+	purge_conf = 'true'
+	if purge is False:
+		purge_conf = 'false'
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="%s"/><var mutable="false" name="purge_conf" type="boolean" value="%s"/></function_call></request></module>' % (cshutdown, purge_conf)
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def nodeFence(rc, nodename):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="%s"/></function_call></request></module>' % nodename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def nodeJoinCluster(rc, cluster_startup=False):
+	cstartup = 'false'
+	if cluster_startup is True:
+		cstartup = 'true'
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="%s"/></function_call></request></module>' % cstartup
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def startService(rc, servicename, preferrednode=None):
+	if preferrednode is not None:
+		batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value="%s"/><var mutable="false" name="nodename" type="string" value="%s"/></function_call></request></module>' % (servicename, preferrednode)
+	else:
+		batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value="%s"/></function_call></request></module>' % servicename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def migrateService(rc, servicename, preferrednode):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="migrate_service"><var mutable="false" name="servicename" type="string" value="%s"/><var mutable="false" name="nodename" type="string" value="%s" /></function_call></request></module>' % (servicename, preferrednode)
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def updateServices(rc, enable_list, disable_list):
+	batch_list = list()
+
+	if enable_list and len(enable_list) > 0:
+		batch_list.append('<module name="service"><request API_version="1.0"><function_call name="enable"><var mutable="false" name="services" type="list_xml">')
+		for i in enable_list:
+			batch_list.append('<service name="%s"/>' % str(i))
+		batch_list.append('</var></function_call></request></module>')
+
+	if disable_list and len(disable_list) > 0:
+		batch_list.append('<module name="service"><request API_version="1.0"><function_call name="disable"><var mutable="false" name="services" type="list_xml">')
+		for i in disable_list:
+			batch_list.append('<service name="%s"/>' % str(i))
+		batch_list.append('</var></function_call></request></module>')
+
+	if len(batch_list) < 1:
+		return None, None
+	ricci_xml = rc.batch_run(''.join(batch_list))
+	return batchAttemptResult(ricci_xml)
+
+def restartService(rc, servicename):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"%s\"/></function_call></request></module>' % servicename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def stopService(rc, servicename):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"%s\"/></function_call></request></module>' % servicename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def svc_manage(rc, hostname, servicename, op):
+	svc_func = None
+
+	doc = minidom.Document()
+	elem = doc.createElement('result')
+	elem.setAttribute('success', '0')
+
+	if not servicename:
+		elem.setAttribute('service', 'No service name was specified.')
+		elem.setAttribute('message', 'No service name was specified.')
+
+	if not op:
+		elem.setAttribute('operation', 'No operation was specified.')
+		elem.setAttribute('message', 'No operation was specified.')
+
+	if not servicename or not op:
+		doc.appendChild(elem)
+		return doc
+
+	elem.setAttribute('service', servicename)
+	elem.setAttribute('operation', op)
+	elem.setAttribute('hostname', hostname)
+
+	try:
+		op = op.strip().lower()
+		if op == 'restart' or op == 'start' or op == 'stop':
+			svc_func = op
+		else:
+			raise Exception, op
+	except Exception, e:
+		elem.setAttribute('message', 'Unknown operation: %s' % str(e))
+		doc.appendChild(elem)
+		return doc
+
+	batch_str = '<module name="service"><request API_version="1.0"><function_call name="%s"><var mutable="false" name="services" type="list_xml"><service name="%s"/></var></function_call></request></module>' % (svc_func, servicename)
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		elem.setAttribute('message', 'operation failed')
+		doc.appendChild(elem)
+		return doc
+
+	try:
+		mod_elem = ricci_xml.getElementsByTagName('module')
+		status_code = int(mod_elem[0].getAttribute('status'))
+		if status_code == 0:
+			var_elem = mod_elem[0].getElementsByTagName('var')
+			for i in var_elem:
+				name = i.getAttribute('name').lower()
+				if name == 'success':
+					success = i.getAttribute('value').lower()
+					if success == 'true':
+						elem.setAttribute('success', '1')
+						elem.setAttribute('message', 'success')
+					else:
+						elem.setAttribute('message', 'operation failed')
+					break
+		else:
+			err_msg = mod_elem[0].childNodes[1].getAttribute('description')
+			elem.setAttribute('message', err_msg)
+	except Exception, e:
+		elem.setAttribute('message', 'operation failed')
+
+	doc.appendChild(elem)
+	return doc
+
+def list_services(rc):
+	batch_str = '<module name="service"><request API_version="1.0"><function_call name="list"><var mutable="false" name="description" type="boolean" value="true"/></function_call></request></module>'
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LS0: None returned')
+		return None
+	try:
+		service_tags = ricci_xml.getElementsByTagName('service')
+		return service_tags
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LS1: %r %s' % (e, str(e)))
+	return None
+
+def nodeIsVirtual(rc):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="virt_guest"/></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('nodeIsVirtual0: None returned')
+		return None
+
+	var_tags = ricci_xml.getElementsByTagName('var')
+	if not var_tags or len(var_tags) < 2:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('nodeIsVirtual1: unexpected response %s' \
+				% ricci_xml.toxml())
+		return None
+
+	success = False
+	virtual = False
+	for i in var_tags:
+		try:
+			name = i.getAttribute('name')
+			if not name:
+				raise Exception, 'name is blank'
+			if name == 'success':
+				result = i.getAttribute('value')
+				if result == 'true':
+					success = True
+			elif name == 'virt_guest':
+				result = i.getAttribute('value')
+				if result == 'true':
+					virtual = True
+			else:
+				raise Exception, 'unexpected attribute name: %s' % name
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('nodeIsVirtual2: error: %r %s' \
+					% (e, str(e)))
+
+	if not success:
+		return None
+	return virtual
+
+def getDaemonStates(rc, dlist):
+	batch_list = list()
+	batch_list.append('<module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">')
+
+	for item in dlist:
+		batch_list.append('<service name=\"%s\"/>' % item)
+	batch_list.append('</var></function_call></request></module>')
+
+	ricci_xml = rc.batch_run(''.join(batch_list), async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GDS0: no ricci_xml')
+		return None
+	result = extractDaemonInfo(ricci_xml.firstChild)
+	return result
+
+def extractDaemonInfo(bt_node):
+	if not bt_node:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EDI0: no bt_node')
+		return None
+
+	resultlist = list()
+	svc_nodes = bt_node.getElementsByTagName('service')
+	for node in svc_nodes:
+		svchash = {}
+		try:
+			name = node.getAttribute('name')
+			if not name:
+				raise Exception, 'No name'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI1: no service name: %r %s' \
+					% (e, str(e)))
+			name = '[unknown]'
+		svchash['name'] = name
+
+		try:
+			svc_enabled = node.getAttribute('enabled')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI2: no enabled: %r %s' \
+					% (e, str(e)))
+			svc_enabled = '[unknown]'
+		svchash['enabled'] = svc_enabled
+
+		try:
+			running = node.getAttribute('running')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI3: no running: %r %s' \
+					% (e, str(e)))
+			running = '[unknown]'
+		svchash['running'] = running
+		resultlist.append(svchash)
+
+	return resultlist
+
+def getClusterConf(rc):
+	import xml.dom
+
+	if rc is None:
+		return None
+
+	doc = minidom.Document()
+	batch = doc.createElement('batch')
+	module = doc.createElement('module')
+	module.setAttribute('name', 'cluster')
+	request = doc.createElement('request')
+	request.setAttribute('API_version', '1.0')
+	call = doc.createElement('function_call')
+	call.setAttribute('name', 'get_cluster.conf')
+	request.appendChild(call)
+	module.appendChild(request)
+	batch.appendChild(module)
+
+	# temporary workaround for ricci bug
+	system_info = rc.hostname()
+	try:
+		rc = RicciCommunicator(system_info)
+		if rc is None:
+			raise Exception, 'unknown error'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCC0: connecting to %s: %r %s' \
+				% (system_info, e, str(e)))
+		return None
+	# end workaround
+
+	try:
+		ret = rc.process_batch(batch)
+		if not ret:
+			raise Exception, 'no XML response'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCC1: process_batch error for %s: %r %s' \
+				% (system_info, e, str(e)))
+		return None
+
+	var_nodes = ret.getElementsByTagName('var')
+	for i in var_nodes:
+		if i.getAttribute('name') == 'cluster.conf':
+			for j in i.childNodes:
+				if j.nodeType == xml.dom.Node.ELEMENT_NODE:
+					return j
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GCC2: no conf node found')
+	return None
+
+def set_xvm_key(rc, key_base64):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_xvm_key"><var mutable="false" name="key_base64" type="string" value="%s"/></function_call></request></module>' % key_base64
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)

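
Every helper in this file follows one convention: serialize a
<module><request><function_call> document, run it with rc.batch_run, and
reduce the reply with batchAttemptResult to a (batch_id, status) pair.
A sketch of what a new query would look like under that convention (the
set_quorumd function name is invented purely for illustration and is not
part of the ricci API shown here):

	def setQuorumdInterval(rc, interval):
		# Hypothetical query; only the framing mirrors the helpers above.
		batch_str = '<module name="cluster"><request API_version="1.0">' \
			'<function_call name="set_quorumd">' \
			'<var mutable="false" name="interval" type="int" value="%d"/>' \
			'</function_call></request></module>' % interval
		ricci_xml = rc.batch_run(batch_str)
		return batchAttemptResult(ricci_xml)
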


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-09-28  5:36 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-09-28  5:36 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-09-28 05:36:39

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py RicciQueries.py 

Log message:
	Enable and disable cluster services at boot time only when adding or deleting nodes. When starting and stopping nodes, don't enable or disable them automatically.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&r1=1.6&r2=1.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&r1=1.7&r2=1.8

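Read as a call matrix, the change below amounts to the following (a
paraphrase of the diff, not committed code; the scenario labels are added
for illustration):

	# adding a node:    NodeJoinCluster(..., enable_services=True)  (default)
	# starting a node:  NodeJoinCluster(..., enable_services=False)
	# stopping a node:  NodeLeaveCluster(..., disable_services=False)
	# deleting a node:  rq.nodeLeaveCluster(rc, purge=True, disable_services=True)
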
--- conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/09/11 16:04:33	1.6
+++ conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/09/28 05:36:39	1.7
@@ -227,8 +227,13 @@
 # Cluster node membership-related tasks
 #
 
-def NodeJoinCluster(self, rc, clustername, nodename_resolved):
-	batch_number, result = rq.nodeJoinCluster(rc)
+def NodeJoinCluster(self,
+					rc,
+					clustername,
+					nodename_resolved,
+					enable_services=True):
+
+	batch_number, result = rq.nodeJoinCluster(rc, enable_services=enable_services)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
@@ -249,7 +254,8 @@
 						rc,
 						clustername,
 						nodename_resolved,
-						stop_cluster=False):
+						stop_cluster=False,
+						disable_services=True):
 	reported_cluname = None
 	try:
 		cluster_info = rc.cluster_info()
@@ -270,7 +276,9 @@
 				% nodename_resolved)
 		return None
 
-	batch_number, result = rq.nodeLeaveCluster(rc, cluster_shutdown=stop_cluster)
+	batch_number, result = rq.nodeLeaveCluster(rc,
+							cluster_shutdown=stop_cluster,
+							disable_services=disable_services)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('NLC2: %s: batch_number or result is None' \
@@ -375,7 +383,9 @@
 	# First, delete cluster.conf from node to be deleted.
 	# next, have node leave cluster.
 
-	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
+	batch_number, result = rq.nodeLeaveCluster(rc,
+							purge=True,
+							disable_services=True)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('ND5: batch_number and/or result is None')
@@ -473,7 +483,7 @@
 			errors += 1
 			continue
 
-		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved, enable_services=False) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('CStart1: nodeJoin %s' \
 					% nodename_resolved)
@@ -514,7 +524,8 @@
 				errors += 1
 		else:
 			ret = NodeLeaveCluster(self, rc, clustername,
-					nodename_resolved, stop_cluster=True)
+					nodename_resolved, stop_cluster=True,
+					disable_services=False)
 			if ret is None:
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
@@ -528,13 +539,14 @@
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('cluRestart0: ClusterStop: %d errs' \
 				% snum_err)
+		return snum_err
 
 	jnum_err = ClusterStart(self, model)
 	if jnum_err:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('cluRestart1: ClusterStart: %d errs' \
 				% jnum_err)
-	return snum_err + jnum_err
+	return jnum_err
 
 def ClusterDelete(self, model):
 	try:
--- conga/luci/site/luci/Extensions/RicciQueries.py	2007/08/08 21:00:07	1.7
+++ conga/luci/site/luci/Extensions/RicciQueries.py	2007/09/28 05:36:39	1.8
@@ -113,7 +113,9 @@
 
 	batch.append('<module name="cluster">')
 	batch.append('<request API_version="1.0">')
-	batch.append('<function_call name="start_node"/>')
+	batch.append('<function_call name="start_node">')
+	batch.append('<var mutable="false" name="enable_services" type="boolean" value="true"/>')
+	batch.append('</function_call>')
 	batch.append('</request>')
 	batch.append('</module>')
 	batch.append('</batch>')
@@ -412,7 +414,10 @@
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
-def nodeLeaveCluster(rc, cluster_shutdown=False, purge=False):
+def nodeLeaveCluster(	rc,
+						cluster_shutdown=False,
+						purge=False,
+						disable_services=True):
 	cshutdown = 'false'
 	if cluster_shutdown is True:
 		cshutdown = 'true'
@@ -421,7 +426,11 @@
 	if purge is False:
 		purge_conf = 'false'
 
-	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="%s"/><var mutable="false" name="purge_conf" type="boolean" value="%s"/></function_call></request></module>' % (cshutdown, purge_conf)
+	disable_svc = 'true'
+	if disable_services is False:
+		disable_svc = 'false'
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="%s"/><var mutable="false" name="purge_conf" type="boolean" value="%s"/><var mutable="false" name="disable_services" type="boolean" value="%s"/></function_call></request></module>' % (cshutdown, purge_conf, disable_svc)
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
@@ -432,12 +441,16 @@
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
-def nodeJoinCluster(rc, cluster_startup=False):
+def nodeJoinCluster(rc, cluster_startup=False, enable_services=True):
 	cstartup = 'false'
 	if cluster_startup is True:
 		cstartup = 'true'
 
-	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="%s"/></function_call></request></module>' % cstartup
+	enable_svc = 'true'
+	if enable_services is False:
+		enable_svc = 'false'
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="%s"/><var mutable="false" name="enable_services" type="boolean" value="%s"/></function_call></request></module>' % (cstartup, enable_svc)
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
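
Taken together, the new keyword arguments let the add/delete-node paths
manage on-boot service state while plain start/stop leaves it alone. A
minimal usage sketch under the signatures above (the host name is a
placeholder, and the import paths are assumed from the module layout):

	import RicciQueries as rq
	from ricci_communicator import RicciCommunicator

	rc = RicciCommunicator('node1.example.com')

	# Cluster start: join the cluster without also enabling the
	# cluster services at boot.
	batch_number, result = rq.nodeJoinCluster(rc, enable_services=False)

	# Node deletion: leave the cluster, purge cluster.conf, and
	# disable the cluster services at boot.
	batch_number, result = rq.nodeLeaveCluster(rc, purge=True,
							disable_services=True)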




* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2007-10-22 19:24 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2007-10-22 19:24 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-10-22 19:24:03

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py 

Log message:
	Use the node name in cluster.conf instead of the node name in the luci database for forced fencing

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.4&r2=1.1.4.5

--- conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/08/17 20:26:32	1.1.4.4
+++ conga/luci/site/luci/Extensions/LuciClusterActions.py	2007/10/22 19:24:02	1.1.4.5
@@ -601,10 +601,10 @@
 	if rc is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('FNF0: no ricci to fence %s for cluster %s' \
-				% (nodename_resolved, clustername))
+				% (nodename, clustername))
 		return None
 			
-	batch_number, result = rq.nodeFence(rc, nodename_resolved)
+	batch_number, result = rq.nodeFence(rc, nodename)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('FNF1: batch_number and/or result is None')
@@ -614,7 +614,7 @@
 		set_node_flag(self, clustername, rc.hostname(),
 			str(batch_number), NODE_FENCE,
 			'Node "%s" is being fenced by node "%s"' \
-				% (nodename_resolved, rc.hostname()))
+				% (nodename, rc.hostname()))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('FNF2: failed to set flags: %r %s' \
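
The change matters because fencing operates on the node name as it
appears in cluster.conf, which may differ from the resolved name luci
stores in its own database. A sketch of the corrected call, using the
variable names from the hunk above:

	# nodename:          the node's name from cluster.conf
	# nodename_resolved: the name stored in the luci database
	batch_number, result = rq.nodeFence(rc, nodename)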




* [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...
@ 2010-08-05 18:16 rmccabe
  0 siblings, 0 replies; 9+ messages in thread
From: rmccabe @ 2010-08-05 18:16 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2010-08-05 18:16:12

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py 

Log message:
	Make sure cluster services are enabled when adding new nodes and disabled when deleting nodes
	
	Resolves: rhbz#590143

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.7&r2=1.1.4.8

--- conga/luci/site/luci/Extensions/LuciClusterActions.py	2008/03/12 15:13:12	1.1.4.7
+++ conga/luci/site/luci/Extensions/LuciClusterActions.py	2010/08/05 18:16:12	1.1.4.8
@@ -226,7 +226,7 @@
 						clustername,
 						nodename_resolved,
 						stop_cluster=False,
-						disable_services=True):
+						disable_services=False):
 	reported_cluname = None
 	try:
 		cluster_info = rc.cluster_info()
@@ -458,7 +458,7 @@
 			errors += 1
 			continue
 
-		if NodeJoinCluster(self, rc, clustername, nodename_resolved, enable_services=False) is None:
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved, enable_services=True) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('CStart1: nodeJoin %s' \
 					% nodename_resolved)
@@ -499,7 +499,8 @@
 				errors += 1
 		else:
 			ret = NodeLeaveCluster(self, rc, clustername,
-					nodename_resolved, stop_cluster=True,
+					nodename_resolved,
+					stop_cluster=True,
 					disable_services=False)
 			if ret is None:
 				if LUCI_DEBUG_MODE is True:
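
With this change NodeLeaveCluster() defaults to the non-destructive
behavior: stopping a node leaves its on-boot service configuration
alone unless the caller passes disable_services=True (node deletion
gets the destructive behavior by calling rq.nodeLeaveCluster() with
purge=True and disable_services=True directly, as in the 2007-09-28
patch earlier in this thread). An illustrative sketch of the two
styles:

	# Stop a node; cluster services stay enabled for the next boot.
	NodeLeaveCluster(self, rc, clustername, nodename_resolved)

	# Also disable the cluster services at boot (the delete case).
	NodeLeaveCluster(self, rc, clustername, nodename_resolved,
			disable_services=True)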




end of thread, other threads:[~2010-08-05 18:16 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
2007-05-14 18:00 [Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct rmccabe
  -- strict thread matches above, loose matches on Subject: below --
2007-05-18  5:23 rmccabe
2007-05-22 21:52 rmccabe
2007-05-23 21:21 rmccabe
2007-07-26  4:21 rmccabe
2007-08-09 21:35 rmccabe
2007-09-28  5:36 rmccabe
2007-10-22 19:24 rmccabe
2010-08-05 18:16 rmccabe
