cluster-devel.redhat.com archive mirror
 help / color / mirror / Atom feed
* [Cluster-devel] conga/luci/site/luci/Extensions RicciQueries.p ...
@ 2007-10-09 20:20 rmccabe
  0 siblings, 0 replies; 2+ messages in thread
From: rmccabe @ 2007-10-09 20:20 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2007-10-09 20:20:03

Modified files:
	luci/site/luci/Extensions: RicciQueries.py conga_constants.py 
	                           conga_storage_constants.py 
	                           ricci_communicator.py 

Log message:
	Cleanup

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&r1=1.8&r2=1.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.44&r2=1.45
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_storage_constants.py.diff?cvsroot=cluster&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.30&r2=1.31

--- conga/luci/site/luci/Extensions/RicciQueries.py	2007/09/28 05:36:39	1.8
+++ conga/luci/site/luci/Extensions/RicciQueries.py	2007/10/09 20:20:02	1.9
@@ -429,7 +429,7 @@
 	disable_svc = 'true'
 	if disable_services is False:
 		disable_svc = 'false'
-		
+
 	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="%s"/><var mutable="false" name="purge_conf" type="boolean" value="%s"/><var mutable="false" name="disable_services" type="boolean" value="%s"/></function_call></request></module>' % (cshutdown, purge_conf, disable_svc)
 
 	ricci_xml = rc.batch_run(batch_str)
--- conga/luci/site/luci/Extensions/conga_constants.py	2007/09/24 21:19:42	1.44
+++ conga/luci/site/luci/Extensions/conga_constants.py	2007/10/09 20:20:02	1.45
@@ -74,26 +74,26 @@
 NODE_FORCE_DELETE		= '105'
 
 # General tasks
-BASECLUSTER	= '201'
-FENCEDAEMON	= '202'
-MULTICAST	= '203'
-QUORUMD		= '204'
-
-PROPERTIES_TAB = 'tab'
-
-PROP_GENERAL_TAB	= '1'
-PROP_FENCE_TAB		= '2'
-PROP_MCAST_TAB		= '3'
-PROP_QDISK_TAB		= '4'
-PROP_GULM_TAB		= '5'
-
-PAGETYPE	= 'pagetype'
-ACTIONTYPE	= 'actiontype'
-TASKTYPE	= 'tasktype'
-CLUNAME		= 'clustername'
-BATCH_ID	= 'batch_id'
-FLAG_DESC	= 'flag_desc'
-LAST_STATUS	= 'last_status'
+BASECLUSTER				= '201'
+FENCEDAEMON				= '202'
+MULTICAST				= '203'
+QUORUMD					= '204'
+
+PROPERTIES_TAB			= 'tab'
+
+PROP_GENERAL_TAB		= '1'
+PROP_FENCE_TAB			= '2'
+PROP_MCAST_TAB			= '3'
+PROP_QDISK_TAB			= '4'
+PROP_GULM_TAB			= '5'
+
+PAGETYPE				= 'pagetype'
+ACTIONTYPE				= 'actiontype'
+TASKTYPE				= 'tasktype'
+CLUNAME					= 'clustername'
+BATCH_ID				= 'batch_id'
+FLAG_DESC				= 'flag_desc'
+LAST_STATUS				= 'last_status'
 
 PATH_TO_PRIVKEY	= '/var/lib/luci/var/certs/privkey.pem'
 PATH_TO_CACERT	= '/var/lib/luci/var/certs/cacert.pem'
--- conga/luci/site/luci/Extensions/conga_storage_constants.py	2007/09/24 21:19:42	1.10
+++ conga/luci/site/luci/Extensions/conga_storage_constants.py	2007/10/09 20:20:02	1.11
@@ -11,13 +11,13 @@
 
 ## request vars ##
 
-PAGETYPE = 'pagetype'
-CLUNAME = 'clustername'
-STONAME = 'storagename'
-
-PT_MAPPER_ID = 'mapper_id'
-PT_MAPPER_TYPE = 'mapper_type'
-PT_PATH = 'bd_path'
+PAGETYPE		= 'pagetype'
+CLUNAME			= 'clustername'
+STONAME			= 'storagename'
+
+PT_MAPPER_ID	= 'mapper_id'
+PT_MAPPER_TYPE	= 'mapper_type'
+PT_PATH			= 'bd_path'
 
 STORAGE_COMMIT_CHANGES = 'commit_changes'
 
@@ -35,19 +35,30 @@
 
 # mapper pretty names
 PRETTY_MAPPER_INFO = {
-	MAPPER_SYS_TYPE:		( 'Hard Drives',		'Hard Drive',		'BUG: source not defined' ),
-	MAPPER_VG_TYPE:			( 'Volume Group',		'Logical Volume',	'Physical Volume' ),
-	MAPPER_PT_TYPE:			( 'Partition Table',	'Partition',		'Disk' ),
-	MAPPER_MDRAID_TYPE:		( 'Software RAID',		'Volume',			'Disk' ),
-	MAPPER_ATARAID_TYPE:	( 'ATA-RAID',			'Volume', 			'Disk' ),
-	MAPPER_MULTIPATH_TYPE:	( 'Multipath', 			'Multipath',		'Path' ),
-	MAPPER_CRYPTO_TYPE:		( 'Encryption', 		'Volume',			'Device' ),
-	MAPPER_iSCSI_TYPE:		( 'iSCSI',				'Volume',			'BUG: source not defined' )
+	MAPPER_SYS_TYPE:
+		( 'Hard Drives',		'Hard Drive',		'BUG: source not defined' ),
+	MAPPER_VG_TYPE:
+		( 'Volume Group',		'Logical Volume',	'Physical Volume' ),
+	MAPPER_PT_TYPE:
+		( 'Partition Table',	'Partition',		'Disk' ),
+	MAPPER_MDRAID_TYPE:
+		( 'Software RAID',		'Volume',			'Disk' ),
+	MAPPER_ATARAID_TYPE:
+		( 'ATA-RAID',			'Volume',			'Disk' ),
+	MAPPER_MULTIPATH_TYPE:
+		( 'Multipath',			'Multipath',		'Path' ),
+	MAPPER_CRYPTO_TYPE:
+		( 'Encryption',			'Volume',			'Device' ),
+	MAPPER_iSCSI_TYPE:
+		( 'iSCSI',				'Volume',			'BUG: source not defined' )
 }
 
 def get_pretty_mapper_info(mapper_type):
 	return PRETTY_MAPPER_INFO.get(mapper_type) or \
-			( 'Mapper ' + mapper_type, mapper_type + '\s target', mapper_type + '\s source' )
+		(	'Mapper %s' % mapper_type,
+			'%s\s target' % mapper_type,
+			'%s\s source' % mapper_type
+		)
 
 PRETTY_PROP_NAMES = {
 	'active':					'Active',
@@ -148,14 +159,22 @@
 
 # icons
 MAPPER_ICONS = {
-	MAPPER_SYS_TYPE:		( '',							'icon_bd_ide.png',			''),
-	MAPPER_VG_TYPE:			( 'icon_mapper_VG.png',			'icon_bd_LV.png',			'icon_content_PV.png'),
-	MAPPER_PT_TYPE:			( 'icon_mapper_PT.png',			'icon_bd_partition.png',	''),
-	MAPPER_MDRAID_TYPE:		( 'icon_mapper_raid.png',		'icon_bd_raid.png',			''),
-	MAPPER_ATARAID_TYPE:	( 'icon_mapper_raid.png',		'icon_bd_raid.png',			''),
-	MAPPER_MULTIPATH_TYPE:	( 'icon_mapper_multipath.png',	'icon_bd_multipath.png',	''),
-	MAPPER_CRYPTO_TYPE:		( 'icon_mapper_crypto.png',		'icon_bd_crypto.png',		''),
-	MAPPER_iSCSI_TYPE:		( '',							'icon_bd_net.png',			'')
+	MAPPER_SYS_TYPE:
+		( '',							'icon_bd_ide.png',			''),
+	MAPPER_VG_TYPE:
+		( 'icon_mapper_VG.png',			'icon_bd_LV.png',			'icon_content_PV.png'),
+	MAPPER_PT_TYPE:
+		( 'icon_mapper_PT.png',			'icon_bd_partition.png',	''),
+	MAPPER_MDRAID_TYPE:
+		( 'icon_mapper_raid.png',		'icon_bd_raid.png',			''),
+	MAPPER_ATARAID_TYPE:
+		( 'icon_mapper_raid.png',		'icon_bd_raid.png',			''),
+	MAPPER_MULTIPATH_TYPE:
+		( 'icon_mapper_multipath.png',	'icon_bd_multipath.png',	''),
+	MAPPER_CRYPTO_TYPE:
+		( 'icon_mapper_crypto.png',		'icon_bd_crypto.png',		''),
+	MAPPER_iSCSI_TYPE:
+		( '',							'icon_bd_net.png',			'')
 }
 
 def get_mapper_icons(mapper_type):
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2007/09/24 21:19:42	1.30
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2007/10/09 20:20:02	1.31
@@ -220,7 +220,7 @@
 
 			if LUCI_DEBUG_NET is True:
 				luci_log.debug_net_priv('RC:PB2: sending "%s" to %s:%d: %r %s' \
-					 % (doc.toxml(), self.__hostname, self.__port, e, str(e)))
+					% (doc.toxml(), self.__hostname, self.__port, e, str(e)))
 			raise RicciError, errstr
 
 		# receive response
@@ -480,11 +480,11 @@
 #		total number of modules in batch
 # * num:
 #		if num == total:
-#			 all modules in the batch completed successfuly
+#			all modules in the batch completed successfuly
 #		if num > 0:
-#			 last seq. number of module that successfuly finished
+#			last seq. number of module that successfuly finished
 #		if num < 0:
-#			 module (-num) failed (next module won't be processed)
+#			module (-num) failed (next module won't be processed)
 
 def batch_status(batch_xml):
 	if batch_xml.nodeName != 'batch':



^ permalink raw reply	[flat|nested] 2+ messages in thread

* [Cluster-devel] conga/luci/site/luci/Extensions RicciQueries.p ...
@ 2007-10-22 19:29 rmccabe
  0 siblings, 0 replies; 2+ messages in thread
From: rmccabe @ 2007-10-22 19:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2007-10-22 19:29:38

Modified files:
	luci/site/luci/Extensions: RicciQueries.py cluster_adapters.py 

Log message:
	Generate node IDs for RHEL5 cman-based clusters

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.6&r2=1.1.4.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.39&r2=1.120.2.40

--- conga/luci/site/luci/Extensions/RicciQueries.py	2007/08/08 21:14:38	1.1.4.6
+++ conga/luci/site/luci/Extensions/RicciQueries.py	2007/10/22 19:29:38	1.1.4.7
@@ -204,9 +204,10 @@
 	batch.append('<cluster config_version="1" name="%s" alias="%s">' % (cluster_name, cluster_alias))
 	batch.append('<fence_daemon post_fail_delay="0" post_join_delay="3"/>')
 	batch.append('<clusternodes>')
+
 	x = 1
 	for i in nodeList:
-		if os_str == 'rhel4':
+		if gulm_lockservers:
 			batch.append('<clusternode name="%s" votes="1"/>' % i)
 		else:
 			batch.append('<clusternode name="%s" votes="1" nodeid="%d"/>' % (i, x))
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/08/27 18:36:45	1.120.2.39
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/10/22 19:29:38	1.120.2.40
@@ -34,7 +34,7 @@
 	CLUSTER_ADD, CLUSTER_CONFIG, CLUSTER_DAEMON, CLUSTER_DELETE, \
 	CLUSTER_FOLDER_PATH, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
 	DISABLE_SVC_TASK, ENABLE_SVC_TASK, FDOM, FDOM_ADD, FENCEDEV, \
-	FENCEDEV_NODE_CONFIG, FENCEDEVS, FLAG_DESC, INSTALL_TASK, \
+	FENCEDEV_NODE_CONFIG, FENCEDEVS, FLAG_DESC, INSTALL_TASK, CLUSTER_PROCESS, \
 	LAST_STATUS, LUCI_DEBUG_MODE, NODE, NODE_ADD, NODE_DELETE, \
 	NODE_FENCE, NODE_FORCE_DELETE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
 	NODE_REBOOT, NODES, POSSIBLE_REBOOT_MESSAGE, PRE_CFG, PRE_INSTALL, \
@@ -544,21 +544,32 @@
 		request.SESSION.set('add_node', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
+	gulm_ptr = None
+	next_node_id = 1
 	try:
 		model = getModelBuilder(None, cluster_ricci, cluster_ricci.dom0())
 		if not model:
 			errors.append('Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername)
 			raise Exception, 'unable to get model for %s' % clustername
-
 		nodesptr = model.getClusterNodesPtr()
-		used_ids = {}
+		gulm_ptr = model.getGULMPtr()
+		used_ids = []
+
 		for i in model.getNodes():
-			used_ids[int(i.getAttribute('nodeid'))] = 1
+			if not gulm_ptr:
+				used_ids.append(int(i.getAttribute('nodeid')))
+
 			node_name = str(i.getAttribute('name'))
 			if node_name in system_list:
 				system_list[node_name]['errors'] = True
 				errors.append('%s is already a member of %s' \
 					% (node_name, clustername))
+
+		if not gulm_ptr:
+			used_ids.sort()
+			used_list_len = len(used_ids)
+			if used_list_len > 0:
+				next_node_id = used_ids[used_list_len - 1] + 1
 	except Exception, e:
 		incomplete = True
 		errors.append('Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername)
@@ -569,8 +580,6 @@
 		request.SESSION.set('add_node', add_cluster)
 		return (False, { 'errors': errors, 'messages': messages })
 
-	next_node_id = 1
-
 	try:
 		for x in system_list:
 			i = system_list[x]
@@ -606,13 +615,13 @@
 						% (cur_host, e, str(e)))
 				continue
 
-			next_node_id += 1
 			new_node = ClusterNode()
 			new_node.attr_hash['name'] = str(i['host'])
 			new_node.attr_hash['votes'] = str(1)
-			while next_node_id in used_ids:
+
+			if not gulm_ptr:
+				new_node.attr_hash['nodeid'] = str(next_node_id)
 				next_node_id += 1
-			new_node.attr_hash['nodeid'] = str(next_node_id)
 			nodesptr.addChild(new_node)
 
 		if incomplete or len(errors) > 0:
@@ -2318,14 +2327,16 @@
 			luci_log.debug('CTP0: no cluster task')
 		return 'No cluster task was given'
 
+	cluname = fvar['clustername']
 	if not model:
-		cluname = fvar['clustername']
 		model = LuciExtractCluModel(self, request, cluname)
 		if model is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('CTP2: No cluster model for %s' \
 					% cluname)
 			return 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors.' % cluname
+	elif cluname is None:
+		cluname = model.getClusterName()
 
 	redirect_page = NODES
 	if task == CLUSTER_STOP:
@@ -2335,8 +2346,11 @@
 		from LuciClusterActions import ClusterStart
 		ClusterStart(self, model)
 	elif task == CLUSTER_RESTART:
-		from LuciClusterActions import ClusterRestart
-		ClusterRestart(self, model)
+		from LuciClusterActions import ClusterStop
+		ClusterStop(self, model)
+		request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&task=%s&busyfirst=true' \
+			% (baseurl, CLUSTER_PROCESS, cluname, CLUSTER_START))
+		return
 	elif task == CLUSTER_DELETE:
 		from LuciClusterActions import ClusterDelete
 		ret = ClusterDelete(self, model)



^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2007-10-22 19:29 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-10-09 20:20 [Cluster-devel] conga/luci/site/luci/Extensions RicciQueries.p rmccabe
  -- strict thread matches above, loose matches on Subject: below --
2007-10-22 19:29 rmccabe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).