From: rmccabe@sourceware.org <rmccabe@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
Date: 25 Sep 2006 22:59:16 -0000
Message-ID: <20060925225916.22775.qmail@sourceware.org>

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-09-25 22:59:15

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 
	                           homebase_adapters.py ricci_bridge.py 

Log message:
	add node stuff

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.69&r2=1.70
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.18&r2=1.19

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/25 21:00:14	1.69
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/25 22:59:15	1.70
@@ -23,7 +23,7 @@
 #then only display chooser if the current user has 
 #permissions on at least one. If the user is admin, show ALL clusters
 
-from homebase_adapters import nodeAuth, nodeUnauth, manageCluster
+from homebase_adapters import nodeAuth, nodeUnauth, manageCluster, createClusterSystems
 
 CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
 
@@ -151,6 +151,7 @@
 			cluster_properties['isComplete'] = False
 			errors.append(error)
 			return (False, {'errors': errors, 'requestResults':cluster_properties })
+
 		batch_id_map = {}
 		for i in nodeList:
 			try:
@@ -189,7 +190,8 @@
 
 def validateAddClusterNode(self, request):
 	errors = list()
-	messages = list()
+	messages = list() 
+	requestResults = {}
 
 	try:
 	 	sessionData = request.SESSION.get('checkRet')
@@ -199,7 +201,7 @@
 	if 'clusterName' in request.form:
 		clusterName = request.form['clusterName']
 	else:
-		return (False, {'errors': [ 'Cluster name is missing'] })
+		return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
 
 	try:
 		numStorage = int(request.form['numStorage'])
@@ -207,7 +209,7 @@
 			raise
 	except:
 		errors.append('You must specify at least one node to add to the cluster')
-		return (False, {'errors': [ errors ] })
+		return (False, {'errors': [ errors ], 'requestResults': requestResults })
 
 	ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
 	errors.extend(ret[0])
@@ -225,18 +227,43 @@
 
 	i = 0
 	while i < len(nodeList):
+		clunode = nodeList[i]
 		try:
-			x = 0 # ricci call succeeds
-			messages.append('Cluster join initiated for host \"' + i['ricci_host'] + '\"')
+			batchNode = addClusterNodeBatch(clusterName, True, False, False)
+			if not batchNode:
+				raise
 			del nodeList[i]
 		except:
-			i['errors'] = True
-			errors.append('Unable to initiate node creation for host \"' + i['ricci_host'] + '\"')
-			cluster_properties['isComplete'] = 0
+			clunode['errors'] = True
+			nodeUnauth(nodeList)
+			cluster_properties['isComplete'] = False
+			errors.append('Unable to initiate node creation for host \"' + clunode['ricci_host'] + '\"')
 
 	if not cluster_properties['isComplete']:
 		return (False, {'errors': errors, 'requestResults': cluster_properties})
 
+	error = createClusterSystems(self, clusterName, nodeList)
+	if error:
+		nodeUnauth(nodeList)
+		cluster_properties['isComplete'] = False
+		errors.append(error)
+		return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+	batch_id_map = {}
+	for i in nodeList:
+		clunode = nodeList[i]
+		try:
+			rc = RicciCommunicator(clunode['ricci_host'])
+			resultNode = rc.process_batch(batchNode, async=True)
+			batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
+			messages.append('Cluster join initiated for host \"' + clunode['ricci_host'] + '\"')
+		except:
+			nodeUnauth(nodeList)
+			cluster_properties['isComplete'] = False
+			errors.append('An error occurred while attempting to add cluster node \"' + clunode['ricci_host'] + '\"')
+			return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+	buildClusterCreateFlags(self, batch_id_map, clusterName)
 	return (True, {'errors': errors, 'messages': messages})
 
 formValidators = {
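
[For reference, the node-add path this patch introduces in validateAddClusterNode comes down to: build one batch document with addClusterNodeBatch(), then hand it to each node's ricci agent asynchronously and record the batch_id it returns so the join can be polled later. Below is a minimal, self-contained sketch of that dispatch loop. StubRicciCommunicator and dispatch_join are illustrative stand-ins written for this note, not code from the tree; the real client is RicciCommunicator, whose process_batch() returns a DOM node carrying a batch_id attribute.]

# Sketch of the asynchronous per-node dispatch added above (illustration only).
class StubRicciCommunicator(object):
    """Stand-in for the ricci agent client."""
    def __init__(self, hostname):
        self.hostname = hostname

    def process_batch(self, batch_xml, run_async=True):
        # The real client submits batch_xml to the ricci agent; run
        # asynchronously, it returns right away with a batch id to poll.
        return {'batch_id': 'batch-' + self.hostname}

def dispatch_join(cluster_name, node_list, batch_xml):
    """Submit the join batch to every node, collecting batch ids."""
    batch_id_map = {}
    errors = []
    for clunode in node_list:
        host = clunode['ricci_host']
        try:
            rc = StubRicciCommunicator(host)
            result = rc.process_batch(batch_xml, run_async=True)
            batch_id_map[host] = result['batch_id']
        except Exception:
            errors.append('Unable to add node "%s" to cluster "%s"' % (host, cluster_name))
            break
    return batch_id_map, errors

if __name__ == '__main__':
    nodes = [{'ricci_host': 'node1.example.com'},
             {'ricci_host': 'node2.example.com'}]
    print(dispatch_join('mycluster', nodes, '<batch/>'))
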
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2006/08/01 16:27:53	1.20
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2006/09/25 22:59:15	1.21
@@ -986,6 +986,92 @@
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except: pass
 
+def createClusterSystems(self, clusterName, nodeList):
+	try:
+		clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+		if not clusterObj:
+			raise
+	except:
+		nodeUnauth(nodeList)
+		return 'No cluster named \"' + clusterName + '\" is managed by Luci'
+
+	for i in nodeList:
+		if 'ricci_host' in i:
+			host = str(i['ricci_host'])
+		else:
+			host = str(i['host'])
+
+		try:
+			clusterObj.manage_addFolder(host, '__luci__:csystem:' + clusterName)
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+			if not newSystem:
+				raise
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except:
+			nodeUnauth(nodeList)
+			return 'Unable to create cluster node \"' + host + '\" for cluster \"' + clusterName + '\"'
+
+	try:
+		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+		if not ssystem:
+			raise
+	except:
+		return
+
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for i in nodeList:
+		if 'ricci_host' in i:
+			host = str(i['ricci_host'])
+		else:
+			host = str(i['host'])
+
+		try:
+			# It's already there, as a storage system, no problem.
+			exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			continue
+		except: pass
+
+		try:
+			ssystem.manage_addFolder(host, '__luci__:system')
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except: pass
+
+def delSystem(self, systemName):
+	try:
+		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+	except:
+		return 'Unable to find storage system \"' + systemName + '\"'
+
+	try:
+		rc = RicciCommunicator(systemName)
+		if not rc:
+			raise
+	except:
+		return 'Unable to connect to the ricci agent on \"' + systemName + '\" to unauthenticate'
+
+	# Only unauthenticate if the system isn't a member of
+	# a managed cluster.
+	cluster_info = rc.cluster_info()
+	if not cluster_info[0]:
+		try: rc.unauth()
+		except: pass
+	else:
+		try:
+			newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + rc.system_name())
+		except:
+			try: rc.unauth()
+			except: pass
+
+	try:
+		ssystem.manage_delObjects([systemName])
+	except:
+		return 'Unable to delete storage system \"' + systemName + '\"'
+
+
 def delSystem(self, systemName):
 	try:
 		ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
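
[createClusterSystems above records the new nodes in Luci's object database: one folder per host under /systems/cluster/<clusterName>/ tagged '__luci__:csystem:<clusterName>', plus an entry under /systems/storage/ for any host not already registered there. The sketch below models that layout with plain nested dicts instead of Zope's restrictedTraverse()/manage_addFolder() calls; create_cluster_systems and the db structure are hypothetical, written only to illustrate the flow.]

def create_cluster_systems(db, cluster_name, node_list):
    """Register each node under db['cluster'][cluster_name]; return an error string or None."""
    cluster = db.get('cluster', {}).get(cluster_name)
    if cluster is None:
        return 'No cluster named "%s" is managed by Luci' % cluster_name

    # One entry per node, mirroring the per-host folders the patch creates.
    for node in node_list:
        host = node.get('ricci_host', node.get('host'))
        cluster[host] = {'tag': '__luci__:csystem:' + cluster_name}

    # Also register each host as a storage system, unless it is already known.
    storage = db.setdefault('storage', {})
    for node in node_list:
        host = node.get('ricci_host', node.get('host'))
        if host in storage:
            continue
        storage[host] = {'tag': '__luci__:system'}
    return None

if __name__ == '__main__':
    db = {'cluster': {'mycluster': {}}, 'storage': {}}
    err = create_cluster_systems(db, 'mycluster',
                                 [{'ricci_host': 'node1.example.com'},
                                  {'host': 'node2.example.com'}])
    print(err, db)
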
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/09/23 04:04:08	1.18
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/09/25 22:59:15	1.19
@@ -594,6 +594,57 @@
     #parse out log entry  
     return payload
 
+def addClusterNodeBatch(cluster_name, services, shared_storage, LVS):
+	batch = '<?xml version="1.0" ?>'
+	batch += '<batch>'
+	batch += '<module name="rpm">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="install">'
+	batch += '<var name="sets" type="list_xml">'
+	batch += '<set name="Cluster Base"/>'
+	if services:
+		batch += '<set name="Cluster Service Manager"/>'
+	if shared_storage:
+		batch += '<set name="Clustered Storage"/>'
+	if LVS:
+		batch += '<set name="Linux Virtual Server"/>'
+	batch += '</var>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="reboot">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="reboot_now"/>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="cluster">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="set_cluster.conf">'
+	batch += '<var mutable="false" name="propagate" type="boolean" value="false"/>'
+	batch += '<var mutable="false" name="cluster.conf" type="xml">'
+	batch += '<cluster config_version="1" name="' + cluster_name + '">'
+	batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
+	batch += '<clusternodes/>'
+	batch += '<cman/>'
+	batch += '<fencedevices/>'
+	batch += '<rm/>'
+	batch += '</cluster>'
+	batch += '</var>'
+	batch += '</function_call>'
+	batch += '</request>'
+	batch += '</module>'
+
+	batch += '<module name="cluster">'
+	batch += '<request API_version="1.0">'
+	batch += '<function_call name="start_node"/>'
+	batch += '</request>'
+	batch += '</module>'
+	batch += '</batch>'
+
+	return minidom.parseString(batch).firstChild
+
 def createClusterBatch(cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
     batch = '<?xml version="1.0" ?>'
     batch += '<batch>'
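
[addClusterNodeBatch() assembles its batch document by plain string concatenation. As an illustration only (not code from the tree), the ElementTree sketch below builds the same four-step batch, which makes the structure easier to read: install the selected package sets, reboot, push a skeleton cluster.conf without propagating it, then start the cluster stack on the node.]

import xml.etree.ElementTree as ET

def sketch_add_node_batch(cluster_name, services, shared_storage, lvs):
    batch = ET.Element('batch')

    # Step 1: install the selected package sets via the rpm module.
    rpm = ET.SubElement(batch, 'module', name='rpm')
    req = ET.SubElement(rpm, 'request', API_version='1.0')
    call = ET.SubElement(req, 'function_call', name='install')
    sets = ET.SubElement(call, 'var', name='sets', type='list_xml')
    ET.SubElement(sets, 'set', name='Cluster Base')
    if services:
        ET.SubElement(sets, 'set', name='Cluster Service Manager')
    if shared_storage:
        ET.SubElement(sets, 'set', name='Clustered Storage')
    if lvs:
        ET.SubElement(sets, 'set', name='Linux Virtual Server')

    # Step 2: reboot so the newly installed packages take effect.
    reboot = ET.SubElement(batch, 'module', name='reboot')
    req = ET.SubElement(reboot, 'request', API_version='1.0')
    ET.SubElement(req, 'function_call', name='reboot_now')

    # Step 3: hand the node a skeleton cluster.conf (not propagated).
    cl = ET.SubElement(batch, 'module', name='cluster')
    req = ET.SubElement(cl, 'request', API_version='1.0')
    call = ET.SubElement(req, 'function_call', name='set_cluster.conf')
    ET.SubElement(call, 'var', mutable='false', name='propagate', type='boolean', value='false')
    conf = ET.SubElement(call, 'var', mutable='false', name='cluster.conf', type='xml')
    cluster = ET.SubElement(conf, 'cluster', config_version='1', name=cluster_name)
    ET.SubElement(cluster, 'fence_daemon', post_fail_delay='0', post_join_delay='3')
    for tag in ('clusternodes', 'cman', 'fencedevices', 'rm'):
        ET.SubElement(cluster, tag)

    # Step 4: start the cluster stack on the node.
    start = ET.SubElement(batch, 'module', name='cluster')
    req = ET.SubElement(start, 'request', API_version='1.0')
    ET.SubElement(req, 'function_call', name='start_node')

    return ET.tostring(batch)

if __name__ == '__main__':
    print(sketch_add_node_batch('mycluster', True, False, False))
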


