From: rmccabe@sourceware.org <rmccabe@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
Date: 14 Dec 2006 17:03:00 -0000
Message-ID: <20061214170300.32387.qmail@sourceware.org>

CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-12-14 17:02:57

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	fix for the case in delete cluster where we delete the last remaining node

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.180&r2=1.181
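
In brief: clusterStop() now dispatches to nodeDelete() instead of nodeLeave() for each member when called with delete=True, and clusterDelete() only removes the cluster's record from the luci database if every node was stopped without error. A condensed sketch of the new control flow follows (error handling and logging trimmed; stop_or_delete_node() is a hypothetical stand-in for the body of clusterStop()'s per-node loop, see the full diff below for the real code):

def stop_or_delete_node(self, rc, model, clustername, nodename, nodename_resolved, delete):
	# Hypothetical wrapper around one iteration of clusterStop()'s loop;
	# returns 1 on error, 0 on success.
	if delete is True:
		# The cluster is being deleted: remove the node outright
		# (nodeDelete also handles the last-remaining-node case).
		if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
			return 1
	else:
		# Ordinary stop: the node just leaves the running cluster.
		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
			return 1
	return 0

def clusterDelete(self, model):
	# Stop all nodes with delete=True; only if no errors occurred do we
	# drop the cluster's entry from the luci database.
	if clusterStop(self, model, delete=True) < 1:
		delCluster(self, model.getClusterName())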

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/11 22:42:34	1.180
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/14 17:02:56	1.181
@@ -26,7 +26,7 @@
 from clusterOS import resolveOSType
 from FenceHandler import FenceHandler, FENCE_OPTS
 from GeneralError import GeneralError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
+from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode, delCluster
 from LuciSyslog import LuciSyslog
 
 #Policy for showing the cluster chooser menu:
@@ -2807,14 +2807,20 @@
 		try:
 			rc = RicciCommunicator(nodename_resolved)
 		except Exception, e:
-			luci_log.debug_verbose('[%d] CStop0: RC %s: %s' \
+			luci_log.debug_verbose('CStop0: [%d] RC %s: %s' \
 				% (delete, nodename_resolved, str(e)))
 			errors += 1
 			continue
-		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('[%d] CStop1: nodeLeave %s' \
-				% (delete, nodename_resolved))
-			errors += 1
+
+		if delete is True:
+			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
+				luci_log.debug_verbose('CStop1: nodeDelete failed')
+				errors += 1
+		else:
+			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+				luci_log.debug_verbose('CStop2: nodeLeave %s' \
+					% (delete, nodename_resolved))
+				errors += 1
 	return errors
 
 def clusterRestart(self, model):
@@ -2827,7 +2833,18 @@
 	return snum_err + jnum_err
 
 def clusterDelete(self, model):
-	return clusterStop(self, model, delete=True)
+	if clusterStop(self, model, delete=True) < 1:
+		try:
+			clustername = model.getClusterName()
+		except Exception, e:
+			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+			return None
+
+		try:
+			delCluster(self, clustername)
+		except Exception, e:
+			luci_log.debug_verbose('clusterDelete0: %s: %s' \
+				% (clustername, str(e)))
 
 def forceNodeReboot(self, rc, clustername, nodename_resolved):
 	batch_number, result = nodeReboot(rc)
@@ -2908,75 +2925,89 @@
 	return True
 
 def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved):
-	#We need to get a node name other than the node
-	#to be deleted, then delete the node from the cluster.conf
-	#and propogate it. We will need two ricci agents for this task.
+	# We need to get a node name other than the node
+	# to be deleted, then delete the node from the cluster.conf
+	# and propogate it. We will need two ricci agents for this task,
+	# unless we are deleting the last remaining cluster node.
+
+	if len(model.getNodes()) == 1:
+		# If this is the last cluster node, we don't need a second
+		# node to propagate a new cluster.conf file. We need only to
+		# stop the final node and delete its cluster.conf file.
+		last_node = True
+	else:
+		# Make sure we can find a second node before we hose anything.
 
-	# Make sure we can find a second node before we hose anything.
-	path = str(CLUSTER_FOLDER_PATH + clustername)
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		if not clusterfolder:
-			raise Exception, 'no cluster folder at %s' % path
-	except Exception, e:
-		luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
-				% (clustername, str(e)))
-		return None
+		last_node = False
+		found_one = False
 
-	try:
-		nodes = clusterfolder.objectItems('Folder')
-		if not nodes or len(nodes) < 1:
-			raise Exception, 'no cluster nodes in DB'
-	except Exception, e:
-		luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
-			% (clustername, str(e)))
+		path = str(CLUSTER_FOLDER_PATH + clustername)
 
-	found_one = False
-	for node in nodes:
-		if node[1].getId().find(nodename) != (-1):
-			continue
-		#here we make certain the node is up...
-		# XXX- we should also make certain this host is still
-		# in the cluster we believe it is.
 		try:
-			rc2 = RicciCommunicator(node[1].getId())
+			clusterfolder = self.restrictedTraverse(path)
+			if not clusterfolder:
+				raise Exception, 'no cluster folder@%s' % path
 		except Exception, e:
-			luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
-			continue
+			luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
+			return None
 
-		if not rc2.authed():
-			try:
-				setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes in DB'
+		except Exception, e:
+			luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
+				% (clustername, str(e)))
+
+		for node in nodes:
+			if node[1].getId().find(nodename) != (-1):
+				continue
+			# here we make certain the node is up...
+			# XXX- we should also make certain this host is still
+			# in the cluster we believe it is.
 
 			try:
-				snode = getStorageNode(self, node[0])
-				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
+				rc2 = RicciCommunicator(node[1].getId())
+				if not rc2:
+					raise Exception, 'ND1a: rc2 is None'
+			except Exception, e:
+				luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
+				continue
 
-			luci_log.debug_verbose('ND3: %s is not authed' % node[0])
-			rc2 = None
-			continue
-		else:
-			found_one = True
-			break
+			if not rc2.authed():
+				try:
+					setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
 
-	if not found_one:
-		luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
-		return None
+				try:
+					snode = getStorageNode(self, node[0])
+					setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				luci_log.debug_verbose('ND3: %s is not authed' % node[0])
+				rc2 = None
+				continue
+			else:
+				found_one = True
+				break
+
+		if not found_one:
+			luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+			return None
 
-	#First, delete cluster.conf from node to be deleted.
-	#next, have node leave cluster.
+	# First, delete cluster.conf from node to be deleted.
+	# next, have node leave cluster.
 	batch_number, result = nodeLeaveCluster(rc, purge=True)
 	if batch_number is None or result is None:
 		luci_log.debug_verbose('ND5: batch_number and/or result is None')
 		return None
 
-	#It is not worth flagging this node in DB, as we are going
-	#to delete it anyway. Now, we need to delete node from model
-	#and send out new cluster.conf
+	# It is not worth flagging this node in DB, as we are going
+	# to delete it anyway. Now, we need to delete node from model
+	# and send out new cluster.conf
 	delete_target = None
 	nodelist = model.getNodes()
 	find_node = lower(nodename)
@@ -2989,27 +3020,32 @@
 			continue
 
 	if delete_target is None:
-		luci_log.debug_verbose('ND6: unable to find delete target for %s in %s' \
+		luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
 			% (nodename, clustername))
 		return None
 
-	model.deleteNode(delete_target)
-
 	try:
-		str_buf = model.exportModelAsString()
-		if not str_buf:
-			raise Exception, 'model string is blank'
+		model.deleteNode(delete_target)
 	except Exception, e:
-		luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
-		return None
+		luci_log.debug_verbose('ND6a: deleteNode %s failed: %s' \
+			% (delete_target.getName(), str(e)))
 
-	# propagate the new cluster.conf via the second node
-	batch_number, result = setClusterConf(rc2, str(str_buf))
-	if batch_number is None:
-		luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
-		return None
+	if not last_node:
+		try:
+			str_buf = model.exportModelAsString()
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
+			return None
 
-	#Now we need to delete the node from the DB
+		# propagate the new cluster.conf via the second node
+		batch_number, result = setClusterConf(rc2, str(str_buf))
+		if batch_number is None:
+			luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+			return None
+
+	# Now we need to delete the node from the DB
 	path = str(CLUSTER_FOLDER_PATH + clustername)
 	del_path = str(path + '/' + nodename_resolved)
 
@@ -3021,10 +3057,12 @@
 		luci_log.debug_verbose('ND9: error deleting %s: %s' \
 			% (del_path, str(e)))
 
-	try:
-		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
-	except Exception, e:
-		luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
+	if not last_node:
+		try:
+			set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
+		except Exception, e:
+			luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
+
 	return True
 
 def nodeTaskProcess(self, model, request):
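
The key change in nodeDelete() above is the last-node branch: when the node being removed is the only remaining cluster member, there is no second machine that could receive and propagate an updated cluster.conf, so the conf-propagation and busy-flag steps are skipped and only the purge-and-leave step runs. A rough outline of that flow, with the DB cleanup, authentication checks and logging omitted; find_second_ricci_agent() and find_model_node() are hypothetical stand-ins for the inline lookups the real function performs:

def delete_node_outline(self, rc, model, clustername, nodename, nodename_resolved):
	# Deleting the last member needs no second ricci agent.
	last_node = len(model.getNodes()) == 1

	if not last_node:
		# Normal case: find another authenticated ricci agent (rc2)
		# that will receive the updated cluster.conf.
		rc2 = find_second_ricci_agent(self, clustername, nodename)  # hypothetical helper
		if rc2 is None:
			return None

	# Purge cluster.conf on the departing node and have it leave.
	batch_number, result = nodeLeaveCluster(rc, purge=True)
	if batch_number is None or result is None:
		return None

	# Drop the node from the in-memory cluster model.
	model.deleteNode(find_model_node(model, nodename))  # hypothetical helper

	if not last_node:
		# Push the new cluster.conf through the surviving node and set
		# the busy flag so the UI can track the delete batch.
		batch_number, result = setClusterConf(rc2, str(model.exportModelAsString()))
		if batch_number is None:
			return None
		set_node_flag(self, clustername, rc2.hostname(), str(batch_number),
			NODE_DELETE, "Deleting node '%s'" % nodename_resolved)

	return True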


