* [Cluster-devel] conga/luci/site/luci/Extensions ricci_bridge.p ...
@ 2006-07-14 16:00 jparsons
0 siblings, 0 replies; 2+ messages in thread
From: jparsons @ 2006-07-14 16:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: jparsons at sourceware.org 2006-07-14 16:00:26
Modified files:
luci/site/luci/Extensions: ricci_bridge.py cluster_adapters.py
conga_constants.py
Log message:
Node config support
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.1&r2=1.2
--- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/05/30 20:17:21 1.1
+++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/07/14 16:00:26 1.2
@@ -188,6 +188,23 @@
batch_number, result = self.batchAttemptResult(payload)
return (batch_number, result)
+ def getDaemonStates(self, dlist):
+ CLUSTER_STR='<?xml version="1.0" ?><ricci async="false" function="process_batch" version="1.0"><batch><module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">'
+
+ for item in dlist:
+ CLUSTER_STR = CLUSTER_STR + '<service name=\"' + item + '\"/>'
+
+ CLUSTER_STR = CLUSTER_STR + '</var></function_call></request></module></batch></ricci>'
+
+ try:
+ payload = self.makeConnection(CLUSTER_STR)
+ except RicciReceiveError, r:
+ return None
+
+ result = self.extractDaemonInfo(payload)
+
+ return result
+
def makeConnection(self,query_str):
# socket
sock = socket(AF_INET, SOCK_STREAM)
@@ -379,6 +396,67 @@
return docc
+ def extractDaemonInfo(self, doc):
+ print "VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV"
+ print doc.toxml()
+ print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
+ resultlist = list()
+ docc = None
+ bt_node = None
+ for node in doc.firstChild.childNodes:
+ if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+ if node.nodeName == 'batch':
+ bt_node = node
+ if bt_node == None:
+ print "bt_node == None"
+ doc = None
+ else:
+ #print doc.toxml()
+ mod_node = None
+ for node in bt_node.childNodes:
+ if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+ if node.nodeName == 'module':
+ mod_node = node
+ if mod_node == None:
+ print "mod_node == None"
+ doc = None
+ else:
+ resp_node = None
+ for node in mod_node.childNodes:
+ if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+ resp_node = node
+ if resp_node == None:
+ print "resp_node == None"
+ doc = None
+ else:
+ fr_node = None
+ for node in resp_node.childNodes:
+ if node.nodeType == xml.dom.Node.ELEMENT_NODE:
+ fr_node = node
+ if fr_node == None:
+ print "fr_node == None"
+ doc = None
+ else:
+ varnode = None
+ for node in fr_node.childNodes:
+ if node.nodeName == 'var':
+ varnode = node
+ break
+ if varnode == None:
+ print "varnode == None"
+ doc = None
+ else:
+ svc_node = None
+ for node in varnode.childNodes:
+ if node.nodeName == 'service':
+ svchash = {}
+ svchash['name'] = node.getAttribute('name')
+ svchash['enabled'] = node.getAttribute('enabled')
+ svchash['running'] = node.getAttribute('running')
+ resultlist.append(svchash)
+
+ return resultlist
+
def batchAttemptResult(self, doc):
docc = None
rc_node = None
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/05/30 20:17:21 1.1
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/07/14 16:00:26 1.2
@@ -815,12 +815,14 @@
nmap = {}
name = item['name']
nmap['nodename'] = name
- cfgurl = baseurl + "?" + PAGETYPE + "=" + NODE_CONFIG + "&" + CLUNAME + "=" + clustername + "&nodename=" + name
+ cfgurl = baseurl + "?" + PAGETYPE + "=" + NODE + "&" + CLUNAME + "=" + clustername + "&nodename=" + name
nmap['configurl'] = cfgurl
- if item['online'] == "true":
- nmap['status'] = "online"
+ if item['clustered'] == "true":
+ nmap['status'] = NODE_ACTIVE
+ elif item['online'] == "false":
+ nmap['status'] = NODE_UNKNOWN
else:
- nmap['status'] = "offline"
+ nmap['status'] = NODE_INACTIVE
node_dict_list.append(nmap)
map['currentnodes'] = node_dict_list
@@ -828,7 +830,59 @@
return map
-
+def getNodeInfo(self, model, status, request):
+ infohash = {}
+ baseurl = request['URL']
+ nodestate = NODE_ACTIVE
+ #Get cluster name and node name from request
+ clustername = request['clustername']
+ nodename = request['nodename']
+ #extract correct node line from cluster status
+ found = False
+ for item in status:
+ if (item['type'] == "node") and (item['name'] == nodename):
+ found = True
+ break
+ if found == False:
+ raise UnknownClusterError("Fatal", "Unable to resolve node name in cluster status")
+ #Now determine state of node...
+ if item['online'] == "false":
+ nodestate = NODE_UNKNOWN
+ elif item['clustered'] == "true":
+ nodestate = NODE_ACTIVE
+ else:
+ nodestate = NODE_INACTIVE
+
+ infohash['nodestate'] = nodestate
+ infohash['nodename'] = nodename
+
+ #set up drop down links
+ if nodestate == NODE_ACTIVE:
+ infohash['jl_url'] = baseurl + "?pagetype=" + NODE_LEAVE_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
+ infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
+ infohash['fence_url'] = baseurl + "?pagetype=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
+ infohash['delete_url'] = baseurl + "?pagetype=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
+
+ if nodestate == NODE_INACTIVE:
+ infohash['jl_url'] = baseurl + "?pagetype=" + NODE_JOIN_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
+ infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
+ infohash['fence_url'] = baseurl + "?pagetype=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
+ infohash['delete_url'] = baseurl + "?pagetype=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
+
+ #return infohash
+ if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
+ #call service module on node and find out which daemons are running
+ rb = ricci_bridge(nodename)
+ dlist = list()
+ dlist.append("ccsd")
+ dlist.append("cman")
+ dlist.append("fenced")
+ dlist.append("rgmanager")
+ states = rb.getDaemonStates(dlist)
+ infohash['d_states'] = states
+
+ return infohash
+ #get list of faildoms for node
def getNodesInfo(self, model,status,req):
resultlist = list()
--- conga/luci/site/luci/Extensions/conga_constants.py 2006/05/30 20:17:21 1.1
+++ conga/luci/site/luci/Extensions/conga_constants.py 2006/07/14 16:00:26 1.2
@@ -41,3 +41,8 @@
PATH_TO_PRIVKEY="/var/lib/luci/var/certs/privkey.pem"
PATH_TO_CACERT="/var/lib/luci/var/certs/cacert.pem"
+
+#Node states
+NODE_ACTIVE="0"
+NODE_INACTIVE="1"
+NODE_UNKNOWN="2"
^ permalink raw reply [flat|nested] 2+ messages in thread
* [Cluster-devel] conga/luci/site/luci/Extensions ricci_bridge.p ...
@ 2006-10-31 13:16 rmccabe
0 siblings, 0 replies; 2+ messages in thread
From: rmccabe @ 2006-10-31 13:16 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2006-10-31 13:16:30
Modified files:
luci/site/luci/Extensions: ricci_bridge.py ricci_communicator.py
Log message:
make getElementsByTagName work for the XML fragments we get from ricci
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.37&r2=1.38
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.14&r2=1.15
--- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/10/30 21:21:17 1.37
+++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/10/31 13:16:30 1.38
@@ -218,7 +218,7 @@
try:
batch_number = i.getAttribute('batch_id')
result = i.getAttribute('status')
- return (batch_number, result)
+ return (str(batch_number), str(result))
except Exception, e:
luci_log.debug_verbose('batchAttemptResult: %s' % str(e))
--- conga/luci/site/luci/Extensions/ricci_communicator.py 2006/10/20 22:29:22 1.14
+++ conga/luci/site/luci/Extensions/ricci_communicator.py 2006/10/31 13:16:30 1.15
@@ -216,7 +216,9 @@
luci_log.debug('An error occurred while trying to process the batch job: %s' % batch_xml_str)
return None
- return ricci_xml
+ doc = minidom.Document()
+ doc.appendChild(ricci_xml)
+ return doc
def batch_report(self, batch_id):
luci_log.debug_verbose('[auth=%d] asking for batchid# %d for host %s' \
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2006-10-31 13:16 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-10-31 13:16 [Cluster-devel] conga/luci/site/luci/Extensions ricci_bridge.p rmccabe
-- strict thread matches above, loose matches on Subject: below --
2006-07-14 16:00 jparsons
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).