From mboxrd@z Thu Jan  1 00:00:00 1970
From: rmccabe@sourceware.org
Date: 7 Dec 2006 17:54:34 -0000
Subject: [Cluster-devel] conga/luci cluster/form-chooser cluster/form-m ...
Message-ID: <20061207175434.18986.qmail@sourceware.org>
List-Id:
To: cluster-devel.redhat.com
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL5
Changes by:	rmccabe at sourceware.org	2006-12-07 17:54:31

Modified files:
	luci/cluster             : form-chooser form-macros index_html
	                           resource-form-macros resource_form_handlers.js
	luci/homebase            : luci_homebase.css
	luci/site/luci/Extensions: ClusterNode.py FenceDevice.py LuciSyslog.py
	                           cluster_adapters.py conga_constants.py
	                           ricci_bridge.py

Log message:
	Related: bz217387, bz218040, bz212021

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-chooser.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.12.2.1&r2=1.12.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.6&r2=1.90.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20.2.4&r2=1.20.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21.2.2&r2=1.21.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource_form_handlers.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20.2.2&r2=1.20.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/luci_homebase.css.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.28.2.1&r2=1.28.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterNode.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceDevice.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciSyslog.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.3&r2=1.2.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.12&r2=1.120.2.13
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.19.2.2&r2=1.19.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.30.2.10&r2=1.30.2.11

--- conga/luci/cluster/form-chooser	2006/11/16 19:34:52	1.12.2.1
+++ conga/luci/cluster/form-chooser	2006/12/07 17:54:31	1.12.2.2
@@ -80,6 +80,9 @@
+ +
+
--- conga/luci/cluster/form-macros	2006/11/30 22:31:59	1.90.2.6
+++ conga/luci/cluster/form-macros	2006/12/07 17:54:31	1.90.2.7
@@ -77,7 +77,7 @@
@@ -122,14 +122,22 @@
+
+
+
+	An error occurred while attempting to get status information for this cluster. The information shown may be out of date.
+
+
+
+
-	Status:
-	Total Cluster Votes:
-	Minimum Required Quorum:
+	Status:
+	Total Cluster Votes:
+	Minimum Required Quorum:
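These status rows are filled in by getClusterStatus() in cluster_adapters.py, which this commit reworks into a fallback chain: live status from the ricci agent first, then the cluster model cached in the session, then the luci database. Fallback entries carry error=True and '[unknown]' values, which is what triggers the warning banner added above. A condensed sketch of the chain, using names from the cluster_adapters.py hunks later in this message (parse_status_doc is a hypothetical stand-in for the inline XML walk):

    def get_cluster_status(self, request, rc):
        # 1. Preferred source: live status from the ricci agent.
        try:
            doc = getClusterStatusBatch(rc)
            if doc:
                return parse_status_doc(doc)
        except Exception:
            pass
        # 2. Fall back to the cluster model cached in the session;
        #    entries are flagged error=True with '[unknown]' values.
        cinfo = getClusterStatusModel(request.SESSION.get('model'))
        if cinfo:
            return cinfo
        # 3. Last resort: node names stored in the luci database.
        return getClusterStatusDB(self, request['clustername'])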
@@ -1141,7 +1149,7 @@
- + @@ -1174,6 +1182,20 @@
Fence Type APC Power Switch
+
+ + + + + +
+ + + + +
@@ -1183,7 +1205,7 @@
- + @@ -1216,6 +1238,20 @@
Fence Type McData SAN Switch
+
+ + + + + +
+ + + + +
@@ -1225,7 +1261,7 @@
- + @@ -1251,6 +1287,20 @@
Fence Type WTI Power Switch
+
+ + + + + +
+ + + + +
@@ -1260,7 +1310,7 @@
- + @@ -1293,6 +1343,11 @@
Fence Type HP iLO
+ + + +
@@ -1302,7 +1357,7 @@
- + @@ -1334,6 +1389,11 @@ tal:attributes="value cur_fencedev/passwd | nothing" />
Fence Type Dell Drac
+ + + +
@@ -1343,7 +1403,7 @@
- + @@ -1376,6 +1436,11 @@
Fence Type IBM RSA II
+ + + +
@@ -1385,7 +1450,7 @@
- + @@ -1418,6 +1483,20 @@
Fence Type Brocade Fabric Switch
+
+ + + + + +
+ + + + +
@@ -1427,7 +1506,7 @@
- + @@ -1457,6 +1536,20 @@
Fence Type QLogic SANbox2
+
+ + + + + +
+ + + + +
@@ -1466,7 +1559,7 @@
- + @@ -1492,6 +1585,20 @@
Fence Type Vixel SAN Switch
+
+ + + + + +
+ + + + +
@@ -1501,7 +1608,7 @@
- + @@ -1520,6 +1627,20 @@
Fence Type GNBD
+
+ + + + + +
+ + + + +
@@ -1529,7 +1650,7 @@
- + @@ -1555,6 +1676,20 @@
Fence Type Egenera SAN Controller
+
+ + + + + +
+ + + + +
@@ -1564,7 +1699,7 @@
- + @@ -1597,6 +1732,11 @@
Fence Type IBM Blade Center
+ + + +
@@ -1606,7 +1746,7 @@
- + @@ -1639,6 +1779,20 @@
Fence Type Bull PAP
+
+ + + + + +
+ + + + +
@@ -1648,7 +1802,7 @@
- + @@ -1670,11 +1824,16 @@
Fence Type RPS10 Serial Switch
Port + tal:attributes="value cur_fencedev/port | nothing" />
- + + + + +
@@ -1682,8 +1841,8 @@ tal:attributes="id cur_fencedev/name | nothing">
- - +
+ @@ -1694,7 +1853,22 @@ tal:attributes="value cur_fencedev/name | nothing" /> -
Fence Type Virtual Machine Fencing
+ +
+ + + + + +
+ + + + + +
@@ -1702,8 +1876,8 @@ tal:attributes="id cur_fencedev/name | nothing">
- - +
+ @@ -1714,7 +1888,14 @@ tal:attributes="value cur_fencedev/name | nothing" /> -
Fence Type SCSI Reservation Fencing
+ + + + + + +
@@ -1723,7 +1904,7 @@
- + @@ -1760,6 +1941,11 @@
Fence Type IPMI Lan
+ + + +
@@ -1769,7 +1955,7 @@
- + @@ -1780,8 +1966,13 @@ tal:attributes="value cur_fencedev/name | nothing" /> -
Fence Type Manual Fencing
+ + + + +
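The archive has stripped the markup from the per-agent hunks above, but the surviving labels show each agent gaining a sub-form for reusing an existing (shared) fence device. For orientation only, a hypothetical mapping from those display labels to the standard RHCS fence agent names; the agent strings below are not taken from this diff:

    # Hypothetical label-to-agent table; illustrative only.
    FENCE_AGENTS = {
        'APC Power Switch':         'fence_apc',
        'McData SAN Switch':        'fence_mcdata',
        'WTI Power Switch':         'fence_wti',
        'HP iLO':                   'fence_ilo',
        'Dell Drac':                'fence_drac',
        'IBM RSA II':               'fence_rsa',
        'Brocade Fabric Switch':    'fence_brocade',
        'QLogic SANbox2':           'fence_sanbox2',
        'Vixel SAN Switch':         'fence_vixel',
        'GNBD':                     'fence_gnbd',
        'Egenera SAN Controller':   'fence_egenera',
        'IBM Blade Center':         'fence_bladecenter',
        'Bull PAP':                 'fence_bullpap',
        'Virtual Machine Fencing':  'fence_xvm',
        'SCSI Reservation Fencing': 'fence_scsi',
        'IPMI Lan':                 'fence_ipmilan',
        'Manual Fencing':           'fence_manual',
    }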
@@ -1863,172 +2054,206 @@
-
+
+ +
Port + tal:attributes="value cur_instance/port | nothing" />
Switch + tal:attributes="value cur_instance/switch | nothing" />
-
+
+ +
lpan + tal:attributes="value cur_instance/lpan | nothing" />
pserver + tal:attributes="value cur_instance/pserver | nothing" />
-
+
+ +
Port + tal:attributes="value cur_instance/port | nothing" />
-
+
+ +
Port + tal:attributes="value cur_instance/port | nothing" />
-
+
+ +
Port + tal:attributes="value cur_instance/port | nothing" />
-
+
+ +
Port + tal:attributes="value cur_instance/port | nothing" />
-
+
+ +
Port + tal:attributes="value cur_instance/port | nothing" />
-
+
+ +
IP Address + tal:attributes="value cur_instance/ipaddress | nothing" />
-
+
+ +
Blade + tal:attributes="value cur_instance/ipaddress | nothing" />
-
+
+ +
Domain + tal:attributes="value cur_instance/domain | nothing" />
-
+
+ +
Domain + tal:attributes="value cur_instance/domain | nothing" />
+
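The fence-instance forms above bind one field per tal:attributes expression, and the field set varies by agent: port and switch for APC, lpan and pserver for Bull PAP, a bare port for the simpler switches, ipaddress for the RSA and blade entries, and domain for virtual-machine fencing. A sketch of the per-type lookup this implies; the table is inferred from the cur_instance bindings above, not copied from the code:

    # Inferred from the cur_instance bindings; illustrative only.
    INSTANCE_FIELDS = {
        'fence_apc':     ('port', 'switch'),
        'fence_bullpap': ('lpan', 'pserver'),
        'fence_wti':     ('port',),
        'fence_xvm':     ('domain',),
    }

    def instance_attrs(agent_type, form):
        # Collect only the fields this agent's instance form defines.
        fields = INSTANCE_FIELDS.get(agent_type, ())
        return dict([(f, form.get(f, '')) for f in fields])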
@@ -2071,7 +2296,7 @@ global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
@@ -2302,7 +2539,7 @@ global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
@@ -2523,7 +2760,7 @@ global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" /> @@ -2546,9 +2783,31 @@
@@ -2566,7 +2825,7 @@ This service is stopped
-

Autostart is enabled for this service

+

Autostart is enabled for this service

@@ -2628,6 +2887,14 @@ + + + Automatically start this service + + + + +
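The "Automatically start this service" checkbox added here is consumed by the new autostart handling in validateServiceAdd(): an unchecked HTML checkbox is simply absent from the POST data, so the handler defaults to "1" and drops to "0" only when the key is missing or explicitly "0". Condensed from the cluster_adapters.py hunk later in this message:

    autostart = "1"
    try:
        # An unchecked checkbox sends no 'autostart' key at all.
        if not request.form.has_key('autostart') or request.form['autostart'] == "0":
            autostart = "0"
    except Exception:
        autostart = None  # leave the attribute unset on error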
@@ -2650,7 +2917,10 @@ + + +
@@ -2710,30 +2980,46 @@
- - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
@@ -2749,7 +3035,7 @@ @@ -2775,7 +3061,8 @@ tal:content="sinfo/name | nothing" tal:attributes="class python: running and 'running' or 'stopped'" /> - +
@@ -2790,14 +3077,15 @@ - - + @@ -2842,6 +3130,18 @@
+ + + + +
+ Automatically start this service +
+ + +
@@ -2857,7 +3157,10 @@ + + +
@@ -2881,6 +3184,11 @@

Service Process Form

+
+

Service Delete Form

+ +
+
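The service delete form posts to the new SERVICE_DELETE page type ("56" in conga_constants.py), handled by the delService() function added to cluster_adapters.py. Its flow, condensed from the hunk later in this message with the error handling elided:

    def del_service_flow(self, request):
        model = request.SESSION.get('model')
        name = request.form['servicename']
        clustername = request.form['clustername']
        rc = getRicciAgent(self, clustername)
        model.deleteService(name)              # drop the service from the model
        conf = model.exportModelAsString()     # re-serialize cluster.conf
        batch_number, result = setClusterConf(rc, str(conf))  # propagate via ricci
        set_node_flag(self, clustername, rc.hostname(), str(batch_number),
                      SERVICE_DELETE, "Removing service '%s'" % name)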

Resources Form

@@ -2918,7 +3226,7 @@ global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
@@ -3051,23 +3359,23 @@
- - -
+ + + - - - + + - - +
- +
+ Fencing Type
+
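Several of the forms above key off a device's agent type, which is why this commit also normalizes FenceDevice.getAgentType() (see the FenceDevice.py hunk later in this message) to strip any leading path from the agent attribute. A minimal sketch of that normalization:

    def agent_basename(agent):
        # rfind() returns -1 when there is no '/', and -1 + 1 == 0,
        # so a bare agent name comes back unchanged.
        return agent[agent.rfind('/') + 1:]

    assert agent_basename('/sbin/fence_apc') == 'fence_apc'
    assert agent_basename('fence_apc') == 'fence_apc'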
--- conga/luci/cluster/index_html	2006/11/16 19:34:52	1.20.2.4
+++ conga/luci/cluster/index_html	2006/12/07 17:54:31	1.20.2.5
@@ -100,25 +100,6 @@
 		window.open(href, windowname, 'width=500,height=500,scrollbars=yes');
 		return false;
 	}
-
-	function swap_fence_div(container_id, element_id) {
-		var container_element = document.getElementById(container_id)
-		if (!container_element) {
-			alert('Can\'t find element with id = ' + container_id);
-			return (-1);
-		}
-
-		var child_element = document.getElementById(element_id);
-		if (!child_element) {
-			alert('Can\'t find element with id = ' + element_id);
-			return (-1);
-		}
-		if (container_element.firstChild)
-			container_element.removeChild(container_element.firstChild);
-		var target = child_element.cloneNode(1);
-		target.id = null;
-		container_element.appendChild(target);
-	}
 	//-->
--- conga/luci/cluster/resource-form-macros	2006/11/16 19:34:53	1.21.2.2
+++ conga/luci/cluster/resource-form-macros	2006/12/07 17:54:31	1.21.2.3
@@ -271,7 +271,7 @@
- +
@@ -307,7 +307,10 @@ tal:attributes="name res/parent_uuid | nothing" tal:define="editDisabled resourceIsRef | nothing"> - + + - + + - + + - + + - + + - + + - + + - + + ]+>/ig).toString().replace(/>(,|$)/g, '/>'); - if (!temp) - continue; + errors = errors.concat(err); + var temp = ''; + + var input_elem = form[i].getElementsByTagName('input'); + for (var j = 0 ; j < input_elem.length ; j++) { + var res_type = input_elem[j].type; + if (res_type == 'hidden' || res_type == 'text' || + res_type == 'password') + { + temp += ''; + } else if (res_type == 'checkbox' || res_type == 'radio') { + if (input_elem[j].checked) + temp += ''; + } + form_xml += '' + temp + ''; } + if (!svc_name) + errors.push('No name was given for this service.'); + if (!form_xml) errors.push('No resource information was submitted.'); @@ -403,7 +443,15 @@ /* sort this out in the backend */ master_form.form_xml.value = '' + form_xml + ''; - if (confirm('Save changes to this service?')) + master_form.svc_name.value = svc_name; + master_form.autostart.value = autostart; + + var confirm_msg = null; + if (master_form.action.value == 'add') + confirm_msg = 'Create this service?'; + else + confirm_msg = 'Save changes to this service?'; + if (confirm(confirm_msg)) master_form.submit(); return (0); } --- conga/luci/homebase/luci_homebase.css 2006/11/16 19:34:53 1.28.2.1 +++ conga/luci/homebase/luci_homebase.css 2006/12/07 17:54:31 1.28.2.2 @@ -14,27 +14,27 @@ background: #dee7ec; } -div.fencedev { +div.fence { max-width: 700px; padding: .5em; } -*.fencedev { +*.fence { background: #dee7ec; } -table.fencedev { +table.fence { padding: .618em; width: 700px; } -div.fencedev, -td.fencedev { +div.fence, +td.fence { padding-top: .309em; padding-bottom: .309em; } -td.fencedev { +td.fence { width: 50%; } @@ -438,6 +438,21 @@ margin-bottom: .25em ! important; } +div.fence_add { + margin-top: .25em ! important; + margin-bottom: .25em ! 
important; +} + +div.fence_container { + border: thin solid #c9c9c9; +} + +div.fence_control { + margin-top: 2em; + margin-bottom: .75em; + margin-left: .33em; +} + div.service_comp_list { background: #dee7ec; max-width: 700px; --- conga/luci/site/luci/Extensions/ClusterNode.py 2006/05/30 20:17:21 1.1 +++ conga/luci/site/luci/Extensions/ClusterNode.py 2006/12/07 17:54:31 1.1.2.1 @@ -96,3 +96,10 @@ except KeyError, e: return "" + def getVotes(self): + try: + return self.getAttribute('votes') + except KeyError, e: + return "1" + except: + return None --- conga/luci/site/luci/Extensions/FenceDevice.py 2006/10/04 15:11:10 1.2 +++ conga/luci/site/luci/Extensions/FenceDevice.py 2006/12/07 17:54:31 1.2.2.1 @@ -19,7 +19,12 @@ def getAgentType(self): - return self.attr_hash["agent"] + agent = self.attr_hash["agent"] + try: + return agent[agent.rfind('/') + 1:] + except: + pass + return agent def isShared(self): agent = self.getAgentType() --- conga/luci/site/luci/Extensions/LuciSyslog.py 2006/11/16 19:34:53 1.2.2.3 +++ conga/luci/site/luci/Extensions/LuciSyslog.py 2006/12/07 17:54:31 1.2.2.4 @@ -78,6 +78,7 @@ def close(self): try: + self.__init = 0 closelog() except: pass --- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/11/30 20:12:38 1.120.2.12 +++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/07 17:54:31 1.120.2.13 @@ -12,8 +12,10 @@ from Ip import Ip from Clusterfs import Clusterfs from Fs import Fs +from RefObject import RefObject from NFSClient import NFSClient from NFSExport import NFSExport +from Service import Service from Netfs import Netfs from Vm import Vm from Script import Script @@ -396,19 +398,31 @@ response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true') def validateServiceAdd(self, request): + errors = list() + try: form_xml = request['form_xml'] if not form_xml: - raise KeyError('form_xml must not be blank') - except KeyError, e: + raise KeyError, 'form_xml must not be blank' + except Exception, e: + luci_log.debug_verbose('vSA0: no form_xml: %s' % str(e)) return (False, {'errors': ['No resource data was supplied for this service.']}) try: + model = request.SESSION.get('model') + if not model: + raise Exception, 'model is None' + except Exception, e: + luci_log.debug_verbose('vSA0a: %s' % str(e)) + return (False, {'errors': [ 'The cluster model is missing from the session object.' 
]}) + + try: doc = minidom.parseString(form_xml) forms = doc.getElementsByTagName('form') if len(forms) < 1: raise - except: + except Exception, e: + luci_log.debug_verbose('vSA1: error: %s' % str(e)) return (False, {'errors': ['The resource data submitted for this service is not properly formed.']}) form_hash = {} @@ -440,27 +454,145 @@ try: dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value')) except Exception, e: - luci_log.debug_verbose('Error parsing service XML: %s' % str(e)) + luci_log.debug_verbose('vSA2: parsing XML: %s' % str(e)) pass try: res_type = dummy_form['type'].strip() - if not res_type or not res_type in resourceAddHandler: - raise - except: - return (False, {'errors': ['An invalid resource type was specified: ' + res_type]}) + if not res_type: + raise Exception, 'no resource type was given' + if not res_type in resourceAddHandler: + raise Exception, 'invalid resource type: %s' % res_type + except Exception, e: + luci_log.debug_verbose('vSA3: %s' % str(e)) + return (False, {'errors': [ 'An invalid resource type was specified' ]}) try: - resObj = resourceAddHandler[res_type](request, dummy_form) - except: - luci_log('res type %d is invalid' % res_type) + if res_type == 'ip': + dummy_form['resourceName'] = dummy_form['ip_address'] + except Exception, e: + luci_log.debug_verbose('vSA3a: type is ip but no addr: %s' % str(e)) + return (False, {'errors': [ 'No IP address was given.' ]}) + + try: + if dummy_form.has_key('immutable'): + newRes = getResource(model, dummy_form['resourceName']) + resObj = RefObject(newRes) + resObj.setRef(newRes.getName()) + else: + resObj = resourceAddHandler[res_type](request, dummy_form)[0] + except Exception, e: resObj = None + luci_log.debug_verbose('vSA4: type %s: %s' % (res_type, str(e))) if resObj is None: - return (False, {'errors': ['An error occurred while adding ' + res_type]}) + return (False, {'errors': [ 'An error occurred while adding %s' % res_type ]}) form_hash[form_id]['obj'] = resObj - - return (True, {'messages': ['This service has been updated.']}) + + if len(errors) > 0: + return (False, {'errors': errors}) + + try: + service_name = request.form['svc_name'].strip() + except Exception, e: + luci_log.debug_verbose('vSA5: no service name: %s' % str(e)) + return (False, {'errors': [ 'No service name was given.' ]}) + + autostart = "1" + try: + if not request.form.has_key('autostart') or request.form['autostart'] == "0": + autostart = "0" + except Exception, e: + autostart = None + luci_log.debug_verbose('vSA5a: error getting autostart: %s' % str(e)) + + try: + cur_service = model.retrieveServiceByName(service_name) + except GeneralError, e: + luci_log.debug_verbose('vSA5b: no service named %s found' % service_name) + cur_service = None + except Exception, e: + luci_log.debug_verbose('vSA5c: no service named %s found: %s' % (service_name, str(e))) + cur_service = None + + try: + if request.form['action'] == 'edit': + if cur_service is None: + return (False, {'errors': [ 'The service %s could not be found for editing.' % service_name ]}) + model.deleteService(service_name) + elif request.form['action'] == 'add': + if cur_service is not None: + return (False, {'errors': [ 'A service with the name %s already exists.' % service_name ]}) + else: + luci_log.debug_verbose('vSA4a: unknown action %s' % request.form['action']) + return (False, {'errors': [ 'An unknown action was specified.' 
]}) + except Exception, e: + luci_log.debug_verbose('vSA5: no action type: %s' % str(e)) + + def buildSvcTree(parent, child_id_list): + for i in child_id_list: + try: + child = form_hash[i]['obj'] + if not child: + raise Exception, 'No object for %s' % i + except Exception, e: + luci_log.debug_verbose('bST0: %s' % str(e)) + continue + parent.addChild(child) + if 'kids' in form_hash[i]: + buildSvcTree(child, form_hash[i]['kids']) + + new_service = Service() + new_service.addAttribute('name', service_name) + if autostart is not None: + new_service.attr_hash['autostart'] = autostart + + buildSvcTree(new_service, form_hash['toplevel']['kids']) + model.resourcemanager_ptr.addChild(new_service) + + clustername = model.getClusterName() + if not clustername: + luci_log.debug_verbose('vAS6: no cluname from mb') + return (False, {'errors': [ 'Unable to determine cluster name' ]}) + + try: + conf = model.exportModelAsString() + if not conf: + raise Exception, 'model string for %s is blank' % clustername + except Exception, e: + luci_log.debug_verbose('vAS6a: exportModelAsString : %s' \ + % str(e)) + return (False, {'errors': [ 'An error occurred while adding this service.' ]}) + + rc = getRicciAgent(self, clustername) + if not rc: + luci_log.debug_verbose('vAS6b: unable to find a ricci agent for cluster %s' % clustername) + return 'Unable to find a ricci agent for the %s cluster' % clustername + + try: + ragent = rc.hostname() + if not ragent: + luci_log.debug_verbose('vAS7: missing ricci hostname') + raise Exception, 'unknown ricci agent hostname' + + batch_number, result = setClusterConf(rc, str(conf)) + if batch_number is None or result is None: + luci_log.debug_verbose('vAS8: missing batch_number or result') + raise Exception, 'unable to save the new cluster configuration.' 
+ except Exception, e: + luci_log.debug_verbose('vAS9: %s' % str(e)) + return 'An error occurred while propagating the new cluster.conf: %s' % str(e) + + try: + if request.form['action'] == 'edit': + set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_CONFIG, "Configuring service \'%s\'" % service_name) + else: + set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_ADD, "Adding new service \'%s\'" % service_name) + except Exception, e: + luci_log.debug_verbose('vAS10: failed to set flags: %s' % str(e)) + + response = request.RESPONSE + response.redirect(request['URL'] + "?pagetype=" + SERVICES + "&clustername=" + clustername + '&busyfirst=true') def validateResourceAdd(self, request): try: @@ -1712,23 +1844,133 @@ return None return getRicciAgent(self, clustername) -def getClusterStatus(self, rc): +def getClusterStatusModel(model): + results = list() + vals = {} + + try: + clustername = model.getClusterName() + clusteralias = model.getClusterAlias() + vals['type'] = 'cluster' + vals['alias'] = clusteralias + vals['name'] = clustername + vals['error'] = True + vals['votes'] = '[unknown]' + vals['quorate'] = '[unknown]' + vals['minQuorum'] = '[unknown]' + results.append(vals) + except Exception, e: + luci_log.debug_verbose('GCSM0: %s' % str(e)) + return None + + try: + nodelist = model.getNodes() + except Exception, e: + luci_log.debug_verbose('GCSM1: %s' % str(e)) + return None + + for node in nodelist: + node_val = {} + node_val['type'] = 'node' + try: + node_name = node.getName() + if not node_name: + raise Exception, 'cluster node name is unknown' + except: + node_name = '[unknown]' + + node_val['name'] = node_name + node_val['clustered'] = '[unknown]' + node_val['online'] = '[unknown]' + node_val['error'] = True + + try: + votes = node.getVotes() + if not votes: + raise Exception, 'unknown unmber of votes' + except: + votes = '[unknown]' + + node_val['votes'] = votes + results.append(node_val) + return results + +def getClusterStatusDB(self, clustername): + results = list() + vals = {} + + vals['type'] = 'cluster' + vals['alias'] = clustername + vals['name'] = clustername + vals['error'] = True + vals['quorate'] = '[unknown]' + vals['votes'] = '[unknown]' + vals['minQuorum'] = '[unknown]' + results.append(vals) + + try: + cluster_path = '%s/luci/systems/cluster/%s' % (CLUSTER_FOLDER_PATH, clustername) + nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder') + except Exception, e: + luci_log.debug_verbose('GCSDB0: %s: %s' % (clustername, str(e))) + return results + + for node in nodelist: + try: + node_val = {} + node_val['type'] = 'node' + node_val['name'] = node[0] + node_val['clustered'] = '[unknown]' + node_val['online'] = '[unknown]' + node_val['error'] = True + results.append(node_val) + except Exception, e: + luci_log.debug_verbose('GCSDB1: %s' % str(e)) + return results + +def getClusterStatus(self, request, rc): try: doc = getClusterStatusBatch(rc) + if not doc: + raise Exception, 'doc is None' except Exception, e: luci_log.debug_verbose('GCS0: error: %s' % str(e)) doc = None + if doc is None: + try: + model = request.SESSION.get('model') + cinfo = getClusterStatusModel(model) + if not cinfo or len(cinfo) < 1: + raise Exception, 'cinfo is None' + return cinfo + except Exception, e: + luci_log.debug_verbose('GCS1: %s' % str(e)) + doc = None + if not doc: try: - luci_log.debug_verbose('GCS1: returned None for %s/%s' % rc.cluster_info()) - except: - pass + clustername = None + try: + clustername = request['clustername'] + except: + 
try: + clustername = request.form['clustername'] + except: + pass - return {} + if not clustername: + raise Exception, 'unable to determine cluster name' - results = list() + cinfo = getClusterStatusDB(self, clustername) + if not cinfo or len(cinfo) < 1: + raise Exception, 'cinfo is None' + return cinfo + except Exception, e: + luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %s' % str(e)) + return [] + results = list() vals = {} vals['type'] = "cluster" @@ -1796,6 +2038,7 @@ itemmap['nodename'] = item['nodename'] itemmap['autostart'] = item['autostart'] itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE + itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE_DELETE svc = modelb.retrieveServiceByName(item['name']) dom = svc.getAttribute("domain") @@ -1847,6 +2090,7 @@ if item['name'] == servicename: hmap['name'] = servicename starturls = list() + hmap['autostart'] = item['autostart'] if item['running'] == "true": hmap['running'] = "true" #In this case, determine where it can run... @@ -1855,6 +2099,8 @@ innermap['current'] = "This service is currently running on %s" % nodename innermap['disableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_STOP innermap['restarturl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_RESTART + innermap['delurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_DELETE + nodes = modelb.getNodes() for node in nodes: starturl = {} @@ -1901,16 +2147,18 @@ if parent is not None: rc_map['parent'] = parent rc_map['name'] = child.getName() + + #Note: Final version needs all resource attrs if child.isRefObject() == True: rc_map['ref_object'] = True rc_map['type'] = child.getObj().getResourceType() + rc_map['attrs'] = child.getObj().getAttributes() else: rc_map['type'] = child.getResourceType() + rc_map['attrs'] = child.getAttributes() rc_map['indent_ctr'] = indent_ctr - #Note: Final version needs all resource attrs - rc_map['attrs'] = child.getAttributes() rc_map['uuid'] = make_uuid('resource') rc_map['parent_uuid'] = parent_uuid @@ -2186,39 +2434,31 @@ return {} if model is None: - rc = getRicciAgent(self, cluname) - if not rc: - luci_log.debug_verbose('GCI1: unable to find a ricci agent for the %s cluster' % cluname) - return {} try: - model = getModelBuilder(None, rc, rc.dom0()) + model = getModelForCluster(self, cluname) if not model: raise Exception, 'model is none' - - try: - req.SESSION.set('model', model) - except Exception, e2: - luci_log.debug_verbose('GCI2 unable to set model in session: %s' % str(e2)) + req.SESSION.set('model', model) except Exception, e: - luci_log.debug_verbose('GCI3: unable to get model for cluster %s: %s' % (cluname, str(e))) + luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %s' % (cluname, str(e))) return {} prop_baseurl = req['URL'] + '?' 
+ PAGETYPE + '=' + CLUSTER_CONFIG + '&' + CLUNAME + '=' + cluname + '&' - map = {} + clumap = {} basecluster_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_GENERAL_TAB #needed: - map['basecluster_url'] = basecluster_url + clumap['basecluster_url'] = basecluster_url #name field - map['clustername'] = model.getClusterAlias() + clumap['clustername'] = model.getClusterAlias() #config version cp = model.getClusterPtr() - map['config_version'] = cp.getConfigVersion() + clumap['config_version'] = cp.getConfigVersion() #------------- #new cluster params - if rhel5 #------------- #Fence Daemon Props fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB - map['fencedaemon_url'] = fencedaemon_url + clumap['fencedaemon_url'] = fencedaemon_url fdp = model.getFenceDaemonPtr() pjd = fdp.getAttribute('post_join_delay') if pjd is None: @@ -2227,35 +2467,35 @@ if pfd is None: pfd = "0" #post join delay - map['pjd'] = pjd + clumap['pjd'] = pjd #post fail delay - map['pfd'] = pfd + clumap['pfd'] = pfd #------------- #if multicast multicast_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_MCAST_TAB - map['multicast_url'] = multicast_url + clumap['multicast_url'] = multicast_url #mcast addr is_mcast = model.isMulticast() - #map['is_mcast'] = is_mcast + #clumap['is_mcast'] = is_mcast if is_mcast: - map['mcast_addr'] = model.getMcastAddr() - map['is_mcast'] = "True" + clumap['mcast_addr'] = model.getMcastAddr() + clumap['is_mcast'] = "True" else: - map['is_mcast'] = "False" - map['mcast_addr'] = "1.2.3.4" + clumap['is_mcast'] = "False" + clumap['mcast_addr'] = "1.2.3.4" #------------- #quorum disk params quorumd_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_QDISK_TAB - map['quorumd_url'] = quorumd_url + clumap['quorumd_url'] = quorumd_url is_quorumd = model.isQuorumd() - map['is_quorumd'] = is_quorumd - map['interval'] = "" - map['tko'] = "" - map['votes'] = "" - map['min_score'] = "" - map['device'] = "" - map['label'] = "" + clumap['is_quorumd'] = is_quorumd + clumap['interval'] = "" + clumap['tko'] = "" + clumap['votes'] = "" + clumap['min_score'] = "" + clumap['device'] = "" + clumap['label'] = "" #list struct for heuristics... hlist = list() @@ -2264,27 +2504,27 @@ qdp = model.getQuorumdPtr() interval = qdp.getAttribute('interval') if interval is not None: - map['interval'] = interval + clumap['interval'] = interval tko = qdp.getAttribute('tko') if tko is not None: - map['tko'] = tko + clumap['tko'] = tko votes = qdp.getAttribute('votes') if votes is not None: - map['votes'] = votes + clumap['votes'] = votes min_score = qdp.getAttribute('min_score') if min_score is not None: - map['min_score'] = min_score + clumap['min_score'] = min_score device = qdp.getAttribute('device') if device is not None: - map['device'] = device + clumap['device'] = device label = qdp.getAttribute('label') if label is not None: - map['label'] = label + clumap['label'] = label heuristic_kids = qdp.getChildren() h_ctr = 0 @@ -2313,9 +2553,9 @@ else: hmap['hinterval'] = "" hlist.append(hmap) - map['hlist'] = hlist + clumap['hlist'] = hlist - return map + return clumap def getClustersInfo(self, status, req): map = {} @@ -2335,6 +2575,10 @@ if len(clulist) < 1: return {} clu = clulist[0] + cluerror = False + if 'error' in clu: + cluerror = True + map['error'] = True clustername = clu['name'] if clu['alias'] != "": map['clusteralias'] = clu['alias'] @@ -2349,6 +2593,7 @@ map['running'] = "false" map['votes'] = clu['votes'] map['minquorum'] = clu['minQuorum'] + map['clucfg'] = baseurl + "?" 
+ PAGETYPE + "=" + CLUSTER_CONFIG + "&" + CLUNAME + "=" + clustername map['restart_url'] = baseurl + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + clustername + '&task=' + CLUSTER_RESTART @@ -2370,6 +2615,7 @@ map['currentservices'] = svc_dict_list node_dict_list = list() for item in nodelist: + node_error = 'error' in item nmap = {} name = item['name'] nmap['nodename'] = name @@ -2872,15 +3118,16 @@ infohash['currentservices'] = svc_dict_list - #next is faildoms - fdoms = model.getFailoverDomainsForNode(nodename) fdom_dict_list = list() - for fdom in fdoms: - fdom_dict = {} - fdom_dict['name'] = fdom.getName() - fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName() - fdom_dict['fdomurl'] = fdomurl - fdom_dict_list.append(fdom_dict) + if model: + #next is faildoms + fdoms = model.getFailoverDomainsForNode(nodename) + for fdom in fdoms: + fdom_dict = {} + fdom_dict['name'] = fdom.getName() + fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName() + fdom_dict['fdomurl'] = fdomurl + fdom_dict_list.append(fdom_dict) infohash['fdoms'] = fdom_dict_list @@ -2911,7 +3158,6 @@ infohash['logurl'] = '/luci/logs/?nodename=' + nodename_resolved + '&clustername=' + clustername return infohash - #get list of faildoms for node def getNodesInfo(self, model, status, req): resultlist = list() @@ -3015,6 +3261,10 @@ return resultlist def getFence(self, model, request): + if not model: + luci_log.debug_verbose('getFence0: model is None') + return {} + map = {} fencename = request['fencename'] fencedevs = model.getFenceDevices() @@ -3061,6 +3311,10 @@ raise def getFenceInfo(self, model, request): + if not model: + luci_log.debug_verbose('getFenceInfo00: model is None') + return {} + try: clustername = request['clustername'] except: @@ -3311,9 +3565,14 @@ return map def getFencesInfo(self, model, request): + map = {} + if not model: + luci_log.debug_verbose('getFencesInfo0: model is None') + map['fencedevs'] = list() + return map + clustername = request['clustername'] baseurl = request['URL'] - map = {} fencedevs = list() #This is for the fencedev list page #Get list of fence devices @@ -3798,6 +4057,7 @@ for item in modelb.getResources(): itemmap = {} itemmap['name'] = item.getName() + itemmap['attrs'] = item.attr_hash itemmap['type'] = item.resource_type itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_CONFIG itemmap['url'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE @@ -3859,6 +4119,82 @@ except: continue +def delService(self, request): + errstr = 'An error occurred while attempting to set the new cluster.conf' + + try: + modelb = request.SESSION.get('model') + except Exception, e: + luci_log.debug_verbose('delService0: no model: %s' % str(e)) + return (False, {'errors': [ errstr ] }) + + name = None + try: + name = request['servicename'] + except: + try: + name = request.form['servicename'] + except: + pass + + if name is None: + luci_log.debug_verbose('delService1: no service name') + return (False, {'errors': [ '%s: no service name was provided.' 
% errstr ]}) + + clustername = None + try: + clustername = request['clustername'] + except: + try: + clustername = request.form['clustername'] + except: + pass + + if clustername is None: + luci_log.debug_verbose('delService2: no cluster name for %s' % name) + return (False, {'errors': [ '%s: no cluster name was provided.' % errstr ]}) + + rc = getRicciAgent(self, clustername) + if not rc: + luci_log.debug_verbose('delService3: unable to get ricci agent for cluster %s' % clustername) + return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster.' % errstr ]}) + + try: + ragent = rc.hostname() + if not ragent: + raise Exception, 'unable to determine the hostname of the ricci agent' + except Exception, e: + luci_log.debug_verbose('delService4: %s: %s' % (errstr, str(e))) + return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster.' % errstr ]}) + + try: + modelb.deleteService(name) + except Exception, e: + luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s' % (name, clustername)) + return (False, {'errors': [ '%s: error removing service %s.' % (errstr, name) ]}) + + try: + conf = modelb.exportModelAsString() + if not conf: + raise Exception, 'model string is blank' + except Exception, e: + luci_log.debug_verbose('delService6: exportModelAsString failed: %s' \ + % str(e)) + return (False, {'errors': [ '%s: error removing service %s.' % (errstr, name) ]}) + + batch_number, result = setClusterConf(rc, str(conf)) + if batch_number is None or result is None: + luci_log.debug_verbose('delService7: missing batch and/or result') + return (False, {'errors': [ '%s: error removing service %s.' % (errstr, name) ]}) + + try: + set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_DELETE, "Removing service \'%s\'" % name) + except Exception, e: + luci_log.debug_verbose('delService8: failed to set flags: %s' % str(e)) + + response = request.RESPONSE + response.redirect(request['URL'] + "?pagetype=" + SERVICES + "&clustername=" + clustername + '&busyfirst=true') + def delResource(self, rc, request): errstr = 'An error occurred while attempting to set the new cluster.conf' @@ -4516,7 +4852,7 @@ 'gfs': addGfs, 'nfsm': addNfsm, 'nfsx': addNfsx, - 'nfsc': addNfsx, + 'nfsc': addNfsc, 'scr': addScr, 'smb': addSmb } @@ -4651,6 +4987,17 @@ response = request.RESPONSE response.redirect(request['URL'] + "?pagetype=" + RESOURCES + "&clustername=" + clustername + '&busyfirst=true') +def getResource(modelb, name): + resPtr = modelb.getResourcesPtr() + resources = resPtr.getChildren() + + for res in resources: + if res.getName() == name: + return res + + luci_log.debug_verbose('getResource: unable to find resource \"%s\"' % name) + raise KeyError, name + def getResourceForEdit(modelb, name): resPtr = modelb.getResourcesPtr() resources = resPtr.getChildren() @@ -4660,7 +5007,7 @@ resPtr.removeChild(res) return res - luci_log.debug_verbose('unable to find resource \"%s\"' % name) + luci_log.debug_verbose('GRFE0: unable to find resource \"%s\"' % name) raise KeyError, name def appendModel(request, model): --- conga/luci/site/luci/Extensions/conga_constants.py 2006/11/16 19:34:53 1.19.2.2 +++ conga/luci/site/luci/Extensions/conga_constants.py 2006/12/07 17:54:31 1.19.2.3 @@ -43,6 +43,7 @@ FENCEDEV_CONFIG="53" FENCEDEV="54" CLUSTER_DAEMON="55" +SERVICE_DELETE = '56' #Cluster tasks CLUSTER_STOP = '1000' @@ -119,7 +120,7 @@ PRE_JOIN = "Packages are installed and configuration has been distributed, but the node has not yet joined the 
cluster." -POSSIBLE_REBOOT_MESSAGE = "This node is not currently responding and is probably
rebooting as planned. This state should persist for 5 minutes or so..." +POSSIBLE_REBOOT_MESSAGE = "This node is not currently responding and is probably rebooting as planned. This state should persist for 5 minutes or so..." REDIRECT_MSG = " You will be redirected in 5 seconds. Please fasten your safety restraints." --- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/11/20 23:36:12 1.30.2.10 +++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/12/07 17:54:31 1.30.2.11 @@ -63,7 +63,7 @@ batch += '' batch += '' batch += '' - + need_reboot = install_base or install_services or install_shared_storage or install_LVS if need_reboot: batch += '' @@ -96,7 +96,7 @@ batch += '' batch += '' batch += '' - + batch += '' batch += '' batch += '' @@ -236,7 +236,7 @@ batch += '' else: batch += '' - + batch += '' batch += '' batch += '' @@ -298,66 +298,39 @@ pass return (None, None) - -def getPayload(bt_node): - if not bt_node: - return None - - mod_node = None - for node in bt_node.childNodes: - if node.nodeType == xml.dom.Node.ELEMENT_NODE and node.nodeName == 'module': - mod_node = node - if not mod_node: - return None - - resp_node = None - for node in mod_node.childNodes: - if node.nodeType == xml.dom.Node.ELEMENT_NODE: - resp_node = node - if not resp_node: - return None +def getClusterStatusBatch(rc): + batch_str = '' + ricci_xml = rc.batch_run(batch_str, async=False) - fr_node = None - for node in resp_node.childNodes: - if node.nodeType == xml.dom.Node.ELEMENT_NODE: - fr_node = node - if not fr_node: + try: + cluster_tags = ricci_xml.getElementsByTagName('cluster') + except Exception, e: + luci_log.debug_verbose('getClusterStatusBatch0: %s' % str(e)) return None - varnode = None - for node in fr_node.childNodes: - if node.nodeName == 'var': - varnode = node - break - if not varnode: - return None + if len(cluster_tags) < 1: + luci_log.debug_verbose('getClusterStatusBatch1: %d entries - expecting 1' \ + % len(cluster_tags)) + elif len(cluster_tags) > 1: + luci_log.debug_verbose('getClusterStatusBatch2: %d entries - expecting 1, using the first' % len(cluster_tags)) - cl_node = None - for node in varnode.childNodes: - if node.nodeName == 'cluster': - cl_node = node - break - if not cl_node: + try: + cluster_node = cluster_tags[0] + if not cluster_node: + raise Exception, 'element 0 is None' + except Exception, e: + luci_log.debug_verbose('getClusterStatusBatch3: %s' % str(e)) return None - doc = minidom.Document() - doc.appendChild(cl_node) - return doc - -def getClusterStatusBatch(rc): - batch_str = '' - ricci_xml = rc.batch_run(batch_str, async=False) - - if not ricci_xml or not ricci_xml.firstChild: - luci_log.debug_verbose('ricci_xml is None from batch_run') - - doc = getPayload(ricci_xml.firstChild) - if not doc or not doc.firstChild: - luci_log.debug_verbose('doc is None from getPayload: %s' % ricci_xml.toxml()) - return None + try: + doc = minidom.Document() + doc.appendChild(cluster_node) + return doc + except Exception, e: + luci_log.debug_verbose('getClusterStatusBatch4: %s' % str(e)) - return doc + return None def setClusterConf(rc, clusterconf, propagate=True): if propagate == True: @@ -490,7 +463,7 @@ return None, None ricci_xml = rc.batch_run(batch) return batchAttemptResult(ricci_xml) - + def restartService(rc, servicename): batch_str = ''
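The ricci_bridge.py change above replaces the hand-rolled getPayload() DOM walk with a single getElementsByTagName() call. A self-contained sketch of the new extraction, assuming ricci has already returned a parsed minidom tree:

    from xml.dom import minidom

    def first_cluster_doc(ricci_xml):
        # Equivalent of the new getClusterStatusBatch() body: take the
        # first <cluster> element at any depth and re-root it in a fresh
        # document so callers get a standalone DOM.
        tags = ricci_xml.getElementsByTagName('cluster')
        if not tags:
            return None
        doc = minidom.Document()
        doc.appendChild(tags[0])
        return doc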