* [Cluster-devel] conga/luci cluster/busy_wait-macro cluster/for ...
@ 2008-01-25 0:37 rmccabe
From: rmccabe @ 2008-01-25 0:37 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2008-01-25 00:36:59
Modified files:
luci/cluster : busy_wait-macro form-macros
luci/homebase : validate_cluster_add.js
luci/site/luci/Extensions: LuciDB.py RicciQueries.py
cluster_adapters.py
Log message:
Fix a few bugs found while testing
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.220&r2=1.221
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&r1=1.13&r2=1.14
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&r1=1.8&r2=1.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.280&r2=1.281
--- conga/luci/cluster/busy_wait-macro 2008/01/14 20:51:42 1.2
+++ conga/luci/cluster/busy_wait-macro 2008/01/25 00:36:59 1.3
@@ -65,6 +65,6 @@
tal:attributes="onclick python:'javascript:document.stop_waiting_form%s.submit()' % nodereport.get('report_index')">Stop waiting for this job to complete</a>
</form>
</div>
+ <hr/>
</div>
- <hr/>
</div>
--- conga/luci/cluster/form-macros 2008/01/23 04:34:09 1.220
+++ conga/luci/cluster/form-macros 2008/01/25 00:36:59 1.221
@@ -225,7 +225,7 @@
checked add_cluster/shared_storage |string:checked" />Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" name="reboot_nodes"
+ <input type="checkbox" id="reboot_nodes" name="reboot_nodes"
tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
@@ -1135,7 +1135,7 @@
checked add_cluster/shared_storage | string:checked" />Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" name="reboot_nodes"
+ <input type="checkbox" id="reboot_nodes" name="reboot_nodes"
tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
--- conga/luci/homebase/validate_cluster_add.js 2008/01/02 20:52:23 1.13
+++ conga/luci/homebase/validate_cluster_add.js 2008/01/25 00:36:59 1.14
@@ -48,14 +48,21 @@
return (-1);
}
+ var reboot_nodes = document.getElementById('reboot_nodes');
var view_certs = document.getElementById('view_certs');
if (!view_certs || !view_certs.checked) {
var confirm_str = '';
if (form.addnode) {
- confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?\nEach node added will be rebooted during this process.';
+ confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?';
+ if (reboot_nodes && reboot_nodes.checked) {
+ confirm_str += '\nEach node added will be rebooted during this process.';
+ }
} else {
if (form.cluster_create) {
- confirm_str = 'All nodes added to this cluster will be rebooted as part of this process.\n\nCreate cluster \"' + clustername + '\"?';
+ confirm_str = 'Create cluster \"' + clustername + '\"?\n\n';
+ if (reboot_nodes && reboot_nodes.checked) {
+ confirm_str += 'All nodes added to this cluster will be rebooted as part of this process.\n\n';
+ }
} else {
confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
}
--- conga/luci/site/luci/Extensions/LuciDB.py 2008/01/14 20:51:42 1.8
+++ conga/luci/site/luci/Extensions/LuciDB.py 2008/01/25 00:36:59 1.9
@@ -335,14 +335,17 @@
objname = '%s____flag' % key
clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+
# now designate this new object properly
objpath = str('%s/%s' % (path, objname))
flag = self.restrictedTraverse(objpath)
flag.manage_addProperty(BATCH_ID, batch_id, 'string')
flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
- flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clustername), 'string')
flag.manage_addProperty(LAST_STATUS, 0, 'int')
+ flag.manage_addProperty(FLAG_DESC,
+ 'Creating node "%s" for cluster "%s"' % (key, clustername),
+ 'string')
except Exception, e:
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
--- conga/luci/site/luci/Extensions/RicciQueries.py 2008/01/23 04:34:09 1.10
+++ conga/luci/site/luci/Extensions/RicciQueries.py 2008/01/25 00:36:59 1.11
@@ -68,7 +68,7 @@
batch.append('</request>')
batch.append('</module>')
- need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+ need_reboot = reboot_nodes
if need_reboot:
batch.append('<module name="reboot">')
batch.append('<request API_version="1.0">')
@@ -183,7 +183,7 @@
batch.append('</request>')
batch.append('</module>')
- need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+ need_reboot = reboot_nodes
if need_reboot:
batch.append('<module name="reboot">')
batch.append('<request API_version="1.0">')
@@ -738,11 +738,12 @@
install_shared_storage,
install_LVS,
upgrade_rpms,
- gulm_lockservers):
+ gulm_lockservers,
+ reboot_nodes=False):
batch_str = createClusterBatch(os_str, cluster_name, cluster_alias,
nodeList, install_base, install_services,
install_shared_storage, install_LVS, upgrade_rpms,
- gulm_lockservers)
+ gulm_lockservers, reboot_nodes)
ricci_xml = rc.batch_run(batch_str)
return batchAttemptResult(ricci_xml)
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2008/01/23 04:34:09 1.280
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2008/01/25 00:36:59 1.281
@@ -280,7 +280,8 @@
ret = send_batch_to_hosts(node_list, 10, rq.create_cluster,
add_cluster['cluster_os'], clustername, clustername,
node_list, True, True, add_cluster['shared_storage'], False,
- add_cluster['download_pkgs'], lockservers, add_cluster['reboot_nodes'])
+ add_cluster['download_pkgs'], lockservers,
+ add_cluster['reboot_nodes'])
batch_id_map = {}
for i in ret.iterkeys():
@@ -291,14 +292,13 @@
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose(msg)
continue
- batch_id_map[i] = ret[i]['batch_result']
+ batch_id_map[i] = ret[i]['batch_result'][0]
if len(batch_id_map) == 0:
request.SESSION.set('create_cluster', add_cluster)
return (False, { 'errors': errors, 'messages': messages })
buildClusterCreateFlags(self, batch_id_map, clustername)
-
response = request.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], CLUSTER_CONFIG, clustername))
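The batch_id_map fix above keeps only the first element of each host's batch_result. A minimal sketch of that bookkeeping, assuming batch_result is a sequence whose first item is the batch id (the exact return shape of batchAttemptResult is not shown in this patch):

def collect_batch_ids(ret):
    batch_id_map = {}
    for host, result in ret.items():
        batch_result = result.get('batch_result')
        if not batch_result:
            # Host failed to start its batch; skip it (errors are logged
            # separately by the caller).
            continue
        batch_id_map[host] = batch_result[0]   # keep just the batch id
    return batch_id_map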
* [Cluster-devel] conga/luci cluster/busy_wait-macro cluster/for ...
@ 2008-06-10 14:50 rmccabe
From: rmccabe @ 2008-06-10 14:50 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2008-06-10 14:50:53
Modified files:
luci/cluster : busy_wait-macro form-macros index_html
validate_create_gulm.js
luci/plone-custom: conga.js update_hostinfo.js
luci/site/luci/Extensions: FenceHandler.py LuciClusterInfo.py
Log message:
Luci fixes from the RHEL4 branch
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.223&r2=1.224
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&r1=1.42&r2=1.43
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_create_gulm.js.diff?cvsroot=cluster&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/conga.js.diff?cvsroot=cluster&r1=1.13&r2=1.14
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/update_hostinfo.js.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&r1=1.32&r2=1.33
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&r1=1.20&r2=1.21
--- conga/luci/cluster/busy_wait-macro 2008/01/25 00:36:59 1.3
+++ conga/luci/cluster/busy_wait-macro 2008/06/10 14:50:53 1.4
@@ -3,7 +3,7 @@
<p><img src="100wait.gif" /></p>
- <span tal:define="global nodereports isBusy/nodereports" />
+ <span tal:define="global nodereports isBusy/nodereports | nothing" />
<div tal:repeat="nodereport nodereports">
<tal:block
--- conga/luci/cluster/form-macros 2008/04/23 17:33:37 1.223
+++ conga/luci/cluster/form-macros 2008/06/10 14:50:53 1.224
@@ -148,6 +148,80 @@
</div>
</div>
+<div metal:define-macro="lockserver-macro" tal:omit-tag="">
+ <strong class="cluster">Lock Manager</strong>
+ <ul class="vanilla">
+ <li class="vanilla">
+ <input type="radio" name="lockmanager" value="dlm"
+ onchange="toggle_gulm(this.form, this.value)"
+ tal:attributes="checked python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm'">DLM (preferred)
+ </li>
+ <li class="vanilla">
+ <input type="radio" name="lockmanager" value="gulm"
+ onchange="toggle_gulm(this.form, this.value)"
+ tal:attributes="checked python: add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] == 'gulm'">GULM
+ </li>
+ <div id="gulm_lockservers"
+ tal:attributes="class python: (add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] != 'gulm') and 'invisible' or ''">
+ <fieldset>
+ <legend class="rescfg">GULM lock server properties</legend>
+ <p>You must enter exactly 1, 3, or 5 GULM lock servers.</p>
+ <table class="systemsTable">
+ <tr>
+ <td class="pad_right">Lock Server 1</td>
+ <td>
+ <input type="text" name="__GULM__:server1"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server1 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 2</td>
+ <td>
+ <input type="text" name="__GULM__:server2"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server2 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 3</td>
+ <td>
+ <input type="text" name="__GULM__:server3"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server3 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 4</td>
+ <td>
+ <input type="text" name="__GULM__:server4"
+ class="hostname" tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server4 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 5</td>
+ <td>
+ <input type="text" name="__GULM__:server5"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server5 | nothing" />
+ </td>
+ </tr>
+ </table>
+ </fieldset>
+ </div>
+ </ul>
+</div>
+
<div metal:define-macro="cluster-form">
<h2>Cluster Form</h2>
</div>
@@ -171,10 +245,17 @@
tal:define="
global add_cluster request/SESSION/create_cluster | nothing">
+ <div class="invisible"
+ tal:condition="not:exists:add_cluster/gulm_support">
+ <div id="lockserver_div">
+ <tal:block metal:use-macro="here/form-macros/macros/lockserver-macro" />
+ </div>
+ </div>
+
<input name="pagetype" type="hidden"
tal:attributes="value request/form/pagetype | request/pagetype |string:6" />
- <input name="cluster_os" type="hidden"
+ <input name="cluster_os" type="hidden" id="cluster_os"
tal:attributes="value add_cluster/cluster_os | nothing" />
<table id="systemsTable" class="systemsTable" cellspacing="0">
@@ -241,91 +322,11 @@
</ul>
</td></tr>
- <tr class="systemsTable"
- tal:condition="exists:add_cluster/gulm_support">
- <td class="systemsTable" colspan="2">
- <strong class="cluster">Lock Manager</strong>
- <ul class="vanilla">
- <li class="vanilla">
- <input type="radio"
- name="lockmanager" value="dlm"
- onchange="toggle_gulm(this.form, this.value)"
- tal:attributes="checked python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm'"
- >DLM (preferred)
- </li>
- <li class="vanilla">
- <input type="radio"
- name="lockmanager" value="gulm"
- onchange="toggle_gulm(this.form, this.value)"
- tal:attributes="checked python: add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] == 'gulm'"
- >GULM
- </li>
- <div id="gulm_lockservers"
- tal:attributes="class python: (add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] != 'gulm') and 'invisible' or ''">
- <fieldset>
- <legend class="rescfg">GULM lock server properties</legend>
- <p>You must enter exactly 1, 3, or 5 GULM lock servers.</p>
-
- <table class="systemsTable">
- <tr>
- <td class="pad_right">Lock Server 1</td>
- <td>
- <input type="text"
- name="__GULM__:server1"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server1 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 2</td>
- <td>
- <input type="text"
- name="__GULM__:server2"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server2 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 3</td>
- <td>
- <input type="text"
- name="__GULM__:server3"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server3 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 4</td>
- <td>
- <input type="text"
- name="__GULM__:server4"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server4 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 5</td>
- <td>
- <input type="text"
- name="__GULM__:server5"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server5 | nothing" />
- </td>
- </tr>
- </table>
- </fieldset>
- </div>
- <ul>
+ <tr class="systemsTable">
+ <td class="systemsTable" colspan="2" id="lockserver_dialog">
+ <tal:block tal:condition="exists:add_cluster/gulm_support">
+ <tal:block metal:use-macro="here/form-macros/macros/lockserver-macro" />
+ </tal:block>
</td>
</tr>
</tfoot>
@@ -471,6 +472,8 @@
</div>
<div class="hbSubmit" id="hbSubmit">
+ <input id="cluster_version" name="cluster_version" type="hidden"
+ tal:attributes="value os_version | nothing" />
<input type="hidden" name="cluster_create" value="1" />
<input type="button" name="Submit" value="Submit"
onClick="validate_cluster_create(this.form)" />
@@ -1094,7 +1097,7 @@
<input name="pagetype" type="hidden"
tal:attributes="value request/form/pagetype | request/pagetype | string:15" />
- <input name="cluster_os" type="hidden"
+ <input name="cluster_os" type="hidden" id="cluster_os"
tal:attributes="value add_cluster/cluster_os | nothing" />
<table id="systemsTable" class="systemsTable" cellspacing="0">
--- conga/luci/cluster/index_html 2008/02/08 21:47:56 1.42
+++ conga/luci/cluster/index_html 2008/06/10 14:50:53 1.43
@@ -27,20 +27,26 @@
<tal:block tal:define="
global sinfo nothing;
global hascluster request/clustername | nothing;
- global isBusy python: False;
+ global isBusy python:False;
+ global isVirtualized python:False;
global firsttime nothing;
global ri_agent nothing;
+ global os_version nothing;
global busywaiting python:None" />
<tal:block tal:condition="hascluster">
<tal:block tal:define="
global ri_agent python:here.getRicciAgentForCluster(request);
- resmap python:here.getClusterOS(ri_agent);
- global isVirtualized resmap/isVirtualized | nothing;
- global os_version resmap/os | nothing;
- global isBusy python:here.isClusterBusy(request);
global firsttime request/busyfirst | nothing" />
+ <tal:block tal:condition="ri_agent"
+ tal:define="resmap python:here.getClusterOS(ri_agent)">
+ <tal:block tal:define="
+ global isVirtualized resmap/isVirtualized | nothing;
+ global os_version resmap/os | nothing;
+ global isBusy python:here.isClusterBusy(request)" />
+ </tal:block>
+
<tal:block tal:condition="firsttime">
<tal:block tal:define="global busywaiting python:True" />
<meta http-equiv="refresh"
--- conga/luci/cluster/validate_create_gulm.js 2008/01/02 20:52:22 1.3
+++ conga/luci/cluster/validate_create_gulm.js 2008/06/10 14:50:53 1.4
@@ -65,10 +65,19 @@
}
function validate_cluster_create(form) {
- if (form.lockmanager && !form.lockmanager[0].checked) {
- var errors = check_gulm_lkserv();
- if (error_dialog(errors))
- return (-1);
+ try {
+ if (!form.cluster_os.value) {
+ get_system_info(null, null, 1);
+ }
+ } catch (e) {
+ }
+
+ if (form.cluster_os.value) {
+ if (form.lockmanager && !form.lockmanager[0].checked) {
+ var errors = check_gulm_lkserv();
+ if (error_dialog(errors))
+ return (-1);
+ }
+ return validateForm(form);
}
- return validateForm(form);
}
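In rough Python terms, the rewritten validate_cluster_create above behaves like the sketch below: if the cluster OS has not been detected yet it kicks off a host-info query and defers validation, and the GULM lock-server checks run only when the GULM radio button is selected (all function parameters here are placeholders standing in for the browser-side calls):

def validate_cluster_create(cluster_os, gulm_selected,
                            request_host_info, check_gulm_lockservers,
                            validate_form):
    if not cluster_os:
        # OS not known yet: ask the nodes (get_system_info in the JS)
        # and let a later submit attempt finish validation.
        request_host_info()
        return None
    if gulm_selected:
        errors = check_gulm_lockservers()
        if errors:
            return errors          # surface the errors and stop
    return validate_form()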
--- conga/luci/plone-custom/conga.js 2008/02/05 19:32:04 1.13
+++ conga/luci/plone-custom/conga.js 2008/06/10 14:50:53 1.14
@@ -45,7 +45,7 @@
}
function display_msgs(id, msg_list) {
- if (!msg_list || msg_list.length < 1) {
+ if (id === null || !msg_list || msg_list.length < 1) {
return (-1);
}
--- conga/luci/plone-custom/update_hostinfo.js 2008/01/22 15:02:33 1.1
+++ conga/luci/plone-custom/update_hostinfo.js 2008/06/10 14:50:53 1.2
@@ -97,6 +97,7 @@
var errors = [];
var msgs = [];
var systems_list = get_systems_list_offsets(errors);
+ var cluster_os = null;
for (var i = 0 ; i < systems_list.length ; i++) {
var cur_node_props = node_props[systems_list[i][0]];
@@ -106,6 +107,10 @@
var addr_elem = document.getElementById('__SYSTEM' + offset + ':Addr');
var pwd_elem = document.getElementById('__SYSTEM' + offset + ':Passwd');
var fp_elem = document.getElementById('__SYSTEM' + offset + 'Fingerprint');
+ try {
+ cluster_os = cur_node_props['OS'];
+ } catch (e) {
+ }
if (cur_node_props['available'] != 'True') {
errors.push(cur_node_props['err_msg']);
@@ -130,7 +135,32 @@
}
}
} catch (e) {
- alert(e);
+ //alert(e);
+ }
+ }
+
+ if (cluster_os !== null) {
+ var os_str = null;
+
+ if (cluster_os.match(/Nahant/)) {
+ var lsdiv = document.getElementById('lockserver_div');
+ if (lsdiv !== null) {
+ var ls_cell = document.getElementById('lockserver_dialog');
+ if (ls_cell !== null) {
+ lsdiv.id = null;
+ ls_cell.appendChild(lsdiv);
+ }
+ }
+ os_str = 'rhel4';
+ } else {
+ os_str = 'rhel5';
+ }
+
+ if (os_str !== null) {
+ var os_in = document.getElementById('cluster_os');
+ if (os_in !== null) {
+ os_in.value = os_str;
+ }
}
}
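The block added above infers the cluster OS from the node properties returned by ricci; a small Python sketch of that decision, assuming only that RHEL4 release strings contain the codename "Nahant":

import re

def detect_cluster_os(os_string):
    if not os_string:
        return None
    if re.search(r'Nahant', os_string):
        # RHEL4: the GULM lock-server dialog is relevant, so the page
        # moves it into place before setting the hidden cluster_os field.
        return 'rhel4'
    return 'rhel5'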
--- conga/luci/site/luci/Extensions/FenceHandler.py 2008/06/06 16:41:52 1.32
+++ conga/luci/site/luci/Extensions/FenceHandler.py 2008/06/10 14:50:53 1.33
@@ -814,8 +814,10 @@
use_ssh = form.has_key('secure') and (form['secure'] == '1' or form['secure'].lower() == 'true')
if use_ssh:
fencedev.addAttribute('secure', '1')
+ fencedev.addAttribute('agent', 'fence_drac5')
else:
fencedev.removeAttribute('secure')
+ fencedev.addAttribute('agent', 'fence_drac')
return errors
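A condensed sketch of the agent selection this hunk adds, using a plain dict in place of the real fencedev element (addAttribute/removeAttribute on the actual object behave analogously):

def choose_drac_agent(form, attrs):
    # "secure" means the device should be reached over SSH.
    use_ssh = str(form.get('secure', '')).lower() in ('1', 'true')
    if use_ssh:
        attrs['secure'] = '1'
        attrs['agent'] = 'fence_drac5'   # SSH-capable DRAC5 agent
    else:
        attrs.pop('secure', None)
        attrs['agent'] = 'fence_drac'    # older telnet-based DRAC agent
    return attrs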
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py 2008/04/23 17:33:37 1.20
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py 2008/06/10 14:50:53 1.21
@@ -588,7 +588,7 @@
clumap['os_major'] = 4
os_minor = int(cluster_os[cluster_os.find('Update ') + 7])
clumap['os_minor'] = os_minor
- if os_minor > 6:
+ if os_minor > 7:
clumap['has_fence_ssh'] = True
except Exception, e:
if LUCI_DEBUG_MODE is True:
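The threshold change above (os_minor > 6 becomes os_minor > 7) raises the minimum RHEL4 update level for ssh-capable fencing. A sketch of the check, assuming cluster_os release strings of the form "... Nahant Update N" with a single-digit update number, as the existing indexing implies:

def has_fence_ssh(cluster_os):
    try:
        # Reads the single character following "Update ", matching the
        # indexing used in LuciClusterInfo.py.
        os_minor = int(cluster_os[cluster_os.find('Update ') + 7])
    except (ValueError, IndexError):
        return False
    return os_minor > 7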