* [Cluster-devel] conga luci/cluster/form-chooser luci/cluster/f ...
@ 2006-12-22 17:50 rmccabe
From: rmccabe @ 2006-12-22 17:50 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2006-12-22 17:50:17
Modified files:
	luci/cluster: form-chooser form-macros index_html validate_config_qdisk.js
	luci/conga_ssl: SSLClient.cpp SSLClient.h conga_ssl_lib.cpp
	luci/homebase: form-macros homebase_common.js index_html luci_homebase.css validate_sys_remove.js
	luci/site/luci/Extensions: FenceHandler.py ModelBuilder.py cluster_adapters.py conga_constants.py conga_ssl.py homebase_adapters.py ricci_bridge.py ricci_communicator.py
	luci/utils: luci_admin
	ricci/modules/storage: ExtendedFS.cpp
Removed files:
	luci/site/luci/Extensions: fence_test.py
Log message:
Resolves: 201394
Improves, Related: 212021
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-chooser.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.12.2.2&r2=1.12.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.10&r2=1.90.2.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20.2.5&r2=1.20.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_config_qdisk.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/conga_ssl/SSLClient.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/conga_ssl/SSLClient.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/conga_ssl/conga_ssl_lib.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.44.2.4&r2=1.44.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/homebase_common.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.13.2.1&r2=1.13.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.18.2.2&r2=1.18.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/luci_homebase.css.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.28.2.2&r2=1.28.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_sys_remove.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.1&r2=1.4.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.8.2.2&r2=1.8.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.14&r2=1.120.2.15
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.19.2.3&r2=1.19.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_ssl.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.34.2.6&r2=1.34.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.30.2.14&r2=1.30.2.15
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.7&r2=1.9.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/fence_test.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1&r2=NONE
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/utils/luci_admin.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.50&r2=1.50.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/ExtendedFS.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.7&r2=1.7.2.1
--- conga/luci/cluster/form-chooser 2006/12/07 17:54:31 1.12.2.2
+++ conga/luci/cluster/form-chooser 2006/12/22 17:50:16 1.12.2.3
@@ -152,6 +152,11 @@
<span tal:omit-tag="" tal:condition="python: ptype == '55'">
<div metal:use-macro="here/form-macros/macros/fencedevprocess-form"/>
</span>
+
+ <span tal:omit-tag="" tal:condition="python: ptype == '80'">
+ <div metal:use-macro="here/form-macros/macros/conf_editor-form"/>
+ </span>
+
</span>
</metal:choose-form>
</body>
--- conga/luci/cluster/form-macros 2006/12/13 23:55:06 1.90.2.10
+++ conga/luci/cluster/form-macros 2006/12/22 17:50:16 1.90.2.11
@@ -76,7 +76,7 @@
<tal:block tal:condition="python: ricci_agent">
<tal:block tal:define="
- global stat python: here.getClusterStatus(request, ricci_agent);
+ global stat python: here.getClusterStatus(request, ricci_agent, cluname=clu[0]);
global cstatus python: here.getClustersInfo(stat, request);
global cluster_status python: 'cluster ' + (('running' in cstatus and cstatus['running'] == 'true') and 'running' or 'stopped');"
/>
@@ -84,7 +84,7 @@
<table class="cluster" width="100%">
<tr class="cluster info_top">
<td class="cluster cluster_name">
- <strong class="cluster cluster_name">Cluster Name</strong>:
+ <strong class="cluster cluster_name">Cluster Name:</strong>
<a href=""
tal:attributes="href cstatus/clucfg | nothing;
class python: 'cluster ' + cluster_status;"
@@ -124,7 +124,7 @@
<tr class="cluster">
<td tal:condition="exists: cstatus/error" class="cluster">
<span class="errmsgs">
- An error occurred while attempting to get status information for this cluster. The information shown may be out of date.
+ An error occurred while attempting to get status information for this cluster. The information shown may be stale or inaccurate.
</span>
</td>
</tr>
@@ -208,173 +208,226 @@
<tal:block tal:omit-tag=""
tal:define="global sessionObj python: request.SESSION.get('checkRet')" />
- <form name="adminform" action="" method="post">
- <input name="pagetype" id="pagetype" type="hidden" value="6" />
+ <h1>Add a cluster</h1>
- <h1>Add a cluster</h1>
+ <form name="create_cluster" action="" method="post"
+ tal:define="
+ global add_cluster request/SESSION/create_cluster | nothing">
+
+ <input name="pagetype" type="hidden"
+ tal:attributes="value request/form/pagetype | request/pagetype |string:6" />
+
+ <input name="cluster_os" type="hidden"
+ tal:attributes="value add_cluster/cluster_os | nothing" />
- <tal:block tal:condition="python: not sessionObj or not 'requestResults' in sessionObj or not 'nodeList' in sessionObj['requestResults']">
- <input name="numStorage" type="hidden" value="3" />
<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
<thead class="systemsTable">
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <div class="systemsTableTop">
- <strong>Cluster Name</strong>
- <input class="hbInputSys" type="text" id="clusterName" name="clusterName" />
- </div>
- </td></tr>
+ <tr class="systemsTable"><td class="systemsTable" colspan="2">
+ <div class="systemsTableTop">
+ <strong>Cluster Name</strong>
+ <input class="hbInputSys" type="text"
+ id="clusterName" name="clusterName"
+ tal:attributes="value add_cluster/name | nothing" />
+ </div>
+ </td></tr>
<tr class="systemsTable">
- <th class="systemsTable">System Hostname</th>
- <th class="systemsTable">Password</th>
+ <th class="systemsTable">Node Hostname</th>
+ <th class="systemsTable">Root Password</th>
+ <tal:block tal:condition="add_cluster">
+ <th class="systemsTable">Key ID</th>
+ <th class="systemsTable">Trust</th>
+ </tal:block>
+ <th></th>
</tr>
</thead>
<tfoot class="systemsTable">
<tr class="systemsTable"><td colspan="2" class="systemsTable">
<ul class="vanilla deploy">
- <li class="vanilla"><input type="radio" name="rhn_dl" value="1" checked="checked" />Download packages</li>
- <li class="vanilla"><input type="radio" name="rhn_dl" value="0" />Use locally installed packages.</li>
+ <li class="vanilla">
+ <input type="radio" name="download_pkgs"
+ value="1" checked="checked" />
+ Download packages
+ </li>
+ <li class="vanilla">
+ <input type="radio" name="download_pkgs"
+ value="0" />
+ Use locally installed packages.
+ </li>
</ul>
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" value="1" name="enable_storage" />Enable Shared Storage Support
+ <input type="checkbox" name="enable_storage" />
+ Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <div>
- <input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
- </div>
+ <ul class="vanilla">
+ <li class="vanilla">
+ <input name="check_certs" type="checkbox"
+ tal:attributes="checked python: (add_cluster and add_cluster['check_certs']) and 'checked'" />
+ View system certificates before sending any passwords.
+ </li>
+ <li class="vanilla">
+ <input type="checkbox"
+ name="allSameCheckBox" id="allSameCheckBox"
+ onClick="allPasswdsSame(this.form)"
+ tal:attributes="checked python: (add_cluster and add_cluster['identical_passwds']) and 'checked'"
+ />
+
+ Check if node passwords are identical.
+ </li>
+ </ul>
</td></tr>
-
<tr class="systemsTable"><td class="systemsTable" colspan="2">
<div class="systemsTableEnd">
- <input type="button" value="Add Another Row" onClick="addSystem(adminform);" />
+ <input type="button" value="Add another entry"
+ onClick="addSystem(this.form)" />
</div>
</td></tr>
</tfoot>
+ <tal:block tal:define="global cur_sysnum python:0" />
+
<tbody class="systemsTable">
- <tr class="systemsTable">
+ <tal:block
+ tal:condition="exists: add_cluster/nodes"
+ tal:repeat="cur_sys add_cluster/nodes">
+ <tr class="systemsTable"
+ tal:attributes="id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+ tal:define="sys python: add_cluster['nodes'][cur_sys]">
+ <td class="systemsTable">
+ <input type="text"
+ tal:attributes="
+ value sys/host | nothing;
+ id python: '__SYSTEM%d:Addr' % cur_sysnum;
+ name python: '__SYSTEM%d:Addr' % cur_sysnum;
+ class python: 'hbInputSys' + ('errors' in sys and ' error' or '');
+ disabled python: ('auth' in sys and sys['host'].count('.') > 0) and 1 or 0"
+ />
+ </td>
+ <td class="systemsTable">
+ <tal:block tal:condition="not: exists: sys/auth">
+ <input type="password"
+ onChange="pwd0Change(this.form)"
+ autocomplete="off"
+ tal:attributes="
+ value sys/passwd | nothing;
+ class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+ </tal:block>
+
+ <tal:block tal:condition="exists: sys/auth">
+ <input type="text" onChange="pwd0Change(this.form)"
+ disabled="disabled" value="[authenticated]"
+ tal:attributes="
+ class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+ </tal:block>
+ </td>
+ <td tal:condition="add_cluster" class="systemsTable">
+ <img
+ tal:attributes="
+ src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+ title sys/fp | string:no key fingerprint available" />
+ <input type="hidden"
+ tal:attributes="
+ id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ value sys/fp | nothing" />
+ </td>
+ <td tal:condition="add_cluster" class="systemsTable">
+ <input type="checkbox" tal:attributes="
+ checked exists: sys/fp;
+ id python: '__SYSTEM%dTrusted' % cur_sysnum;
+ name python: '__SYSTEM%dTrusted' % cur_sysnum;
+ disabled python: 'trusted' in sys"
+ />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ tal:attributes="
+ onclick python: 'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
+ </td>
+ </tr>
+ <tal:block
+ tal:define="global cur_sysnum python: cur_sysnum + 1" />
+ </tal:block>
+
+ <tr class="systemsTable" id="__SYSTEM_ROW_0"
+ tal:condition="not: add_cluster">
<td class="systemsTable">
<input class="hbInputSys" type="text"
id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
</td>
<td class="systemsTable">
<input type="password"
- id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
+ onChange="pwd0Change(this.form)"
class="hbInputPass" autocomplete="off"
- onChange="pwd0Change(adminform);" />
+ onChange="pwd0Change(this.form)"
+ id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd" />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ onclick="delete_element_id('__SYSTEM_ROW_0')" />
</td>
</tr>
-
- <tr class="systemsTable">
+ <tr class="systemsTable" id="__SYSTEM_ROW_1"
+ tal:condition="not: add_cluster">
<td class="systemsTable">
<input class="hbInputSys" type="text"
id="__SYSTEM1:Addr" name="__SYSTEM1:Addr" />
</td>
<td class="systemsTable">
<input type="password"
- id="__SYSTEM1:Passwd" name="__SYSTEM1:Passwd"
+ onChange="pwd0Change(this.form)"
class="hbInputPass" autocomplete="off"
- onChange="pwd0Change(adminform);" />
+ id="__SYSTEM1:Passwd" name="__SYSTEM1:Passwd" />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ onclick="delete_element_id('__SYSTEM_ROW_1')" />
</td>
</tr>
-
- <tr class="systemsTable">
+ <tr class="systemsTable" id="__SYSTEM_ROW_2"
+ tal:condition="not: add_cluster">
<td class="systemsTable">
<input class="hbInputSys" type="text"
id="__SYSTEM2:Addr" name="__SYSTEM2:Addr" />
</td>
<td class="systemsTable">
<input type="password"
- id="__SYSTEM2:Passwd" name="__SYSTEM2:Passwd"
+ onChange="pwd0Change(this.form)"
class="hbInputPass" autocomplete="off"
- onChange="pwd0Change(adminform);" />
- </td>
- </tr>
- </tbody>
- </table>
- </tal:block>
-
- <tal:block tal:condition="python: sessionObj and 'requestResults' in sessionObj and 'nodeList' in sessionObj['requestResults']">
-
- <table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
- <thead class="systemsTable">
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <div class="systemsTableTop">
- <strong>Cluster Name:</strong>
- <input type="text" id="clusterName" name="clusterName"
- tal:attributes="value python: sessionObj['requestResults']['clusterName']" />
- </div>
- </td></tr>
- <tr class="systemsTable">
- <th class="systemsTable">Node Hostname</th>
- <th class="systemsTable">Root Password</th>
- </tr>
- </thead>
-
- <tfoot class="systemsTable">
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <ul class="vanilla deploy">
- <li class="vanilla"><input type="radio" name="rhn_dl" value="1" checked="checked" />Download packages</li>
- <li class="vanilla"><input type="radio" name="rhn_dl" value="0" />Use locally installed packages.</li>
- </ul>
- </td></tr>
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <input type="checkbox" value="1" name="enable_storage" />Enable Shared Storage Support
- </td></tr>
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <div>
- <input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
- </div>
- </td></tr>
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <div class="systemsTableEnd">
- <input type="button" value="Add Another Row" onClick="addSystem(adminform);" />
- </div>
- </td></tr>
- </tfoot>
-
- <span tal:omit-tag="" tal:define="global sysNum python: 0" />
-
- <tbody class="systemsTable">
- <tal:block tal:repeat="node python: sessionObj['requestResults']['nodeList']">
- <span tal:omit-tag=""
- tal:define="global nodeAuth python: node['cur_auth']" />
-
- <tr class="systemsTable">
- <td class="systemsTable">
- <input type="text"
- tal:attributes="
- id python: '__SYSTEM' + str(sysNum) + ':Addr';
- name python: '__SYSTEM' + str(sysNum) + ':Addr';
- value python: node['ricci_host'];
- class python: 'hbInputSys' + ('errors' in node and ' error' or '')"
- />
+ id="__SYSTEM2:Passwd" name="__SYSTEM2:Passwd" />
</td>
<td class="systemsTable">
- <input
- onChange="pwd0Change(adminform);"
- tal:attributes="
- type python: nodeAuth and 'text' or 'password';
- value python: nodeAuth and '[authenticated]' or '';
- class python: 'hbInputPass' + ('errors' in node and ' error' or '');
- id python: '__SYSTEM' + str(sysNum) + ':Passwd';
- name python: '__SYSTEM' + str(sysNum) + ':Passwd'"
- />
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ onclick="delete_element_id('__SYSTEM_ROW_2')" />
</td>
+ <tal:block tal:define="global cur_sysnum python:3" />
</tr>
- <span tal:omit-tag="" tal:define="global sysNum python: sysNum + 1" />
- </tal:block>
</tbody>
</table>
- <input type="hidden" name="numStorage" tal:attributes="value python: sysNum" />
- </tal:block>
+ <input name="numStorage" id="numStorage" type="hidden"
+ tal:attributes="value cur_sysnum" />
<div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+ <input type="button" name="Submit" value="Submit"
+ onClick="validateForm(this.form)" />
</div>
</form>
+
+ <div tal:condition="add_cluster">
+ <tal:block
+ tal:define="x python: request.SESSION.delete('create_cluster')" />
+ </div>
</div>
<div metal:define-macro="clusterconfig-form">
@@ -772,7 +825,8 @@
<tbody class="systemsTable">
<tr class="systemsTable">
- <td class="systemsTable">Post Fail Delay</td>
+ <td class="systemsTable">Post Fail <span tal:attributes="onclick python:'window.location.assign(\'./?pagetype=80&clustername=' + request['clustername'] + '\')'">Delay</span>
+ </td>
<td class="systemsTable">
<input type="text" name="post_fail_delay"
tal:attributes="value clusterinfo/pfd" />
@@ -1018,9 +1072,10 @@
<input class="qdscore qdisk" type="text" name="heuristic0:hscore" id="heuristic0:hscore" value="">
</td>
<td class="systemsTable">
- <img class="qdscore qdisk qdel_img"
+ <img class="qdisk deleteRow"
id="heuristic0:hdel" name="heuristic0:hdel"
- src="/luci/homebase/x.png"
+ src="/luci/delete-row.png"
+ title="delete this heuristic"
onClick="delete_qdisk_heur(this, document.quorum_partition);">
</td>
</tr>
@@ -1067,8 +1122,9 @@
name python: 'heuristic' + str(curHeur) + ':hscore';"/>
</td>
<td class="systemsTable">
- <img class="qdscore qdisk qdel_img"
- src="/luci/homebase/x.png"
+ <img class="qdisk deleteRow"
+ src="/luci/homebase/delete-row.png"
+ title="delete this heuristic"
onClick="delete_qdisk_heur(this, document.quorum_partition);"
tal:attributes="
id python: 'heuristic' + str(curHeur) + ':hdel';
@@ -1156,44 +1212,55 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>IP Address</td>
<td>
<input name="ip_addr" type="text"
- tal:attributes="value cur_fencedev/ipaddr | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/ipaddr | nothing" />
</td>
</tr>
<tr>
<td>Login</td>
<td>
<input name="login" type="text"
- tal:attributes="value cur_fencedev/login | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/login | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-apc" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_apc" />
</div>
@@ -1212,44 +1279,55 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>IP Address</td>
<td>
<input name="ip_addr" type="text"
- tal:attributes="value cur_fendev/ipaddr | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fendev/ipaddr | nothing" />
</td>
</tr>
<tr>
<td>Login</td>
<td>
<input name="login" type="text"
- tal:attributes="value cur_fencedev/login | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/login | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-mcdata" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_mcdata" />
</div>
@@ -1268,37 +1346,46 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>IP Address</td>
<td>
<input name="ip_addr" type="text"
- tal:attributes="value cur_fencedev/ipaddr | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/ipaddr | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-wti" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_wti" />
</div>
@@ -1342,11 +1429,13 @@
</td>
</tr>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="fence_type" value="fence_ilo" />
</div>
</div>
@@ -1388,11 +1477,13 @@
tal:attributes="value cur_fencedev/passwd | nothing" />
</td>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="fence_type" value="fence_drac" />
</div>
</div>
@@ -1435,11 +1526,13 @@
</td>
</tr>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="fence_type" value="fence_rsa" />
</div>
</div>
@@ -1457,44 +1550,55 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>IP Address</td>
<td>
<input name="ip_addr" type="text"
- tal:attributes="value cur_fencedev/ipaddr | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/ipaddr | nothing" />
</td>
</tr>
<tr>
<td>Login</td>
<td>
<input name="login" type="text"
- tal:attributes="value cur_fencedev/login | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/login | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-brocade" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_brocade" />
</div>
@@ -1513,7 +1617,9 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
@@ -1524,30 +1630,37 @@
<td>Login</td>
<td>
<input name="login" type="text"
- tal:attributes="value cur_fencedev/login | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/login | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-sanbox2" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_sanbox2" />
</div>
@@ -1566,37 +1679,46 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>IP Address</td>
<td>
<input name="ip_addr" type="text"
- tal:attributes="value cur_fencedev/ipaddr | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/ipaddr | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-brocade" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_vixel" />
</div>
@@ -1615,30 +1737,37 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>Servers (whitespace separated list)</td>
<td>
<input name="servers" type="text"
- tal:attributes="value cur_fencedev/servers | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/servers | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-gnbd" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_gnbd" />
</div>
@@ -1657,37 +1786,46 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>CServer</td>
<td>
<input name="cserver" type="text"
- tal:attributes="value cur_fencedev/cserver | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/cserver | nothing" />
</td>
</tr>
<tr>
<td>ESH Path (Optional)</td>
<td>
<input name="login" type="text"
- tal:attributes="value cur_fencedev/login | string:/opt/pan-mgr/bin/esh" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/login | string:/opt/pan-mgr/bin/esh" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-egenera" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_egenera" />
</div>
@@ -1731,11 +1869,13 @@
</td>
</tr>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="fence_type" value="fence_bladecenter" />
</div>
</div>
@@ -1753,44 +1893,55 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
<tr>
<td>IP Address</td>
<td>
<input name="ip_addr" type="text"
- tal:attributes="value cur_fencedev/ipaddr | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/ipaddr | nothing" />
</td>
</tr>
<tr>
<td>Login</td>
<td>
<input name="login" type="text"
- tal:attributes="value cur_fencedev/login | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/login | nothing" />
</td>
</tr>
<tr>
<td>Password</td>
<td>
<input name="password" type="password" autocomplete="off"
- tal:attributes="value cur_fencedev/passwd | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/passwd | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-bullpap" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="fence_bullpap" />
</div>
@@ -1827,11 +1978,13 @@
</td>
</tr>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="fence_type" value="fence_rps10" />
</div>
</div>
@@ -1849,23 +2002,28 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
</table>
+
<div name="instances">
<tal:block tal:condition="exists: cur_fence_instances">
- <tal:block tal:repeat="cur_fence_instance cur_fence_instances">
+ <tal:block tal:repeat="cur_instance cur_fence_instances">
<tal:block
metal:use-macro="here/form-macros/macros/fence-instance-form-xvm" />
</tal:block>
</tal:block>
</div>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="xvm" />
</div>
@@ -1884,15 +2042,19 @@
<td>Name</td>
<td>
<input name="name" type="text"
- tal:attributes="value cur_fencedev/name | nothing" />
+ tal:attributes="
+ disabled cur_fencedev/isShared | nothing;
+ value cur_fencedev/name | nothing" />
</td>
</tr>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="sharable" value="1" />
<input type="hidden" name="fence_type" value="scsi" />
</div>
@@ -1940,11 +2102,13 @@
<td><input name="auth_type" type="text" Title="Options are to leave blank for none, password, md2, or md5"/></td>
</tr>
</table>
+
<tal:block tal:condition="exists: cur_fencedev">
<input type="hidden" name="existing_device" value="1" />
<input type="hidden" name="old_name"
tal:attributes="value cur_fencedev/name | nothing" />
</tal:block>
+
<input type="hidden" name="fence_type" value="fence_ipmilan" />
</div>
</div>
@@ -2508,6 +2672,7 @@
<tal:block tal:condition="exists: fenceinfo/level1">
<tal:block tal:repeat="cur_fencedev fenceinfo/level1">
<tal:block tal:define="
+ cur_fence_instances cur_fencedev/instance_list | nothing;
cur_fence_type cur_fencedev/agent | nothing;
cur_fence_level python: 1;">
<div tal:attributes="id python: 'fence1_' + str(cur_fence_num)">
@@ -2695,6 +2860,7 @@
<script type="text/javascript">
set_page_title('Luci ??? cluster ??? Add a new cluster node');
</script>
+
<script type="text/javascript"
src="/luci/homebase/homebase_common.js">
</script>
@@ -2703,79 +2869,191 @@
src="/luci/homebase/validate_cluster_add.js">
</script>
- <input type="hidden" name="clusterName"
- tal:attributes="value request/form/clusterName | request/clustername | none"
- />
- <form name="adminform" action="" method="post">
- <input name="numStorage" type="hidden" value="1" />
- <input name="pagetype" type="hidden" value="15" />
- <input name="addnode" type="hidden" value="1" />
+ <form name="add_node" action="" method="post"
+ tal:define="
+ global add_cluster request/SESSION/add_node | nothing;
+ global cur_cluster_name add_cluster/name | request/clustername | request/form/clusterName | nothing">
+
+ <h2>Add a node to <span tal:replace="cur_cluster_name | string:this cluster" /></h2>
<input type="hidden" name="clusterName"
- tal:attributes="
- value request/form/clusterName | request/clustername | nothing"
- />
+ tal:attributes="value cur_cluster_name | string:[unknown]" />
- <h2>Add a node to <span tal:replace="request/form/clusterName | request/clustername | string:the cluster" /></h2>
+ <input name="pagetype" type="hidden"
+ tal:attributes="value request/form/pagetype | request/pagetype | string:15" />
- <table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+ <input name="cluster_os" type="hidden"
+ tal:attributes="value add_cluster/cluster_os | nothing" />
+
+ <table id="systemsTable" class="systemsTable" cellspacing="0">
<thead class="systemsTable">
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <div class="systemsTableTop">
- <strong>Cluster Name</strong> <span tal:content="request/form/clusterName | request/clustername | none" />
- </div>
- </td></tr>
<tr class="systemsTable">
- <th class="systemsTable">System Hostname</th>
- <th class="systemsTable">Password</th>
+ <th class="systemsTable">Node Hostname</th>
+ <th class="systemsTable">Root Password</th>
+ <tal:block tal:condition="add_cluster">
+ <th class="systemsTable">Key ID</th>
+ <th class="systemsTable">Trust</th>
+ </tal:block>
+ <th></th>
</tr>
</thead>
<tfoot class="systemsTable">
<tr class="systemsTable"><td colspan="2" class="systemsTable">
<ul class="vanilla deploy">
- <li class="vanilla"><input type="radio" name="rhn_dl" value="1" checked="checked" />Download packages</li>
- <li class="vanilla"><input type="radio" name="rhn_dl" value="0" />Use locally installed packages.</li>
+ <li class="vanilla">
+ <input type="radio" name="download_pkgs" value="1"
+ tal:attributes="
+ checked add_system/download_pkgs | string:checked" />
+ Download packages
+ </li>
+ <li class="vanilla">
+ <input type="radio" name="download_pkgs" value="0"
+ tal:attributes="
+ checked not: add_system/download_pkgs | nothing" />
+
+ Use locally installed packages.
+ </li>
</ul>
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" value="1" name="enable_storage" />Enable Shared Storage Support
+ <input type="checkbox" name="enable_storage"
+ tal:attributes="
+ checked add_system/shared_storage | nothing" />
+ Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <div id="allSameDiv">
- <input type="checkbox" class="allSameCheckBox"
- name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/>
- Check if cluster node passwords are identical.
- </div>
+ <ul class="vanilla">
+ <li class="vanilla">
+ <input name="check_certs" type="checkbox"
+ tal:attributes="checked python: (add_cluster and add_cluster['check_certs']) and 'checked'" />
+ View system certificates before sending any passwords.
+ </li>
+ <li class="vanilla"
+ tal:attributes="id python: (not add_cluster or ('nodes' in add_cluster and len(add_cluster['nodes']) < 2)) and 'allSameDiv'">
+ <input type="checkbox"
+ name="allSameCheckBox" id="allSameCheckBox"
+ onClick="allPasswdsSame(this.form)"
+ tal:attributes="checked python: (add_cluster and add_cluster['identical_passwds']) and 'checked'"
+ />
+ Check if node passwords are identical.
+ </li>
+ </ul>
</td></tr>
-
<tr class="systemsTable"><td class="systemsTable" colspan="2">
<div class="systemsTableEnd">
- <input type="button" value="Add Another Row" onClick="addSystem(adminform);" />
+ <input type="button" value="Add another entry"
+ onClick="addSystem(this.form)" />
</div>
</td></tr>
</tfoot>
+ <tal:block tal:define="global cur_sysnum python:0" />
+
<tbody class="systemsTable">
- <tr class="systemsTable">
+ <tal:block
+ tal:condition="exists: add_cluster/nodes"
+ tal:repeat="cur_sys add_cluster/nodes">
+ <tr class="systemsTable"
+ tal:attributes="id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+ tal:define="sys python: add_cluster['nodes'][cur_sys]">
+ <td class="systemsTable">
+ <input type="text"
+ tal:attributes="
+ value sys/host | nothing;
+ id python: '__SYSTEM%d:Addr' % cur_sysnum;
+ name python: '__SYSTEM%d:Addr' % cur_sysnum;
+ class python: 'hbInputSys' + ('errors' in sys and ' error' or '');
+ disabled python: ('auth' in sys and sys['host'].count('.') > 0) and 1 or 0"
+ />
+ </td>
+ <td class="systemsTable">
+ <tal:block tal:condition="not: exists: sys/auth">
+ <input type="password"
+ autocomplete="off"
+ onChange="pwd0Change(this.form)"
+ tal:attributes="
+ value sys/passwd | nothing;
+ class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+ </tal:block>
+
+ <tal:block tal:condition="exists: sys/auth">
+ <input type="text" onChange="pwd0Change(this.form)"
+ disabled="disabled" value="[authenticated]"
+ tal:attributes="
+ class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+ </tal:block>
+ </td>
+ <td tal:condition="add_cluster" class="systemsTable">
+ <img
+ tal:attributes="
+ src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+ title sys/fp | string:no key fingerprint available" />
+ <input type="hidden"
+ tal:attributes="
+ id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ value sys/fp | nothing" />
+ </td>
+ <td tal:condition="add_cluster" class="systemsTable">
+ <input type="checkbox" tal:attributes="
+ checked exists: sys/fp;
+ id python: '__SYSTEM%dTrusted' % cur_sysnum;
+ name python: '__SYSTEM%dTrusted' % cur_sysnum;
+ disabled python: 'trusted' in sys"
+ />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ tal:attributes="
+ onclick python: 'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
+ </td>
+ </tr>
+ <tal:block
+ tal:define="global cur_sysnum python: cur_sysnum + 1" />
+ </tal:block>
+
+ <tr class="systemsTable" id="__SYSTEM_ROW_0"
+ tal:condition="not: add_cluster">
<td class="systemsTable">
<input class="hbInputSys" type="text"
id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
</td>
<td class="systemsTable">
<input type="password"
- id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
+ onChange="pwd0Change(this.form)"
class="hbInputPass" autocomplete="off"
- onChange="pwd0Change(adminform);" />
+ onChange="pwd0Change(this.form)"
+ id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd" />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ onclick="delete_element_id('__SYSTEM_ROW_0')" />
</td>
+ <tal:block tal:define="global cur_sysnum python:1" />
</tr>
</tbody>
</table>
+ <input name="numStorage" id="numStorage" type="hidden"
+ tal:attributes="value cur_sysnum" />
+
<div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+ <input type="button" name="Submit" value="Submit"
+ onClick="validateForm(this.form)" />
</div>
</form>
+
+ <div tal:condition="add_cluster">
+ <tal:block
+ tal:define="x python: request.SESSION.delete('add_node')" />
+ </div>
</div>
<div metal:define-macro="nodeprocess-form">
@@ -2784,14 +3062,14 @@
<div id="errmsgsdiv" class="errmsgs"
tal:condition="python: result and len(result) > 1 and 'errors' in result[1]">
- <p class="errmsgs">The following errors occurred:</p>
+ <p class="errmsgs">The following errors occurred:</p>
- <ul class="errmsgs">
- <tal:block tal:repeat="e python: result[1]['errors']">
- <li class="errmsgs" tal:content="python:e" />
- </tal:block>
- </ul>
- </div>
+ <ul class="statusmsg">
+ <tal:block tal:repeat="e python: result[1]['errors']">
+ <li class="statusmsg" tal:content="python:e" />
+ </tal:block>
+ </ul>
+ </div>
</tal:block>
</div>
@@ -3410,8 +3688,6 @@
<table id="fence_devices" class="cluster node fence">
<tfoot class="fence">
<tr class="cluster node fence"><td class="cluster node fence">
- <input type="button" value="Add this shared fence device"
- onClick="validate_fence_properties('fence_devices')" />
</td></tr>
</tfoot>
<tbody class="fence">
@@ -3431,8 +3707,13 @@
<tal:block metal:use-macro="here/form-macros/macros/shared-fence-option-list" />
</select>
</form>
+ <form name="fencedevaddform" action="" method="post">
<div id="fence_container">
</div>
+ <input type="submit" value="Add this shared fence device"/>
+ <input type="hidden" name="pagetype" value="51" id="pagetype" />
+ <input type="hidden" name="clustername" value="" id="pagetype" tal:attributes="value request/clustername"/>
+ </form>
</td>
</tr>
</tbody>
@@ -3567,9 +3848,23 @@
global cur_fencedev python:here.getFence(modelb,request);
global cur_fence_type cur_fencedev/agent" />
</span>
-
+
+ <form name="fencedeveditform" action="" method="post">
<tal:block
metal:use-macro="here/form-macros/macros/fencedev-cond-ladder" />
+ <input type="submit" value="Update this fence device"/>
+ <input type="hidden" name="pagetype" value="54" id="pagetype" />
+ <input type="hidden" name="clustername" value="" id="pagetype" tal:attributes="value request/clustername"/>
+ <input type="hidden" name="orig_name" value="" id="orig_name" tal:attributes="value request/fencename"/>
+ <input type="hidden" name="fencename" value="" id="fencename" tal:attributes="value request/fencename"/>
+ </form>
+ <form name="fencedevdeleteform" action="" method="post">
+ <input type="submit" value="Delete this fence device"/>
+ <input type="hidden" name="pagetype" value="57" id="pagetype"/>
+ <input type="hidden" name="clustername" value="" id="pagetype" tal:attributes="value request/clustername"/>
+ <input type="hidden" name="fencename" value="" id="fencename" tal:attributes="value request/fencename"/>
+ <input type="hidden" name="orig_name" value="" id="orig_name" tal:attributes="value request/fencename"/>
+ </form>
</div>
</div>
@@ -3577,6 +3872,28 @@
<h2>Fence Device Process Form</h2>
</div>
+<div metal:define-macro="conf_editor-form">
+ <h2>Edit cluster.conf</h2>
+ <form method="post"
+ tal:attributes="action python: './?' + request['QUERY_STRING']"
+ tal:define="ret python: here.process_cluster_conf_editor(request)">
+ <span tal:content="structure python: ret['msg'].replace('\n', '<br/>')"/>
+ <textarea name="new_cluster_conf"
+ tal:attributes="rows python: len(ret['cluster_conf'].splitlines()) + 8"
+ tal:content="structure ret/cluster_conf"></textarea>
+ <input tal:attributes="type string:hidden;
+ name string:pagetype;
+ value python:request['pagetype']"/>
+ <input tal:attributes="type string:hidden;
+ name string:clustername;
+ value python:request['clustername']"/>
+ <input type="button"
+ value="Reset"
+ tal:attributes="onclick python:'window.location.assign(\'./?pagetype=' + request['pagetype'] + '&clustername=' + request['clustername'] + '\')'"/>
+ <input type="submit" value="Propagate"/>
+ </form>
+</div>
+
</body>
</html>
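
Note on the conf_editor-form macro added above: the template relies on here.process_cluster_conf_editor(request) returning a status message plus the current cluster.conf text. That adapter lives in cluster_adapters.py and is not quoted in this hunk, so the sketch below only illustrates the contract the macro depends on. The method name, the new_cluster_conf form field, and the 'msg'/'cluster_conf' return keys are taken from the template itself; everything else is assumed.

# Hypothetical sketch of the adapter contract behind conf_editor-form.
# Only the method name, the 'new_cluster_conf' field, and the returned
# 'msg'/'cluster_conf' keys come from the template above; the body is assumed.
def process_cluster_conf_editor(self, request):
    messages = []
    new_conf = request.form.get('new_cluster_conf', '')
    if new_conf:
        # Assumed behaviour: push the edited cluster.conf out to the cluster
        # nodes (e.g. through the ricci bridge) and report the outcome.
        messages.append('Propagating the updated cluster.conf')
        cluster_conf = new_conf
    else:
        # Assumed behaviour: fetch the current cluster.conf for display
        # in the textarea.
        cluster_conf = '<cluster name="%s"/>' % request.get('clustername', '')
    return {
        'msg': '\n'.join(messages),
        'cluster_conf': cluster_conf,
    }
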
--- conga/luci/cluster/index_html 2006/12/07 17:54:31 1.20.2.5
+++ conga/luci/cluster/index_html 2006/12/22 17:50:16 1.20.2.6
@@ -207,23 +207,24 @@
<tal:block tal:define="ret python: request.SESSION.get('checkRet')">
<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
<div class="hbclosebox">
- <a href="javascript:hide_element('retmsgsdiv');"><img src="../homebase/x.png"></a>
+ <a href="javascript:hide_element('retmsgsdiv')"><img src="/luci/homebase/x.png" class="closeBox" title="dismiss"></a>
</div>
- <ul class="retmsgs">
+ <p class="retmsgs">Status messages:</p>
+ <ul class="statusmsg">
<tal:block tal:repeat="e python:ret['messages']">
- <li class="retmsgs" tal:content="python:e" />
+ <li class="statusmsg" tal:content="python:e" />
</tal:block>
</ul>
</div>
<div id="errmsgsdiv" class="errmsgs" tal:condition="python:(ret and 'errors' in ret and len(ret['errors']))">
<div class="hbclosebox">
- <a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="../homebase/x.png"></a>
+ <a class="hbclosebox" href="javascript:hide_element('errmsgsdiv')"><img src="/luci/homebase/x.png" class="closeBox" title="dismiss"></a>
</div>
<p class="errmsgs">The following errors occurred:</p>
- <ul class="errmsgs">
+ <ul class="statusmsg">
<tal:block tal:repeat="e python:ret['errors']">
- <li class="errmsgs" tal:content="python:e" />
+ <li class="statusmsg" tal:content="python:e" />
</tal:block>
</ul>
</div>
--- conga/luci/cluster/validate_config_qdisk.js 2006/10/04 17:24:58 1.4
+++ conga/luci/cluster/validate_config_qdisk.js 2006/12/22 17:50:16 1.4.2.1
@@ -314,10 +314,11 @@
var del_td = document.createElement('td');
del_td.className = 'systemsTable';
var del_img = document.createElement('img');
- del_img.className = 'qdscore qdisk qdel_img';
+ del_img.className = 'qdisk deleteRow';
del_img.setAttribute('name', hstr + ':hdel');
del_img.setAttribute('id', hstr + ':hdel');
- del_img.setAttribute('src', '/luci/homebase/x.png');
+ del_img.setAttribute('src', '/luci/delete-row.png');
+ del_img.setAttribute('title', 'delete this row');
del_img.setAttribute('onClick', 'delete_qdisk_heur(this, document.quorum_partition)');
del_td.appendChild(del_img);
--- conga/luci/conga_ssl/SSLClient.cpp 2006/12/08 18:27:32 1.1.2.1
+++ conga/luci/conga_ssl/SSLClient.cpp 2006/12/22 17:50:16 1.1.2.2
@@ -437,7 +437,7 @@
}
bool
-SSLClient::trust_peer_cert()
+SSLClient::trust_peer_cert(const String& hostname)
{
MutexLocker l(global_lock);
@@ -447,35 +447,12 @@
if (!peer_has_cert())
throw String("peer did not present cert");
- String f_name(_certs_store_dir);
- f_name += "/peer_cert_XXXXXX";
- int fd = -1;
- char* buff = new char[f_name.size() + 1];
+ String filename(_certs_store_dir);
+ filename += "/" + hostname + "_cert_pub";
try {
- // pick a filename
- strcpy(buff, f_name.c_str());
- if ((fd = mkstemp(buff)) == -1)
- throw String("unable to generate random file");
- f_name = buff;
- delete[] buff; buff = 0;
-
- String data(_cert_pem);
- while (data.size()) {
- ssize_t i = write(fd, data.c_str(), data.size());
- if (i == -1) {
- if (errno != EINTR)
- throw String("error writing certificate");
- } else
- data = data.substr(i);
- }
- while (close(fd) && errno == EINTR)
- ;
+ File::create(filename).replace(_cert_pem);
} catch ( ... ) {
- delete[] buff;
- if (fd != -1)
- while (close(fd) && errno == EINTR)
- ;
- unlink(f_name.c_str());
+ unlink(filename.c_str());
return false;
}
--- conga/luci/conga_ssl/SSLClient.h 2006/12/08 18:27:32 1.1.2.1
+++ conga/luci/conga_ssl/SSLClient.h 2006/12/22 17:50:16 1.1.2.2
@@ -56,7 +56,7 @@
String peer_cert_fingerprint(String& digest);
- bool trust_peer_cert();
+ bool trust_peer_cert(const String&);
bool untrust_peer_cert(); // remove peer's cert from cert_store
ClientSocket& socket();
--- conga/luci/conga_ssl/conga_ssl_lib.cpp 2006/12/08 18:27:32 1.1.2.1
+++ conga/luci/conga_ssl/conga_ssl_lib.cpp 2006/12/22 17:50:16 1.1.2.2
@@ -261,7 +261,8 @@
conga_ssl_lib_trust(PyObject *self, PyObject *args)
{
int id;
- if (!PyArg_ParseTuple(args, "i", &id))
+ const char* msg;
+ if (!PyArg_ParseTuple(args, "is", &id, &msg))
return NULL;
try {
@@ -273,7 +274,7 @@
bool resp;
{
PythonThreadsAllower all;
- resp = iter->second->trust_peer_cert();
+ resp = iter->second->trust_peer_cert(msg);
}
PyObject* resp_p = Py_BuildValue("i", (resp)?1:0);
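
With the change above, conga_ssl_lib_trust() parses an "is" argument pair, so the Python side now has to pass both the connection id and the hostname whose certificate should be trusted; SSLClient::trust_peer_cert() then stores the peer certificate under <certs_store_dir>/<hostname>_cert_pub rather than a mkstemp()-generated name. The conga_ssl.py wrapper is updated by this commit but not quoted in this hunk, so the call below is only an assumed illustration of the new convention; the exported function name and the module name are guesses.

# Hypothetical illustration of the new (id, hostname) calling convention.
# Only the argument pair follows from the "is" format string above; the
# exported name "trust" and the module name are assumptions.
import conga_ssl_lib  # assumed name of the extension built from conga_ssl_lib.cpp

def trust_peer(conn_id, hostname):
    # The hostname determines the on-disk name of the stored certificate:
    # <certs_store_dir>/<hostname>_cert_pub (see SSLClient::trust_peer_cert).
    return conga_ssl_lib.trust(conn_id, hostname) == 1
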
--- conga/luci/homebase/form-macros 2006/11/16 19:34:53 1.44.2.4
+++ conga/luci/homebase/form-macros 2006/12/22 17:50:16 1.44.2.5
@@ -71,7 +71,8 @@
tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
<div class="hbSubmit" tal:condition="python:userList" id="hbSubmit">
- <input name="Submit" type="button" value="Delete This User" onClick="validateForm(document.adminform);" />
+ <input name="Submit" type="button" value="Delete This User"
+ onClick="validateForm(this.form)" />
</div>
</form>
@@ -140,7 +141,8 @@
tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
<div class="hbSubmit" id="hbSubmit">
- <input name="Submit" type="button" value="Submit" onClick="validateForm(document.adminform);" />
+ <input name="Submit" type="button" value="Submit"
+ onClick="validateForm(this.form)" />
</div>
</form>
</div>
@@ -196,7 +198,7 @@
<span tal:condition="python:perms" tal:content="string:Select a User" /><br/>
- <select tal:omit-tag="python: not perms" class="homebase" name="userList" onChange="document.location = adminform.baseURL.value + '&user=' + adminform.userList.options[adminform.userList.selectedIndex].text">
+ <select tal:omit-tag="python: not perms" class="homebase" name="userList" onChange="document.location = this.form.baseURL.value + '&user=' + this.form.userList.options[this.form.userList.selectedIndex].text">
<tal:block tal:repeat="user python:perms">
<option class="homebase"
tal:content="python:user"
@@ -250,12 +252,14 @@
tal:attributes="value python: num_clusters + 1" />
<div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Update Permissions" value="Update Permissions" onClick="validateForm(document.adminform);" />
+ <input type="button" name="Update Permissions" value="Update Permissions"
+ onClick="validateForm(this.form)" />
</div>
</form>
<div tal:condition="python: blankForm">
- <p>Either no users have been added or no clusters or storage systems are being managed by Luci.</p>
+ <p>Either no users have been added or no clusters
+ or storage systems are managed by Luci.</p>
</div>
</div>
@@ -287,93 +291,191 @@
set_page_title('Luci ??? homebase ??? Remove a system or cluster from Luci');
</script>
- <span tal:omit-tag=""
- tal:define="global systems python:here.getSystems();
- global blankForm python:1;
- global num_clusters python:-1;
- global num_systems python:-1"
- />
-
<h2 class="homebase">Manage Systems and Clusters</h2>
<h3>Authenticate to Storage or Cluster Systems</h3>
<form name="authform" method="post" action="">
- <input type="hidden" name="pagetype" value="8" />
- <table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
- <thead class="systemsTable">
- <tr class="systemsTable">
- <th class="systemsTable">System Hostname</th>
- <th class="systemsTable">Root Password</th>
- </tr>
- </thead>
+ <table id="systemsTable" class="systemsTable" border="0" cellspacing="0"
+ tal:define="
+ new_systems request/SESSION/auth_systems | nothing;
+ global cur_sysnum python: 1">
- <tfoot class="systemsTable">
- <tr class="systemsTable"><td colspan="2" class="systemsTable">
- <div id="allSameDiv" class="invisible">
- <input type="checkbox" class="allSameCheckBox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(this.form);"/><span>Check if storage system passwords are identical.</span>
- </div>
- </td></tr>
-
- <tr class="systemsTable"><td class="systemsTable" colspan="2">
- <div class="systemsTableEnd">
- <input type="button" value="Add another entry" onClick="addSystem(this.form);" />
- </div>
- </td></tr>
- </tfoot>
+ <tal:block tal:condition="not: new_systems">
+ <thead class="systemsTable">
+ <tr class="systemsTable">
+ <th class="systemsTable">System Hostname</th>
+ <th class="systemsTable">Root Password</th>
+ <th class="systemsTable"></th>
+ </tr>
+ </thead>
+
+ <tbody class="systemsTable">
+ <tr class="systemsTable" id="__SYSTEM_ROW_0">
+ <td class="systemsTable">
+ <input class="hbInputSys" type="text"
+ id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+ </td>
+ <td class="systemsTable">
+ <input type="password"
+ autocomplete="off"
+ id="__SYSTEM0:Passwd"
+ name="__SYSTEM0:Passwd"
+ class="hbInputPass"
+ onChange="pwd0Change(this.form)" />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ onclick="delete_element_id('__SYSTEM_ROW_0')" />
+ </td>
+ </tr>
+ </tbody>
+ </tal:block>
- <tbody class="systemsTable">
- <tr class="systemsTable">
- <td class="systemsTable">
- <input class="hbInputSys" type="text"
- id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
- </td>
- <td class="systemsTable">
- <input type="password" autocomplete="off"
- id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
- class="hbInputPass"
- onChange="pwd0Change(this.form);" />
- </td>
- </tr>
+ <tal:block tal:condition="new_systems">
+ <thead class="systemsTable">
+ <tr class="systemsTable">
+ <th class="systemsTable">System Hostname</th>
+ <th class="systemsTable">Root Password</th>
+ <th class="systemsTable">Key ID</th>
+ <th class="systemsTable">Trust</th>
+ <th class="systemsTable"></th>
+ </tr>
+ </thead>
+ <tal:block tal:define="global cur_sysnum python: 0" />
- <tal:block tal:define="global numsys python: 0" />
+ <tbody class="systemsTable">
+ <tal:block tal:repeat="cur_sys new_systems">
+ <tr class="systemsTable"
+ tal:attributes="
+ id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+ tal:define="sys python: new_systems[cur_sys]">
- <tal:block tal:repeat="s python:systems[2]">
- <tal:block tal:define="global numsys python: numsys + 1" />
- <tr class="systemsTable">
<td class="systemsTable">
<input class="hbInputSys" type="text"
tal:attributes="
- id python: '__SYSTEM' + str(numsys) + ':Addr';
- name python: '__SYSTEM' + str(numsys) + ':Addr'" />
+ id python: '__SYSTEM%d:Addr' % cur_sysnum;
+ name python: '__SYSTEM%d:Addr' % cur_sysnum;
+ value sys/host | nothing" />
</td>
-
<td class="systemsTable">
- <input type="password" autocomplete="off"
- onChange="pwd0Change(this.form);"
+ <input type="password"
+ autocomplete="off"
class="hbInputPass"
+ onChange="pwd0Change(this.form)"
+ tal:attributes="
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ value sys/passwd | nothing" />
+ </td>
+ <td class="systemsTable">
+ <img
tal:attributes="
- id python: '__SYSTEM' + str(numsys) + ':Password';
- name python: '__SYSTEM' + str(numsys) + ':Password'" />
+ src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+ title sys/fp | string:no key fingerprint available"
+ />
+ <input type="hidden"
+ tal:attributes="
+ id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ value sys/fp | nothing" />
+ </td>
+ <td class="systemsTable">
+ <input type="checkbox" checked tal:attributes="
+ id python: '__SYSTEM%dTrusted' % cur_sysnum;
+ name python: '__SYSTEM%dTrusted' % cur_sysnum;
+ disabled python: 'trusted' in sys"
+ />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ tal:attributes="onclick python:'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
</td>
</tr>
+ <tal:block
+ tal:define="global cur_sysnum python: cur_sysnum + 1" />
</tal:block>
</tbody>
+ <tal:block
+ tal:define="
+ x python: request.SESSION.delete('auth_systems')" />
+ </tal:block>
+
+ <tfoot class="systemsTable">
+ <tr class="systemsTable"><td colspan="2" class="systemsTable">
+ <ul class="vanilla">
+ <li class="vanilla"><input name="check_certs" type="checkbox">View system certificates before sending any passwords.</li>
+ <li class="vanilla"
+ tal:attributes="id python: cur_sysnum < 2 and 'allSameDiv' or ''">
+ <input type="checkbox" name="allSameCheckBox"
+ id="allSameCheckBox" onClick="allPasswdsSame(this.form)" />
+ Check if system passwords are identical.
+ </li>
+ </ul>
+ </td></tr>
+
+ <tr class="systemsTable"><td class="systemsTable" colspan="2">
+ <div class="systemsTableEnd">
+ <input type="button" value="Add another entry"
+ onClick="addSystem(this.form)" />
+ </div>
+ </td></tr>
+ </tfoot>
</table>
- <input type="hidden" name="numStorage" value="1" />
+ <input name="numStorage" id="numStorage" type="hidden"
+ tal:attributes="value cur_sysnum | string:1" />
+
+ <input type="hidden" name="pagetype" value="8" />
<div class="hbSubmit" id="hbSubmit">
<input type="button" name="Submit" value="Submit"
onClick="validateAuth(this.form)" />
</div>
+
+ <tal:block tal:condition="exists: request/SESSION/auth_status">
+ <div class="retmsgs" id="auth_retmsgsdiv"
+ tal:condition="exists: request/SESSION/auth_status/messages">
+ <div class="hbclosebox">
+ <a href="javascript:hide_element('auth_retmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
+ </div>
+ <p class="retmsgs">Status messages:</p>
+ <ul class="statusmsg">
+ <tal:block tal:repeat="e request/SESSION/auth_status/messages">
+ <li class="statusmsg" tal:content="e" />
+ </tal:block>
+ </ul>
+ </div>
+ <div class="errmsgs" id="auth_errmsgsdiv"
+ tal:condition="exists: request/SESSION/auth_status/errors">
+ <div class="hbclosebox">
+ <a href="javascript:hide_element('auth_errmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
+ </div>
+ <p class="errmsgs">The following errors occurred:</p>
+ <ul class="statusmsg">
+ <tal:block tal:repeat="e request/SESSION/auth_status/errors">
+ <li class="statusmsg" tal:content="e" />
+ </tal:block>
+ </ul>
+ </div>
+ <tal:block
+ tal:define="x python: request.SESSION.delete('auth_status')" />
+ <div class="padding"> </div>
+ </tal:block>
</form>
+ <tal:block tal:define="
+ global systems python:here.getSystems();
+ global blankForm python:1;
+ global num_clusters python:-1;
+ global num_systems python:-1" />
+
<form name="adminform" method="post" action=""
tal:condition="python:(systems[0] and len(systems[0]) > 0) or (systems[1] and len(systems[1]) > 0)">
- <span tal:omit-tag="" tal:define="global blankForm python:0" />
-
+ <tal:block tal:define="global blankForm python:0" />
<input type="hidden" name="pagetype"
tal:attributes="value request/form/pagetype | request/pagetype | nothing" />
@@ -425,19 +527,20 @@
</tal:block>
</div>
- <input type="hidden" id="numStorage"
+ <input type="hidden" id="num_storage"
tal:attributes="value python: num_systems + 1" />
- <input type="hidden" id="numClusters"
+ <input type="hidden" id="num_clusters"
tal:attributes="value python: num_clusters + 1" />
- <div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Submit" value="Remove Selected Systems" onClick="validateForm(document.adminform);" />
+ <div class="hbSubmit">
+ <input type="button" name="Submit" value="Remove selected entries"
+ onClick="validateForm(this.form)" />
</div>
</form>
<div tal:condition="python: blankForm">
- <p>No clusters or storage systems are currently being managed by Luci.</p>
+ <p>No clusters or storage systems are currently managed by Luci.</p>
</div>
</div>
@@ -476,51 +579,141 @@
<input name="pagetype" type="hidden"
tal:attributes="value request/form/pagetype | request/pagetype | nothing" />
- <input name="numStorage" id="numStorage" type="hidden" value="1" />
-
<input name="absoluteURL" type="hidden"
tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
- <table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
- <thead class="systemsTable">
- <tr class="systemsTable">
- <th class="systemsTable">System Hostname</th>
- <th class="systemsTable">Root Password</th>
- </tr>
- </thead>
+ <table id="systemsTable" class="systemsTable" border="0" cellspacing="0"
+ tal:define="
+ new_systems request/SESSION/add_systems | nothing;
+ global cur_sysnum python: 1">
+
+ <tal:block tal:condition="not: new_systems">
+ <thead class="systemsTable">
+ <tr class="systemsTable">
+ <th class="systemsTable">System Hostname</th>
+ <th class="systemsTable">Root Password</th>
+ <th class="systemsTable"></th>
+ </tr>
+ </thead>
+
+ <tbody class="systemsTable">
+ <tr class="systemsTable" id="__SYSTEM_ROW_0">
+ <td class="systemsTable">
+ <input class="hbInputSys" type="text"
+ id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+ </td>
+ <td class="systemsTable">
+ <input type="password"
+ autocomplete="off"
+ id="__SYSTEM0:Passwd"
+ name="__SYSTEM0:Passwd"
+ class="hbInputPass"
+ onChange="pwd0Change(this.form)" />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ onclick="delete_element_id('__SYSTEM_ROW_0')" />
+ </td>
+ </tr>
+ </tbody>
+ </tal:block>
+
+ <tal:block tal:condition="new_systems">
+ <thead class="systemsTable">
+ <tr class="systemsTable">
+ <th class="systemsTable">System Hostname</th>
+ <th class="systemsTable">Root Password</th>
+ <th class="systemsTable">Key ID</th>
+ <th class="systemsTable">Trust</th>
+ <th class="systemsTable"></th>
+ </tr>
+ </thead>
+ <tal:block tal:define="global cur_sysnum python: 0" />
+
+ <tbody class="systemsTable">
+ <tal:block tal:repeat="cur_sys new_systems">
+ <tr class="systemsTable"
+ tal:attributes="
+ id python: '__SYSTEM_ROW_%d' % cur_sysnum"
+ tal:define="sys python: new_systems[cur_sys]">
+
+ <td class="systemsTable">
+ <input class="hbInputSys" type="text"
+ tal:attributes="
+ id python: '__SYSTEM%d:Addr' % cur_sysnum;
+ name python: '__SYSTEM%d:Addr' % cur_sysnum;
+ value sys/host | nothing" />
+ </td>
+ <td class="systemsTable">
+ <input type="password"
+ autocomplete="off"
+ class="hbInputPass"
+ onChange="pwd0Change(this.form)"
+ tal:attributes="
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ value sys/passwd | nothing" />
+
+ </td>
+ <td class="systemsTable">
+ <img
+ tal:attributes="
+ src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+ title sys/fp | string:no key fingerprint available"
+ />
+ <input type="hidden"
+ tal:attributes="
+ id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ value sys/fp | nothing" />
+ </td>
+ <td class="systemsTable">
+ <input type="checkbox" checked tal:attributes="
+ id python: '__SYSTEM%dTrusted' % cur_sysnum;
+ name python: '__SYSTEM%dTrusted' % cur_sysnum;
+ disabled python: 'trusted' in sys"
+ />
+ </td>
+ <td class="systemsTable">
+ <img src="/luci/delete-row.png" class="deleteRow"
+ title="delete this row"
+ tal:attributes="onclick python:'delete_element_id(\'__SYSTEM_ROW_%d\')' % cur_sysnum" />
+ </td>
+ </tr>
+ <tal:block
+ tal:define="global cur_sysnum python: cur_sysnum + 1" />
+ </tal:block>
+ </tbody>
+ <tal:block
+ tal:define="
+ x python: request.SESSION.delete('add_systems')" />
+ </tal:block>
<tfoot class="systemsTable">
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <div id="allSameDiv">
- <input type="checkbox" class="allSameCheckBox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/><span>Check if storage system passwords are identical.</span>
- </div>
+ <ul class="vanilla">
+ <li class="vanilla"><input name="check_certs" type="checkbox">View system certificates before sending any passwords.</li>
+ <li class="vanilla"
+ tal:attributes="id python: cur_sysnum < 2 and 'allSameDiv' or ''"><input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(this.form);"/>Check if storage system passwords are identical.</li>
+ </ul>
</td></tr>
<tr class="systemsTable"><td class="systemsTable" colspan="2">
<div class="systemsTableEnd">
- <input type="button" value="Add another entry" onClick="addSystem(adminform);" />
+ <input type="button" value="Add another entry"
+ onClick="addSystem(this.form)" />
</div>
</td></tr>
</tfoot>
-
- <tbody class="systemsTable">
- <tr class="systemsTable">
- <td class="systemsTable">
- <input class="hbInputSys" type="text"
- id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
- </td>
- <td class="systemsTable">
- <input type="password" autocomplete="off"
- id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
- class="hbInputPass"
- onChange="pwd0Change(adminform);" />
- </td>
- </tr>
- </tbody>
</table>
+ <input name="numStorage" id="numStorage" type="hidden"
+ tal:attributes="value cur_sysnum | string:1" />
+
<div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+ <input type="button" name="Submit" value="Submit"
+ onClick="validateForm(this.form)" />
</div>
</form>
</div>
@@ -550,95 +743,147 @@
set_page_title('Luci ??? homebase ??? Add a running cluster to be managed by Luci');
</script>
- <tal:block tal:define="
- global sessionObj python:request.SESSION.get('checkRet')" />
-
<h2 class="homebase">Add Cluster</h2>
+ <tal:block tal:define="
+ global add_cluster request/SESSION/add_cluster | nothing" />
+
<form name="adminform" action="" method="post"
- tal:condition="python: sessionObj and len(sessionObj)">
+ tal:condition="add_cluster">
+
<input name="pagetype" type="hidden"
tal:attributes="value request/form/pagetype | request/pagetype | nothing" />
<input name="absoluteURL" type="hidden"
tal:attributes="value python:data['children'][data['curIndex']]['absolute_url']" />
+ <input name="pass" type="hidden"
+ tal:attributes="value add_cluster/pass | string:0" />
+
+ <input name="cluster_os" type="hidden"
+ tal:attributes="value add_cluster/cluster_os | string:rhel5" />
+
<table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
<thead class="systemsTable">
<tr class="systemsTable"><td class="systemsTable" colspan="2">
<div class="systemsTableTop">
- <strong>Cluster Name:</strong> <span tal:replace="python: sessionObj['requestResults']['clusterName']" />
- <input type="hidden" type="text" id="clusterName" name="clusterName" tal:attributes="value python: sessionObj['requestResults']['clusterName']" />
+ <strong class="cluster_name">Cluster Name:
+ <span tal:replace="add_cluster/name | string:[unknown]" />
+ </strong>
+ <input type="hidden" id="clusterName" name="clusterName"
+ tal:attributes="value add_cluster/name | nothing" />
</div>
</td></tr>
+
<tr class="systemsTable">
<th class="systemsTable">Node Hostname</th>
<th class="systemsTable">Root Password</th>
+ <th class="systemsTable">Key ID</th>
+ <th class="systemsTable">Trust</th>
</tr>
+
</thead>
<tfoot class="systemsTable">
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <div tal:condition="python: not 'isComplete' in sessionObj['requestResults'] or not sessionObj['requestResults']['isComplete'] or ('errors' in sessionObj and len(sessionObj['errors']) > 0)">
- <input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(adminform);"/> Check if cluster node passwords are identical.
- </div>
- <div class="systemsTable"
- tal:condition="python: 'isComplete' in sessionObj['requestResults'] and sessionObj['requestResults']['isComplete']"> </div>
+ <ul class="vanilla">
+ <li class="vanilla">
+ <input name="check_certs" type="checkbox"
+ tal:attributes="checked python: add_cluster['check_certs'] and 'checked'" />
+ View system certificates before sending any passwords.
+ </li>
+ <li class="vanilla" id="allSameDiv">
+ <li class="vanilla" tal:condition="not: exists: add_cluster/complete">
+ <input type="checkbox" name="allSameCheckBox"
+ id="allSameCheckBox" onClick="allPasswdsSame(this.form)"
+ tal:attributes="checked python: add_cluster['identical_passwds'] and 'checked'"
+ />
+
+ Check if node passwords are identical.
+ </li>
+ <li class="vanilla" tal:condition="python: add_cluster['pass'] > 0 and 'incomplete' in add_cluster">
+ <input type="checkbox" name="asis">
+ Add the cluster to Luci as-is.<br>
+ Any nodes that are not authenticated will need to be authenticated later.
+ </li>
+ </ul>
+ <br/>
</td></tr>
</tfoot>
- <span tal:omit-tag=""
- tal:define="global sysNum python: 0"
- />
-
- <tbody class="systemsTable" tal:condition="python: 'nodeList' in sessionObj['requestResults']">
- <tal:block tal:repeat="node python: sessionObj['requestResults']['nodeList']">
- <span tal:omit-tag=""
- tal:define="global nodeAuth python: node['cur_auth']" />
+ <tal:block tal:define="global cur_sysnum python:0" />
- <tr class="systemsTable">
+ <tbody class="systemsTable" tal:condition="add_cluster/nodes">
+ <tal:block tal:repeat="cur_sys add_cluster/nodes">
+ <tr class="systemsTable"
+ tal:define="sys python: add_cluster['nodes'][cur_sys]">
<td class="systemsTable">
<input type="text"
tal:attributes="
- id python: '__SYSTEM' + str(sysNum) + ':Addr';
- name python: '__SYSTEM' + str(sysNum) + ':Addr';
- value python: node['host'];
- class python: 'hbInputSys' + ('errors' in node and ' error' or '');
- disabled python: (nodeAuth and node['host'].count('.') > 0) and 1 or 0"
+ value sys/host | nothing;
+ id python: '__SYSTEM%d:Addr' % cur_sysnum;
+ name python: '__SYSTEM%d:Addr' % cur_sysnum;
+ class python: 'hbInputSys' + ('errors' in sys and ' error' or '');
+ disabled python: ('auth' in sys and sys['host'].count('.') > 0) and 1 or 0"
/>
</td>
<td class="systemsTable">
- <input onChange="pwd0Change(adminform);"
+ <tal:block tal:condition="not: exists: sys/auth">
+ <input type="password"
+ autocomplete="off"
+ onChange="pwd0Change(this.form)"
+ tal:attributes="
+ value sys/passwd | nothing;
+ class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+ </tal:block>
+
+ <tal:block tal:condition="exists: sys/auth">
+ <input type="text" onChange="pwd0Change(this.form)"
+ disabled="disabled" value="[authenticated]"
+ tal:attributes="
+ class python: 'hbInputPass' + ('errors' in sys and ' error' or '');
+ id python: '__SYSTEM%d:Passwd' % cur_sysnum;
+ name python: '__SYSTEM%d:Passwd' % cur_sysnum" />
+ </tal:block>
+ </td>
+ <td class="systemsTable">
+ <img
tal:attributes="
- type python: nodeAuth and 'text' or 'password';
- value python: nodeAuth and '[authenticated]' or '';
- class python: 'hbInputPass' + ('errors' in node and ' error' or '');
- id python: '__SYSTEM' + str(sysNum) + ':Passwd';
- name python: '__SYSTEM' + str(sysNum) + ':Passwd';
- disabled python: nodeAuth and 1 or 0"
+ src python: 'trusted' in sys and '/luci/lock-ok.png' or ('fp' in sys and '/luci/lock-closed.png' or '/luci/lock-open.png');
+ title sys/fp | string:no key fingerprint available" />
+ <input type="hidden"
+ tal:attributes="
+ id python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ name python: '__SYSTEM%dFingerprint' % cur_sysnum;
+ value sys/fp | nothing" />
+ </td>
+ <td class="systemsTable">
+ <input type="checkbox" tal:attributes="
+ checked python: add_cluster['pass'] > 0;
+ id python: '__SYSTEM%dTrusted' % cur_sysnum;
+ name python: '__SYSTEM%dTrusted' % cur_sysnum;
+ disabled python: 'trusted' in sys"
/>
</td>
</tr>
- <span tal:omit-tag=""
- tal:define="global sysNum python: sysNum + 1"
- />
- </tal:block>
+ <tal:block tal:define="global cur_sysnum python: cur_sysnum + 1" />
+ </tal:block>
</tbody>
</table>
<input name="numStorage" id="numStorage" type="hidden"
- tal:attributes="value python: sysNum" />
+ tal:attributes="value cur_sysnum" />
<div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Submit" value="Add This Cluster" onClick="validateForm(document.adminform);" />
+ <input type="button" name="Submit" value="Add This Cluster"
+ onClick="validateForm(this.form)" />
</div>
</form>
- <div tal:condition="python: not sessionObj or not len(sessionObj)">
- <span class="error">
- A data integrity error has occurred. Please attempt to add this cluster to the Luci management interface again.
- </span>
- <tal:block tal:define="nop python:here.abortManageCluster(request)" />
+ <div tal:condition="add_cluster">
+ <tal:block tal:define="x python: request.SESSION.delete('add_cluster')" />
</div>
</div>
@@ -675,19 +920,26 @@
<p class="hbText">Enter one node from the cluster you wish to add to the Luci management interface.</p>
- <table id="systemsTable" class="systemsTable" border="0" cellspacing="0">
+ <table id="systemsTable" class="systemsTable" border="0" cellspacing="0"
+ tal:define="cur_sys request/SESSION/add_cluster_initial | nothing">
+
<thead class="systemsTable">
<tr class="systemsTable">
<th class="systemsTable">System Hostname</th>
<th class="systemsTable">Root Password</th>
+ <tal:block tal:condition="cur_sys">
+ <th>Key Id</th>
+ <th>Trust</th>
+ </tal:block>
</tr>
</thead>
<tfoot class="systemsTable">
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <div class="hbcheckdiv">
- <input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" /> Attempt to authenticate to all cluster nodes using the password provided above.
- </div>
+ <ul class="vanilla">
+ <li class="vanilla"><input name="check_certs" type="checkbox">View system certificates before sending any passwords.</li>
+ <li class="vanilla"><input type="checkbox" name="allSameCheckBox" id="allSameCheckBox" onClick="allPasswdsSame(this.form);"/>Authenticate to all cluster nodes using the password provided above.</li>
+ </ul>
</td></tr>
</tfoot>
@@ -695,21 +947,42 @@
<tr class="systemsTable">
<td class="systemsTable">
<input class="hbInputSys" type="text"
- id="__SYSTEM0:Addr" name="__SYSTEM0:Addr" />
+ id="__SYSTEM0:Addr" name="__SYSTEM0:Addr"
+ tal:attributes="
+ value cur_sys/host | nothing" />
</td>
<td class="systemsTable">
- <input type="password" autocomplete="off"
+ <input class="hbInputPass" type="password"
+ onChange="pwd0Change(this.form)"
+ autocomplete="off"
id="__SYSTEM0:Passwd" name="__SYSTEM0:Passwd"
- class="hbInputPass" />
+ tal:attributes="
+ value cur_sys/passwd | nothing" />
</td>
+ <tal:block tal:condition="cur_sys">
+ <td class="systemsTable">
+ <img tal:attributes="
+ title cur_sys/fp | string:no key fingerprint available;
+ src python: 'trusted' in cur_sys and '/luci/lock-ok.png' or ('fp' in cur_sys and '/luci/lock-closed.png' or '/luci/lock-open.png')"
+ />
+ </td>
+ <td class="systemsTable">
+ <input type="checkbox" name="host_is_trusted" checked="checked" />
+ </td>
+ </tal:block>
</tr>
</tbody>
+ <tal:block tal:condition="cur_sys">
+ <tal:block
+ tal:define="x python: request.SESSION.delete('add_cluster_initial')" />
+ </tal:block>
</table>
<input type="hidden" name="numStorage" value="1" />
<div class="hbSubmit" id="hbSubmit">
- <input type="button" name="Submit" value="Submit" onClick="validateForm(document.adminform);" />
+ <input type="button" name="Submit" value="Submit"
+ onClick="validateForm(this.form)" />
</div>
</form>
</div>
--- conga/luci/homebase/homebase_common.js 2006/11/16 19:34:53 1.13.2.1
+++ conga/luci/homebase/homebase_common.js 2006/12/22 17:50:16 1.13.2.2
@@ -139,22 +139,40 @@
var num_systems = form.numStorage.value;
var state = cb.checked;
- var passwd = document.getElementById('__SYSTEM0:Passwd');
- if (!passwd || passwd.type != 'password')
+
+ var first_passwd = null;
+ var first_system = 0;
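+ /* Find the first row whose password field is still an editable password
+    input; earlier rows may have been deleted or already authenticated. */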
+ for (var i = 0 ; i < num_systems ; i++) {
+ var passwd = document.getElementById('__SYSTEM' + i + ':Passwd');
+ if (!passwd || passwd.type != 'password')
+ continue
+ first_passwd = passwd.value;
+ first_system = i;
+ break;
+ }
+
+ if (first_passwd === null)
return (-1);
- passwd = passwd.value;
- if (!passwd || !state)
- passwd = '';
- for (var i = 1 ; i < num_systems ; i++) {
+ if (!first_passwd || !state)
+ first_passwd = '';
+
+ for (var i = first_system + 1 ; i < num_systems ; i++) {
var element = document.getElementById('__SYSTEM' + i + ':Passwd');
if (element && element.type == 'password') {
- element.value = passwd;
+ element.value = first_passwd;
element.disabled = state;
}
}
}
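+/* Remove the element with the given id from the document; used by the
+ * per-row "delete this row" icons to remove a system row from the table. */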
+function delete_element_id(id_str) {
+ var elem = document.getElementById(id_str);
+ if (!elem || !elem.parentNode)
+ return (-1);
+ elem.parentNode.removeChild(elem);
+}
+
function pwd0Change(form) {
var element = document.getElementById('allSameCheckBox');
if (element && element.checked)
@@ -182,23 +200,43 @@
newsysp.setAttribute('value', '');
newsysp.setAttribute('autocomplete', 'off');
+ var first_passwd = '';
+ for (var i = 0 ; i < num_systems - 1 ; i++) {
+ var pwd = document.getElementById('__SYSTEM' + i + ':Passwd');
+ if (!pwd || pwd.type != 'password')
+ continue;
+ first_passwd = pwd.value;
+ break;
+ }
+
var allSameCB = document.getElementById('allSameCheckBox');
if (allSameCB && allSameCB.checked) {
- newsysp.setAttribute('value', document.getElementById('__SYSTEM0:Passwd').value);
+ newsysp.setAttribute('value', first_passwd);
newsysp.setAttribute('disabled', true);
}
var newrow = document.createElement('tr');
+ newrow.setAttribute('id', '__SYSTEM_ROW_' + num_systems);
newrow.className = 'systemsTable';
+
var hcol = document.createElement('td');
hcol.className = 'systemsTable';
var pcol = document.createElement('td');
pcol.className = 'systemsTable';
+ var dcol = document.createElement('td');
+ dcol.className = 'systemsTable';
+ var del_img = document.createElement('img');
+ del_img.src = '/luci/delete-row.png';
+ del_img.title = 'delete this row'
+ del_img.className = 'deleteRow'
+ del_img.setAttribute('onClick', 'delete_element_id(\'' + newrow.id + '\')');
+ dcol.appendChild(del_img);
hcol.appendChild(newsys);
pcol.appendChild(newsysp);
newrow.appendChild(hcol);
newrow.appendChild(pcol);
+ newrow.appendChild(dcol);
sltab.appendChild(newrow);
form.numStorage.value = ++num_systems;
--- conga/luci/homebase/index_html 2006/11/16 19:34:53 1.18.2.2
+++ conga/luci/homebase/index_html 2006/12/22 17:50:16 1.18.2.3
@@ -134,23 +134,24 @@
<div class="retmsgs" id="retmsgsdiv" tal:condition="python:(ret and 'messages' in ret and len(ret['messages']))">
<div class="hbclosebox">
- <a href="javascript:hide_element('retmsgsdiv');"><img src="x.png"></a>
+ <a href="javascript:hide_element('retmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
</div>
- <ul class="retmsgs">
+ <p class="retmsgs">Status messages:</p>
+ <ul class="statusmsg">
<tal:block tal:repeat="e python:ret['messages']">
- <li class="retmsgs" tal:content="python:e" />
+ <li class="statusmsg" tal:content="python:e" />
</tal:block>
</ul>
</div>
<div id="errmsgsdiv" class="errmsgs" tal:condition="python:(ret and 'errors' in ret and len(ret['errors']))">
<div class="hbclosebox">
- <a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="x.png"></a>
+ <a class="hbclosebox" href="javascript:hide_element('errmsgsdiv');"><img src="x.png" class="closeBox" title="dismiss"></a>
</div>
<p class="errmsgs">The following errors occurred:</p>
- <ul class="errmsgs">
+ <ul class="statusmsg">
<tal:block tal:repeat="e python:ret['errors']">
- <li class="errmsgs" tal:content="python:e" />
+ <li class="statusmsg" tal:content="python:e" />
</tal:block>
</ul>
</div>
--- conga/luci/homebase/luci_homebase.css 2006/12/07 17:54:31 1.28.2.2
+++ conga/luci/homebase/luci_homebase.css 2006/12/22 17:50:16 1.28.2.3
@@ -48,6 +48,8 @@
}
input.qdisk {
+ font-family: "Bitstream Vera Sans Mono", "DejaVu Sans Mono", monospace ! important;
+ font-size: 12px ! important;
padding: .2em;
}
@@ -81,6 +83,10 @@
margin-left: 0 ! important;
}
+ul.statusmsg, li.statusmsg {
+ color: black ! important;
+}
+
ul.deploy {
margin-bottom: +.5em;
}
@@ -153,7 +159,7 @@
color: green !important;
}
-p.errmsgs {
+p.errmsgs, p.retmsgs {
font-weight: 800;
}
@@ -163,7 +169,7 @@
border-width: 2px;
border-color: red;
margin-top: 2em;
- max-width: 700px;
+ max-width: 600px ! important;
}
div.retmsgs {
@@ -172,7 +178,7 @@
border-style: dotted;
border-width: 2px;
border-color: green;
- max-width: 700px;
+ max-width: 600px ! important;
}
div.hbCSystems {
@@ -208,6 +214,7 @@
table.systemsTable {
padding-left: +.5em;
background: #dee7ec;
+ max-width: 700px;
}
td.systemsTable {
@@ -250,8 +257,6 @@
}
img.qdel_img {
- height: 7px;
- width: 7px;
background: #dee7ec;
border: none;
}
@@ -415,6 +420,10 @@
color: blue ! important;
}
+img.deleteRow, img.closeBox {
+ cursor: pointer;
+}
+
*.running,
*.node_active {
color: green ! important;
--- conga/luci/homebase/validate_sys_remove.js 2006/10/16 20:46:46 1.2
+++ conga/luci/homebase/validate_sys_remove.js 2006/12/22 17:50:16 1.2.2.1
@@ -6,7 +6,7 @@
if (!form)
return (-1);
- var num_clusters = document.getElementById('numClusters').value;
+ var num_clusters = document.getElementById('num_clusters').value;
for (var i = 0 ; i < num_clusters ; i++) {
var element = document.getElementById('__CLUSTER' + i);
if (!element || !element.value || !element.checked)
@@ -14,7 +14,7 @@
selected_clusters.push(element.value);
}
- var num_storage = document.getElementById('numStorage').value;
+ var num_storage = document.getElementById('num_storage').value;
for (var i = 0 ; i < num_storage ; i++) {
var element = document.getElementById('__SYSTEM' + i);
if (!element || !element.value || !element.checked)
--- conga/luci/site/luci/Extensions/FenceHandler.py 2006/11/16 19:34:53 1.4.2.1
+++ conga/luci/site/luci/Extensions/FenceHandler.py 2006/12/22 17:50:16 1.4.2.2
@@ -2,9 +2,14 @@
from ValidationError import ValidationError
import MessageLibrary
import ModelBuilder
+#from FenceDevice import FenceDevice
+from conga_constants import FD_VAL_SUCCESS, FD_VAL_FAIL
INSTALLDIR="/usr/share/system-config-cluster"
+FD_NEW_SUCCESS = "New %s successfully added to cluster"
+FD_UPDATE_SUCCESS = "Fence device %s successfully updated"
+FD_NEW_FAIL = "No agent type %s in shared device list"
FD_PROVIDE_NAME = "A unique name must be provided for each Fence Device"
FD_PROVIDE_PATH = "An xCAT path must be provided for each xCAT Fence Device"
@@ -52,6 +57,8 @@
"fence_egenera":"Egenera SAN Controller",
"fence_bullpap":"Bull PAP",
"fence_drac":"DRAC",
+ "fence_xvm":"Virtual Machine Fencing",
+ "fence_scsi":"SCSI Reservation",
"fence_ipmilan":"IPMI Lan",
"fence_manual":"Manual Fencing" }
@@ -1025,4 +1032,528 @@
gtkentry.set_text(name)
# select text
raise ValidationError('FATAL', ILLEGAL_CHARS_REPLACED)
-
+
+def validateNewFenceDevice(form, model):
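+ # Validate the submitted fence device form and, if the required fields for
+ # the chosen agent type are present, add a new FenceDevice to the model.
+ # Returns a (status, message) tuple.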
+ from FenceDevice import FenceDevice
+ try:
+ agent_type = form['fence_type']
+ except KeyError, e:
+ return (FD_VAL_FAIL, "No agent type in form submission")
+
+ ##Now that we have an agent type, we should check the fencedev name
+ ##before wasting any time checking other fields.
+ try:
+ fencedev_name = form['name']
+ fencedev_name = fencedev_name.strip()
+ except KeyError, e:
+ return (FD_VAL_FAIL, "No device name in form submission")
+
+ if fencedev_name == "":
+ return (1, "A unique name is required for every fence device")
+
+ fencedevs = model.getFenceDevices()
+ for fd in fencedevs:
+ if fd.getName().strip() == fencedev_name:
+ return (FD_VAL_FAIL, FD_PROVIDE_NAME)
+
+ if agent_type == "fence_apc":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_wti":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_brocade":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_vixel":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+
+ elif agent_type == "fence_mcdata":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+
+ elif agent_type == "fence_gnbd":
+ try:
+ server = form['server']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_SERVER)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("server",server)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_egenera":
+ try:
+ cserver = form['cserver']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_CSERVER)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("cserver",cserver)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+
+ elif agent_type == "fence_sanbox2":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_bladecenter":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_bullpap":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+
+ elif agent_type == "fence_xvm":
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ elif agent_type == "fence_scsi":
+
+ fencedev = FenceDevice()
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedevptr = model.getFenceDevicePtr()
+ fencedevptr.addChild(fencedev)
+ return (FD_VAL_SUCCESS, FD_NEW_SUCCESS % FENCE_OPTS[agent_type])
+
+ #Oh-oh...no agent match
+ else:
+ return (FD_VAL_FAIL, FD_NEW_FAIL % agent_type)
+
+def validateFenceDevice(form, model):
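+ # Validate edits to an existing fence device, update its attributes in the
+ # model, and propagate any rename to the per-node fence references.
+ # Returns a (status, message) tuple.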
+ from FenceDevice import FenceDevice
+ namechange = False
+ try:
+ agent_type = form['fence_type']
+ except KeyError, e:
+ return (FD_VAL_FAIL, "No agent type in form submission")
+
+ ##Now that we have an agent type, we should check the fencedev name
+ ##before wasting any time checking other fields.
+ try:
+ fencedev_name = form['name']
+ fencedev_name = fencedev_name.strip()
+ except KeyError, e:
+ return (FD_VAL_FAIL, "No device name in form submission")
+
+ if fencedev_name == "":
+ return (1, "A unique name is required for every fence device")
+
+ try:
+ orig_name = form['orig_name']
+ except KeyError, e:
+ return (FD_VAL_FAIL, "Cannot retrieve original fence device")
+
+ if orig_name != fencedev_name:
+ namechange = True
+
+ fencedevs = model.getFenceDevices()
+ for fd in fencedevs:
+ if fd.getName().strip() == fencedev_name:
+ return (FD_VAL_FAIL, FD_PROVIDE_NAME)
+
+ #Now we know name is unique...find device now
+ fencedev = None
+ for fd in fencedevs:
+ if fd.getName().strip() == orig_name:
+ fencedev = fd
+ break
+
+ if fencedev == None:
+ return (FD_VAL_FAIL, "Could not find fencedevice in current configuration")
+
+ if agent_type == "fence_apc":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_wti":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_brocade":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_vixel":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+
+ elif agent_type == "fence_mcdata":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+
+ elif agent_type == "fence_gnbd":
+ try:
+ server = form['server']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_SERVER)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("server",server)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_egenera":
+ try:
+ cserver = form['cserver']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_CSERVER)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("cserver",cserver)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+
+ elif agent_type == "fence_sanbox2":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_bladecenter":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_bullpap":
+ try:
+ ip = form['ip_addr']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_IP)
+ try:
+ log = form['login']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_LOGIN)
+ try:
+ pwd = form['password']
+ except KeyError, e:
+ return (FD_VAL_FAIL, FD_PROVIDE_PASSWD)
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ fencedev.addAttribute("ipaddr",ip)
+ fencedev.addAttribute("login",log)
+ fencedev.addAttribute("passwd",pwd)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+
+ elif agent_type == "fence_xvm":
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ elif agent_type == "fence_scsi":
+
+ fencedev.addAttribute("agent",agent_type)
+ fencedev.addAttribute("name",fencedev_name)
+ if namechange:
+ model.rectifyNewFencedevicenameWithFences(orig_name,fencedev_name)
+ return (FD_VAL_SUCCESS, FD_UPDATE_SUCCESS % orig_name)
+
+ #Oh-oh...no agent match
+ else:
+ return (FD_VAL_FAIL, FD_NEW_FAIL % agent_type)
--- conga/luci/site/luci/Extensions/ModelBuilder.py 2006/10/31 17:28:04 1.8.2.2
+++ conga/luci/site/luci/Extensions/ModelBuilder.py 2006/12/22 17:50:16 1.8.2.3
@@ -902,6 +902,21 @@
if fence.getName() == oldname:
fence.addAttribute("name",newname)
+ ###Method for removing fence instances if a fence device
+ ###has been deleted from the configuration
+ def removeFenceInstancesForFenceDevice(self, name):
+ nodes = self.getNodes()
+ for node in nodes:
+ levels = node.getFenceLevels()
+ for level in levels:
+ fences = level.getChildren()
+ kill_list = list()
+ for fence in fences:
+ if fence.getName() == name:
+ kill_list.append(fence)
+ for victim in kill_list:
+ level.removeChild(victim)
+
def removeReferences(self, tagobj):
self.__removeReferences(tagobj, self.cluster_ptr)
def __removeReferences(self, tagobj, level):
@@ -911,7 +926,7 @@
level.removeChild(t)
continue
self.__removeReferences(tagobj, t)
-
+
def updateReferences(self):
self.__updateReferences(self.cluster_ptr)
def __updateReferences(self, level):
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/11 23:58:00 1.120.2.14
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/22 17:50:16 1.120.2.15
@@ -5,7 +5,6 @@
from conga_constants import *
from ricci_bridge import *
from ricci_communicator import RicciCommunicator, RicciError, batch_status, extract_module_status
-from string import lower
import time
import Products.ManagedSystem
from Products.Archetypes.utils import make_uuid
@@ -24,9 +23,9 @@
from QuorumD import QuorumD
from Heuristic import Heuristic
from clusterOS import resolveOSType
-from FenceHandler import FenceHandler, FENCE_OPTS
+from FenceHandler import validateNewFenceDevice, FENCE_OPTS, validateFenceDevice
from GeneralError import GeneralError
-from homebase_adapters import nodeUnauth, nodeAuth, manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode
+from homebase_adapters import manageCluster, createClusterSystems, havePermCreateCluster, setNodeFlag, delNodeFlag, userAuthenticated, getStorageNode, getClusterNode, delCluster, parseHostForm
from LuciSyslog import LuciSyslog
#Policy for showing the cluster chooser menu:
@@ -42,391 +41,519 @@
except:
pass
-def validateClusterNodes(request, sessionData, clusterName, numStorage):
- nodeList = list()
- nodeHash = {}
- rnodeHash = {}
- oldNodeHash = {}
- oldRnodeHash = {}
- requestResults = {}
- errors = list()
+def buildClusterCreateFlags(self, batch_map, clusterName):
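+ # For each node in batch_map, create a "____flag" ManagedSystem object under
+ # the cluster folder recording the ricci batch id, so cluster-creation
+ # progress can be tracked later.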
+ path = str(CLUSTER_FOLDER_PATH + clusterName)
- if sessionData and 'requestResults' in sessionData:
- requestResults = sessionData['requestResults']
- if 'nodeHash' in requestResults:
- oldNodeHash = requestResults['nodeHash']
- if 'rnodeHash' in requestResults:
- oldRnodeHash = requestResults['rnodeHash']
+ try:
+ clusterfolder = self.restrictedTraverse(path)
+ except Exception, e:
luci_log.debug_verbose('buildCCF0: no cluster folder at %s' % path)
+ return None
- i = 0
- while i < numStorage:
+ for key in batch_map.keys():
try:
- sysData = request.form['__SYSTEM' + str(i)]
- if not sysData or sysData[0] == '':
- raise
-
- if len(sysData) < 2 or sysData[1] == '':
- errors.append('No password was specified for host \"' + sysData[0] + '\"')
- raise
- except:
- i += 1
- continue
-
- if len(sysData) > 1:
- node = nodeAuth(None, sysData[0], sysData[1])
-
- if oldRnodeHash and node['ricci_host'] in oldRnodeHash:
- oldNode = oldRnodeHash[node['ricci_host']]
- elif oldNodeHash and node['host'] in nodeHash:
- oldNode = oldNodeHash[node['host']]
- else:
- oldNode = None
-
- if 'errors' in node:
- errors.append(node['errors'])
- node['errors'] = True
-
- if node['host'] in nodeHash or node['ricci_host'] in rnodeHash:
- node['errors'] = True
- errors.append('You added the node \"' + node['host'] + '\" more than once')
- else:
- if oldNode and 'prev_auth' in oldNode:
- node['prev_auth'] = oldNode['prev_auth']
-
- nodeHash[node['host']] = node
- rnodeHash[node['ricci_host']] = node
- nodeList.append(node)
- i += 1
-
- sfn = lambda x, y: \
- x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x))
- nodeList.sort(sfn)
-
- dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
- cluster_properties = {
- 'clusterName': clusterName,
- 'nodeList': nodeList,
- 'nodeHash': nodeHash,
- 'rnodeHash': rnodeHash,
- 'isComplete': len(errors) < 1 and len(filter(dfn, nodeList)) == 0
- }
-
- return [errors, cluster_properties]
-
+ key = str(key)
+ batch_id = str(batch_map[key])
+ #This suffix needed to avoid name collision
+ objname = str(key + "____flag")
-def validateCreateCluster(self, request):
- errors = list()
- requestResults = {}
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #now designate this new object properly
+ objpath = str(path + "/" + objname)
+ flag = self.restrictedTraverse(objpath)
- if not havePermCreateCluster(self):
- return (False, {'errors': ['You do not have sufficient rights to create a cluster.']})
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
+ flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
+ flag.manage_addProperty(LAST_STATUS, 0, "int")
+ except Exception, e:
+ luci_log.debug_verbose('buildCCF1: error creating flag for %s: %s' \
+ % (key, str(e)))
+def parseClusterNodes(self, request, cluster_os):
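+ # Parse the add-cluster form: authenticate to each node through ricci, reject
+ # nodes that already belong to a cluster, and require that every node run the
+ # same cluster OS as the others.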
+ check_certs = False
try:
- sessionData = request.SESSION.get('checkRet')
+ check_certs = 'check_certs' in request.form
except:
- sessionData = None
-
- if not 'clusterName' in request.form or not request.form['clusterName']:
- return (False, {'errors': [ 'No cluster name was specified.' ]})
- clusterName = request.form['clusterName']
+ check_certs = False
+ download_pkgs = 1
try:
- numStorage = int(request.form['numStorage'])
+ download_pkgs = int(request.form['download_pkgs'].strip())
except:
- return (False, { 'errors': ['Unknown number of systems entered'], 'requestResults': requestResults })
-
- if numStorage < 1:
- return (False, { 'errors': ['A cluster must contain at least one node'], 'requestResults': requestResults })
+ download_pkgs = 1
- ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
- errors.extend(ret[0])
- cluster_properties = ret[1]
-
- rhn_dl = 1
+ clusterName = None
try:
- rhn_dls = request.form['rhn_dl'].strip().lower()
- if rhn_dls != '1' and rhn_dls != 'true':
- rhn_dl = 0
+ clusterName = str(request.form['clusterName'])
except:
- rhn_dl = 0
+ clusterName = None
- enable_storage = 0
+ if clusterName is None:
+ luci_log.debug_verbose('PCN0: no cluster name was given')
+ return (False, { 'errors': [ 'No cluster name was given.' ]})
+
+ shared_storage = False
try:
- enable_storage_str = request.form['enable_storage'].strip().lower()
- if enable_storage_str:
- enable_storage = 1
+ shared_storage = request.form.has_key('enable_storage')
except:
- enable_storage = 0
+ shared_storage = False
+ same_node_passwds = False
try:
- nodeList = cluster_properties['nodeList']
- if len(nodeList) < 1:
- raise
+ same_node_passwds = 'allSameCheckBox' in request.form
except:
- errors.append('A cluster must contain at least one node')
+ same_node_passwds = False
- cluster_os = None
- try:
- cluster_os = nodeList[0]['os']
- if not cluster_os:
- raise KeyError('OS for ' + nodeList[0]['host'] + ' is blank')
- except KeyError, e:
- cluster_properties['isComplete'] = False
- errors.append('Unable to identify the operating system running on the first cluster node: ' + str(e))
+ add_cluster = { 'name': clusterName,
+ 'shared_storage': shared_storage,
+ 'download_pkgs': download_pkgs,
+ 'cluster_os': cluster_os,
+ 'identical_passwds': same_node_passwds,
+ 'check_certs': check_certs }
+
+ system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
+ add_cluster['nodes'] = system_list
+
+ for i in system_list:
+ cur_system = system_list[i]
+
+ cur_host_trusted = 'trusted' in cur_system
+ cur_host = cur_system['host']
- if cluster_properties['isComplete'] != True:
- nodeUnauth(nodeList)
- return (False, {'errors': errors, 'requestResults':cluster_properties })
- else:
try:
- if len(filter(lambda x: x['os'] != cluster_os, nodeList[1:])) > 0:
- raise Exception('different operating systems were detected.')
+ cur_passwd = cur_system['passwd']
except:
- cluster_properties['isComplete'] = False
- errors.append('Cluster nodes must be running compatible operating systems.')
+ cur_passwd = None
- if cluster_properties['isComplete'] == True:
- batchNode = createClusterBatch(cluster_os,
- clusterName,
- clusterName,
- map(lambda x: x['host'], nodeList),
- True,
- True,
- enable_storage,
- False,
- rhn_dl)
-
- if not batchNode:
- nodeUnauth(nodeList)
- cluster_properties['isComplete'] = False
- errors.append('Unable to generate cluster creation ricci command')
- return (False, {'errors': errors, 'requestResults':cluster_properties })
-
- error = manageCluster(self, clusterName, nodeList)
- if error:
- nodeUnauth(nodeList)
- cluster_properties['isComplete'] = False
- errors.append(error)
- return (False, {'errors': errors, 'requestResults':cluster_properties })
-
- batch_id_map = {}
- rc = None
- for i in nodeList:
- success = True
+ if (cur_host_trusted or not check_certs) and cur_passwd:
try:
- rc = RicciCommunicator(i['host'])
- except RicciError, e:
- luci_log.debug('Unable to connect to the ricci agent on %s: %s'\
- % (i['host'], str(e)))
- success = False
+ rc = RicciCommunicator(cur_host, enforce_trust=True)
+ if not rc:
+ raise Exception, 'connection failed'
+ except Exception, e:
+ cur_system['errors'] = True
+ incomplete = True
+ errors.append('Unable to connect to %s: %s' \
+ % (cur_host, str(e)))
+ luci_log.debug_verbose('PCN1: %s: %s' % (cur_host, str(e)))
+ continue
+
+ prev_auth = rc.authed()
+ cur_system['prev_auth'] = prev_auth
+
+ try:
+ if prev_auth:
+ messages.append('Host %s is already authenticated.' \
+ % cur_host)
+ else:
+ rc.auth(cur_passwd)
+
+ if not rc.authed():
+ raise Exception, 'authentication failed'
- except:
- success = False
+ except Exception, e:
+ cur_system['errors'] = True
+ incomplete = True
+ errors.append('Error authenticating to %s: %s' \
+ % (cur_host, str(e)))
+ luci_log.debug_verbose('PCN2: %s: %s' % (cur_host, str(e)))
+ continue
+
+ cur_cluster_info = rc.cluster_info()
+ if cur_cluster_info[0] or cur_cluster_info[1]:
+ cur_system['errors'] = True
+ incomplete = True
+
+ if cur_cluster_info[0]:
+ cur_cluster_name = cur_cluster_info[0]
+ elif cur_cluster_info[1]:
+ cur_cluster_name = cur_cluster_info[1]
- if success == True:
try:
- resultNode = rc.process_batch(batchNode, async=True)
- batch_id_map[i['host']] = resultNode.getAttribute('batch_id')
- except:
- success = False
+ if not cur_system['prev_auth']:
+ rc.unauth()
+ del cur_system['trusted']
+ except Exception, e:
+ luci_log.debug_verbose('PCN3: %s: %s' % (cur_host, str(e)))
+
+ errors.append('%s reports it is a member of cluster \"%s\"' \
+ % (cur_host, cur_cluster_name))
+ luci_log.debug_verbose('PCN4: %s: already in %s cluster' \
+ % (cur_host, cur_cluster_name))
+ continue
- if not success:
- nodeUnauth(nodeList)
- cluster_properties['isComplete'] = False
- errors.append('An error occurred while attempting to add cluster node \"' + i['host'] + '\"')
- return (False, {'errors': errors, 'requestResults':cluster_properties })
- buildClusterCreateFlags(self, batch_id_map, clusterName)
+ cur_host_os = resolveOSType(rc.os())
+ if cluster_os is None:
+ cluster_os = cur_host_os
+ add_cluster['cluster_os'] = cur_host_os
+
+ elif cluster_os != cur_host_os:
+ cur_system['errors'] = True
+ incomplete = True
- response = request.RESPONSE
- response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
+ try:
+ if not cur_system['prev_auth']:
+ rc.unauth()
+ del cur_system['trusted']
+ except Exception, e:
+ luci_log.debug_verbose('PCN5: %s: %s' % (cur_host, str(e)))
+
+ errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
+ luci_log.debug_verbose('PCN6: version mismatch for %s: (%s vs. %s)' \
+ % (cur_host, cur_host_os, cluster_os))
+ continue
-def buildClusterCreateFlags(self, batch_map, clusterName):
- path = str(CLUSTER_FOLDER_PATH + clusterName)
+ return add_cluster, incomplete, errors, messages
+def validateCreateCluster(self, request):
try:
- clusterfolder = self.restrictedTraverse(path)
- except Exception, e:
- luci_log.debug_verbose('buildCCF0: no cluster folder at %s' % path)
- return None
+ request.SESSION.delete('create_cluster')
+ except:
+ pass
- for key in batch_map.keys():
- try:
- key = str(key)
- batch_id = str(batch_map[key])
- #This suffix needed to avoid name collision
- objname = str(key + "____flag")
+ cluster_os = None
+ try:
+ cluster_os = request.form['cluster_os'].strip()
+ if not cluster_os:
+ raise Exception, 'cluster OS is blank'
+ except:
+ cluster_os = None
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #now designate this new object properly
- objpath = str(path + "/" + objname)
- flag = self.restrictedTraverse(objpath)
+ add_cluster, incomplete, errors, messages = parseClusterNodes(self, request, cluster_os)
+ clusterName = add_cluster['name']
- flag.manage_addProperty(BATCH_ID, batch_id, "string")
- flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, "string")
- flag.manage_addProperty(FLAG_DESC, "Creating node " + key + " for cluster " + clusterName, "string")
- flag.manage_addProperty(LAST_STATUS, 0, "int")
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('create_cluster', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
+
+ node_list = add_cluster['nodes'].keys()
+ batchNode = createClusterBatch(add_cluster['cluster_os'],
+ clusterName,
+ clusterName,
+ node_list,
+ True,
+ True,
+ add_cluster['shared_storage'],
+ False,
+ add_cluster['download_pkgs'])
+
+ if not batchNode:
+ request.SESSION.set('create_cluster', add_cluster)
+ errors.append('Unable to generate cluster creation ricci command')
+ return (False, { 'errors': errors, 'messages': messages })
+
+ error = manageCluster(self, clusterName, add_cluster['nodes'], add_cluster['cluster_os'])
+ if error:
+ errors.append('Unable to create the cluster Luci database objects')
+ request.SESSION.set('create_cluster', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
+
+ batch_id_map = {}
+ for i in node_list:
+ try:
+ rc = RicciCommunicator(i)
+ if not rc:
+ raise Exception, 'rc is None'
except Exception, e:
- luci_log.debug_verbose('buildCCF1: error creating flag for %s: %s' \
- % (key, str(e)))
+ msg = 'Unable to connect to the ricci agent on %s: %s' % (i, str(e))
+ errors.append(msg)
+ luci_log.debug_verbose(msg)
+ if len(batch_id_map) == 0:
+ request.SESSION.set('create_cluster', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
+ continue
-def validateAddClusterNode(self, request):
- requestResults = {}
- errors = list()
+ try:
+ resultNode = rc.process_batch(batchNode, async=True)
+ batch_id_map[i] = resultNode.getAttribute('batch_id')
+ except:
+ errors.append('An error occurred while attempting to add cluster node \"%s\"' % i)
+ if len(batch_id_map) == 0:
+ request.SESSION.set('create_cluster', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
+ continue
+
+ buildClusterCreateFlags(self, batch_id_map, clusterName)
+ response = request.RESPONSE
+ response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
+def validateAddClusterNode(self, request):
try:
- sessionData = request.SESSION.get('checkRet')
+ request.SESSION.delete('add_node')
except:
- sessionData = None
+ pass
- if 'clusterName' in request.form:
- clusterName = str(request.form['clusterName'])
- else:
- luci_log.debug_verbose('vACN00: no cluster name was given')
- return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
+ check_certs = False
+ try:
+ check_certs = 'check_certs' in request.form
+ except:
+ check_certs = False
- rhn_dl = 1
+ download_pkgs = 1
try:
- rhn_dls = request.form['rhn_dl'].strip().lower()
- if rhn_dls != '1' and rhn_dls != 'true':
- rhn_dl = 0
+ download_pkgs = int(request.form['download_pkgs'].strip())
except:
- rhn_dl = 0
+ download_pkgs = 1
- enable_storage = 0
+ cluster_os = None
try:
- enable_storages = request.form['enable_storage'].strip().lower()
- if enable_storages:
- enable_storage = 1
+ cluster_os = request.form['cluster_os'].strip()
+ if not cluster_os:
+ raise Exception, 'cluster OS is blank'
except:
- enable_storage = 0
+ cluster_os = None
+ clusterName = None
try:
- numStorage = int(request.form['numStorage'])
- if numStorage < 1:
- raise Exception, 'no nodes were added'
- except Exception, e:
- luci_log.debug_verbose('vACN0: %s: %s' % (clusterName, str(e)))
- errors.append('You must specify at least one node to add to the cluster')
- return (False, {'errors': [ errors ], 'requestResults': requestResults })
+ clusterName = str(request.form['clusterName'])
+ except:
+ clusterName = None
+
+ if clusterName is None:
+ luci_log.debug_verbose('VACN0: no cluster name was given')
+ return (False, { 'errors': [ 'No cluster name was given.' ]})
+
+ if cluster_os is None:
+ cluster_folder = None
+ try:
+ cluster_folder = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clusterName))
+ if not cluster_folder:
+ raise Exception, 'cluster DB object is missing'
+ except Exception, e:
+ luci_log.debug_verbose('VACN1: %s: %s' % (clusterName, str(e)))
+ return (False, { 'errors': [ 'The database object for %s is missing.' % clusterName ] })
+
+ try:
+ cluster_os = cluster_folder.manage_getProperty('cluster_os')
+ if not cluster_os:
+ raise Exception, 'cluster os is blank'
+ except Exception, e:
+ luci_log.debug_verbose('VACN2: %s: %s' % (clusterName, str(e)))
+ cluster_os = None
+
+ if cluster_os is None:
+ try:
+ cluster_ricci = getRicciAgent(self, clusterName)
+ cluster_os = resolveOSType(cluster_ricci.os())
+ except Exception, e:
+ luci_log.debug_verbose('VACN3: %s: %s' % (clusterName, str(e)))
+ cluster_os = None
- ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
- errors.extend(ret[0])
- cluster_properties = ret[1]
+ if cluster_os is None:
+ luci_log.debug_verbose('Unable to determine cluster OS for %s' % clusterName)
+ return (False, { 'errors': [ 'Unable to determine the version of the cluster suite this cluster is running.' ] })
+ shared_storage = False
try:
- nodeList = cluster_properties['nodeList']
- if len(nodeList) < 1:
- raise Exception, 'no cluster nodes'
- except Exception, e:
- luci_log.debug_verbose('vACN1: %s: %s' % (clusterName, str(e)))
- errors.append('You must specify at least one valid node to add to the cluster')
+ shared_storage = request.form.has_key('enable_storage')
+ except:
+ shared_storage = False
- clusterObj = None
+ same_node_passwds = False
try:
- clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
- cluster_os = clusterObj.manage_getProperty('cluster_os')
- if not cluster_os:
- raise Exception, 'no cluster OS was found in DB for %s' % clusterName
- except Exception, e:
- luci_log.debug_verbose('vACN2: %s: %s' % (clusterName, str(e)))
+ same_node_passwds = 'allSameCheckBox' in request.form
+ except:
+ same_node_passwds = False
+
+ add_cluster = { 'name': clusterName,
+ 'shared_storage': shared_storage,
+ 'download_pkgs': download_pkgs,
+ 'cluster_os': cluster_os,
+ 'identical_passwds': same_node_passwds,
+ 'check_certs': check_certs }
+
+ system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
+ add_cluster['nodes'] = system_list
+
+ for i in system_list:
+ cur_system = system_list[i]
+
+ cur_host_trusted = 'trusted' in cur_system
+ cur_host = cur_system['host']
+
try:
- cluster_ricci = getRicciAgent(self, clusterName)
- if not cluster_ricci:
- raise Exception, 'cannot find a ricci agent for %s' % clusterName
- cluster_os = getClusterOS(self, cluster_ricci)['os']
- if clusterObj is None:
- try:
- clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
- except:
- pass
+ cur_passwd = cur_system['passwd']
+ except:
+ cur_passwd = None
+
+ if (cur_host_trusted or not check_certs) and cur_passwd:
+ try:
+ rc = RicciCommunicator(cur_host, enforce_trust=True)
+ if not rc:
+ raise Exception, 'connection failed'
+ except Exception, e:
+ cur_system['errors'] = True
+ incomplete = True
+ errors.append('Unable to connect to %s: %s' \
+ % (cur_host, str(e)))
+ luci_log.debug_verbose('VACN4: %s: %s' % (cur_host, str(e)))
+ continue
+
+ prev_auth = rc.authed()
+ cur_system['prev_auth'] = prev_auth
+ try:
+ if prev_auth:
+ messages.append('Host %s is already authenticated.' \
+ % cur_host)
+ else:
+ rc.auth(cur_passwd)
+
+ if not rc.authed():
+ raise Exception, 'authentication failed'
+ except Exception, e:
+ cur_system['errors'] = True
+ incomplete = True
+ errors.append('Error authenticating to %s: %s' \
+ % (cur_host, str(e)))
+ luci_log.debug_verbose('VACN5: %s: %s' % (cur_host, str(e)))
+ continue
+
+ cur_cluster_info = rc.cluster_info()
+ if cur_cluster_info[0] or cur_cluster_info[1]:
+ cur_system['errors'] = True
+ incomplete = True
+
+ if cur_cluster_info[0]:
+ cur_cluster_name = cur_cluster_info[0]
+ elif cur_cluster_info[1]:
+ cur_cluster_name = cur_cluster_info[1]
try:
- clusterObj.manage_addProperty('cluster_os', cluster_os, 'string')
- except:
- pass
- except Exception, e:
- luci_log.debug_verbose('vACN3: %s: %s' % (clusterName, str(e)))
- nodeUnauth(nodeList)
- cluster_os = None
- cluster_properties['isComplete'] = False
- errors.append('Unable to determine the cluster OS for the ' + clusterName + ' cluster.')
+ if not cur_system['prev_auth']:
+ rc.unauth()
+ del cur_system['trusted']
+ except Exception, e:
+ luci_log.debug_verbose('VACN6: %s: %s' % (cur_host, str(e)))
+
+ errors.append('%s reports it is already a member of cluster \"%s\"' % (cur_host, cur_cluster_name))
+ luci_log.debug_verbose('VACN7: %s: already in %s cluster' \
+ % (cur_host, cur_cluster_name))
+ continue
- try:
- if cluster_os is None:
- raise Exception, 'no cluster OS found for %s' % clusterName
- if len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0:
- raise Exception, 'different operating systems were detected.'
- except Exception, e:
- luci_log.debug_verbose('vACN4: %s: %s' % (clusterName, str(e)))
- nodeUnauth(nodeList)
- cluster_properties['isComplete'] = False
- errors.append('Cluster nodes must be running compatible operating systems.')
-
- if not cluster_properties['isComplete']:
- return (False, {'errors': errors, 'requestResults': cluster_properties})
-
- for clunode in nodeList:
- try:
- batchNode = addClusterNodeBatch(clunode['os'],
- clusterName,
- True,
- True,
- enable_storage,
- False,
- rhn_dl)
- if not batchNode:
- raise Exception, 'batchnode is None'
- clunode['batchnode'] = batchNode
- except Exception, e:
- luci_log.debug_verbose('vACN5: node add for %s failed: %s' \
- % (clunode['host'], str(e)))
- clunode['errors'] = True
- nodeUnauth(nodeList)
- cluster_properties['isComplete'] = False
- errors.append('Unable to initiate node creation for host \"' + clunode['host'] + '\"')
+ cur_host_os = resolveOSType(rc.os())
+ if cluster_os is not None and cluster_os != cur_host_os:
+ cur_system['errors'] = True
+ incomplete = True
- if not cluster_properties['isComplete']:
- return (False, {'errors': errors, 'requestResults': cluster_properties})
+ try:
+ if not cur_system['prev_auth']:
+ rc.unauth()
+ del cur_system['trusted']
+ except Exception, e:
+ luci_log.debug_verbose('VACN8: %s: %s' % (cur_host, str(e)))
+
+ errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
+ luci_log.debug_verbose('VACN9: version mismatch for %s: (%s vs. %s)' \
+ % (cur_host, cur_host_os, cluster_os))
+ continue
+
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
try:
cluster_ricci = getRicciAgent(self, clusterName)
if not cluster_ricci:
- raise Exception, 'Unable to get a ricci agent for %s' % clusterName
+ raise Exception, 'Unable to find a ricci agent for %s' % clusterName
except Exception, e:
- cluster_properties['isComplete'] = False
- nodeUnauth(nodeList)
- errors.append('Unable to contact a Ricci agent for %s.' % clusterName)
- luci_log.debug_verbose('vACN6: ricci %s: %s' % (clusterName, str(e)))
- return (False, {'errors': errors, 'requestResults': cluster_properties})
+ incomplete = True
+ errors.append('Unable to contact a ricci agent for %s.' % clusterName)
+ luci_log.debug_verbose('VACN10: %s: %s' % (clusterName, str(e)))
+
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
try:
model = getModelBuilder(None, cluster_ricci, cluster_ricci.dom0())
if not model:
+ errors.append('Unable to build the cluster model for %s' \
+ % clusterName)
raise Exception, 'unable to get model for %s' % clusterName
+
nodesptr = model.getClusterNodesPtr()
used_ids = {}
for i in model.getNodes():
+ used_ids[int(i.getAttribute('nodeid'))] = 1
+ node_name = str(i.getAttribute('name'))
+ if node_name in system_list:
+ system_list[node_name]['errors'] = True
+ errors.append('%s is already a member of %s' \
+ % (node_name, clusterName))
+ except Exception, e:
+ incomplete = True
+ errors.append('Unable to build the cluster model for %s' \
+ % clusterName)
+ luci_log.debug_verbose('VACN11: %s' % str(e))
+
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
+
+ next_node_id = 1
+
+ try:
+ for x in system_list:
+ i = system_list[x]
+
try:
- used_ids[int(i.getAttribute('nodeid'))] = 1
+ batch_node = addClusterNodeBatch(cluster_os,
+ clusterName,
+ True,
+ True,
+ shared_storage,
+ False,
+ download_pkgs)
+ if not batch_node:
+ raise Exception, 'batch is blank'
+ system_list[x]['batch'] = batch_node
except Exception, e:
- luci_log.debug_verbose('vACN7: %s' % str(e))
- pass
- next_node_id = 1
- for i in nodeList:
+ cur_system['errors'] = True
+ incomplete = True
+
+ try:
+ if not cur_system['prev_auth']:
+ rc.unauth()
+ del cur_system['trusted']
+ except Exception, e:
+ luci_log.debug_verbose('VACN12: %s: %s' % (cur_host, str(e)))
+
+ errors.append('Unable to initiate cluster join for %s' % cur_host)
+ luci_log.debug_verbose('VACN13: %s: %s' % (cur_host, str(e)))
+ continue
+
next_node_id += 1
new_node = ClusterNode()
- new_node.attr_hash['name'] = i['host']
+ new_node.attr_hash['name'] = str(i['host'])
new_node.attr_hash['votes'] = str(1)
while next_node_id in used_ids:
next_node_id += 1
new_node.attr_hash['nodeid'] = str(next_node_id)
nodesptr.addChild(new_node)
- model.isModified = True
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
+
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
conf_str = str(model.exportModelAsString())
if not conf_str:
- raise Exception, 'unable to export model as a string'
- batch_number, result = setClusterConf(cluster_ricci, conf_str)
+ raise Exception, 'Unable to save the new cluster model.'
+ batch_number, result = setClusterConf(cluster_ricci, conf_str)
+ if not batch_number or not result:
+ raise Exception, 'batch or result is None'
+ except Exception, e:
+ incomplete = True
+ errors.append('Unable to save the new cluster model.')
+ luci_log.debug_verbose('VACN14: %s' % str(e))
+
+ # Propagate the new cluster.conf to the existing nodes
+ # before having any of the new nodes join. If this fails,
+ # abort the whole process.
+ try:
while True:
batch_ret = checkBatch(cluster_ricci, batch_number)
code = batch_ret[0]
@@ -438,47 +565,61 @@
if code == False:
time.sleep(0.5)
except Exception, e:
- luci_log.debug_verbose('vACN8: %s' % str(e))
- errors.append('Unable to update the cluster node list for %s' % clusterName)
- return (False, {'errors': errors, 'requestResults': cluster_properties})
+ incomplete = True
+ errors.append('Unable to update the cluster node list for %s' \
+ % clusterName)
+ luci_log.debug_verbose('VACN15: %s' % str(e))
+
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
- error = createClusterSystems(self, clusterName, nodeList)
+ error = createClusterSystems(self, clusterName, system_list)
if error:
- luci_log.debug_verbose('vACN9: %s: %s' % (clusterName, str(e)))
- nodeUnauth(nodeList)
- cluster_properties['isComplete'] = False
+ incomplete = True
errors.append(error)
- return (False, {'errors': errors, 'requestResults': cluster_properties})
+ luci_log.debug_verbose('VACN16: %s: %s' % (clusterName, error))
+
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
batch_id_map = {}
- for clunode in nodeList:
+ for x in system_list:
+ clunode = system_list[x]
success = True
+
+ cur_host = clunode['host']
try:
- rc = RicciCommunicator(clunode['host'])
+ rc = RicciCommunicator(cur_host)
if not rc:
raise Exception, 'rc is None'
except Exception, e:
- nodeUnauth([clunode['host']])
success = False
- luci_log.info('vACN10: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
+ clunode['errors'] = True
+ errors.append('Unable to connect to the ricci agent on %s: %s' \
+ % (cur_host, str(e)))
+ luci_log.info('VACN17: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
if success:
try:
- resultNode = rc.process_batch(clunode['batchnode'], async=True)
- batch_id_map[clunode['host']] = resultNode.getAttribute('batch_id')
+ resultNode = rc.process_batch(clunode['batch'], async=True)
+ batch_id_map[cur_host] = resultNode.getAttribute('batch_id')
except Exception, e:
- nodeUnauth([clunode['host']])
+ clunode['errors'] = True
success = False
- luci_log.info('vACN11: %s' % (clunode['host'], str(e)))
+ luci_log.debug_verbose('VACN18: %s: %s' \
+ % (cur_host, str(e)))
if not success:
- cluster_properties['isComplete'] = False
- errors.append('An error occurred while attempting to add cluster node \"' + clunode['host'] + '\"')
+ incomplete = True
+ errors.append('An error occurred while attempting to add cluster node \"%s\"' % cur_host)
- buildClusterCreateFlags(self, batch_id_map, clusterName)
+ if incomplete or len(errors) > 0:
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
- if len(errors) > 0:
- return (False, {'errors': errors, 'requestResults': cluster_properties})
+ buildClusterCreateFlags(self, batch_id_map, clusterName)
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
@@ -642,6 +783,9 @@
return (False, {'errors': [ 'Unable to determine cluster name' ]})
try:
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
conf = model.exportModelAsString()
if not conf:
raise Exception, 'model string for %s is blank' % clustername
@@ -696,9 +840,9 @@
if res and res[2]:
errors.extend(res[2])
raise Exception, 'An error occurred while adding this resource'
- modelb = res[1]
+ model = res[1]
newres = res[0]
- addResource(self, request, modelb, newres, res_type)
+ addResource(self, request, model, newres, res_type)
except Exception, e:
if len(errors) < 1:
errors.append('An error occurred while adding this resource')
@@ -726,14 +870,16 @@
if mcast_manual == True:
try:
- addr_str = form['mcast_addr'].strip()
+ addr_str = form['mcast_address'].strip()
socket.inet_pton(socket.AF_INET, addr_str)
except KeyError, e:
+ addr_str = None
errors.append('No multicast address was given')
except socket.error, e:
try:
socket.inet_pton(socket.AF_INET6, addr_str)
except socket.error, e:
+ addr_str = None
errors.append('An invalid multicast address was given: %s')
else:
addr_str = None
@@ -988,120 +1134,430 @@
'qdisk': validateQDiskConfig
}
-def validateConfigCluster(self, request):
- errors = list()
- messages = list()
- rc = None
+def validateConfigCluster(self, request):
+ errors = list()
+ messages = list()
+ rc = None
+
+ try:
+ model = request.SESSION.get('model')
+ if not model:
+ raise Exception, 'model is none'
+ except Exception, e:
+ model = None
+ try:
+ cluname = request.form['clustername']
+ except:
+ try:
+ cluname = request['clustername']
+ except:
+ luci_log.debug_verbose('VCC0a: no model, no cluster name')
+ return (False, {'errors': ['No cluster model was found.']})
+
+ try:
+ model = getModelForCluster(self, cluname)
+ except:
+ model = None
+
+ if model is None:
+ luci_log.debug_verbose('VCC0: unable to get model from session')
+ return (False, {'errors': ['No cluster model was found.']})
+ try:
+ if not 'configtype' in request.form:
+ luci_log.debug_verbose('VCC2: no configtype')
+ raise Exception, 'no config type'
+ except Exception, e:
+ luci_log.debug_verbose('VCC2a: %s' % str(e))
+ return (False, {'errors': ['No configuration type was submitted.']})
+
+ if not request.form['configtype'] in configFormValidators:
+ luci_log.debug_verbose('VCC3: invalid config type: %s' % request.form['configtype'])
+ return (False, {'errors': ['An invalid configuration type was submitted.']})
+
+ try:
+ cp = model.getClusterPtr()
+ except:
+ luci_log.debug_verbose('VCC3a: getClusterPtr failed')
+ return (False, {'errors': ['No cluster model was found.']})
+
+ config_validator = configFormValidators[request.form['configtype']]
+ ret = config_validator(model, request.form)
+
+ retcode = ret[0]
+ if 'errors' in ret[1]:
+ errors.extend(ret[1]['errors'])
+
+ if 'messages' in ret[1]:
+ messages.extend(ret[1]['messages'])
+
+ if retcode == True:
+ try:
+ config_ver = int(cp.getConfigVersion()) + 1
+ # always increment the configuration version
+ cp.setConfigVersion(str(config_ver))
+ model.setModified(True)
+ conf_str = model.exportModelAsString()
+ if not conf_str:
+ raise Exception, 'conf_str is none'
+ except Exception, e:
+ luci_log.debug_verbose('VCC4: export model as string failed: %s' \
+ % str(e))
+ errors.append('Unable to store the new cluster configuration')
+
+ try:
+ clustername = model.getClusterName()
+ if not clustername:
+ raise Exception, 'cluster name from model.getClusterName() is blank'
+ except Exception, e:
+ luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
+ errors.append('Unable to determine cluster name from model')
+
+ if len(errors) > 0:
+ return (retcode, {'errors': errors, 'messages': messages})
+
+ if not rc:
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
+ errors.append('Unable to contact a ricci agent for cluster %s' \
+ % clustername)
+
+ if rc:
+ batch_id, result = setClusterConf(rc, str(conf_str))
+ if batch_id is None or result is None:
+ luci_log.debug_verbose('VCC7: setClusterConf: batchid or result is None')
+ errors.append('Unable to propagate the new cluster configuration for %s' \
+ % clustername)
+ else:
+ try:
+ set_node_flag(self, clustername, rc.hostname(), batch_id, CLUSTER_CONFIG, 'Updating cluster configuration')
+ except:
+ pass
+
+ if len(errors) < 1:
+ messages.append('The cluster properties have been updated.')
+ else:
+ return (retcode, {'errors': errors, 'messages': messages})
+
+ response = request.RESPONSE
+ response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername + '&busyfirst=true')
+
+def validateFenceAdd(self, request):
+ errors = list()
+ messages = list()
+ rc = None
+
+ try:
+ model = request.SESSION.get('model')
+ if not model:
+ raise Exception, 'model is none'
+ except Exception, e:
+ model = None
+ try:
+ cluname = request.form['clustername']
+ except:
+ try:
+ cluname = request['clustername']
+ except:
+ luci_log.debug_verbose('VFE: no model, no cluster name')
+ return (False, {'errors': ['No cluster model was found.']})
+
+ try:
+ model = getModelForCluster(self, cluname)
+ except:
+ model = None
+
+ if model is None:
+ luci_log.debug_verbose('VFE: unable to get model from session')
+ return (False, {'errors': ['No cluster model was found.']})
+
+ form = None
+ try:
+ response = request.response
+ form = request.form
+ if not form:
+ form = None
+ raise Exception, 'no form was submitted'
+ except:
+ pass
+
+ if form is None:
+ luci_log.debug_verbose('VFE: no form was submitted')
+ return (False, {'errors': ['No form was submitted']})
+
+ #fencehandler = FenceHandler()
+ error_code, error_string = validateNewFenceDevice(form, model)
+ if error_code == FD_VAL_SUCCESS:
+ messages.append(error_string)
+ try:
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ conf_str = model.exportModelAsString()
+ if not conf_str:
+ raise Exception, 'conf_str is none'
+ except Exception, e:
+ luci_log.debug_verbose('VFE: export model as string failed: %s' \
+ % str(e))
+ errors.append('Unable to store the new cluster configuration')
+
+ try:
+ clustername = model.getClusterName()
+ if not clustername:
+ raise Exception, 'cluster name from model.getClusterName() is blank'
+ except Exception, e:
+ luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
+ errors.append('Unable to determine cluster name from model')
+
+ if not rc:
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+ errors.append('Unable to contact a ricci agent for cluster %s' \
+ % clustername)
+
+ if rc:
+ batch_id, result = setClusterConf(rc, str(conf_str))
+ if batch_id is None or result is None:
+ luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
+ errors.append('Unable to propagate the new cluster configuration for %s' \
+ % clustername)
+ else:
+ try:
+ set_node_flag(self, clustername, rc.hostname(), batch_id,
+ CLUSTER_CONFIG, 'Updating cluster configuration')
+ except:
+ pass
+
+ response.redirect(request['URL'] + "?pagetype=" + FENCEDEV + "&clustername=" + clustername + "&fencename=" + form['name'] + '&busyfirst=true')
+ return (True, {'errors': errors, 'messages': messages})
+ else:
+ errors.append(error_string)
+ return (False, {'errors': errors, 'messages': messages})
+
+
+def validateFenceEdit(self, request):
+ errors = list()
+ messages = list()
+ rc = None
+
+ try:
+ model = request.SESSION.get('model')
+ if not model:
+ raise Exception, 'model is none'
+ except Exception, e:
+ model = None
+ try:
+ cluname = request.form['clustername']
+ except:
+ try:
+ cluname = request['clustername']
+ except:
+ luci_log.debug_verbose('VFE: no model, no cluster name')
+ return (False, {'errors': ['No cluster model was found.']})
+
+ try:
+ model = getModelForCluster(self, cluname)
+ except:
+ model = None
+
+ if model is None:
+ luci_log.debug_verbose('VFE: unable to get model from session')
+ return (False, {'errors': ['No cluster model was found.']})
- try:
- model = request.SESSION.get('model')
- if not model:
- raise Exception, 'model is none'
- except Exception, e:
- model = None
- try:
- cluname = request.form['clustername']
- except:
- try:
- cluname = request['clustername']
- except:
- luci_log.debug_verbose('VCC0a: no model, no cluster name')
- return (False, {'errors': ['No cluster model was found.']})
+ form = None
+ try:
+ response = request.response
+ form = request.form
+ if not form:
+ form = None
+ raise Exception, 'no form was submitted'
+ except:
+ pass
- try:
- model = getModelForCluster(self, cluname)
- except:
- model = None
+ if form is None:
+ luci_log.debug_verbose('VFE: no form was submitted')
+ return (False, {'errors': ['No form was submitted']})
+
+ #This is a fence edit situation, so the model should already have an
+ #entry for this fence device.
+ #
+ #pass form and model to validation method, then save changes if it passes.
+ error_code, error_string = validateFenceDevice(form, model)
+ if error_code == FD_VAL_SUCCESS:
+ messages.append(error_string)
+ try:
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ conf_str = model.exportModelAsString()
+ if not conf_str:
+ raise Exception, 'conf_str is none'
+ except Exception, e:
+ luci_log.debug_verbose('VFE: export model as string failed: %s' \
+ % str(e))
+ errors.append('Unable to store the new cluster configuration')
- if model is None:
- luci_log.debug_verbose('VCC0: unable to get model from session')
- return (False, {'errors': ['No cluster model was found.']})
- try:
- if not 'configtype' in request.form:
- luci_log.debug_verbose('VCC2: no configtype')
- raise Exception, 'no config type'
- except Exception, e:
- luci_log.debug_verbose('VCC2a: %s' % str(e))
- return (False, {'errors': ['No configuration type was submitted.']})
-
- if not request.form['configtype'] in configFormValidators:
- luci_log.debug_verbose('VCC3: invalid config type: %s' % request.form['configtype'])
- return (False, {'errors': ['An invalid configuration type was submitted.']})
+ try:
+ clustername = model.getClusterName()
+ if not clustername:
+ raise Exception, 'cluster name from model.getClusterName() is blank'
+ except Exception, e:
+ luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
+ errors.append('Unable to determine cluster name from model')
- try:
- cp = model.getClusterPtr()
- except:
- luci_log.debug_verbose('VCC3a: getClusterPtr failed')
- return (False, {'errors': ['No cluster model was found.']})
+ if not rc:
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+ errors.append('Unable to contact a ricci agent for cluster %s' \
+ % clustername)
+
+ if rc:
+ batch_id, result = setClusterConf(rc, str(conf_str))
+ if batch_id is None or result is None:
+ luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
+ errors.append('Unable to propagate the new cluster configuration for %s' \
+ % clustername)
+ else:
+ try:
+ set_node_flag(self, clustername, rc.hostname(), batch_id,
+ CLUSTER_CONFIG, 'Updating cluster configuration')
+ except:
+ pass
- config_validator = configFormValidators[request.form['configtype']]
- ret = config_validator(model, request.form)
+ response.redirect(request['URL'] + "?pagetype=" + FENCEDEV + "&clustername=" + clustername + "&fencename=" + request['fencename'] + '&busyfirst=true')
+ return (True, {'errors': errors, 'messages': messages})
+ else:
+ errors.append(error_string)
+ return (False, {'errors': errors, 'messages': messages})
- retcode = ret[0]
- if 'errors' in ret[1]:
- errors.extend(ret[1]['errors'])
- if 'messages' in ret[1]:
- messages.extend(ret[1]['messages'])
+def deleteFenceDevice(self, request):
+ errors = list()
+ messages = list()
+ rc = None
+
+ try:
+ model = request.SESSION.get('model')
+ if not model:
+ raise Exception, 'model is none'
+ except Exception, e:
+ model = None
+ try:
+ cluname = request.form['clustername']
+ except:
+ try:
+ cluname = request['clustername']
+ except:
+ luci_log.debug_verbose('VFE: no model, no cluster name')
+ return (False, {'errors': ['No cluster model was found.']})
+
+ try:
+ model = getModelForCluster(self, cluname)
+ except:
+ model = None
+
+ if model is None:
+ luci_log.debug_verbose('VFE: unable to get model from session')
+ return (False, {'errors': ['No cluster model was found.']})
- if retcode == True:
- try:
- config_ver = int(cp.getConfigVersion()) + 1
- # always increment the configuration version
- cp.setConfigVersion(str(config_ver))
- model.setModified(True)
- conf_str = model.exportModelAsString()
- if not conf_str:
- raise Exception, 'conf_str is none'
- except Exception, e:
- luci_log.debug_verbose('VCC4: export model as string failed: %s' \
- % str(e))
- errors.append('Unable to store the new cluster configuration')
+ form = None
+ try:
+ response = request.response
+ form = request.form
+ if not form:
+ form = None
+ raise Exception, 'no form was submitted'
+ except:
+ pass
- try:
- clustername = model.getClusterName()
- if not clustername:
- raise Exception, 'cluster name from modelb.getClusterName() is blank'
- except Exception, e:
- luci_log.debug_verbose('VCC5: error: getClusterName: %s' % str(e))
- errors.append('Unable to determine cluster name from model')
+ if form is None:
+ luci_log.debug_verbose('VFE: no form was submitted')
+ return (False, {'errors': ['No form was submitted']})
- if len(errors) > 0:
- return (retcode, {'errors': errors, 'messages': messages})
+ #get name of fencedev
+ try:
+ fencedev_name = form['orig_name']
+ fencedev_name = fencedev_name.strip()
+ except KeyError, e:
+ return (False, {'errors':['No device name in form submission']})
- if not rc:
- rc = getRicciAgent(self, clustername)
- if not rc:
- luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
- errors.append('Unable to contact a ricci agent for cluster %s' \
- % clustername)
-
- if rc:
- batch_id, result = setClusterConf(rc, str(conf_str))
- if batch_id is None or result is None:
- luci_log.debug_verbose('VCC7: setCluserConf: batchid or result is None')
- errors.append('Unable to propagate the new cluster configuration for %s' \
- % clustername)
- else:
- try:
- set_node_flag(self, clustername, rc.hostname(), batch_id,
- CLUSTER_CONFIG, 'Updating cluster configuration')
- except:
- pass
+ fdev_to_delete = None
+ #iterate thru list of current fencedevs and find one to be deleted
+ fdevs = model.getFenceDevices()
+ for fdev in fdevs:
+ if fdev.getName().strip() == fencedev_name:
+ fdev_to_delete = fdev
+ break
+ if fdev_to_delete == None:
+ luci_log.debug_verbose('VFD: Could not find fence device name in model')
+ return (False, {'errors':['Could not find fence device name in model']})
+
+ #get fencedev ptr
+ fdev_ptr = model.getFenceDevicePtr()
+ #remove child
+ try:
+ fdev_ptr.removeChild(fdev_to_delete)
+ error_code = FD_VAL_SUCCESS
+ error_string = "Fence device %s successfully removed from configuration" % fencedev_name
+ except:
+ error_code = FD_VAL_FAIL
+ error_string = "Fence device %s could not be removed from configuration" % fencedev_name
+
+ try:
+ model.removeFenceInstancesForFenceDevice(fencedev_name)
+ except:
+ luci_log.debug_verbose('VFD: Could not remove fence instances for %s' % fencedev_name)
+
- if len(errors) < 1:
- messages.append('The cluster properties have been updated.')
- else:
- return (retcode, {'errors': errors, 'messages': messages})
+ if error_code == FD_VAL_SUCCESS:
+ messages.append(error_string)
+ try:
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ conf_str = model.exportModelAsString()
+ if not conf_str:
+ raise Exception, 'conf_str is none'
+ except Exception, e:
+ luci_log.debug_verbose('VFE: export model as string failed: %s' \
+ % str(e))
+ errors.append('Unable to store the new cluster configuration')
- response = request.RESPONSE
- response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername + '&busyfirst=true')
+ try:
+ clustername = model.getClusterName()
+ if not clustername:
+ raise Exception, 'cluster name from model.getClusterName() is blank'
+ except Exception, e:
+ luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
+ errors.append('Unable to determine cluster name from model')
-def validateFenceAdd(self, request):
- return (True, {})
+ if not rc:
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+ errors.append('Unable to contact a ricci agent for cluster %s' \
+ % clustername)
+
+ if rc:
+ batch_id, result = setClusterConf(rc, str(conf_str))
+ if batch_id is None or result is None:
+ luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
+ errors.append('Unable to propagate the new cluster configuration for %s' \
+ % clustername)
+ else:
+ try:
+ set_node_flag(self, clustername, rc.hostname(), batch_id,
+ CLUSTER_CONFIG, 'Updating cluster configuration')
+ except:
+ pass
-def validateFenceEdit(self, request):
- return (True, {})
+ response.redirect(request['URL'] + "?pagetype=" + FENCEDEVS + "&clustername=" + clustername + '&busyfirst=true')
+ return (True, {'errors': errors, 'messages': messages})
+ else:
+ errors.append(error_string)
+ return (False, {'errors': errors, 'messages': messages})
def validateDaemonProperties(self, request):
errors = list()
@@ -1196,7 +1652,8 @@
33: validateResourceAdd,
51: validateFenceAdd,
50: validateFenceEdit,
- 55: validateDaemonProperties
+ 55: validateDaemonProperties,
+ 57: deleteFenceDevice
}
def validatePost(self, request):
@@ -1850,11 +2307,19 @@
luci_log.debug('GRA1: no cluster nodes for %s found.' % clustername)
raise Exception, 'no cluster nodes were found at %s' % path
except Exception, e:
- luci_log.debug('GRA2: cluster folder %s for %s is missing: %s.' \
- % (path, clustername, str(e)))
+ try:
+ luci_log.debug('GRA2: cluster folder %s for %s is missing: %s.' \
+ % (path, clustername, str(e)))
+
+ if len(clusterfolder.objectItems()) < 1:
+ clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+ clusters.manage_delObjects([clustername])
+ except Exception, e:
+ luci_log.debug_verbose('GRA3: %s' % str(e))
+
return None
- cluname = lower(clustername)
+ cluname = clustername.lower()
for node in nodes:
try:
@@ -1862,32 +2327,42 @@
except:
try:
hostname = node[0]
- except:
+ except Exception, e:
+ luci_log.debug_verbose('GRA2a: %s' % str(e))
continue
try:
rc = RicciCommunicator(hostname)
- except RicciError, e:
+ if not rc:
+ raise Exception, 'rc is None'
+ ricci_hostname = rc.hostname()
+ if not ricci_hostname:
+ raise Exception, 'ricci_hostname is blank'
+ except Exception, e:
luci_log.debug('GRA3: ricci error: %s' % str(e))
continue
try:
clu_info = rc.cluster_info()
except Exception, e:
- luci_log.debug('GRA4: cluster_info error: %s' % str(e))
+ luci_log.debug('GRA4: cluster_info error for %s: %s' \
+ % (ricci_hostname, str(e)))
+ continue
try:
cur_name = str(clu_info[0]).strip().lower()
if not cur_name:
- raise
- except:
+ raise Exception, 'cluster name is none for %s' % ricci_hostname
+ except Exception, e:
+ luci_log.debug_verbose('GRA4a: %s' % str(e))
cur_name = None
try:
cur_alias = str(clu_info[1]).strip().lower()
if not cur_alias:
- raise
- except:
+ raise Exception, 'cluster alias is none'
+ except Exception, e:
+ luci_log.debug_verbose('GRA4b: %s' % str(e))
cur_alias = None
if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
@@ -1899,14 +2374,20 @@
pass
continue
- if rc.authed():
- return rc
try:
- setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
- except:
- pass
+ if rc.authed():
+ return rc
+
+ try:
+ setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
+ raise Exception, '%s not authed' % rc.hostname()
+ except Exception, e:
+ luci_log.debug_verbose('GRA6: %s' % str(e))
+ continue
- luci_log.debug('GRA6: no ricci agent could be found for cluster %s' \
+ luci_log.debug('GRA7: no ricci agent could be found for cluster %s' \
% cluname)
return None
@@ -1995,10 +2476,11 @@
results.append(vals)
try:
- cluster_path = '%s/luci/systems/cluster/%s' % (CLUSTER_FOLDER_PATH, clustername)
+ cluster_path = CLUSTER_FOLDER_PATH + clustername
nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
except Exception, e:
- luci_log.debug_verbose('GCSDB0: %s: %s' % (clustername, str(e)))
+ luci_log.debug_verbose('GCSDB0: %s -> %s: %s' \
+ % (clustername, cluster_path, str(e)))
return results
for node in nodelist:
@@ -2014,7 +2496,7 @@
luci_log.debug_verbose('GCSDB1: %s' % str(e))
return results
-def getClusterStatus(self, request, rc):
+def getClusterStatus(self, request, rc, cluname=None):
try:
doc = getClusterStatusBatch(rc)
if not doc:
@@ -2023,7 +2505,7 @@
luci_log.debug_verbose('GCS0: error: %s' % str(e))
doc = None
- if doc is None:
+ if doc is None and not cluname:
try:
model = request.SESSION.get('model')
cinfo = getClusterStatusModel(model)
@@ -2036,14 +2518,15 @@
if not doc:
try:
- clustername = None
- try:
- clustername = request['clustername']
- except:
+ clustername = cluname
+ if clustername is None:
try:
- clustername = request.form['clustername']
+ clustername = request['clustername']
except:
- pass
+ try:
+ clustername = request.form['clustername']
+ except:
+ pass
if not clustername:
raise Exception, 'unable to determine cluster name'
@@ -2092,7 +2575,7 @@
results.append(vals)
return results
-def getServicesInfo(self, status, modelb, req):
+def getServicesInfo(self, status, model, req):
map = {}
maplist = list()
@@ -2126,7 +2609,7 @@
itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE_DELETE
- svc = modelb.retrieveServiceByName(item['name'])
+ svc = model.retrieveServiceByName(item['name'])
dom = svc.getAttribute("domain")
if dom is not None:
itemmap['faildom'] = dom
@@ -2137,7 +2620,7 @@
map['services'] = maplist
return map
-def getServiceInfo(self, status, modelb, req):
+def getServiceInfo(self, status, model, req):
#set up struct for service config page
hmap = {}
root_uuid = 'toplevel'
@@ -2187,7 +2670,7 @@
innermap['restarturl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_RESTART
innermap['delurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_DELETE
- nodes = modelb.getNodes()
+ nodes = model.getNodes()
for node in nodes:
starturl = {}
if node.getName() != nodename:
@@ -2201,7 +2684,7 @@
innermap = {}
innermap['current'] = "This service is currently stopped"
innermap['enableurl'] = baseurl + "?" + "clustername=" + cluname +"&servicename=" + servicename + "&pagetype=" + SERVICE_START
- nodes = modelb.getNodes()
+ nodes = model.getNodes()
starturls = list()
for node in nodes:
starturl = {}
@@ -2213,7 +2696,7 @@
#Now build hashes for resources under service.
#first get service by name from model
- svc = modelb.getService(servicename)
+ svc = model.getService(servicename)
resource_list = list()
if svc is not None:
indent_ctr = 0
@@ -2390,7 +2873,7 @@
response = req.RESPONSE
response.redirect(req['URL'] + "?pagetype=" + SERVICE_LIST + "&clustername=" + cluname + '&busyfirst=true')
-def getFdomsInfo(self, modelb, request, clustatus):
+def getFdomsInfo(self, model, request, clustatus):
slist = list()
nlist = list()
for item in clustatus:
@@ -2401,8 +2884,8 @@
fdomlist = list()
clustername = request['clustername']
baseurl = request['URL']
- fdoms = modelb.getFailoverDomains()
- svcs = modelb.getServices()
+ fdoms = model.getFailoverDomains()
+ svcs = model.getServices()
for fdom in fdoms:
fdom_map = {}
fdom_map['name'] = fdom.getName()
@@ -2649,6 +3132,7 @@
svclist = list()
clulist = list()
baseurl = req['URL']
+
for item in status:
if item['type'] == "node":
nodelist.append(item)
@@ -2698,6 +3182,7 @@
svc_dict_list.append(svc_dict)
map['currentservices'] = svc_dict_list
node_dict_list = list()
+
for item in nodelist:
nmap = {}
name = item['name']
@@ -2713,7 +3198,6 @@
node_dict_list.append(nmap)
map['currentnodes'] = node_dict_list
-
return map
def nodeLeave(self, rc, clustername, nodename_resolved):
@@ -2807,14 +3291,20 @@
try:
rc = RicciCommunicator(nodename_resolved)
except Exception, e:
- luci_log.debug_verbose('[%d] CStop0: RC %s: %s' \
+ luci_log.debug_verbose('CStop0: [%d] RC %s: %s' \
% (delete, nodename_resolved, str(e)))
errors += 1
continue
- if nodeLeave(self, rc, clustername, nodename_resolved) is None:
- luci_log.debug_verbose('[%d] CStop1: nodeLeave %s' \
- % (delete, nodename_resolved))
- errors += 1
+
+ if delete is True:
+ if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=True) is None:
+ luci_log.debug_verbose('CStop1: nodeDelete failed')
+ errors += 1
+ else:
+ if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+ luci_log.debug_verbose('CStop2: nodeLeave %s' \
+ % nodename_resolved)
+ errors += 1
return errors
def clusterRestart(self, model):
@@ -2827,7 +3317,31 @@
return snum_err + jnum_err
def clusterDelete(self, model):
- return clusterStop(self, model, delete=True)
+ num_errors = clusterStop(self, model, delete=True)
+ if num_errors < 1:
+ try:
+ clustername = model.getClusterName()
+ except Exception, e:
+ luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+ return None
+
+ try:
+ delCluster(self, clustername)
+ except Exception, e:
+ luci_log.debug_verbose('clusterDelete1: %s: %s' \
+ % (clustername, str(e)))
+
+ try:
+ clusterfolder = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clustername))
+ if len(clusterfolder.objectItems()) < 1:
+ clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+ clusters.manage_delObjects([clustername])
+ except Exception, e:
+ luci_log.debug_verbose('clusterDelete2: %s %s' \
+ % (clustername, str(e)))
+ else:
+ luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
+ % (clustername, num_errors))
def forceNodeReboot(self, rc, clustername, nodename_resolved):
batch_number, result = nodeReboot(rc)
@@ -2907,119 +3421,142 @@
luci_log.debug_verbose('FNF4: failed to set flags: %s' % str(e))
return True
-def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved):
- #We need to get a node name other than the node
- #to be deleted, then delete the node from the cluster.conf
- #and propogate it. We will need two ricci agents for this task.
-
- # Make sure we can find a second node before we hose anything.
- path = str(CLUSTER_FOLDER_PATH + clustername)
- try:
- clusterfolder = self.restrictedTraverse(path)
- if not clusterfolder:
- raise Exception, 'no cluster folder at %s' % path
- except Exception, e:
- luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
+def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
+ # We need to get a node name other than the node
+ # to be deleted, then delete the node from the cluster.conf
+ # and propagate it. We will need two ricci agents for this task,
+ # unless we are deleting the cluster itself.
+
+ if not delete_cluster:
+ # Make sure we can find a second node before we hose anything.
+ found_one = False
+
+ path = str(CLUSTER_FOLDER_PATH + clustername)
+
+ try:
+ clusterfolder = self.restrictedTraverse(path)
+ if not clusterfolder:
+ raise Exception, 'no cluster folder at %s' % path
+ except Exception, e:
+ luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
% (clustername, str(e)))
- return None
-
- try:
- nodes = clusterfolder.objectItems('Folder')
- if not nodes or len(nodes) < 1:
- raise Exception, 'no cluster nodes in DB'
- except Exception, e:
- luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
- % (clustername, str(e)))
+ return None
- found_one = False
- for node in nodes:
- if node[1].getId().find(nodename) != (-1):
- continue
- #here we make certain the node is up...
- # XXX- we should also make certain this host is still
- # in the cluster we believe it is.
try:
- rc2 = RicciCommunicator(node[1].getId())
+ nodes = clusterfolder.objectItems('Folder')
+ if not nodes or len(nodes) < 1:
+ raise Exception, 'no cluster nodes in DB'
except Exception, e:
- luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
- continue
+ luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
+ % (clustername, str(e)))
- if not rc2.authed():
- try:
- setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
- except:
- pass
+ for node in nodes:
+ if node[1].getId().find(nodename) != (-1):
+ continue
+ # here we make certain the node is up...
+ # XXX- we should also make certain this host is still
+ # in the cluster we believe it is.
try:
- snode = getStorageNode(self, node[0])
- setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
- except:
- pass
+ rc2 = RicciCommunicator(node[1].getId())
+ if not rc2:
+ raise Exception, 'ND1a: rc2 is None'
+ except Exception, e:
+ luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
+ continue
- luci_log.debug_verbose('ND3: %s is not authed' % node[0])
- rc2 = None
- continue
- else:
- found_one = True
- break
+ if not rc2.authed():
+ try:
+ setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
- if not found_one:
- luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
- return None
+ try:
+ snode = getStorageNode(self, node[0])
+ setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
+
+ luci_log.debug_verbose('ND3: %s is not authed' % node[0])
+ rc2 = None
+ continue
+ else:
+ found_one = True
+ break
+
+ if not found_one:
+ luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+ return None
- #First, delete cluster.conf from node to be deleted.
- #next, have node leave cluster.
+ # First, delete cluster.conf from node to be deleted.
+ # next, have node leave cluster.
batch_number, result = nodeLeaveCluster(rc, purge=True)
if batch_number is None or result is None:
luci_log.debug_verbose('ND5: batch_number and/or result is None')
return None
- #It is not worth flagging this node in DB, as we are going
- #to delete it anyway. Now, we need to delete node from model
- #and send out new cluster.conf
- delete_target = None
- nodelist = model.getNodes()
- find_node = lower(nodename)
- for n in nodelist:
+ # Unless we're deleting the whole cluster, it is not worth
+ # flagging this node in DB, as we are going to delete it
+ # anyway. Now, we need to delete node from model and send out
+ # new cluster.conf
+
+ if delete_cluster:
try:
- if lower(n.getName()) == find_node:
- delete_target = n
- break
- except:
- continue
+ set_node_flag(self, clustername, rc.hostname(), str(batch_number), CLUSTER_DELETE, "Deleting cluster \"%s\": Deleting node \'%s\'" \
+ % (clustername, nodename_resolved))
+ except Exception, e:
+ luci_log.debug_verbose('ND5a: failed to set flags: %s' % str(e))
+ else:
+ delete_target = None
+ nodelist = model.getNodes()
+ find_node = nodename.lower()
+ for n in nodelist:
+ try:
+ if n.getName().lower() == find_node:
+ delete_target = n
+ break
+ except:
+ continue
- if delete_target is None:
- luci_log.debug_verbose('ND6: unable to find delete target for %s in %s' \
- % (nodename, clustername))
- return None
+ if delete_target is None:
+ luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
+ % (nodename, clustername))
+ return None
- model.deleteNode(delete_target)
+ try:
+ model.deleteNode(delete_target)
+ except Exception, e:
+ luci_log.debug_verbose('ND6a: deleteNode %s failed: %s' \
+ % (delete_target.getName(), str(e)))
- try:
- str_buf = model.exportModelAsString()
- if not str_buf:
- raise Exception, 'model string is blank'
- except Exception, e:
- luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
- return None
+ try:
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ str_buf = model.exportModelAsString()
+ if not str_buf:
+ raise Exception, 'model string is blank'
+ except Exception, e:
+ luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
+ return None
- # propagate the new cluster.conf via the second node
- batch_number, result = setClusterConf(rc2, str(str_buf))
- if batch_number is None:
- luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
- return None
+ # propagate the new cluster.conf via the second node
+ batch_number, result = setClusterConf(rc2, str(str_buf))
+ if batch_number is None:
+ luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+ return None
- #Now we need to delete the node from the DB
+ # Now we need to delete the node from the DB
path = str(CLUSTER_FOLDER_PATH + clustername)
- del_path = str(path + '/' + nodename_resolved)
-
try:
- delnode = self.restrictedTraverse(del_path)
clusterfolder = self.restrictedTraverse(path)
- clusterfolder.manage_delObjects(delnode[0])
+ clusterfolder.manage_delObjects([nodename_resolved])
except Exception, e:
- luci_log.debug_verbose('ND9: error deleting %s: %s' \
- % (del_path, str(e)))
+ luci_log.debug_verbose('ND9: error deleting %s at %s: %s' \
+ % (nodename_resolved, path, str(e)))
+
+ if delete_cluster:
+ return True
try:
set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
@@ -3080,8 +3617,8 @@
% (nodename_resolved, clustername))
return (False, {'errors': [ 'Node %s reports it is not in a cluster.' % nodename_resolved ]})
- cname = lower(clustername)
- if cname != lower(cluinfo[0]) and cname != lower(cluinfo[1]):
+ cname = clustername.lower()
+ if cname != cluinfo[0].lower() and cname != cluinfo[1].lower():
luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
return (False, {'errors': [ 'Node %s reports it in cluster \"%s\". We expect it to be a member of cluster \"%s\"' % (nodename_resolved, cluinfo[0], clustername) ]})
@@ -3797,6 +4334,9 @@
xvm.addAttribute("path", req.form['xenvmpath'])
try:
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
stringbuf = model.exportModelAsString()
if not stringbuf:
raise Exception, 'model is blank'
@@ -3807,7 +4347,7 @@
try:
clustername = model.getClusterName()
if not clustername:
- raise Exception, 'cluster name from modelb.getClusterName() is blank'
+ raise Exception, 'cluster name from model.getClusterName() is blank'
except Exception, e:
luci_log.debug_verbose('error: getClusterName: %s' % str(e))
return None
@@ -4147,7 +4687,7 @@
map['isVirtualized'] = False
return map
-def getResourcesInfo(modelb, request):
+def getResourcesInfo(model, request):
resList = list()
baseurl = request['URL']
@@ -4160,7 +4700,7 @@
luci_log.debug_verbose('getResourcesInfo missing cluster name')
return resList
- for item in modelb.getResources():
+ for item in model.getResources():
itemmap = {}
itemmap['name'] = item.getName()
itemmap['attrs'] = item.attr_hash
@@ -4171,9 +4711,9 @@
resList.append(itemmap)
return resList
-def getResourceInfo(modelb, request):
- if not modelb:
- luci_log.debug_verbose('GRI0: no modelb object in session')
+def getResourceInfo(model, request):
+ if not model:
+ luci_log.debug_verbose('GRI0: no model object in session')
return {}
name = None
@@ -4212,7 +4752,7 @@
luci_log.debug_verbose('getResourceInfo missing URL')
return {}
- for res in modelb.getResources():
+ for res in model.getResources():
if res.getName() == name:
try:
resMap = {}
@@ -4229,7 +4769,7 @@
errstr = 'An error occurred while attempting to set the new cluster.conf'
try:
- modelb = request.SESSION.get('model')
+ model = request.SESSION.get('model')
except Exception, e:
luci_log.debug_verbose('delService0: no model: %s' % str(e))
return (False, {'errors': [ errstr ] })
@@ -4274,13 +4814,16 @@
return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster.' % errstr ]})
try:
- modelb.deleteService(name)
+ model.deleteService(name)
except Exception, e:
luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s' % (name, clustername))
return (False, {'errors': [ '%s: error removing service %s.' % (errstr, name) ]})
try:
- conf = modelb.exportModelAsString()
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ conf = model.exportModelAsString()
if not conf:
raise Exception, 'model string is blank'
except Exception, e:
@@ -4305,7 +4848,7 @@
errstr = 'An error occurred while attempting to set the new cluster.conf'
try:
- modelb = request.SESSION.get('model')
+ model = request.SESSION.get('model')
except Exception, e:
luci_log.debug_verbose('delResource0: no model: %s' % str(e))
return errstr
@@ -4344,7 +4887,7 @@
luci_log.debug_verbose('delResource3: %s: %s' % (errstr, str(e)))
return errstr + ': could not determine the ricci agent hostname'
- resPtr = modelb.getResourcesPtr()
+ resPtr = model.getResourcesPtr()
resources = resPtr.getChildren()
found = 0
@@ -4359,7 +4902,10 @@
return errstr + ': the specified resource was not found.'
try:
- conf = modelb.exportModelAsString()
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ conf = model.exportModelAsString()
if not conf:
raise Exception, 'model string is blank'
except Exception, e:
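
The hunks above for delService and delResource (and the earlier xenvmpath hunk) all repeat the same bookkeeping before a new cluster.conf is written out: fetch the cluster pointer, bump config_version, mark the model modified, and only then serialize it. A minimal sketch of that pattern as a helper, assuming only the ModelBuilder calls that appear in the diff (the helper name itself is hypothetical):

    def export_bumped_conf(model):
        # Bump config_version and mark the model dirty before export, so the
        # serialized cluster.conf carries the new version number.
        cp = model.getClusterPtr()
        cp.incrementConfigVersion()
        model.setModified(True)
        conf = model.exportModelAsString()
        if not conf:
            raise Exception('model string is blank')
        return conf

Each caller would then pass the returned string to setClusterConf() exactly as the existing code does.
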
@@ -4388,9 +4934,9 @@
luci_log.debug_verbose('addIp error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addIp error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addIp error: model is missing')
return None
if form.has_key('edit'):
@@ -4398,7 +4944,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addIp error: %s' % str(e))
return None
@@ -4434,7 +4980,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addFs(request, form=None):
if form is None:
@@ -4444,9 +4990,9 @@
luci_log.debug_verbose('addFs error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addFs error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addFs error: model is missing')
return None
if form.has_key('edit'):
@@ -4454,7 +5000,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addFs error: %s' % str(e))
return None
@@ -4538,7 +5084,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addGfs(request, form=None):
if form is None:
@@ -4548,9 +5094,9 @@
luci_log.debug_verbose('addGfs error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addGfs error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addGfs error: model is missing')
return None
if form.has_key('edit'):
@@ -4558,7 +5104,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
if not res:
luci_log.debug('resource %s was not found for editing' % oldname)
return None
@@ -4629,7 +5175,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addNfsm(request, form=None):
if form is None:
@@ -4639,9 +5185,9 @@
luci_log.debug_verbose('addNfsm error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addNfsm error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addNfsm error: model is missing')
return None
if form.has_key('edit'):
@@ -4649,7 +5195,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addNfsm error: %s' % str(e))
return None
@@ -4724,7 +5270,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addNfsc(request, form=None):
if form is None:
@@ -4734,9 +5280,9 @@
luci_log.debug_verbose('addNfsc error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addNfsc error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addNfsc error: model is missing')
return None
if form.has_key('edit'):
@@ -4744,7 +5290,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addNfsc error: %s' % str(e))
return None
@@ -4788,19 +5334,19 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addNfsx(request, form=None):
if form is None:
form = request.form
if not form:
- luci_log.debug_verbose('addNfsx error: modelb is missing')
+ luci_log.debug_verbose('addNfsx error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addNfsx error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addNfsx error: model is missing')
return None
if form.has_key('edit'):
@@ -4808,7 +5354,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addNfsx error: %s', str(e))
return None
@@ -4836,7 +5382,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addScr(request, form=None):
if form is None:
@@ -4846,9 +5392,9 @@
luci_log.debug_verbose('addScr error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addScr error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addScr error: model is missing')
return None
if form.has_key('edit'):
@@ -4856,7 +5402,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addScr error: %s' % str(e))
return None
@@ -4894,7 +5440,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
def addSmb(request, form=None):
if form is None:
@@ -4904,9 +5450,9 @@
luci_log.debug_verbose('addSmb error: form is missing')
return None
- modelb = request.SESSION.get('model')
- if not modelb:
- luci_log.debug_verbose('addSmb error: modelb is missing')
+ model = request.SESSION.get('model')
+ if not model:
+ luci_log.debug_verbose('addSmb error: model is missing')
return None
if form.has_key('edit'):
@@ -4914,7 +5460,7 @@
oldname = form['oldname'].strip()
if not oldname:
raise KeyError, 'oldname is blank.'
- res = getResourceForEdit(modelb, oldname)
+ res = getResourceForEdit(model, oldname)
except Exception, e:
luci_log.debug_verbose('addSmb error: %s' % str(e))
return None
@@ -4950,7 +5496,7 @@
if len(errors) > 1:
return [None, None, errors]
- return [res, modelb, None]
+ return [res, model, None]
resourceAddHandler = {
'ip': addIp,
@@ -4963,9 +5509,9 @@
'smb': addSmb
}
-def resolveClusterChanges(self, clusterName, modelb):
+def resolveClusterChanges(self, clusterName, model):
try:
- mb_nodes = modelb.getNodes()
+ mb_nodes = model.getNodes()
if not mb_nodes or not len(mb_nodes):
raise Exception, 'node list is empty'
except Exception, e:
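
The resourceAddHandler table above maps a resource type key to its add/edit handler; each handler pulls the model from the session and returns [res, model, None] on success or [None, None, errors] on failure. A short sketch of how such a table can be dispatched (the dispatch function and the type lookup are illustrative, not part of the patch):

    def dispatch_resource_add(request, res_type):
        # Find the handler registered for this resource type and run it.
        handler = resourceAddHandler.get(res_type)
        if handler is None:
            return [None, None, ['Unknown resource type: %s' % res_type]]
        ret = handler(request)
        if not ret:
            return [None, None, ['%s handler returned nothing' % res_type]]
        # ret is [resource, model, errors]; errors is None on success.
        return ret
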
@@ -5040,8 +5586,8 @@
return messages
-def addResource(self, request, modelb, res, res_type):
- clustername = modelb.getClusterName()
+def addResource(self, request, model, res, res_type):
+ clustername = model.getClusterName()
if not clustername:
luci_log.debug_verbose('addResource0: no cluname from mb')
return 'Unable to determine cluster name'
@@ -5052,13 +5598,16 @@
return 'Unable to find a ricci agent for the %s cluster' % clustername
try:
- modelb.getResourcesPtr().addChild(res)
+ model.getResourcesPtr().addChild(res)
except Exception, e:
luci_log.debug_verbose('addResource2: adding the new resource failed: %s' % str(e))
return 'Unable to add the new resource'
try:
- conf = modelb.exportModelAsString()
+ cp = model.getClusterPtr()
+ cp.incrementConfigVersion()
+ model.setModified(True)
+ conf = model.exportModelAsString()
if not conf:
raise Exception, 'model string for %s is blank' % clustername
except Exception, e:
@@ -5093,8 +5642,8 @@
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + RESOURCES + "&clustername=" + clustername + '&busyfirst=true')
-def getResource(modelb, name):
- resPtr = modelb.getResourcesPtr()
+def getResource(model, name):
+ resPtr = model.getResourcesPtr()
resources = resPtr.getChildren()
for res in resources:
@@ -5104,8 +5653,8 @@
luci_log.debug_verbose('getResource: unable to find resource \"%s\"' % name)
raise KeyError, name
-def getResourceForEdit(modelb, name):
- resPtr = modelb.getResourcesPtr()
+def getResourceForEdit(model, name):
+ resPtr = model.getResourcesPtr()
resources = resPtr.getChildren()
for res in resources:
@@ -5200,18 +5749,18 @@
return None
try:
- modelb = ModelBuilder(0, None, None, cluster_conf_node)
- if not modelb:
+ model = ModelBuilder(0, None, None, cluster_conf_node)
+ if not model:
raise Exception, 'ModelBuilder returned None'
except Exception, e:
try:
- luci_log.debug_verbose('GMB1: An error occurred while trying to get modelb for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
+ luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
except:
luci_log.debug_verbose('GMB1: ModelBuilder failed')
- if modelb:
- modelb.setIsVirtualized(isVirtualized)
- return modelb
+ if model:
+ model.setIsVirtualized(isVirtualized)
+ return model
def getModelForCluster(self, clustername):
rc = getRicciAgent(self, clustername)
@@ -5236,6 +5785,7 @@
batch_id = str(batchid)
objname = str(agent + '____flag')
+ objpath = ''
try:
clusterfolder = self.restrictedTraverse(path)
clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
@@ -5249,3 +5799,68 @@
% (batch_id, task, desc, objpath, str(e))
luci_log.debug_verbose(errmsg)
raise Exception, errmsg
+
+
+
+
+
+
+
+
+
+
+
+def process_cluster_conf_editor(self, req):
+ clustername = req['clustername']
+ msg = '\n'
+ cc = ''
+ if 'new_cluster_conf' in req:
+ cc = req['new_cluster_conf']
+ msg += 'Checking if valid XML - '
+ cc_xml = None
+ try:
+ cc_xml = minidom.parseString(cc)
+ except:
+ pass
+ if cc_xml == None:
+ msg += 'FAILED\n'
+ msg += 'Fix the error and try again:\n'
+ else:
+ msg += 'PASSED\n'
+
+ msg += 'Making sure no cluster name change has occurred - '
+ new_name = cc_xml.firstChild.getAttribute('name')
+ if new_name != clustername:
+ msg += 'FAILED\n'
+ msg += 'Fix the error and try again:\n'
+ else:
+ msg += 'PASSED\n'
+
+ msg += 'Increasing cluster version number - '
+ version = cc_xml.firstChild.getAttribute('config_version')
+ version = int(version) + 1
+ cc_xml.firstChild.setAttribute('config_version', str(version))
+ msg += 'DONE\n'
+
+ msg += 'Propagating new cluster.conf'
+ rc = getRicciAgent(self, clustername)
+ if not rc:
+ luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+ msg += '\nUnable to contact a ricci agent for cluster ' + clustername + '\n\n'
+ else:
+ batch_id, result = setClusterConf(rc, cc_xml.toxml())
+ if batch_id is None or result is None:
+ luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
+ msg += '\nUnable to propagate the new cluster configuration for ' + clustername + '\n\n'
+ else:
+ msg += ' - DONE\n'
+ cc = cc_xml.toxml()
+ msg += '\n\nALL DONE\n\n'
+ else:
+ if getClusterInfo(self, None, req) == {}:
+ msg = 'invalid cluster'
+ else:
+ model = req.SESSION.get('model')
+ cc = model.exportModelAsString()
+ return {'msg' : msg,
+ 'cluster_conf' : cc}
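
process_cluster_conf_editor validates a pasted cluster.conf in three steps: parse it as XML, check that the cluster name was not changed, and bump config_version before propagating the result through setClusterConf. The same checks pulled out as a standalone sketch using only xml.dom.minidom (the function name is hypothetical; documentElement is used where the handler above uses firstChild):

    from xml.dom import minidom

    def check_pasted_conf(conf_text, expected_name):
        # Returns (dom, error); error is None when the text is usable.
        try:
            dom = minidom.parseString(conf_text)
        except:
            return None, 'the text is not well-formed XML'
        root = dom.documentElement
        if root.getAttribute('name') != expected_name:
            return None, 'the cluster name was changed'
        try:
            version = int(root.getAttribute('config_version')) + 1
        except:
            return None, 'config_version is missing or not a number'
        root.setAttribute('config_version', str(version))
        return dom, None
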
--- conga/luci/site/luci/Extensions/conga_constants.py 2006/12/07 17:54:31 1.19.2.3
+++ conga/luci/site/luci/Extensions/conga_constants.py 2006/12/22 17:50:16 1.19.2.4
@@ -44,6 +44,9 @@
FENCEDEV="54"
CLUSTER_DAEMON="55"
SERVICE_DELETE = '56'
+FENCEDEV_DELETE = "57"
+
+CONF_EDITOR = '80'
#Cluster tasks
CLUSTER_STOP = '1000'
@@ -83,6 +86,7 @@
# Zope DB paths
CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
+STORAGE_FOLDER_PATH = '/luci/systems/storage/'
#Node states
NODE_ACTIVE="0"
@@ -92,6 +96,9 @@
NODE_INACTIVE_STR="Not a Cluster Member"
NODE_UNKNOWN_STR="Unknown State"
+FD_VAL_FAIL = 1
+FD_VAL_SUCCESS = 0
+
#cluster/node create batch task index
INSTALL_TASK = 1
DISABLE_SVC_TASK = 2
--- conga/luci/site/luci/Extensions/conga_ssl.py 2006/12/08 18:27:32 1.1.2.1
+++ conga/luci/site/luci/Extensions/conga_ssl.py 2006/12/22 17:50:16 1.1.2.2
@@ -20,6 +20,7 @@
timeout):
self.__id = -1
self.__id = conga_ssl_lib.connect(hostname, port, timeout)
+ self.__hostname = hostname
pass
def __del__(self):
self.disconnect()
@@ -37,7 +38,7 @@
def trust(self):
if self.trusted():
return True
- return conga_ssl_lib.trust(self.__id) == 1
+ return conga_ssl_lib.trust(self.__id, self.__hostname) == 1
def untrust(self):
return conga_ssl_lib.untrust(self.__id) == 1
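
The conga_ssl change stores the hostname at connect time so that trust() can record which host the peer certificate belongs to. A sketch of the calling pattern, using only the conga_ssl_lib functions that appear in this diff (connect, trust, untrust); the ricci default port 11111 and the helper name are assumptions:

    import conga_ssl_lib

    def connect_and_trust(hostname, port=11111, timeout=5):
        # connect() returns an integer connection id; trust() now takes the
        # hostname so the peer certificate can be filed under that name.
        conn_id = conga_ssl_lib.connect(hostname, port, timeout)
        if conga_ssl_lib.trust(conn_id, hostname) != 1:
            conga_ssl_lib.untrust(conn_id)
            raise Exception('unable to trust the certificate from %s' % hostname)
        return conn_id
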
--- conga/luci/site/luci/Extensions/homebase_adapters.py 2006/11/16 19:34:53 1.34.2.6
+++ conga/luci/site/luci/Extensions/homebase_adapters.py 2006/12/22 17:50:16 1.34.2.7
@@ -6,7 +6,9 @@
from conga_constants import PLONE_ROOT, CLUSTER_NODE_NEED_AUTH, \
HOMEBASE_ADD_CLUSTER, HOMEBASE_ADD_CLUSTER_INITIAL, \
HOMEBASE_ADD_SYSTEM, HOMEBASE_ADD_USER, \
- HOMEBASE_DEL_SYSTEM, HOMEBASE_DEL_USER, HOMEBASE_PERMS
+ HOMEBASE_DEL_SYSTEM, HOMEBASE_DEL_USER, HOMEBASE_PERMS, \
+ STORAGE_FOLDER_PATH, CLUSTER_FOLDER_PATH
+
from ricci_bridge import getClusterConf
from ricci_communicator import RicciCommunicator, CERTS_DIR_PATH
from clusterOS import resolveOSType
@@ -40,7 +42,7 @@
if dsResult:
errors.append(dsResult)
else:
- messages.append('Removed storage system \"' + i + '\" successfully')
+ messages.append('Removed storage system \"%s\" successfully' % i)
if '__CLUSTER' in request.form:
cluNames = request.form['__CLUSTER']
@@ -51,7 +53,7 @@
if dcResult:
errors.append(dcResult)
else:
- messages.append('Removed cluster \"' + i + '\" successfully')
+ messages.append('Removed cluster \"%s\" successfully' % i)
if len(errors) > 0:
retCode = False
@@ -129,369 +131,531 @@
messages.append('Added new user \"' + user + '\" successfully')
return (True, {'messages': messages, 'params': { 'user': user }})
-def nodeUnauth(nodeList):
- for i in nodeList:
- try:
- if i['prev_auth'] != True:
- host = i['host']
- rc = RicciCommunicator(host)
- rc.unauth()
- i['cur_auth'] = False
- except Exception, e:
- try:
- luci_log.debug_verbose('unauth for %s failed: %s' \
- % (i['host'], str(e)))
- except:
- pass
-
-def nodeAuth(cluster, host, passwd):
+def validateAddClusterInitial(self, request):
+ errors = list()
messages = list()
- systemName = host
- os_str = 'rhel5'
try:
- rc = RicciCommunicator(host)
- if not rc:
- luci_log.debug_verbose('nodeAuth0: rc is None')
- raise Exception, 'unknown error'
- except Exception, e:
- try:
- error = 'Ricci connection to %s failed: %s' % (host, str(e))
- except:
- error = 'Ricci connection to %s failed' % host
- luci_log.debug_verbose('nodeAuth1: rc failed: %s' % error)
-
- return { 'host': host, 'ricci_host': host, 'errors': error, 'cur_auth': False, 'os': os_str }
-
- if rc.authed():
- prevAuth = True
- messages.append('Luci is already authenticated to %s -- not checking password' % host)
- else:
- prevAuth = False
- if not passwd:
- return { 'host': host, 'ricci_host': systemName, 'prev_auth': False, 'cur_auth': False, 'os': os_str }
- else:
- try:
- rc.auth(passwd)
- except:
- pass
+ request.SESSION.delete('add_cluster_initial')
+ request.SESSION.delete('add_cluster')
+ except:
+ pass
- if rc.authed():
- try:
- os_str = resolveOSType(rc.os())
- if not os_str:
- raise
- except:
- os_str = "rhel5" #Backup plan in case all is almost lost...
+ cur_host = None
+ try:
+ sysData = request.form['__SYSTEM0']
+ if not sysData or len(sysData) < 1:
+ raise Exception, 'no node was given'
+ cur_host = sysData[0]
+ except Exception, e:
+ luci_log.debug_verbose('vACI0: %s' % str(e))
+ return (False, { 'errors': [ 'You must provide the address of at least one node in the cluster you wish to add.' ]})
+
+ cur_entry = { 'host': cur_host }
+ try:
+ if len(sysData) < 2 or not sysData[1]:
+ raise Exception, 'no password'
+ cur_pass = sysData[1]
+ cur_entry['passwd'] = cur_pass
+ except:
+ luci_log.debug_verbose('vACI1: no password given for %s' % cur_host)
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ return (False, { 'errors': [ 'No password was given for %s' % cur_host ] })
- systemName = rc.system_name()
- if systemName[:9] == 'localhost' or systemName[:5] == '127.0':
- systemName = host
- node = { 'host': host, 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': True, 'os': os_str }
+ check_certs = False
+ try:
+ check_certs = request.form.has_key('check_certs')
+ except:
+ check_certs = False
- cluster_info = rc.cluster_info()
- if cluster and ((not cluster_info) or (cluster_info[0] != cluster)):
- node['errors'] = 'Node \"' + host + '\" is reporting it is not a member of cluster \"' + cluster + '\"'
- if cluster_info and cluster_info[0]:
- node['errors'] += ' and that it is a member of cluster \"' + cluster_info[0] + '\"'
- if not cluster and cluster_info and cluster_info[0]:
- node['errors'] = 'Node \"' + host + '\" reports it is a member of cluster \"' + cluster_info[0] + '\"'
- return node
+ cur_host_trusted = False
+ try:
+ cur_host_trusted = request.form.has_key('host_is_trusted')
+ except:
+ cur_host_trusted = False
- error = 'Unable to authenticate to the ricci agent on \"' + host + '\"'
- return { 'host': host, 'ricci_host': systemName, 'prev_auth': False , 'cur_auth': False, 'errors': error, 'os': os_str }
+ cur_host_fp = None
+ try:
+ cur_host_fp = request.form['host_fingerprint'].strip()
+ if not cur_host_fp:
+ cur_host_fp = None
+ except:
+ cur_host_fp = None
-def validateAddClusterInitial(self, request, must_complete=True):
- errors = list()
- messages = list()
- newNodeList = list()
- nodeHash = {}
- rnodeHash = {}
+ try:
+ rc = RicciCommunicator(cur_host)
+ if not rc:
+ raise Exception, 'rc is None'
+ cur_fp = rc.fingerprint()
+ if cur_host_fp is not None:
+ cur_entry['fp'] = cur_host_fp
+ else:
+ cur_entry['fp'] = cur_fp[1]
+ except Exception, e:
+ luci_log.debug_verbose('vACI2: %s: %s' % (cur_host, str(e)))
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ return (False, { 'errors': [ 'Unable to establish a secure connection to the ricci agent on %s: %s' \
+ % (cur_host, str(e)) ] })
+
+ if not check_certs or cur_host_trusted:
+ try:
+ if cur_host_fp is not None and cur_host_fp != cur_fp[1]:
+ errmsg = 'The key fingerprint for %s has changed from under us. It was \"%s\" and is now \"%s\".' \
+ % (cur_host, cur_host_fp, cur_fp[1])
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ luci_log.info('SECURITY: %s' % errmsg)
+ return (False, { 'errors': [ errmsg ] })
+ rc.trust()
+ except Exception, e:
+ luci_log.debug_verbose('vACI3: %s %s' % (cur_host, str(e)))
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ return (False, { 'errors': [ 'Unable to establish trust for host %s: %s' % (cur_host, str(e)) ] })
+ elif check_certs:
+ if not rc.trusted():
+ msg = '%s has %s fingerprint %s' \
+ % (cur_host, cur_fp[0], cur_fp[1])
+ else:
+ cur_host_trusted = True
+ cur_entry['trusted'] = True
+ msg = 'Host %s %s fingerprint %s is already trusted.' \
+ % (cur_host, cur_fp[0], cur_fp[1])
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ messages.append(msg)
+ return (True, { 'messages': [ msg ] })
try:
- sysData = request.form['__SYSTEM0']
- if not sysData or len(sysData) < 2:
- raise
+ del rc
+ request.SESSION.delete('add_cluster_initial')
except:
- return (False, { 'errors': [ 'At least one system and its root password must be given' ] })
+ pass
try:
- rc = RicciCommunicator(sysData[0])
+ rc = RicciCommunicator(cur_host, enforce_trust=True)
if not rc:
- raise Exception, 'unknown error'
+ raise Exception, 'rc is None'
+ cur_entry['trusted'] = rc.trusted()
except Exception, e:
- return (False, { 'errors': [ 'Unable to establish a connection to the Ricci agent on %s: %s' % (sysData[0], str(e)) ] })
+ luci_log.debug_verbose('vACI4: %s %s' % (cur_host, str(e)))
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % cur_host ] })
- prevAuth = 0
- if not rc.authed():
+ prev_auth = rc.authed()
+ if not prev_auth:
try:
- rc.auth(sysData[1])
- except: pass
- if not rc.authed():
- return (False, { 'errors': [ 'Unable to authenticate to the Ricci agent on \"' + sysData[0] + '\"' ] })
- else:
- prevAuth = 1
+ rc.auth(cur_pass)
+ if not rc.authed():
+ raise Exception, 'authentication failed'
+ except Exception, e:
+ errmsg = 'Unable to authenticate to the ricci agent on %s: %s' % (cur_host, str(e))
+ luci_log.debug_verbose('vACI5: %s: %s' % (cur_host, str(e)))
+ request.SESSION.set('add_cluster_initial', cur_entry)
+ return (False, { 'errors': [ 'Unable to authenticate to the ricci agent on \"%s\"' % cur_host ] })
+
+ del cur_entry
try:
cluster_info = rc.cluster_info()
except:
cluster_info = None
- os_str = resolveOSType(rc.os())
- if not os_str:
- os_str = "rhel5" #Backup plan in case all is almost lost...
-
if not cluster_info or not cluster_info[0]:
- if not prevAuth:
- rc.unauth()
+ if not prev_auth:
+ try:
+ rc.unauth()
+ except:
+ pass
+
if not cluster_info:
- errmsg = 'An error occurred while attempting to retrieve the cluster.conf file for \"' + sysData[0] + '\"'
+ errmsg = 'An error occurred while attempting to retrieve the cluster.conf file from \"%s\"' % cur_host
else:
- errmsg = '\"' + sysData[0] + '\" is not a member of a cluster'
+ errmsg = '\"%s\" reports is not a member of any cluster.'
return (False, { 'errors': [ errmsg ] })
- clusterName = cluster_info[0]
- cluConf = getClusterConf(rc)
- if cluConf:
- nodeList = getClusterConfNodes(cluConf)
-
- if not cluConf or not nodeList or len(nodeList) < 1:
- if not prevAuth:
- rc.unauth()
- return (False, { 'errors': [ 'Error retrieving member nodes for cluster \"' + clusterName + '\"' ] })
-
- systemName = rc.system_name()
- if systemName[:9] == 'localhost':
- systemName = sysData[0]
-
- node = { 'host': rc.hostname(), 'ricci_host': systemName, 'prev_auth': prevAuth, 'cur_auth': rc.authed(), 'os': os_str }
- nodeHash[sysData[0]] = node
- rnodeHash[systemName] = node
- newNodeList.append(node)
+ cluster_name = cluster_info[0]
+ cluster_os = resolveOSType(rc.os())
+ try:
+ cluster_conf = getClusterConf(rc)
+ except:
+ cluster_conf = None
- if 'allSameCheckBox' in request.form:
- passwd = sysData[1]
- else:
- passwd = None
-
- for i in nodeList:
- node = nodeAuth(clusterName, i, passwd)
- if 'messages' in node:
- messages.extend(node['messages'])
- if node['host'] in nodeHash or node['ricci_host'] in rnodeHash:
- continue
- nodeHash[node['host']] = node
- if 'ricci_host' in node:
- rnodeHash[node['ricci_host']] = node
-
- if 'errors' in node:
- errors.append(node['errors'])
- node['errors'] = True
- newNodeList.append(node)
-
- sfn = lambda x, y: \
- x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x))
- newNodeList.sort(sfn)
+ if cluster_conf:
+ try:
+ node_list = getClusterConfNodes(cluster_conf)
+ except:
+ node_list = None
- if must_complete == True:
- dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
- else:
- dfn = lambda x: False
+ # Make sure a cluster with this name is not already managed before
+ # going any further.
+ try:
+ dummy = self.restrictedTraverse(CLUSTER_FOLDER_PATH + cluster_name)
+ if not dummy:
+ raise Exception, 'no existing cluster'
+ errors.append('A cluster named \"%s\" is already managed.' % cluster_name)
+ if not prev_auth:
+ try:
+ rc.unauth()
+ except:
+ pass
+ return (False, { 'errors': errors })
+ except:
+ pass
- cluster_properties = {
- 'clusterName': clusterName,
- 'nodeList': newNodeList,
- 'nodeHash': nodeHash,
- 'rnodeHash': rnodeHash,
- 'isComplete': len(filter(dfn, newNodeList)) == 0
- }
+ if not cluster_conf or not node_list or len(node_list) < 1:
+ if not prev_auth:
+ try:
+ rc.unauth()
+ except:
+ pass
+ return (False, { 'errors': [ 'Error retrieving the nodes list for cluster \"%s\" from node \"%s\"' % (cluster_name, cur_host) ] })
- if len(errors) < len(nodeList):
- cluster_properties['redirect'] = HOMEBASE_ADD_CLUSTER
+ same_node_passwds = False
+ try:
+ same_node_passwds = 'allSameCheckBox' in request.form
+ except:
+ same_node_passwds = False
- return (len(errors) < 1,
- {'messages': messages, 'errors': errors, 'requestResults': cluster_properties })
+ add_cluster = { 'name': cluster_name,
+ 'nodes': {},
+ 'cluster_os':cluster_os,
+ 'pass': 0,
+ 'identical_passwds': same_node_passwds,
+ 'check_certs': check_certs }
+
+ for i in node_list:
+ cur_node = { 'host': i }
+ if same_node_passwds:
+ cur_node['passwd'] = cur_pass
+ add_cluster['nodes'][i] = cur_node
+ request.SESSION.set('add_cluster', add_cluster)
+ request.response.redirect('/luci/homebase/index_html?pagetype=%s' % HOMEBASE_ADD_CLUSTER)
-def validateAddCluster(self, request, must_complete=True):
+def parseHostForm(request, check_certs):
errors = list()
messages = list()
- requestResults = None
- nodeList = None
+ system_list = {}
try:
- sessionData = request.SESSION.get('checkRet')
- requestResults = sessionData['requestResults']
+ num_storage = int(request.form['numStorage'].strip())
except Exception, e:
- luci_log.debug_verbose('VAC0: error getting session obj: %s' % str(e))
+ luci_log.debug_verbose('PHF1: numStorage field missing: %s' % str(e))
+ errors.append('The number of systems entered could not be determined.')
+ num_storage = 0
+
+ incomplete = False
+ i = 0
+ while i < num_storage:
try:
- clusterName = request.form['clusterName']
+ sysData = request.form['__SYSTEM%d' % i]
+ if len(sysData) < 1 or not sysData[0]:
+ raise Exception, 'no hostname'
+ cur_host = sysData[0]
+ if cur_host in system_list:
+ errors.append('You have added \"%s\" more than once.' % cur_host)
+ raise Exception, '%s added more than once' % cur_host
except:
- clusterName = ''
+ i += 1
+ continue
+
+ cur_system = { 'host': cur_host }
+
+ if len(sysData) < 2 or not sysData[1]:
+ errors.append('No password for %s (entry %d).' % (cur_host, i))
+ cur_passwd = None
+ else:
+ cur_passwd = sysData[1]
+ cur_system['passwd'] = cur_passwd
try:
- nodeList = requestResults['nodeList']
- luci_log.debug_verbose('VAC1: unauth to node list')
- nodeUnauth(nodeList)
+ cur_fp = request.form['__SYSTEM%dFingerprint' % i].strip()
+ if not cur_fp:
+ raise Exception, 'fingerprint is blank'
+ cur_system['fp'] = cur_fp
except:
- pass
+ cur_fp = None
+
+ try:
+ cur_set_trust = request.form.has_key('__SYSTEM%dTrusted' % i)
+ except:
+ cur_set_trust = False
+
+ if check_certs or (cur_fp is not None and cur_set_trust is True):
+ try:
+ rc = RicciCommunicator(cur_host, enforce_trust=False)
+ if not rc:
+ raise Exception, 'rc is None'
+ cur_system['prev_auth'] = rc.authed()
+ fp = rc.fingerprint()
+
+ if cur_set_trust is True:
+ cur_system['fp'] = cur_fp
+ if cur_fp != fp[1]:
+ errmsg = 'The key fingerprint for %s has changed from under us. It was \"%s\" and is now \"%s\".' % (cur_host, cur_fp, fp[1])
+ errors.append(errmsg)
+ luci_log.info('SECURITY: %s' % errmsg)
+ cur_system['error'] = True
+ incomplete = True
+ else:
+ rc.trust()
+ cur_system['trusted'] = True
+ else:
+ cur_system['fp'] = fp[1]
+
+ if not rc.trusted():
+ incomplete = True
+ msg = '%s has %s fingerprint %s' % (cur_host, fp[0], fp[1])
+ else:
+ cur_system['trusted'] = True
+ msg = '%s %s fingerprint %s is already trusted.' % (cur_host, fp[0], fp[1])
+
+ if check_certs:
+ messages.append(msg)
+ except Exception, e:
+ cur_system['error'] = True
+ try:
+ del cur_system['trusted']
+ except:
+ pass
+ errors.append('Unable to retrieve the SSL fingerprint for node %s: %s' % (cur_host, str(e)))
+ luci_log.debug_verbose('PHF2: %s: %s' \
+ % (cur_host, str(e)))
+ else:
+ # The user doesn't care. Trust the system.
+ try:
+ rc = RicciCommunicator(cur_host)
+ if not rc:
+ raise Exception, 'rc is None'
+ rc.trust()
+ cur_system['trusted'] = True
+ cur_system['prev_auth'] = rc.authed()
+ except Exception, e:
+ incomplete = True
+ cur_system['error'] = True
+ try:
+ if not 'prev_auth' in cur_system:
+ del cur_system['trusted']
+ rc.untrust()
+ except:
+ pass
+ errors.append('Unable to add the key for node %s to the trusted keys list.' % cur_host)
+ luci_log.debug_verbose('PHF3: %s: %s' % (cur_host, str(e)))
+ system_list[cur_host] = cur_system
+ i += 1
+
+ return system_list, incomplete, errors, messages
+
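
parseHostForm is now the common front end for the add-cluster, add-system and authenticate forms: it returns the parsed system map, an incomplete flag, and the accumulated errors and messages. A sketch of the consumer pattern the validators below follow (the validator name and the session key are illustrative only):

    def handle_host_form(self, request):
        # Same four-value contract that validateAddCluster and friends use.
        check_certs = request.form.has_key('check_certs')
        system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
        for host in system_list:
            cur = system_list[host]
            # Act only on entries that are trusted (or where certificate
            # checking was waived) and that carry a password.
            if ('trusted' in cur or not check_certs) and cur.get('passwd'):
                messages.append('would authenticate to %s here' % host)
            else:
                incomplete = True
                cur['error'] = True
        if incomplete:
            request.SESSION.set('add_systems', system_list)
        return (len(errors) < 1, {'errors': errors, 'messages': messages})
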
+def validateAddCluster(self, request):
+ errors = list()
- return (False, { 'errors': [ 'A data integrity error has occurred. Please attempt adding the cluster again.' ], 'requestResults': { 'clusterName': clusterName, 'isComplete': False, 'nodeList': [], 'redirect': HOMEBASE_ADD_CLUSTER_INITIAL } })
-
try:
- clusterName = request.form['clusterName']
- if not clusterName:
- raise Exception, 'no cluster name was found'
- except Exception, e:
- luci_log.debug_verbose('VAC2: no cluser name found: %s', str(e))
- return (False, { 'errors': ['No cluster name was given.'], 'requestResults': requestResults })
+ request.SESSION.delete('add_cluster')
+ request.SESSION.delete('add_cluster_initial')
+ except:
+ pass
try:
- nodeList = requestResults['nodeList']
- if not nodeList or len(nodeList) < 1:
- raise Exception, 'no node list found'
- except Exception, e:
- luci_log.debug_verbose('VAC3: no nodeList found: %s', str(e))
- return (False, { 'errors': ['No cluster nodes were given.'], 'requestResults': requestResults })
+ cluster_name = request.form['clusterName'].strip()
+ except:
+ luci_log.debug_verbose('VAC0: no cluster name')
+ errors.append('No cluster name was given.')
try:
- nodeHash = requestResults['nodeHash']
+ cluster_os = request.form['cluster_os'].strip()
except:
- nodeHash = {}
+ luci_log.debug_verbose('VAC1: no cluster os')
+ errors.append('Unable to determine the version of cluster %s.' % cluster_name)
+ check_certs = False
try:
- rnodeHash = requestResults['rnodeHash']
+ check_certs = 'check_certs' in request.form
except:
- rnodeHash = {}
+ check_certs = False
- # This should never fail
try:
- numStorage = int(request.form['numStorage'])
- if numStorage != len(nodeList):
- raise Exception, 'numstorage != len(nodelist)'
- except Exception, e:
+ pass_num = int(request.form['pass'].strip()) + 1
+ except:
+ pass_num = 1
+
+ same_node_passwds = False
+ try:
+ same_node_passwds = 'allSameCheckBox' in request.form
+ except:
+ same_node_passwds = False
+
+ add_cluster = { 'name': cluster_name,
+ 'pass': pass_num,
+ 'cluster_os': cluster_os,
+ 'identical_passwds': same_node_passwds,
+ 'check_certs': check_certs }
+
+ system_list, incomplete, new_errors, messages = parseHostForm(request, check_certs)
+ errors.extend(new_errors)
+ add_cluster['nodes'] = system_list
+
+ for i in system_list:
+ cur_system = system_list[i]
+
+ cur_host_trusted = 'trusted' in cur_system
+ cur_host = cur_system['host']
+ prev_auth = False
try:
- requestResults['isComplete'] = False
- luci_log.debug_verbose('VAC4: error: %s' % str(e))
+ cur_passwd = cur_system['passwd']
except:
- pass
+ cur_passwd = None
- nodeUnauth(nodeList)
- return (False, {
- 'errors': [ 'Unknown number of nodes entered' ],
- 'requestResults': requestResults })
+ if (cur_host_trusted or not check_certs) and cur_passwd:
+ try:
+ rc = RicciCommunicator(cur_host, enforce_trust=False)
+ prev_auth = rc.authed()
+ except Exception, e:
+ errors.append('Unable to connect to the ricci agent on %s: %s' \
+ % (cur_host, str(e)))
+ incomplete = True
+ cur_system['errors'] = True
+ luci_log.debug_verbose('VAC2: %s: %s' % (cur_host, str(e)))
+ continue
- i = 0
- while i < numStorage:
- sysData = request.form['__SYSTEM' + str(i)]
- if not sysData:
- i += 1
- continue
+ try:
+ rc.auth(cur_passwd)
+ if not rc.authed():
+ raise Exception, 'authentication failed'
+ except Exception, e:
+ errors.append('Unable to authenticate to the ricci agent on %s: %s' \
+ % (cur_host, str(e)))
+ incomplete = True
+ cur_system['errors'] = True
+ luci_log.debug_verbose('VAC3: %s: %s' % (cur_host, str(e)))
+ continue
- oldNode = None
- node = nodeAuth(clusterName, sysData[0], sysData[1])
- if node['host'] in nodeHash:
- oldNode = nodeHash[node['host']]
- elif 'ricci_host' in node and node['ricci_host'] in rnodeHash:
- oldNode = rnodeHash[node['ricci_host']]
- elif not oldNode:
- for k in nodeHash.keys():
- if node['host'][:len(k) + 1] == k + '.':
- oldNode = nodeHash[k]
- elif not oldNode:
- for k in rnodeHash.keys():
- if node['host'][:len(k) + 1] == k + '.':
- oldNode = rnodeHash[k]
-
- if not oldNode:
- luci_log.debug_verbose('VAC5: node %s not found', sysData[0])
- nodeUnauth(nodeList)
- return (False, { 'errors': [ 'A data integrity error has occurred. Please attempt adding the cluster again.' ], 'requestResults': { 'clusterName': clusterName, 'nodeList': nodeList, 'isComplete': False, 'redirect': HOMEBASE_ADD_CLUSTER_INITIAL } })
-
- if oldNode['host'] != node['host']:
- del nodeHash[oldNode['host']]
- oldNode['host'] = node['host']
- nodeHash[node['host']] = oldNode
-
- if 'ricci_host' in node and (not 'ricci_host' in oldNode or node['ricci_host'] != oldNode['ricci_host']):
- if oldNode['ricci_host'] in rnodeHash:
- del rnodeHash[oldNode['ricci_host']]
- oldNode['ricci_host'] = node['ricci_host']
- rnodeHash[node['ricci_host']] = oldNode
-
- oldNode['cur_auth'] = node['cur_auth']
- if 'errors' in node:
- errors.append(node['errors'])
- oldNode['errors'] = True
- i += 1
+ cluster_info = rc.cluster_info()
+ if cluster_info[0] != cluster_name and cluster_info[1] != cluster_name:
+ incomplete = True
+ cur_system['errors'] = True
+
+ if cluster_info[0]:
+ cur_cluster_name = cluster_info[0]
+ else:
+ cur_cluster_name = cluster_info[1]
+
+ if cur_cluster_name:
+ err_msg = 'Node %s reports it is in cluster \"%s\" and we expect \"%s\"' \
+ % (cur_host, cur_cluster_name, cluster_name)
+ else:
+ err_msg = 'Node %s reports it is not a member of any cluster' % cur_host
+
+ if not prev_auth:
+ try:
+ rc.unauth()
+ except Exception, e:
+ luci_log.debug_verbose('VAC4: %s: %s' % (cur_host, str(e)))
- if must_complete == True:
- dfn = lambda x: not 'cur_auth' in x or x['cur_auth'] != True
- else:
- dfn = lambda x: False
+ errors.append(err_msg)
+ luci_log.debug_verbose('VAC5: %s' % err_msg)
+ continue
- clusterComplete = len(filter(dfn, nodeList)) == 0
+ cur_os = resolveOSType(rc.os())
+ if cur_os != cluster_os:
+ incomplete = True
+ cur_system['errors'] = True
+
+ if not prev_auth:
+ try:
+ rc.unauth()
+ except Exception, e:
+ luci_log.debug_verbose('VAC6: %s: %s' % (cur_host, str(e)))
- if clusterComplete:
- err = manageCluster(self, clusterName, nodeList)
- if err:
- errors.append(err)
+ err_msg = 'Node %s reports its cluster version is %s and we expect %s' \
+ % (cur_host, cur_os, cluster_os)
+
+ errors.append(err_msg)
+ luci_log.debug_verbose('VAC7: %s' % err_msg)
+ continue
else:
- messages.append('Cluster \"' + clusterName + '\" has been added to the Luci management interface.')
- else:
- sfn = lambda x, y: \
- x['cur_auth'] - y['cur_auth'] or (('errors' in y) - ('errors' in x))
- nodeList.sort(sfn)
-
- ret = { 'messages': messages, 'errors': errors }
-
- if len(errors) > 0 or not clusterComplete:
- ret['requestResults'] = {
- 'clusterName': clusterName,
- 'nodeList': nodeList,
- 'nodeHash': nodeHash,
- 'rnodeHash': rnodeHash,
- 'isComplete': clusterComplete
- }
- else:
- ret['requestResults'] = {
- 'redirect': HOMEBASE_ADD_CLUSTER_INITIAL,
- 'clusterName': clusterName,
- 'isComplete': True
- }
+ incomplete = True
- return (len(errors) < 1, ret)
+ if len(errors) > 0:
+ incomplete = True
+
+ if not incomplete or request.form.has_key('asis'):
+ err_msg = manageCluster(self, cluster_name, system_list, cluster_os)
+ if err_msg:
+ incomplete = True
+ errors.append('An error occurred while creating the database objects for cluster %s: %s' \
+ % (cluster_name, err_msg))
+ luci_log.debug_verbose('VAC7: error adding cluster DB objects for %s: %s' \
+ % (cluster_name, err_msg))
+ else:
+ messages.append('Cluster %s is now managed by Luci' % cluster_name)
+ incomplete = False
+
+ if incomplete:
+ add_cluster['incomplete'] = True
+ request.SESSION.set('add_cluster', add_cluster)
+ return_code = False
+ else:
+ return_code = True
+
+ return (return_code, {'errors': errors, 'messages': messages })
def validateAddSystem(self, request):
- errors = list()
- messages = list()
+ try:
+ request.SESSION.delete('add_systems')
+ except:
+ pass
+ check_certs = False
try:
- numStorage = request.form['numStorage']
+ check_certs = 'check_certs' in request.form
except:
- return (False, { 'errors': ['Unknown number of systems entered'] })
+ check_certs = False
+
+ add_systems, incomplete, errors, messages = parseHostForm(request, check_certs)
+ delete_keys = list()
+ for i in add_systems:
+ cur_system = add_systems[i]
+
+ cur_host_trusted = 'trusted' in cur_system
+ cur_host = cur_system['host']
- i = 0
- while i < numStorage:
try:
- sysData = request.form['__SYSTEM' + str(i)]
+ cur_passwd = cur_system['passwd']
except:
- break
-
- if len(sysData) == 2 and sysData[0] != '' and sysData[1] != '':
- csResult = createSystem(self, sysData[0], sysData[1])
+ cur_passwd = None
+ if (cur_host_trusted or not check_certs) and cur_passwd:
+ csResult = createSystem(self, cur_host, cur_passwd)
if csResult:
+ incomplete = True
+ cur_system['error'] = True
errors.append(csResult)
else:
- messages.append('Added storage system \"' + sysData[0] + '\" successfully')
- i += 1
+ delete_keys.append(i)
+ messages.append('Added storage system \"%s\" successfully' \
+ % cur_host)
+
+ for i in delete_keys:
+ try:
+ del add_systems[i]
+ except:
+ pass
if len(errors) > 0:
- returnCode = False
+ return_code = False
else:
- returnCode = True
+ return_code = True
- return (returnCode, {'errors': errors, 'messages': messages})
+ if incomplete:
+ try:
+ request.SESSION.set('add_systems', add_systems)
+ except Exception, e:
+ luci_log.debug_verbose('validateSA2: %s' % str(e))
+ return_code = False
+ else:
+ try:
+ request.SESSION.delete('add_systems')
+ del add_systems
+ except:
+ pass
+
+ return (return_code, { 'errors': errors, 'messages': messages})
def validatePerms(self, request):
userId = None
@@ -599,62 +763,99 @@
return (returnCode, {'errors': errors, 'messages': messages, 'params': {'user': userId }})
def validateAuthenticate(self, request):
- errors = list()
- messages = list()
+ try:
+ request.SESSION.delete('auth_systems')
+ except:
+ pass
+ check_certs = False
try:
- numStorage = int(request.form['numStorage'])
+ check_certs = 'check_certs' in request.form
except:
- return (False, {'errors': [ 'Unknown number of nodes entered']})
+ check_certs = False
- i = 0
- while i < numStorage:
- sysData = request.form['__SYSTEM' + str(i)]
- if not sysData or len(sysData) < 2 or not sysData[0] or not sysData[1]:
- i += 1
- continue
+ system_list, incomplete, errors, messages = parseHostForm(request, check_certs)
+ delete_keys = list()
+ for i in system_list:
+ cur_system = system_list[i]
- host = str(sysData[0])
- passwd = str(sysData[1])
+ cur_host_trusted = 'trusted' in cur_system
+ cur_host = cur_system['host']
try:
- rc = RicciCommunicator(sysData[0])
- if rc is None:
- raise Exception, 'unknown error'
- except Exception, e:
- errors.append('Unable to contact the ricci agent for %s: %s' \
- % (sysData[0], str(e)))
- i += 1
- continue
+ cur_passwd = cur_system['passwd']
+ except:
+ cur_passwd = None
- if rc.authed():
- messages.append(host + ' is already authenticated.')
- else:
+ if (cur_host_trusted or not check_certs) and cur_passwd:
try:
- rc.auth(passwd)
- except:
- errors.append('Error authenticating to the ricci agent on ' + host)
- i += 1
+ rc = RicciCommunicator(cur_host, enforce_trust=True)
+ if not rc:
+ raise Exception, 'connection failed'
+ except Exception, e:
+ luci_log.debug_verbose('validateAuth0: %s: %s' % (cur_host, str(e)))
+ errors.append('Unable to communicate with the ricci agent on %s: %s' \
+ % (cur_host, str(e)))
+ incomplete = True
+ cur_system['error'] = True
continue
- if not rc.authed():
- errors.append('Error authenticating to the ricci agent on ' + host)
- else:
- messages.append(host + ' was successfully authenticated.')
-
- if rc.authed():
+ try:
+ if rc.authed():
+ messages.append('%s is already authenticated.' % cur_host)
+ else:
+ rc.auth(cur_passwd)
+ if not rc.authed():
+ raise Exception, 'authentication failed'
+ messages.append('Authenticated to %s successfully' \
+ % cur_host)
+ delete_keys.append(i)
try:
- delNodeFlag(self, getStorageNode(self, host), CLUSTER_NODE_NEED_AUTH)
+ delNodeFlag(self, getStorageNode(self, cur_host), CLUSTER_NODE_NEED_AUTH)
except:
pass
try:
- delNodeFlag(self, getClusterNode(self, host, rc.cluster_info()[0]), CLUSTER_NODE_NEED_AUTH)
+ delNodeFlag(self, getClusterNode(self, cur_host, rc.cluster_info()[0]), CLUSTER_NODE_NEED_AUTH)
except:
pass
- i += 1
-
- return (len(errors) > 0, {'errors': errors, 'messages': messages })
+ except Exception, e:
+ errors.append('Unable to authenticate to %s: %s' % (cur_host, str(e)))
+ luci_log.debug_verbose('validateAuth1: %s: %s' % (cur_host, str(e)))
+ incomplete = True
+ cur_system['error'] = True
+
+ for i in delete_keys:
+ try:
+ del system_list[i]
+ except:
+ pass
+
+ if len(errors) > 0:
+ return_code = False
+ else:
+ return_code = True
+
+ if incomplete:
+ try:
+ request.SESSION.set('auth_systems', system_list)
+ except Exception, e:
+ luci_log.debug_verbose('validateAuthenticate2: %s' % str(e))
+ return_code = False
+ else:
+ try:
+ request.SESSION.delete('auth_systems')
+ del system_list
+ except:
+ pass
+
+ auth_msgs = {}
+ if len(errors) > 0:
+ auth_msgs['errors'] = errors
+ if len(messages) > 0:
+ auth_msgs['messages'] = messages
+ request.SESSION.set('auth_status', auth_msgs)
+ request.response.redirect('/luci/homebase/index_html?pagetype=5')
formValidators = [
validateAddUser,
@@ -690,11 +891,6 @@
return False
def homebaseControlPost(self, request):
- try:
- sessionData = request.SESSION.get('checkRet')
- except:
- sessionData = None
-
if 'ACTUAL_URL' in request:
url = request['ACTUAL_URL']
else:
@@ -718,24 +914,14 @@
pass
return homebasePortal(self, request, '.', '0')
- if validatorFn == validateAddClusterInitial or validatorFn == validateAddCluster:
- ret = validatorFn(self, request, must_complete=False)
- else:
- ret = validatorFn(self, request)
+ ret = validatorFn(self, request)
params = None
- if 'params' in ret[1]:
- params = ret[1]['params']
-
- if 'requestResults' in ret[1]:
- requestResults = ret[1]['requestResults']
+ if ret and len(ret) > 1 and ret[1]:
+ if 'params' in ret[1]:
+ params = ret[1]['params']
+ request.SESSION.set('checkRet', ret[1])
- if 'redirect' in requestResults:
- pagetype = requestResults['redirect']
- request['pagetype'] = pagetype
- request.form['pagetype'] = pagetype
-
- request.SESSION.set('checkRet', ret[1])
return homebasePortal(self, request, url, pagetype, params)
def homebaseControl(self, request):
@@ -789,33 +975,12 @@
# Initial add cluster page
try:
- if pagetype == HOMEBASE_ADD_CLUSTER:
- raise
if havePermAddCluster(self):
addCluster = {}
addCluster['Title'] = 'Add an Existing Cluster'
addCluster['absolute_url'] = url + '?pagetype=' + HOMEBASE_ADD_CLUSTER_INITIAL
addCluster['Description'] = 'Add an existing cluster to the Luci cluster management interface.'
- if pagetype == HOMEBASE_ADD_CLUSTER_INITIAL:
- addCluster['currentItem'] = True
- ret['curIndex'] = index
- cur = addCluster
- else:
- addCluster['currentItem'] = False
- index += 1
- temp.append(addCluster)
- except: pass
-
- # Add cluster - screen 2
- try:
- if pagetype != HOMEBASE_ADD_CLUSTER:
- raise
- if havePermAddCluster(self):
- addCluster = {}
- addCluster['Title'] = 'Add an Existing Cluster'
- addCluster['absolute_url'] = url + '?pagetype=' + HOMEBASE_ADD_CLUSTER
- addCluster['Description'] = 'Add an existing cluster to the Luci cluster management interface.'
- if pagetype == HOMEBASE_ADD_CLUSTER:
+ if pagetype == HOMEBASE_ADD_CLUSTER_INITIAL or pagetype == HOMEBASE_ADD_CLUSTER:
addCluster['currentItem'] = True
ret['curIndex'] = index
cur = addCluster
@@ -923,25 +1088,25 @@
def getClusterSystems(self, clusterName):
if isAdmin(self):
try:
- return self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+ return self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName + '/objectItems')('Folder')
except Exception, e:
- luci_log.debug_verbose('GCS0: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('GCSy0: %s: %s' % (clusterName, str(e)))
return None
try:
i = getSecurityManager().getUser()
if not i:
- raise Exception, 'GCSMGU failed'
+ raise Exception, 'security manager says no user'
except Exception, e:
- luci_log.debug_verbose('GCS1: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('GCSy1: %s: %s' % (clusterName, str(e)))
return None
try:
- csystems = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/objectItems')('Folder')
+ csystems = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName + '/objectItems')('Folder')
if not csystems or len(csystems) < 1:
return None
except Exception, e:
- luci_log.debug_verbose('GCS2: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('GCSy2: %s: %s' % (clusterName, str(e)))
return None
allowedCSystems = list()
@@ -950,7 +1115,7 @@
if i.has_role('View', c[1]):
allowedCSystems.append(c)
except Exception, e:
- luci_log.debug_verbose('GCS3: %s: %s: %s' \
+ luci_log.debug_verbose('GCSy3: %s: %s: %s' \
% (clusterName, c[0], str(e)))
return allowedCSystems
@@ -1024,20 +1189,19 @@
def createSystem(self, host, passwd):
try:
- exists = self.restrictedTraverse(PLONE_ROOT +'/systems/storage/' + host)
+ dummy = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
luci_log.debug_verbose('CS0: %s already exists' % host)
return 'Storage system %s is already managed' % host
except:
pass
try:
- rc = RicciCommunicator(host)
+ rc = RicciCommunicator(host, enforce_trust=True)
if rc is None:
- raise Exception, 'unknown error'
+ raise Exception, 'rc is None'
except Exception, e:
luci_log.debug_verbose('CS1: %s: %s' % (host, str(e)))
- return 'Unable to establish a connection to the ricci agent on %s: %s' \
- % (host, str(e))
+ return 'Unable to establish a secure connection to the ricci agent on %s: %s' % (host, str(e))
try:
if not rc.authed():
@@ -1056,21 +1220,21 @@
return 'Authentication for storage system %s failed' % host
try:
- exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ dummy = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
luci_log.debug_verbose('CS4 %s already exists' % host)
return 'Storage system %s is already managed' % host
except:
pass
try:
- ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+ ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
except Exception, e:
luci_log.debug_verbose('CS5 %s: %s' % (host, str(e)))
return 'Unable to create storage system %s: %s' % (host, str(e))
try:
ssystem.manage_addFolder(host, '__luci__:system')
- newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ newSystem = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
except Exception, e:
luci_log.debug_verbose('CS6 %s: %s' % (host, str(e)))
return 'Unable to create DB entry for storage system %s' % host
@@ -1085,28 +1249,22 @@
return None
def abortManageCluster(self, request):
- try:
- sessionData = request.SESSION.get('checkRet')
- nodeUnauth(sessionData['requestResults']['nodeList'])
- except Exception, e:
- luci_log.debug_verbose('AMC0: %s' % str(e))
+ pass
-def manageCluster(self, clusterName, nodeList):
+def manageCluster(self, clusterName, node_list, cluster_os):
clusterName = str(clusterName)
try:
- clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/')
+ clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
if not clusters:
raise Exception, 'cannot find the cluster entry in the DB'
except Exception, e:
- nodeUnauth(nodeList)
luci_log.debug_verbose('MC0: %s: %s' % (clusterName, str(e)))
return 'Unable to create cluster %s: the cluster directory is missing.' % clusterName
try:
- newCluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+ newCluster = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
if newCluster:
- nodeUnauth(nodeList)
luci_log.debug_verbose('MC1: cluster %s: already exists' % clusterName)
return 'A cluster named %s is already managed by Luci' % clusterName
except:
@@ -1114,11 +1272,10 @@
try:
clusters.manage_addFolder(clusterName, '__luci__:cluster')
- newCluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+ newCluster = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
if not newCluster:
raise Exception, 'unable to create the cluster DB entry for %s' % clusterName
except Exception, e:
- nodeUnauth(nodeList)
luci_log.debug_verbose('MC2: %s: %s' % (clusterName, str(e)))
return 'Unable to create cluster %s: %s' % (clusterName, str(e))
@@ -1127,7 +1284,6 @@
newCluster.manage_role('View', ['Access Contents Information', 'View'])
except Exception, e:
luci_log.debug_verbose('MC3: %s: %s' % (clusterName, str(e)))
- nodeUnauth(nodeList)
try:
clusters.manage_delObjects([clusterName])
except Exception, e:
@@ -1135,34 +1291,22 @@
return 'Unable to set permissions on new cluster: %s: %s' % (clusterName, str(e))
try:
- cluster_os = nodeList[0]['os']
- if not cluster_os:
- raise KeyError, 'Cluster OS is blank'
- except KeyError, e:
- luci_log.debug_verbose('MC5: %s: %s' % (clusterName, str(e)))
- cluster_os = 'rhel5'
-
- try:
newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
except Exception, e:
luci_log.debug_verbose('MC5: %s: %s: %s' \
% (clusterName, cluster_os, str(e)))
- for i in nodeList:
- #if 'ricci_host' in i:
- # host = str(i['ricci_host'])
- #else:
- host = str(i['host'])
+ for i in node_list:
+ host = node_list[i]['host']
try:
newCluster.manage_addFolder(host, '__luci__:csystem:' + clusterName)
- newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+ newSystem = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH + clusterName + '/' + host))
if not newSystem:
raise Exception, 'unable to create cluster system DB entry for node %s' % host
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
- nodeUnauth(nodeList)
try:
clusters.manage_delObjects([clusterName])
except Exception, e:
@@ -1175,7 +1319,7 @@
% (host, clusterName, str(e))
try:
- ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+ ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
if not ssystem:
raise Exception, 'The storage DB entry is missing'
except Exception, e:
@@ -1184,83 +1328,78 @@
# Only add storage systems if the cluster and cluster node DB
# objects were added successfully.
- for i in nodeList:
- #if 'ricci_host' in i:
- # host = str(i['ricci_host'])
- #else:
- host = str(i['host'])
+ for i in node_list:
+ host = node_list[i]['host']
try:
# It's already there, as a storage system, no problem.
- exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ dummy = self.restrictedTraverse(str(STORAGE_FOLDER_PATH + host))
continue
except:
pass
try:
ssystem.manage_addFolder(host, '__luci__:system')
- newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ newSystem = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
luci_log.debug_verbose('MC9: %s: %s: %s' % (clusterName, host, str(e)))
-def createClusterSystems(self, clusterName, nodeList):
+def createClusterSystems(self, clusterName, node_list):
try:
- clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+ clusterObj = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
if not clusterObj:
raise Exception, 'cluster %s DB entry is missing' % clusterName
except Exception, e:
- nodeUnauth(nodeList)
luci_log.debug_verbose('CCS0: %s: %s' % (clusterName, str(e)))
- return 'No cluster named \"' + clusterName + '\" is managed by Luci'
+ return 'No cluster named \"%s\" is managed by Luci' % clusterName
- for i in nodeList:
- #if 'ricci_host' in i:
- # host = str(i['ricci_host'])
- #else:
+ for x in node_list:
+ i = node_list[x]
host = str(i['host'])
try:
clusterObj.manage_addFolder(host, '__luci__:csystem:' + clusterName)
- newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+ except Exception, e:
+ luci_log.debug_verbose('CCS0a: %s: %s: %s' % (clusterName, host, str(e)))
+ try:
+ newSystem = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName + '/' + host)
if not newSystem:
raise Exception, 'cluster node DB entry for %s disappeared from under us' % host
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
- nodeUnauth(nodeList)
luci_log.debug_verbose('CCS1: %s: %s: %s' % (clusterName, host, str(e)))
return 'Unable to create cluster node %s for cluster %s: %s' \
% (host, clusterName, str(e))
try:
- ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+ ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
if not ssystem:
raise Exception, 'storage DB entry is missing'
except Exception, e:
+ # This shouldn't fail, but if it does, it's harmless right now
luci_log.debug_verbose('CCS2: %s: %s: %s' % (clusterName, host, str(e)))
- return
+ return None
# Only add storage systems if the cluster and cluster node DB
# objects were added successfully.
- for i in nodeList:
- #if 'ricci_host' in i:
- # host = str(i['ricci_host'])
- #else:
+ for x in node_list:
+ i = node_list[x]
host = str(i['host'])
try:
# It's already there, as a storage system, no problem.
- exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ dummy = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
continue
except:
pass
try:
ssystem.manage_addFolder(host, '__luci__:system')
- newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ newSystem = self.restrictedTraverse(STORAGE_FOLDER_PATH + host)
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
@@ -1268,7 +1407,7 @@
def delSystem(self, systemName):
try:
- ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+ ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
if not ssystem:
raise Exception, 'storage DB entry is missing'
except Exception, e:
@@ -1298,7 +1437,7 @@
pass
else:
try:
- newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + systemName)
+ dummy = self.restrictedTraverse(CLUSTER_FOLDER_PATH + cluster_info[0] + '/' + systemName)
except:
try:
rc.unauth()
@@ -1314,7 +1453,7 @@
def delCluster(self, clusterName):
try:
- clusters = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/')
+ clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
if not clusters:
raise Exception, 'clusters DB entry is missing'
except Exception, e:
@@ -1333,7 +1472,7 @@
def delClusterSystem(self, cluster, systemName):
try:
- if not self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + systemName):
+ if not self.restrictedTraverse(STORAGE_FOLDER_PATH + systemName):
raise
except:
# It's not a storage system, so unauthenticate.
@@ -1353,20 +1492,28 @@
def delClusterSystems(self, clusterName):
try:
- cluster = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+ cluster = self.restrictedTraverse(CLUSTER_FOLDER_PATH + clusterName)
if not cluster:
raise Exception, 'cluster DB entry is missing'
- csystems = getClusterSystems(self, clusterName)
- except Exception, e:
- luci_log.debug_verbose('delCluSysterms: error for %s: %s' \
- % (clusterName, str(e)))
- return 'Unable to find any systems for cluster %s' % clusterName
+
+ try:
+ csystems = getClusterSystems(self, clusterName)
+ if not csystems or len(csystems) < 1:
+ return None
+ except Exception, e:
+ luci_log.debug_verbose('delCluSystems0: %s' % str(e))
+ return None
+ except Exception, er:
+ luci_log.debug_verbose('delCluSystems1: error for %s: %s' \
+ % (clusterName, str(er)))
+ return str(er)
errors = ''
for i in csystems:
err = delClusterSystem(self, cluster, i[0])
if err:
errors += 'Unable to delete the cluster system %s: %s\n' % (i[0], err)
+ luci_log.debug_verbose('delCluSystems2: %s' % err)
return errors
def getDefaultUser(self, request):
@@ -1502,7 +1649,7 @@
def getClusterNode(self, nodename, clustername):
try:
- cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + str(clustername) + '/' + str(nodename))
+ cluster_node = self.restrictedTraverse(CLUSTER_FOLDER_PATH + str(clustername) + '/' + str(nodename))
if not cluster_node:
raise Exception, 'cluster node is none'
return cluster_node
@@ -1513,7 +1660,7 @@
def getStorageNode(self, nodename):
try:
- storage_node = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + '/' + str(nodename))
+ storage_node = self.restrictedTraverse(STORAGE_FOLDER_PATH + str(nodename))
if not storage_node:
raise Exception, 'storage node is none'
return storage_node
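
The homebase_adapters.py hunks above consistently replace hard-coded PLONE_ROOT + '/systems/...' strings with the STORAGE_FOLDER_PATH and CLUSTER_FOLDER_PATH constants, and they rely on restrictedTraverse() raising for missing objects. A minimal sketch of that lookup pattern, assuming constant values along these lines (the actual definitions live in conga_constants.py and are not part of this hunk):

    # Sketch only: the constant values below are assumptions, not copied from the diff.
    PLONE_ROOT = 'luci'
    STORAGE_FOLDER_PATH = PLONE_ROOT + '/systems/storage/'
    CLUSTER_FOLDER_PATH = PLONE_ROOT + '/systems/cluster/'

    def get_storage_node(context, nodename):
        # 'context' stands in for the Zope object the adapters call self on.
        # restrictedTraverse() raises when the object is missing, so the
        # lookup doubles as an existence check, as in getStorageNode() above.
        try:
            node = context.restrictedTraverse(STORAGE_FOLDER_PATH + str(nodename))
            if not node:
                raise Exception, 'storage node is none'
            return node
        except Exception:
            return None
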
--- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/12/14 00:01:00 1.30.2.14
+++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/12/22 17:50:16 1.30.2.15
@@ -597,6 +597,8 @@
# temporary workaround for ricci bug
system_info = rc.hostname()
try:
+# FIXME
+# rc = RicciCommunicator(system_info, enforce_trust=True)
rc = RicciCommunicator(system_info)
if rc is None:
raise Exception, 'unknown error'
--- conga/luci/site/luci/Extensions/ricci_communicator.py 2006/12/08 18:27:32 1.9.2.7
+++ conga/luci/site/luci/Extensions/ricci_communicator.py 2006/12/22 17:50:16 1.9.2.8
@@ -83,7 +83,19 @@
luci_log.debug_verbose('RC:dom0: [auth %d] reported system_name = %s for %s' \
% (self.__authed, self.__dom0, self.__hostname))
return self.__dom0
-
+
+ def fingerprint(self):
+ return self.ss.peer_fingerprint()
+
+ def trust(self):
+ return self.ss.trust()
+
+ def untrust(self):
+ return self.ss.untrust()
+
+ def trusted(self):
+ return self.ss.trusted()
+
def auth(self, password):
if self.authed():
luci_log.debug_verbose('RC:auth0: already authenticated to %s' \
@@ -126,6 +138,10 @@
% (ret, self.__hostname))
if ret != '0':
raise Exception, 'Invalid response'
+ try:
+ self.ss.untrust()
+ except:
+ pass
except:
errstr = 'Error authenticating to host %s: %s' \
% (self.__hostname, str(ret))
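
The new fingerprint(), trust(), untrust() and trusted() helpers are thin delegations to the underlying SSL session object (self.ss), and auth() now calls untrust() on the session (ignoring failures) once the authentication response has been validated. A minimal sketch of that delegation pattern, using a stand-in session class since the real object comes from the conga_ssl bindings:

    # Sketch only: SSLSession is a placeholder for the conga_ssl session object.
    class SSLSession:
        def __init__(self):
            self._trusted = False
        def peer_fingerprint(self):
            return 'AA:BB:CC'        # placeholder fingerprint string
        def trust(self):
            self._trusted = True
            return True
        def untrust(self):
            self._trusted = False
            return True
        def trusted(self):
            return self._trusted

    class Communicator:
        def __init__(self, session):
            self.ss = session
        # Each helper forwards straight to the SSL session, as in the diff above.
        def fingerprint(self):
            return self.ss.peer_fingerprint()
        def trust(self):
            return self.ss.trust()
        def untrust(self):
            return self.ss.untrust()
        def trusted(self):
            return self.ss.trusted()
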
--- conga/luci/utils/luci_admin 2006/10/13 06:56:32 1.50
+++ conga/luci/utils/luci_admin 2006/12/22 17:50:17 1.50.2.1
@@ -40,6 +40,7 @@
LUCI_HOME_DIR = '/var/lib/luci'
LUCI_DB_PATH = LUCI_HOME_DIR + '/var/Data.fs'
LUCI_CERT_DIR = LUCI_HOME_DIR + '/var/certs/'
+LUCI_PEERS_DIR = LUCI_CERT_DIR + 'peers/'
LUCI_BACKUP_DIR = LUCI_HOME_DIR + '/var'
LUCI_BACKUP_PATH = LUCI_BACKUP_DIR + '/luci_backup.xml'
LUCI_ADMIN_SET_PATH = LUCI_HOME_DIR + '/.default_password_has_been_reset'
@@ -57,12 +58,34 @@
SSL_KEYCONFIG_PATH = LUCI_CERT_DIR + SSL_KEYCONFIG_NAME
ssl_key_data = [
- { 'id': SSL_PRIVKEY_PATH, 'name': SSL_PRIVKEY_NAME, 'type': 'private', 'mode': 0600 },
- { 'id': SSL_HTTPS_PRIVKEY_PATH, 'name': SSL_HTTPS_PRIVKEY_NAME, 'type': 'private', 'mode': 0600 },
- { 'id': SSL_PUBKEY_PATH, 'name': SSL_PUBKEY_NAME, 'type': 'public', 'mode': 0644 },
- { 'id': SSL_HTTPS_PUBKEY_PATH, 'name': SSL_HTTPS_PUBKEY_NAME, 'type': 'public', 'mode': 0644 },
- { 'id': SSL_KEYCONFIG_PATH, 'name': SSL_KEYCONFIG_NAME, 'type': 'config', 'mode': 0644 }
+ { 'id' : SSL_PRIVKEY_PATH,
+ 'name': SSL_PRIVKEY_NAME,
+ 'type': 'private',
+ 'mode': 0600 },
+ { 'id' : SSL_HTTPS_PRIVKEY_PATH,
+ 'name': SSL_HTTPS_PRIVKEY_NAME,
+ 'type': 'private',
+ 'mode': 0600 },
+ { 'id' : SSL_PUBKEY_PATH,
+ 'name': SSL_PUBKEY_NAME,
+ 'type': 'public',
+ 'mode': 0644 },
+ { 'id' : SSL_HTTPS_PUBKEY_PATH,
+ 'name': SSL_HTTPS_PUBKEY_NAME,
+ 'type': 'public',
+ 'mode': 0644 },
+ { 'id' : SSL_KEYCONFIG_PATH,
+ 'name': SSL_KEYCONFIG_NAME,
+ 'type': 'config',
+ 'mode': 0644 }
]
+for name in os.listdir(LUCI_PEERS_DIR):
+ path = LUCI_PEERS_DIR + name
+ if stat.S_ISREG(os.stat(path).st_mode):
+ ssl_key_data.append({'id' : path,
+ 'name' : path.lstrip(LUCI_CERT_DIR),
+ 'type' : 'public',
+ 'mode' : 0644})
#null = file(os.devnull, 'rwb+', 0) - available on python 2.4 and above!!!
null = file('/dev/null', 'rwb+', 0)
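
The luci_admin change introduces LUCI_PEERS_DIR and appends every regular file found there to ssl_key_data, so trusted peer certificates are backed up and restored alongside the other key material. A minimal sketch of that directory scan; the paths mirror the constants above, and the simple prefix slice used to build the relative name is an illustrative choice rather than a copy of the shipped code:

    # Sketch only; paths mirror the LUCI_* constants defined in luci_admin.
    import os
    import stat

    LUCI_CERT_DIR = '/var/lib/luci/var/certs/'
    LUCI_PEERS_DIR = LUCI_CERT_DIR + 'peers/'

    ssl_key_data = []
    if os.path.isdir(LUCI_PEERS_DIR):
        for name in os.listdir(LUCI_PEERS_DIR):
            path = LUCI_PEERS_DIR + name
            # only regular files are treated as peer certificates
            if stat.S_ISREG(os.stat(path).st_mode):
                ssl_key_data.append({
                    'id'  : path,
                    # name relative to the cert dir, e.g. 'peers/<host>'
                    'name': path[len(LUCI_CERT_DIR):],
                    'type': 'public',
                    'mode': 0644,
                })
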
--- conga/ricci/modules/storage/ExtendedFS.cpp 2006/10/06 03:10:13 1.7
+++ conga/ricci/modules/storage/ExtendedFS.cpp 2006/12/22 17:50:17 1.7.2.1
@@ -148,15 +148,19 @@
unsigned long long new_size,
const Props& new_props)
{
+ String extend_cmd("/usr/sbin/ext2online");
+ if (access(extend_cmd.c_str(), X_OK))
+ extend_cmd = "/sbin/resize2fs";
+
vector<String> args;
args.push_back(path);
String out, err;
int status;
- if (utils::execute("/usr/sbin/ext2online", args, out, err, status))
- throw command_not_found_error_msg("ext2online");
+ if (utils::execute(extend_cmd, args, out, err, status))
+ throw command_not_found_error_msg(extend_cmd);
if (status)
- throw String("ext2online failed");
+ throw String(extend_cmd + " failed");
}
bool
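
The ExtendedFS.cpp change above picks /usr/sbin/ext2online when it is present and executable and otherwise falls back to /sbin/resize2fs, which handles online resizing in newer e2fsprogs. A minimal sketch of the same fallback check, written in Python for brevity; only the two binary paths come from the diff, everything else is illustrative:

    # Illustrative sketch of the extend-command fallback; not the shipped C++ code.
    import os
    import subprocess

    def pick_extend_command():
        # Prefer ext2online; fall back to resize2fs when it is not executable,
        # mirroring the access(X_OK) check added in ExtendedFS.cpp.
        cmd = '/usr/sbin/ext2online'
        if not os.access(cmd, os.X_OK):
            cmd = '/sbin/resize2fs'
        return cmd

    def extend_fs(device_path):
        cmd = pick_extend_command()
        proc = subprocess.Popen([cmd, device_path],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError('%s failed: %s' % (cmd, err))
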