* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/busy_wai ...
@ 2008-01-25 17:19 rmccabe
From: rmccabe @ 2008-01-25 17:19 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2008-01-25 17:19:05
Modified files:
. : conga.spec.in.in
luci/cluster : busy_wait-macro form-macros
luci/homebase : validate_cluster_add.js
luci/site/luci/Extensions: LuciDB.py RicciQueries.py
cluster_adapters.py
luci/site/luci/var: Data.fs
ricci/modules/rpm: PackageHandler.cpp RpmModule.cpp
ricci/modules/service: ServiceManager.cpp
ricci/ricci : RicciWorker.cpp
Log message:
- Fix a bug that prevented the fix for bz230462 from working
- Fix a few other bugs found while testing
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.64&r2=1.45.2.65
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.1&r2=1.2.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.32&r2=1.90.2.33
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.6&r2=1.4.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.6&r2=1.1.4.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.8&r2=1.1.4.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.41&r2=1.120.2.42
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/var/Data.fs.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.26&r2=1.15.2.27
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/rpm/PackageHandler.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.5&r2=1.9.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/rpm/RpmModule.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.3.2.1&r2=1.3.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/service/ServiceManager.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.5.2.3&r2=1.5.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/ricci/RicciWorker.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.11.2.1&r2=1.11.2.2
--- conga/conga.spec.in.in 2008/01/23 05:20:44 1.45.2.64
+++ conga/conga.spec.in.in 2008/01/25 17:18:37 1.45.2.65
@@ -292,6 +292,9 @@
### changelog ###
%changelog
+* Fri Jan 25 2008 Ryan McCabe <rmccabe@redhat.com> 0.12.0-1
+- Fix a bug that prevented the fix for bz230462 from working
+
* Tue Jan 22 2008 Ryan McCabe <rmccabe@redhat.com> 0.12.0-0
- Fixed bz230462 (RFE: Only reboot installation target machines when needed)
- Fixed bz238655 (conga does not set the "nodename" attribute for manual fencing)
--- conga/luci/cluster/busy_wait-macro 2008/01/23 04:44:30 1.2.2.1
+++ conga/luci/cluster/busy_wait-macro 2008/01/25 17:18:37 1.2.2.2
@@ -65,6 +65,6 @@
tal:attributes="onclick python:'javascript:document.stop_waiting_form%s.submit()' % nodereport.get('report_index')">Stop waiting for this job to complete</a>
</form>
</div>
+ <hr/>
</div>
- <hr/>
</div>
--- conga/luci/cluster/form-macros 2008/01/23 04:44:30 1.90.2.32
+++ conga/luci/cluster/form-macros 2008/01/25 17:18:37 1.90.2.33
@@ -225,7 +225,7 @@
checked add_cluster/shared_storage |string:checked" />Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" name="reboot_nodes"
+ <input type="checkbox" id="reboot_nodes" name="reboot_nodes"
tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
@@ -1135,7 +1135,7 @@
checked add_cluster/shared_storage | string:checked" />Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" name="reboot_nodes"
+ <input type="checkbox" id="reboot_nodes" name="reboot_nodes"
tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
--- conga/luci/homebase/validate_cluster_add.js 2008/01/23 04:44:31 1.4.2.6
+++ conga/luci/homebase/validate_cluster_add.js 2008/01/25 17:18:38 1.4.2.7
@@ -48,14 +48,21 @@
return (-1);
}
+ var reboot_nodes = document.getElementById('reboot_nodes');
var view_certs = document.getElementById('view_certs');
if (!view_certs || !view_certs.checked) {
var confirm_str = '';
if (form.addnode) {
- confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?\nEach node added will be rebooted during this process.';
+ confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?';
+ if (reboot_nodes && reboot_nodes.checked) {
+ confirm_str += '\nEach node added will be rebooted during this process.';
+ }
} else {
if (form.cluster_create) {
- confirm_str = 'All nodes added to this cluster will be rebooted as part of this process.\n\nCreate cluster \"' + clustername + '\"?';
+ confirm_str = 'Create cluster \"' + clustername + '\"?\n\n';
+ if (reboot_nodes && reboot_nodes.checked) {
+ confirm_str += 'All nodes added to this cluster will be rebooted as part of this process.\n\n';
+ }
} else {
confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
}
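
The reworked confirmation logic above only warns about reboots when the new reboot_nodes checkbox is actually checked. A rough Python rendering of the same decision for the two reboot-relevant branches (parameter names are illustrative stand-ins for the form state the JavaScript reads):

def confirm_message(clustername, num_nodes, creating, reboot_checked):
    # Mirrors validate_cluster_add.js above: the reboot warning is
    # appended only when the reboot_nodes checkbox is checked.
    if creating:
        msg = 'Create cluster "%s"?\n\n' % clustername
        if reboot_checked:
            msg += ('All nodes added to this cluster will be rebooted '
                    'as part of this process.\n\n')
    else:
        noun = 'these nodes' if num_nodes > 1 else 'this node'
        msg = 'Add %s to the "%s" cluster?' % (noun, clustername)
        if reboot_checked:
            msg += '\nEach node added will be rebooted during this process.'
    return msg

print(confirm_message('alpha', 2, False, True))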
--- conga/luci/site/luci/Extensions/LuciDB.py 2008/01/23 04:44:32 1.1.4.6
+++ conga/luci/site/luci/Extensions/LuciDB.py 2008/01/25 17:18:38 1.1.4.7
@@ -335,14 +335,17 @@
objname = '%s____flag' % key
clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+
# now designate this new object properly
objpath = str('%s/%s' % (path, objname))
flag = self.restrictedTraverse(objpath)
flag.manage_addProperty(BATCH_ID, batch_id, 'string')
flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
- flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clustername), 'string')
flag.manage_addProperty(LAST_STATUS, 0, 'int')
+ flag.manage_addProperty(FLAG_DESC,
+ 'Creating node "%s" for cluster "%s"' % (key, clustername),
+ 'string')
except Exception, e:
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
--- conga/luci/site/luci/Extensions/RicciQueries.py 2008/01/23 04:44:32 1.1.4.8
+++ conga/luci/site/luci/Extensions/RicciQueries.py 2008/01/25 17:18:38 1.1.4.9
@@ -68,7 +68,7 @@
batch.append('</request>')
batch.append('</module>')
- need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+ need_reboot = reboot_nodes
if need_reboot:
batch.append('<module name="reboot">')
batch.append('<request API_version="1.0">')
@@ -183,7 +183,7 @@
batch.append('</request>')
batch.append('</module>')
- need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+ need_reboot = reboot_nodes
if need_reboot:
batch.append('<module name="reboot">')
batch.append('<request API_version="1.0">')
@@ -738,11 +738,12 @@
install_shared_storage,
install_LVS,
upgrade_rpms,
- gulm_lockservers):
+ gulm_lockservers,
+ reboot_nodes=False):
batch_str = createClusterBatch(os_str, cluster_name, cluster_alias,
nodeList, install_base, install_services,
install_shared_storage, install_LVS, upgrade_rpms,
- gulm_lockservers)
+ gulm_lockservers, reboot_nodes)
ricci_xml = rc.batch_run(batch_str)
return batchAttemptResult(ricci_xml)
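
The two hunks above stop inferring a reboot from package installation; only the new reboot_nodes argument (threaded through create_cluster with a default of False) triggers the reboot module. A minimal Python sketch of this gating, assuming the batch XML vocabulary visible in the diff (the reboot request body itself is hypothetical):

def build_batch(install_base, install_services, reboot_nodes=False):
    batch = ['<batch>']
    if install_base or install_services:
        batch.append('<module name="rpm">')
        batch.append('<request API_version="1.0">')
        # ... package install/upgrade requests would go here ...
        batch.append('</request>')
        batch.append('</module>')
    # Previously need_reboot was also true whenever any packages were
    # installed; now the caller's explicit flag alone decides it.
    if reboot_nodes:
        batch.append('<module name="reboot">')
        batch.append('<request API_version="1.0">')
        batch.append('<!-- reboot function call (illustrative) -->')
        batch.append('</request>')
        batch.append('</module>')
    batch.append('</batch>')
    return '\n'.join(batch)

print(build_batch(True, False, reboot_nodes=False))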
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2008/01/23 04:44:32 1.120.2.41
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2008/01/25 17:18:38 1.120.2.42
@@ -280,7 +280,8 @@
ret = send_batch_to_hosts(node_list, 10, rq.create_cluster,
add_cluster['cluster_os'], clustername, clustername,
node_list, True, True, add_cluster['shared_storage'], False,
- add_cluster['download_pkgs'], lockservers, add_cluster['reboot_nodes'])
+ add_cluster['download_pkgs'], lockservers,
+ add_cluster['reboot_nodes'])
batch_id_map = {}
for i in ret.iterkeys():
@@ -291,14 +292,13 @@
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose(msg)
continue
- batch_id_map[i] = ret[i]['batch_result']
+ batch_id_map[i] = ret[i]['batch_result'][0]
if len(batch_id_map) == 0:
request.SESSION.set('create_cluster', add_cluster)
return (False, { 'errors': errors, 'messages': messages })
buildClusterCreateFlags(self, batch_id_map, clustername)
-
response = request.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], CLUSTER_CONFIG, clustername))
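
The small-looking change above (['batch_result'] to ['batch_result'][0]) matters: each per-host result appears to carry a sequence whose first element is the batch id, and only that id belongs in batch_id_map. A sketch of the collection loop under that assumption (the send_batch_to_hosts result shape is inferred from the diff, not documented here):

def collect_batch_ids(ret):
    batch_id_map = {}
    for host, result in ret.items():
        if not result or 'batch_result' not in result:
            continue  # the real code logs the error and skips the host
        batch_id_map[host] = result['batch_result'][0]
    return batch_id_map

print(collect_batch_ids({'node1': {'batch_result': ('1234', '<xml/>')}}))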
Binary files /cvs/cluster/conga/luci/site/luci/var/Data.fs 2008/01/23 04:44:34 1.15.2.26 and /cvs/cluster/conga/luci/site/luci/var/Data.fs 2008/01/25 17:18:38 1.15.2.27 differ
rcsdiff: /cvs/cluster/conga/luci/site/luci/var/Data.fs: diff failed
--- conga/ricci/modules/rpm/PackageHandler.cpp 2008/01/17 17:38:38 1.9.2.5
+++ conga/ricci/modules/rpm/PackageHandler.cpp 2008/01/25 17:19:05 1.9.2.6
@@ -576,24 +576,24 @@
String name(iter->name);
map<String, Package>::iterator pack_iter = h_pre.packages().find(name);
if (pack_iter == h_pre.packages().end()) {
- throw String("package ") + name
- + " is present neither locally nor in repository";
+ throw String("Package \"") + name
+ + "\" is present neither locally nor in any available repository";
} else {
String curr_ver(pack_iter->second.version);
String repo_ver(pack_iter->second.repo_version);
if (curr_ver.empty()) {
// not installed
if (repo_ver.empty()) {
- throw String("package ") + name
- + " is not present in repository";
+ throw String("Package \"") + name
+ + "\" is not present in any available repository";
} else
rpms.push_back(name);
} else {
// already installed
if (upgrade) {
if (repo_ver.empty()) {
- throw String("package ") + name
- + " is not present in repository";
+ throw String("Package \"") + name
+ + "\" is not present in any available repository";
} else if (repo_ver > curr_ver)
rpms.push_back(name);
}
@@ -610,8 +610,8 @@
map<String, PackageSet>::iterator set_iter = h_pre.sets().find(name);
if (set_iter == h_pre.sets().end()) {
- throw String("packages of set ") + name
- + " present neither locally nor in repository";
+ throw String("Packages of set \"") + name
+ + "\" are neither present neither locally nor in any available repository";
} else {
PackageSet& p_set = set_iter->second;
if (p_set.installed) {
@@ -627,8 +627,7 @@
rpms.push_back(*name_iter);
}
} else {
- throw String("packages of set ") + name +
- " are not present in any available repository";
+ /* Packages are already up-to-date */
}
}
}
@@ -643,8 +642,8 @@
rpms.push_back(*name_iter);
}
} else {
- throw String("packages of set ") + name +
- " are not present in any available repository";
+ throw String("Packages of set \"") + name +
+ "\" are not present in any available repository";
}
}
}
@@ -679,6 +678,8 @@
FC6 = true;
else if (release.find("Moonshine") != release.npos)
FC6 = true;
+ else if (release.find("Werewolf") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else
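
The reworded errors above all fall out of one small decision table: a package that is neither installed nor in any repository is an error; not installed but available means install; already installed means upgrade only when requested and a newer repository version exists. A Python sketch of that table (the version fields mirror the diff; everything else is an illustrative stand-in for the C++ types):

def packages_to_fetch(name, curr_ver, repo_ver, upgrade):
    if not curr_ver and not repo_ver:
        raise ValueError('Package "%s" is present neither locally nor in '
                         'any available repository' % name)
    if not curr_ver:
        return [name]      # not installed: install from the repository
    if upgrade:
        if not repo_ver:
            raise ValueError('Package "%s" is not present in any '
                             'available repository' % name)
        if repo_ver > curr_ver:
            return [name]  # installed, but a newer version is available
    return []              # already installed and up to date

print(packages_to_fetch('cman', '', '2.0.84', False))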
--- conga/ricci/modules/rpm/RpmModule.cpp 2008/01/17 17:38:38 1.3.2.1
+++ conga/ricci/modules/rpm/RpmModule.cpp 2008/01/25 17:19:05 1.3.2.2
@@ -92,24 +92,24 @@
rpms.push_back(pack);
}
}
+ }
- list<PackageSet> sets;
- for (list<XMLObject>::const_iterator
- iter = sets_list.begin() ;
- iter != sets_list.end() ;
- iter++)
- {
- if (iter->tag() == "set") {
- String name(iter->get_attr("name"));
- if (name.size()) {
- PackageSet set(name);
- sets.push_back(set);
- }
+ list<PackageSet> sets;
+ for (list<XMLObject>::const_iterator
+ iter = sets_list.begin() ;
+ iter != sets_list.end() ;
+ iter++)
+ {
+ if (iter->tag() == "set") {
+ String name(iter->get_attr("name"));
+ if (name.size()) {
+ PackageSet set(name);
+ sets.push_back(set);
}
}
- PackageHandler::install(rpms, sets, upgrade);
}
+ PackageHandler::install(rpms, sets, upgrade);
return VarMap();
}
@@ -219,7 +219,7 @@
}
if (installable) {
- if (!set.installed && set.in_repo)
+ if (set.in_repo)
add = true;
}
--- conga/ricci/modules/service/ServiceManager.cpp 2008/01/17 17:38:39 1.5.2.3
+++ conga/ricci/modules/service/ServiceManager.cpp 2008/01/25 17:19:05 1.5.2.4
@@ -311,9 +311,9 @@
name();
for (list<Service>::const_iterator
- iter = servs.begin() ;
- iter != servs.end() ;
- iter++)
+ iter = servs.begin() ;
+ iter != servs.end() ;
+ iter++)
{
if (!iter->enabled())
return false;
@@ -478,13 +478,14 @@
list<String> servs;
String name = "Cluster Base";
- String descr = "Cluster infrastructure: ccs, cman, fence";
-
+ String descr;
if (RHEL4 || FC5) {
+ descr = "Cluster infrastructure (RHEL4): ccs, cman, fence";
servs.push_back("ccsd");
servs.push_back("cman");
servs.push_back("fenced");
} else if (RHEL5 || FC6) {
+ descr = "Cluster infrastructure (RHEL5): ccs, cman, fence";
servs.push_back("cman");
servs.push_back("qdiskd");
}
@@ -830,6 +831,8 @@
FC6 = true;
else if (release.find("Moonshine") != release.npos)
FC6 = true;
+ else if (release.find("Werewolf") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else {
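
Both hunks above follow the pattern ricci uses throughout: sniff the distribution codename out of the release string, set the FC6/RHEL4/RHEL5 flags, and pick the matching "Cluster Base" service list and description. A compact Python sketch of the idea (codenames come straight from the diffs; the layout is a simplification of the C++):

FC6_CODENAMES = ('Zod', 'Moonshine', 'Werewolf')

def cluster_base_services(release):
    if 'Tikanga' in release or any(n in release for n in FC6_CODENAMES):
        # RHEL5/FC6 family: cman subsumes ccsd and fenced
        return ('Cluster infrastructure (RHEL5): ccs, cman, fence',
                ['cman', 'qdiskd'])
    # RHEL4/FC5 family runs the daemons separately
    return ('Cluster infrastructure (RHEL4): ccs, cman, fence',
            ['ccsd', 'cman', 'fenced'])

print(cluster_base_services('release 5 (Tikanga)'))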
--- conga/ricci/ricci/RicciWorker.cpp 2008/01/17 17:38:39 1.11.2.1
+++ conga/ricci/ricci/RicciWorker.cpp 2008/01/25 17:19:05 1.11.2.2
@@ -258,6 +258,7 @@
_path(path)
{
QueueLocker lock;
+ struct stat st;
_fd = open(_path.c_str(), O_RDONLY);
if (_fd == -1)
@@ -276,18 +277,24 @@
}
}
+ if (fstat(_fd, &st) != 0)
+ throw String("Unable to stat file: ") + String(strerror(errno));
+
// read file
String xml_str;
- char buff[4096];
- ssize_t res;
- res = read_restart(_fd, buff, sizeof(buff));
- if (res <= 0) {
- throw String("error reading batch file: ")
+ while ((off_t) xml_str.size() < st.st_size) {
+ char buff[4096];
+ ssize_t res;
+
+ res = read_restart(_fd, buff, sizeof(buff));
+ if (res <= 0) {
+ throw String("error reading batch file: ")
+ String(strerror(-res));
+ }
+ xml_str.append(buff, res);
+ memset(buff, 0, sizeof(buff));
}
- xml_str.append(buff, res);
- memset(buff, 0, sizeof(buff));
// _xml
_xml = parseXML(xml_str);
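
The RicciWorker fix replaces a single read() — which may legally return fewer bytes than the file holds — with an fstat() followed by a loop that keeps reading until the whole batch file is in memory. The same pattern in Python, where os.read() has the same short-read behavior as read(2):

import os

def read_batch_file(path):
    # Stat first, then append 4 KiB reads until the buffer reaches
    # st_size, as in the RicciWorker.cpp loop above.
    fd = os.open(path, os.O_RDONLY)
    try:
        size = os.fstat(fd).st_size
        data = b''
        while len(data) < size:
            chunk = os.read(fd, 4096)
            if not chunk:
                raise IOError('error reading batch file: unexpected EOF')
            data += chunk
        return data
    finally:
        os.close(fd)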
* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/busy_wai ...
@ 2008-06-10 14:48 rmccabe
From: rmccabe @ 2008-06-10 14:48 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL4
Changes by: rmccabe at sourceware.org 2008-06-10 14:48:33
Modified files:
. : conga.spec.in.in
luci/cluster : busy_wait-macro index_html
luci/plone-custom: conga.js
luci/site/luci/Extensions: FenceHandler.py HelperFunctions.py
LuciClusterInfo.py
luci/site/luci/Extensions/ClusterModel: TagObject.py
ricci/modules/rpm: PackageHandler.cpp
ricci/modules/service: ServiceManager.cpp
Log message:
Bump support for ssh fence agents out to clusters running RHEL 4.8 or later, since the agents will no longer ship in 4.7.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.67.2.30&r2=1.67.2.31
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.3.2.1&r2=1.3.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.30.2.4&r2=1.30.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/conga.js.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4.2.2&r2=1.4.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.17.2.9&r2=1.17.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/HelperFunctions.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.6.2.2&r2=1.6.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.10.2.5&r2=1.10.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/TagObject.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.2.2.2&r2=1.2.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/rpm/PackageHandler.cpp.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.11.2.5&r2=1.11.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/service/ServiceManager.cpp.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.7.2.5&r2=1.7.2.6
--- conga/conga.spec.in.in 2008/05/16 17:10:08 1.67.2.30
+++ conga/conga.spec.in.in 2008/06/10 14:48:26 1.67.2.31
@@ -301,6 +301,9 @@
%changelog
+* Tue Jun 10 2008 Ryan McCabe <rmccabe@redhat.com> 0.11.1-6
+- Fix bz442729 (Add UI support for ssh for fence agents that now support it)
+
* Mon May 12 2008 Ryan McCabe <rmccabe@redhat.com> 0.11.1-5
- Fix bz446082 (Adding multiple fence devices at the same time causes duplicate entries)
--- conga/luci/cluster/busy_wait-macro 2008/03/25 01:27:10 1.3.2.1
+++ conga/luci/cluster/busy_wait-macro 2008/06/10 14:48:29 1.3.2.2
@@ -3,7 +3,7 @@
<p><img src="100wait.gif" /></p>
- <span tal:define="global nodereports isBusy/nodereports" />
+ <span tal:define="global nodereports isBusy/nodereports | nothing" />
<div tal:repeat="nodereport nodereports">
<tal:block
--- conga/luci/cluster/index_html 2008/03/25 01:27:10 1.30.2.4
+++ conga/luci/cluster/index_html 2008/06/10 14:48:29 1.30.2.5
@@ -27,20 +27,26 @@
<tal:block tal:define="
global sinfo nothing;
global hascluster request/clustername | nothing;
- global isBusy python: False;
+ global isBusy python:False;
+ global isVirtualized python:False;
global firsttime nothing;
global ri_agent nothing;
+ global os_version nothing;
global busywaiting python:None" />
<tal:block tal:condition="hascluster">
<tal:block tal:define="
global ri_agent python:here.getRicciAgentForCluster(request);
- resmap python:here.getClusterOS(ri_agent);
- global isVirtualized resmap/isVirtualized | nothing;
- global os_version resmap/os | nothing;
- global isBusy python:here.isClusterBusy(request);
global firsttime request/busyfirst | nothing" />
+ <tal:block tal:condition="ri_agent"
+ tal:define="resmap python:here.getClusterOS(ri_agent)">
+ <tal:block tal:define="
+ global isVirtualized resmap/isVirtualized | nothing;
+ global os_version resmap/os | nothing;
+ global isBusy python:here.isClusterBusy(request)" />
+ </tal:block>
+
<tal:block tal:condition="firsttime">
<tal:block tal:define="global busywaiting python:True" />
<meta http-equiv="refresh"
--- conga/luci/plone-custom/conga.js 2008/03/25 01:27:11 1.4.2.2
+++ conga/luci/plone-custom/conga.js 2008/06/10 14:48:30 1.4.2.3
@@ -45,7 +45,7 @@
}
function display_msgs(id, msg_list) {
- if (!msg_list || msg_list.length < 1) {
+ if (id === null || !msg_list || msg_list.length < 1) {
return (-1);
}
--- conga/luci/site/luci/Extensions/FenceHandler.py 2008/04/21 18:15:18 1.17.2.9
+++ conga/luci/site/luci/Extensions/FenceHandler.py 2008/06/10 14:48:30 1.17.2.10
@@ -657,8 +657,10 @@
use_ssh = form.has_key('secure') and (form['secure'] == '1' or form['secure'].lower() == 'true')
if use_ssh:
fencedev.addAttribute('secure', '1')
+ fencedev.addAttribute('agent', 'fence_drac5')
else:
fencedev.removeAttribute('secure')
+ fencedev.addAttribute('agent', 'fence_drac')
return errors
@@ -833,6 +835,7 @@
'fence_ilo': val_ilo_fd,
'fence_ipmilan': val_ipmilan_fd,
'fence_drac': val_drac_fd,
+ 'fence_drac5': val_drac_fd,
'fence_rsa': val_rsa_fd,
'fence_rsb': val_rsa_fd, # same params as rsa
'fence_rps10': val_rps10_fd,
@@ -1112,6 +1115,7 @@
'fence_ilo': val_noop_fi,
'fence_ipmilan': val_noop_fi,
'fence_drac': val_noop_fi,
+ 'fence_drac5': val_noop_fi,
'fence_rsa': val_noop_fi,
'fence_rsb': val_noop_fi,
'fence_rps10': val_noop_fi
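
The FenceHandler hunks above pick the fence agent by whether ssh ("secure") was requested — fence_drac5 speaks ssh, plain fence_drac does not — while both validate with the same val_drac_fd routine. A sketch of that dispatch, using a plain dict as a stand-in for the real fence device tag object:

def set_drac_agent(fencedev, form):
    # The 'secure' and 'agent' attribute names come from the diff above.
    use_ssh = form.get('secure') in ('1', 'true', 'True')
    if use_ssh:
        fencedev['secure'] = '1'
        fencedev['agent'] = 'fence_drac5'
    else:
        fencedev.pop('secure', None)
        fencedev['agent'] = 'fence_drac'
    return fencedev

print(set_drac_agent({}, {'secure': '1'}))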
--- conga/luci/site/luci/Extensions/HelperFunctions.py 2008/03/25 01:27:12 1.6.2.2
+++ conga/luci/site/luci/Extensions/HelperFunctions.py 2008/06/10 14:48:31 1.6.2.3
@@ -9,7 +9,7 @@
import threading
def resolveOSType(os_str):
- if not os_str or os_str.find('Tikanga') != (-1) or os_str.find('FC6') != (-1) or os_str.find('Zod') != (-1) or os_str.find('Moonshine') != (-1) or os_str.find('Werewolf') != (-1):
+ if not os_str or os_str.find('Tikanga') != (-1) or os_str.find('Zod') != (-1) or os_str.find('Moonshine') != (-1) or os_str.find('Werewolf') != (-1) or os_str.find('Sulphur') != (-1):
return 'rhel5'
else:
return 'rhel4'
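
With Sulphur (Fedora 9) added, resolveOSType is a growing chain of find() calls — note the commit also drops the 'FC6' substring from the list. A table-driven sketch that is equivalent and easier to extend:

RHEL5_FAMILY = ('Tikanga', 'Zod', 'Moonshine', 'Werewolf', 'Sulphur')

def resolve_os_type(os_str):
    if not os_str or any(name in os_str for name in RHEL5_FAMILY):
        return 'rhel5'
    return 'rhel4'

print(resolve_os_type('Fedora release 9 (Sulphur)'))  # rhel5
print(resolve_os_type('Nahant Update 7'))             # rhel4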
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py 2008/04/21 18:15:18 1.10.2.5
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py 2008/06/10 14:48:31 1.10.2.6
@@ -588,7 +588,7 @@
clumap['os_major'] = 4
os_minor = int(cluster_os[cluster_os.find('Update ') + 7])
clumap['os_minor'] = os_minor
- if os_minor > 6:
+ if os_minor > 7:
clumap['has_fence_ssh'] = True
except Exception, e:
if LUCI_DEBUG_MODE is True:
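
The gate above moves ssh fence agent support from RHEL 4.7 to 4.8: the minor number is sliced out of the 'Update N' portion of the release string and compared. The original single-character slice only handles one-digit minors; a regex makes the same check more robust (illustrative only):

import re

def has_fence_ssh(cluster_os):
    # os_minor > 7, i.e. RHEL 4.8 and later get ssh fence agent support.
    m = re.search(r'Update (\d+)', cluster_os)
    return m is not None and int(m.group(1)) > 7

print(has_fence_ssh('Red Hat Enterprise Linux ES 4 (Nahant Update 8)'))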
--- conga/luci/site/luci/Extensions/ClusterModel/TagObject.py 2008/03/25 01:27:13 1.2.2.2
+++ conga/luci/site/luci/Extensions/ClusterModel/TagObject.py 2008/06/10 14:48:32 1.2.2.3
@@ -53,10 +53,7 @@
return self.attr_hash
def getAttribute(self, kee):
- try:
- return self.attr_hash[kee]
- except KeyError, e:
- return None
+ return self.attr_hash.get(kee)
def getChildren(self):
return self.children
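
The TagObject change collapses a try/except KeyError into dict.get(), which returns None for a missing key without raising. The two forms are equivalent for a plain dict:

attr_hash = {'name': 'node1'}

def get_attribute_old(kee):
    try:
        return attr_hash[kee]
    except KeyError:
        return None

def get_attribute_new(kee):
    return attr_hash.get(kee)

assert get_attribute_old('missing') is get_attribute_new('missing') is None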
--- conga/ricci/modules/rpm/PackageHandler.cpp 2008/04/14 15:55:08 1.11.2.5
+++ conga/ricci/modules/rpm/PackageHandler.cpp 2008/06/10 14:48:32 1.11.2.6
@@ -684,6 +684,8 @@
FC6 = true;
else if (release.find("Werewolf") != release.npos)
FC6 = true;
+ else if (release.find("Sulphur") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else
--- conga/ricci/modules/service/ServiceManager.cpp 2008/04/14 15:55:08 1.7.2.5
+++ conga/ricci/modules/service/ServiceManager.cpp 2008/06/10 14:48:33 1.7.2.6
@@ -835,6 +835,8 @@
FC6 = true;
else if (release.find("Werewolf") != release.npos)
FC6 = true;
+ else if (release.find("Sulphur") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else {
* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/busy_wai ...
@ 2008-06-13 18:38 rmccabe
From: rmccabe @ 2008-06-13 18:38 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2008-06-13 18:38:40
Modified files:
. : conga.spec.in.in
luci/cluster : busy_wait-macro form-macros index_html
validate_create_gulm.js
luci/plone-custom: conga.js update_hostinfo.js
luci/site/luci/Extensions: FenceHandler.py HelperFunctions.py
LuciClusterInfo.py
luci/site/luci/Extensions/ClusterModel: TagObject.py
luci/site/luci/var: Data.fs
ricci/modules/rpm: PackageHandler.cpp
ricci/modules/service: ServiceManager.cpp
Log message:
Fix bz450854 (luci doesn't start because of a missing dependency on /usr/bin/dc)
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.88&r2=1.45.2.89
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.2&r2=1.2.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.35&r2=1.90.2.36
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/index_html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.20.2.15&r2=1.20.2.16
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_create_gulm.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.3&r2=1.1.4.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/conga.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.3.2.5&r2=1.3.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/update_hostinfo.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.13&r2=1.4.2.14
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/HelperFunctions.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.6&r2=1.4.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.10&r2=1.1.4.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/TagObject.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.2&r2=1.1.4.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/var/Data.fs.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.33&r2=1.15.2.34
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/rpm/PackageHandler.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.9&r2=1.9.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/service/ServiceManager.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.5.2.7&r2=1.5.2.8
--- conga/conga.spec.in.in 2008/05/16 17:08:28 1.45.2.88
+++ conga/conga.spec.in.in 2008/06/13 18:37:46 1.45.2.89
@@ -76,7 +76,7 @@
Group: System Environment/Base
Summary: Remote Management System - Management Station
-Requires: chkconfig initscripts
+Requires: chkconfig initscripts bc
Requires: python >= 2.4.1
%if "%{include_zope_and_plone}" == "yes"
Provides: config(luci) = %{version}-%{release}
@@ -297,6 +297,7 @@
- Fix bz444210 (Adding multiple fence devices at the same time causes duplicate entries)
- Fix bz444381 (conga writes 'exportpath' instead of 'export' attribute for netfs)
- Fix bz446907 (rpm -V luci fails)
+- Fix bz450854 (luci doesn't start because of a missing dep /usr/bin/dc)
* Fri Apr 18 2008 Ryan McCabe <rmccabe@redhat.com> 0.12.0-8
- Fix bz441580 (conga should install 'sg3_utils' and start service 'scsi_reserve' when scsi fencing is used)
--- conga/luci/cluster/busy_wait-macro 2008/01/25 17:18:37 1.2.2.2
+++ conga/luci/cluster/busy_wait-macro 2008/06/13 18:37:46 1.2.2.3
@@ -3,7 +3,7 @@
<p><img src="100wait.gif" /></p>
- <span tal:define="global nodereports isBusy/nodereports" />
+ <span tal:define="global nodereports isBusy/nodereports | nothing" />
<div tal:repeat="nodereport nodereports">
<tal:block
--- conga/luci/cluster/form-macros 2008/04/23 17:27:10 1.90.2.35
+++ conga/luci/cluster/form-macros 2008/06/13 18:37:46 1.90.2.36
@@ -148,6 +148,80 @@
</div>
</div>
+<div metal:define-macro="lockserver-macro" tal:omit-tag="">
+ <strong class="cluster">Lock Manager</strong>
+ <ul class="vanilla">
+ <li class="vanilla">
+ <input type="radio" name="lockmanager" value="dlm"
+ onchange="toggle_gulm(this.form, this.value)"
+ tal:attributes="checked python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm'">DLM (preferred)
+ </li>
+ <li class="vanilla">
+ <input type="radio" name="lockmanager" value="gulm"
+ onchange="toggle_gulm(this.form, this.value)"
+ tal:attributes="checked python: add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] == 'gulm'">GULM
+ </li>
+ <div id="gulm_lockservers"
+ tal:attributes="class python: (add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] != 'gulm') and 'invisible' or ''">
+ <fieldset>
+ <legend class="rescfg">GULM lock server properties</legend>
+ <p>You must enter exactly 1, 3, or 5 GULM lock servers.</p>
+ <table class="systemsTable">
+ <tr>
+ <td class="pad_right">Lock Server 1</td>
+ <td>
+ <input type="text" name="__GULM__:server1"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server1 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 2</td>
+ <td>
+ <input type="text" name="__GULM__:server2"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server2 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 3</td>
+ <td>
+ <input type="text" name="__GULM__:server3"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server3 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 4</td>
+ <td>
+ <input type="text" name="__GULM__:server4"
+ class="hostname" tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server4 | nothing" />
+ </td>
+ </tr>
+ <tr>
+ <td class="pad_right">Lock Server 5</td>
+ <td>
+ <input type="text" name="__GULM__:server5"
+ class="hostname"
+ tal:attributes="
+ disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
+ value add_cluster/gulm_lockservers/server5 | nothing" />
+ </td>
+ </tr>
+ </table>
+ </fieldset>
+ </div>
+ </ul>
+</div>
+
<div metal:define-macro="cluster-form">
<h2>Cluster Form</h2>
</div>
@@ -171,10 +245,17 @@
tal:define="
global add_cluster request/SESSION/create_cluster | nothing">
+ <div class="invisible"
+ tal:condition="not:exists:add_cluster/gulm_support">
+ <div id="lockserver_div">
+ <tal:block metal:use-macro="here/form-macros/macros/lockserver-macro" />
+ </div>
+ </div>
+
<input name="pagetype" type="hidden"
tal:attributes="value request/form/pagetype | request/pagetype |string:6" />
- <input name="cluster_os" type="hidden"
+ <input name="cluster_os" type="hidden" id="cluster_os"
tal:attributes="value add_cluster/cluster_os | nothing" />
<table id="systemsTable" class="systemsTable" cellspacing="0">
@@ -241,91 +322,11 @@
</ul>
</td></tr>
- <tr class="systemsTable"
- tal:condition="exists:add_cluster/gulm_support">
- <td class="systemsTable" colspan="2">
- <strong class="cluster">Lock Manager</strong>
- <ul class="vanilla">
- <li class="vanilla">
- <input type="radio"
- name="lockmanager" value="dlm"
- onchange="toggle_gulm(this.form, this.value)"
- tal:attributes="checked python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm'"
- >DLM (preferred)
- </li>
- <li class="vanilla">
- <input type="radio"
- name="lockmanager" value="gulm"
- onchange="toggle_gulm(this.form, this.value)"
- tal:attributes="checked python: add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] == 'gulm'"
- >GULM
- </li>
- <div id="gulm_lockservers"
- tal:attributes="class python: (add_cluster and 'lockmanager' in add_cluster and add_cluster['lockmanager'] != 'gulm') and 'invisible' or ''">
- <fieldset>
- <legend class="rescfg">GULM lock server properties</legend>
- <p>You must enter exactly 1, 3, or 5 GULM lock servers.</p>
-
- <table class="systemsTable">
- <tr>
- <td class="pad_right">Lock Server 1</td>
- <td>
- <input type="text"
- name="__GULM__:server1"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server1 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 2</td>
- <td>
- <input type="text"
- name="__GULM__:server2"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server2 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 3</td>
- <td>
- <input type="text"
- name="__GULM__:server3"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server3 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 4</td>
- <td>
- <input type="text"
- name="__GULM__:server4"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server4 | nothing" />
- </td>
- </tr>
- <tr>
- <td class="pad_right">Lock Server 5</td>
- <td>
- <input type="text"
- name="__GULM__:server5"
- class="hostname"
- tal:attributes="
- disabled python: not add_cluster or not 'lockmanager' in add_cluster or add_cluster['lockmanager'] != 'gulm';
- value add_cluster/gulm_lockservers/server5 | nothing" />
- </td>
- </tr>
- </table>
- </fieldset>
- </div>
- <ul>
+ <tr class="systemsTable">
+ <td class="systemsTable" colspan="2" id="lockserver_dialog">
+ <tal:block tal:condition="exists:add_cluster/gulm_support">
+ <tal:block metal:use-macro="here/form-macros/macros/lockserver-macro" />
+ </tal:block>
</td>
</tr>
</tfoot>
@@ -471,6 +472,8 @@
</div>
<div class="hbSubmit" id="hbSubmit">
+ <input id="cluster_version" name="cluster_version" type="hidden"
+ tal:attributes="value os_version | nothing" />
<input type="hidden" name="cluster_create" value="1" />
<input type="button" name="Submit" value="Submit"
onClick="validate_cluster_create(this.form)" />
@@ -1094,7 +1097,7 @@
<input name="pagetype" type="hidden"
tal:attributes="value request/form/pagetype | request/pagetype | string:15" />
- <input name="cluster_os" type="hidden"
+ <input name="cluster_os" type="hidden" id="cluster_os"
tal:attributes="value add_cluster/cluster_os | nothing" />
<table id="systemsTable" class="systemsTable" cellspacing="0">
--- conga/luci/cluster/index_html 2008/02/08 21:56:33 1.20.2.15
+++ conga/luci/cluster/index_html 2008/06/13 18:37:46 1.20.2.16
@@ -27,20 +27,26 @@
<tal:block tal:define="
global sinfo nothing;
global hascluster request/clustername | nothing;
- global isBusy python: False;
+ global isBusy python:False;
+ global isVirtualized python:False;
global firsttime nothing;
global ri_agent nothing;
+ global os_version nothing;
global busywaiting python:None" />
<tal:block tal:condition="hascluster">
<tal:block tal:define="
global ri_agent python:here.getRicciAgentForCluster(request);
- resmap python:here.getClusterOS(ri_agent);
- global isVirtualized resmap/isVirtualized | nothing;
- global os_version resmap/os | nothing;
- global isBusy python:here.isClusterBusy(request);
global firsttime request/busyfirst | nothing" />
+ <tal:block tal:condition="ri_agent"
+ tal:define="resmap python:here.getClusterOS(ri_agent)">
+ <tal:block tal:define="
+ global isVirtualized resmap/isVirtualized | nothing;
+ global os_version resmap/os | nothing;
+ global isBusy python:here.isClusterBusy(request)" />
+ </tal:block>
+
<tal:block tal:condition="firsttime">
<tal:block tal:define="global busywaiting python:True" />
<meta http-equiv="refresh"
--- conga/luci/cluster/validate_create_gulm.js 2008/01/23 04:44:30 1.1.4.3
+++ conga/luci/cluster/validate_create_gulm.js 2008/06/13 18:37:46 1.1.4.4
@@ -65,10 +65,19 @@
}
function validate_cluster_create(form) {
- if (form.lockmanager && !form.lockmanager[0].checked) {
- var errors = check_gulm_lkserv();
- if (error_dialog(errors))
- return (-1);
+ try {
+ if (!form.cluster_os.value) {
+ get_system_info(null, null, 1);
+ }
+ } catch (e) {
+ }
+
+ if (form.cluster_os.value) {
+ if (form.lockmanager && !form.lockmanager[0].checked) {
+ var errors = check_gulm_lkserv();
+ if (error_dialog(errors))
+ return (-1);
+ }
+ return validateForm(form);
}
- return validateForm(form);
}
--- conga/luci/plone-custom/conga.js 2008/02/07 06:52:56 1.3.2.5
+++ conga/luci/plone-custom/conga.js 2008/06/13 18:37:46 1.3.2.6
@@ -45,7 +45,7 @@
}
function display_msgs(id, msg_list) {
- if (!msg_list || msg_list.length < 1) {
+ if (id === null || !msg_list || msg_list.length < 1) {
return (-1);
}
--- conga/luci/plone-custom/update_hostinfo.js 2008/01/23 04:44:32 1.1.2.1
+++ conga/luci/plone-custom/update_hostinfo.js 2008/06/13 18:37:46 1.1.2.2
@@ -97,6 +97,7 @@
var errors = [];
var msgs = [];
var systems_list = get_systems_list_offsets(errors);
+ var cluster_os = null;
for (var i = 0 ; i < systems_list.length ; i++) {
var cur_node_props = node_props[systems_list[i][0]];
@@ -106,6 +107,10 @@
var addr_elem = document.getElementById('__SYSTEM' + offset + ':Addr');
var pwd_elem = document.getElementById('__SYSTEM' + offset + ':Passwd');
var fp_elem = document.getElementById('__SYSTEM' + offset + 'Fingerprint');
+ try {
+ cluster_os = cur_node_props['OS'];
+ } catch (e) {
+ }
if (cur_node_props['available'] != 'True') {
errors.push(cur_node_props['err_msg']);
@@ -130,7 +135,32 @@
}
}
} catch (e) {
- alert(e);
+ //alert(e);
+ }
+ }
+
+ if (cluster_os !== null) {
+ var os_str = null;
+
+ if (cluster_os.match(/Nahant/)) {
+ var lsdiv = document.getElementById('lockserver_div');
+ if (lsdiv !== null) {
+ var ls_cell = document.getElementById('lockserver_dialog');
+ if (ls_cell !== null) {
+ lsdiv.id = null;
+ ls_cell.appendChild(lsdiv);
+ }
+ }
+ os_str = 'rhel4';
+ } else {
+ os_str = 'rhel5';
+ }
+
+ if (os_str !== null) {
+ var os_in = document.getElementById('cluster_os');
+ if (os_in !== null) {
+ os_in.value = os_str;
+ }
}
}
--- conga/luci/site/luci/Extensions/FenceHandler.py 2008/04/23 17:27:10 1.4.2.13
+++ conga/luci/site/luci/Extensions/FenceHandler.py 2008/06/13 18:37:46 1.4.2.14
@@ -657,8 +657,10 @@
use_ssh = form.has_key('secure') and (form['secure'] == '1' or form['secure'].lower() == 'true')
if use_ssh:
fencedev.addAttribute('secure', '1')
+ fencedev.addAttribute('agent', 'fence_drac5')
else:
fencedev.removeAttribute('secure')
+ fencedev.addAttribute('agent', 'fence_drac')
return errors
@@ -833,6 +835,7 @@
'fence_ilo': val_ilo_fd,
'fence_ipmilan': val_ipmilan_fd,
'fence_drac': val_drac_fd,
+ 'fence_drac5': val_drac_fd,
'fence_rsa': val_rsa_fd,
'fence_rsb': val_rsa_fd, # same params as rsa
'fence_rps10': val_rps10_fd,
@@ -1112,6 +1115,7 @@
'fence_ilo': val_noop_fi,
'fence_ipmilan': val_noop_fi,
'fence_drac': val_noop_fi,
+ 'fence_drac5': val_noop_fi,
'fence_rsa': val_noop_fi,
'fence_rsb': val_noop_fi,
'fence_rps10': val_noop_fi
--- conga/luci/site/luci/Extensions/HelperFunctions.py 2008/01/23 04:44:32 1.4.2.6
+++ conga/luci/site/luci/Extensions/HelperFunctions.py 2008/06/13 18:37:47 1.4.2.7
@@ -9,7 +9,7 @@
import threading
def resolveOSType(os_str):
- if not os_str or os_str.find('Tikanga') != (-1) or os_str.find('FC6') != (-1) or os_str.find('Zod') != (-1) or os_str.find('Moonshine') != (-1) or os_str.find('Werewolf') != (-1):
+ if not os_str or os_str.find('Tikanga') != (-1) or os_str.find('Zod') != (-1) or os_str.find('Moonshine') != (-1) or os_str.find('Werewolf') != (-1) or os_str.find('Sulphur') != (-1):
return 'rhel5'
else:
return 'rhel4'
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py 2008/04/23 17:27:10 1.1.4.10
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py 2008/06/13 18:37:47 1.1.4.11
@@ -588,7 +588,7 @@
clumap['os_major'] = 4
os_minor = int(cluster_os[cluster_os.find('Update ') + 7])
clumap['os_minor'] = os_minor
- if os_minor > 6:
+ if os_minor > 7:
clumap['has_fence_ssh'] = True
except Exception, e:
if LUCI_DEBUG_MODE is True:
--- conga/luci/site/luci/Extensions/ClusterModel/TagObject.py 2008/01/23 04:44:34 1.1.4.2
+++ conga/luci/site/luci/Extensions/ClusterModel/TagObject.py 2008/06/13 18:37:47 1.1.4.3
@@ -53,10 +53,7 @@
return self.attr_hash
def getAttribute(self, kee):
- try:
- return self.attr_hash[kee]
- except KeyError, e:
- return None
+ return self.attr_hash.get(kee)
def getChildren(self):
return self.children
Binary files /cvs/cluster/conga/luci/site/luci/var/Data.fs 2008/04/23 17:27:10 1.15.2.33 and /cvs/cluster/conga/luci/site/luci/var/Data.fs 2008/06/13 18:37:47 1.15.2.34 differ
rcsdiff: /cvs/cluster/conga/luci/site/luci/var/Data.fs: diff failed
--- conga/ricci/modules/rpm/PackageHandler.cpp 2008/04/14 15:58:35 1.9.2.9
+++ conga/ricci/modules/rpm/PackageHandler.cpp 2008/06/13 18:38:40 1.9.2.10
@@ -684,6 +684,8 @@
FC6 = true;
else if (release.find("Werewolf") != release.npos)
FC6 = true;
+ else if (release.find("Sulphur") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else
--- conga/ricci/modules/service/ServiceManager.cpp 2008/04/14 15:58:35 1.5.2.7
+++ conga/ricci/modules/service/ServiceManager.cpp 2008/06/13 18:38:40 1.5.2.8
@@ -835,6 +835,8 @@
FC6 = true;
else if (release.find("Werewolf") != release.npos)
FC6 = true;
+ else if (release.find("Sulphur") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else {