* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: kupcevic @ 2006-12-12 13:26 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: kupcevic at sourceware.org 2006-12-12 13:26:24
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros
luci/site/luci/Extensions: ricci_bridge.py
ricci/common : File.cpp
ricci/docs : storage_api.html
ricci/modules/storage: LV.cpp LVM.cpp LVM.h PV.cpp
StorageModule.cpp VG.cpp
Added files:
ricci/test_suite/storage: disable_clustered_lvm.xml
enable_clustered_lvm.xml
Log message:
Fixed bz217703 (clustered vg creation fails)
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.10&r2=1.45.2.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.8&r2=1.90.2.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.30.2.12&r2=1.30.2.13
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/common/File.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/docs/storage_api.html.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.5&r2=1.5.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/LV.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.6&r2=1.6.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/LVM.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.7.2.2&r2=1.7.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/LVM.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/PV.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/StorageModule.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.5&r2=1.5.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/storage/VG.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.8&r2=1.8.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/test_suite/storage/disable_clustered_lvm.xml.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/test_suite/storage/enable_clustered_lvm.xml.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.2.1
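
The two calls this change adds to ricci's storage module, enable_clustered_lvm and disable_clustered_lvm, are thin wrappers around the lvmconf utility. A minimal Python sketch of the behavior, assuming only what the LVM.cpp hunk below shows (ricci implements this in C++; the helper name here is illustrative):

    import subprocess

    LVMCONF_PATH = '/usr/sbin/lvmconf'

    def set_clustered_locking(enable):
        # lvmconf rewrites locking_type in /etc/lvm/lvm.conf; ricci runs it
        # via utils::execute() and checks the exit status the same way.
        flag = enable and '--enable-cluster' or '--disable-cluster'
        try:
            status = subprocess.call([LVMCONF_PATH, flag])
        except OSError:
            raise RuntimeError('command not found: ' + LVMCONF_PATH)
        if status != 0:
            raise RuntimeError('Failed to %s LVM clustered locking'
                               % (enable and 'enable' or 'disable'))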
--- conga/conga.spec.in.in 2006/12/12 05:21:34 1.45.2.10
+++ conga/conga.spec.in.in 2006/12/12 13:26:23 1.45.2.11
@@ -297,6 +297,7 @@
- Fixed bz218040 (luci reports "unknown" as cluster name if cluster creation fails)
- Fixed bz218941 (Conga/luci - cannot add node to cluster via luci web app)
- Fixed bz219156 (Errors in performing node-specific tasks due to ricci outage not reflected in luci display)
+- Fixed bz217703 (clustered vg creation fails)
* Thu Nov 16 2006 Stanko Kupcevic <kupcevic@redhat.com> 0.8-25
- Fix build issues (D-BUS detection)
--- conga/luci/cluster/form-macros 2006/12/11 23:57:59 1.90.2.8
+++ conga/luci/cluster/form-macros 2006/12/12 13:26:23 1.90.2.9
@@ -45,8 +45,7 @@
<img src="rebooted.png" alt="[cluster node rebooted]" />
</span>
- <span tal:condition="
- python: nodereport['statusindex'] == 4 or nodereport['statusindex'] == 5">
+ <span tal:condition="python: nodereport['statusindex'] == 4 or nodereport['statusindex'] == 5">
<img src="configured.png" alt="[cluster node configured]" />
</span>
--- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/12/11 23:58:00 1.30.2.12
+++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/12/12 13:26:23 1.30.2.13
@@ -43,15 +43,16 @@
return (-1, err_msg)
def addClusterNodeBatch(os_str,
- cluster_name,
- install_base,
- install_services,
- install_shared_storage,
- install_LVS,
- upgrade_rpms):
-
+ cluster_name,
+ install_base,
+ install_services,
+ install_shared_storage,
+ install_LVS,
+ upgrade_rpms):
+
batch = '<?xml version="1.0" ?>'
batch += '<batch>'
+
batch += '<module name="rpm">'
batch += '<request API_version="1.0">'
batch += '<function_call name="install">'
@@ -61,9 +62,7 @@
else:
batch += 'false'
batch += '"/>'
-
batch += '<var name="sets" type="list_xml">'
-
if install_base or install_services or install_shared_storage:
batch += '<set name="Cluster Base"/>'
if install_services:
@@ -72,45 +71,42 @@
batch += '<set name="Clustered Storage"/>'
if install_LVS:
batch += '<set name="Linux Virtual Server"/>'
-
batch += '</var>'
batch += '</function_call>'
batch += '</request>'
batch += '</module>'
-
+
+ batch += '<module name="service">'
+ batch += '<request API_version="1.0">'
+ batch += '<function_call name="disable">'
+ batch += '<var mutable="false" name="services" type="list_xml">'
+ if install_base or install_services or install_shared_storage:
+ batch += '<set name="Cluster Base"/>'
+ if install_services:
+ batch += '<set name="Cluster Service Manager"/>'
+ if install_shared_storage:
+ batch += '<set name="Clustered Storage"/>'
+ if install_LVS:
+ batch += '<set name="Linux Virtual Server"/>'
+ batch += '</var>'
+ batch += '</function_call>'
+ batch += '</request>'
+ batch += '</module>'
+
need_reboot = install_base or install_services or install_shared_storage or install_LVS
if need_reboot:
- batch += '<module name="service">'
- batch += '<request API_version="1.0">'
- batch += '<function_call name="disable">'
- batch += '<var mutable="false" name="services" type="list_xml">'
- if os_str == 'rhel4':
- batch += '<service name="ccsd"/>'
- batch += '<service name="cman"/>'
- batch += '</var>'
- batch += '</function_call>'
- batch += '</request>'
- batch += '</module>'
-
batch += '<module name="reboot">'
batch += '<request API_version="1.0">'
batch += '<function_call name="reboot_now"/>'
batch += '</request>'
batch += '</module>'
else:
- # need 2 placeholders instead of disable services / reboot
batch += '<module name="rpm">'
batch += '<request API_version="1.0">'
batch += '<function_call name="install"/>'
batch += '</request>'
batch += '</module>'
-
- batch += '<module name="rpm">'
- batch += '<request API_version="1.0">'
- batch += '<function_call name="install"/>'
- batch += '</request>'
- batch += '</module>'
-
+
batch += '<module name="cluster">'
batch += '<request API_version="1.0">'
batch += '<function_call name="set_cluster.conf">'
@@ -127,27 +123,20 @@
batch += '</function_call>'
batch += '</request>'
batch += '</module>'
-
- if need_reboot:
- batch += '<module name="service">'
+
+ if install_shared_storage:
+ batch += '<module name="storage">'
batch += '<request API_version="1.0">'
- batch += '<function_call name="enable">'
- batch += '<var mutable="false" name="services" type="list_xml">'
- if os_str == 'rhel4':
- batch += '<service name="ccsd"/>'
- batch += '<service name="cman"/>'
- batch += '</var>'
- batch += '</function_call>'
+ batch += '<function_call name="enable_clustered_lvm"/>'
batch += '</request>'
batch += '</module>'
else:
- # placeholder instead of enable services
batch += '<module name="rpm">'
batch += '<request API_version="1.0">'
batch += '<function_call name="install"/>'
batch += '</request>'
batch += '</module>'
-
+
batch += '<module name="cluster">'
batch += '<request API_version="1.0">'
batch += '<function_call name="start_node"/>'
@@ -157,18 +146,19 @@
return minidom.parseString(batch).firstChild
-def createClusterBatch( os_str,
- cluster_name,
- cluster_alias,
- nodeList,
- install_base,
- install_services,
- install_shared_storage,
- install_LVS,
- upgrade_rpms):
-
+def createClusterBatch(os_str,
+ cluster_name,
+ cluster_alias,
+ nodeList,
+ install_base,
+ install_services,
+ install_shared_storage,
+ install_LVS,
+ upgrade_rpms):
+
batch = '<?xml version="1.0" ?>'
batch += '<batch>'
+
batch += '<module name="rpm">'
batch += '<request API_version="1.0">'
batch += '<function_call name="install">'
@@ -179,7 +169,6 @@
batch += 'false'
batch += '"/>'
batch += '<var name="sets" type="list_xml">'
-
if install_base or install_services or install_shared_storage:
batch += '<set name="Cluster Base"/>'
if install_services:
@@ -192,40 +181,38 @@
batch += '</function_call>'
batch += '</request>'
batch += '</module>'
-
+
+ batch += '<module name="service">'
+ batch += '<request API_version="1.0">'
+ batch += '<function_call name="disable">'
+ batch += '<var mutable="false" name="services" type="list_xml">'
+ if install_base or install_services or install_shared_storage:
+ batch += '<set name="Cluster Base"/>'
+ if install_services:
+ batch += '<set name="Cluster Service Manager"/>'
+ if install_shared_storage:
+ batch += '<set name="Clustered Storage"/>'
+ if install_LVS:
+ batch += '<set name="Linux Virtual Server"/>'
+ batch += '</var>'
+ batch += '</function_call>'
+ batch += '</request>'
+ batch += '</module>'
+
need_reboot = install_base or install_services or install_shared_storage or install_LVS
if need_reboot:
- batch += '<module name="service">'
- batch += '<request API_version="1.0">'
- batch += '<function_call name="disable">'
- batch += '<var mutable="false" name="services" type="list_xml">'
- if os_str == 'rhel4':
- batch += '<service name="ccsd"/>'
- batch += '<service name="cman"/>'
- batch += '</var>'
- batch += '</function_call>'
- batch += '</request>'
- batch += '</module>'
-
batch += '<module name="reboot">'
batch += '<request API_version="1.0">'
batch += '<function_call name="reboot_now"/>'
batch += '</request>'
batch += '</module>'
else:
- # need 2 placeholders instead of disable services / reboot
batch += '<module name="rpm">'
batch += '<request API_version="1.0">'
batch += '<function_call name="install"/>'
batch += '</request>'
batch += '</module>'
-
- batch += '<module name="rpm">'
- batch += '<request API_version="1.0">'
- batch += '<function_call name="install"/>'
- batch += '</request>'
- batch += '</module>'
-
+
batch += '<module name="cluster">'
batch += '<request API_version="1.0">'
batch += '<function_call name="set_cluster.conf">'
@@ -233,9 +220,7 @@
batch += '<var mutable="false" name="cluster.conf" type="xml">'
batch += '<cluster config_version="1" name="' + cluster_name + '" alias="' + cluster_alias + '">'
batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
-
batch += '<clusternodes>'
-
x = 1
for i in nodeList:
if os_str == "rhel4":
@@ -243,14 +228,11 @@
else:
batch += '<clusternode name="' + i + '" votes="1" nodeid="' + str(x) + '" />'
x = x + 1
-
batch += '</clusternodes>'
-
if len(nodeList) == 2:
batch += '<cman expected_votes="1" two_node="1"/>'
else:
batch += '<cman/>'
-
batch += '<fencedevices/>'
batch += '<rm/>'
batch += '</cluster>'
@@ -258,27 +240,20 @@
batch += '</function_call>'
batch += '</request>'
batch += '</module>'
-
- if need_reboot:
- batch += '<module name="service">'
+
+ if install_shared_storage:
+ batch += '<module name="storage">'
batch += '<request API_version="1.0">'
- batch += '<function_call name="enable">'
- batch += '<var mutable="false" name="services" type="list_xml">'
- if os_str == 'rhel4':
- batch += '<service name="ccsd"/>'
- batch += '<service name="cman"/>'
- batch += '</var>'
- batch += '</function_call>'
+ batch += '<function_call name="enable_clustered_lvm"/>'
batch += '</request>'
batch += '</module>'
else:
- # placeholder instead of enable services
batch += '<module name="rpm">'
batch += '<request API_version="1.0">'
batch += '<function_call name="install"/>'
batch += '</request>'
batch += '</module>'
-
+
batch += '<module name="cluster">'
batch += '<request API_version="1.0">'
batch += '<function_call name="start_node">'
--- conga/ricci/common/File.cpp 2006/10/24 21:57:36 1.1.2.2
+++ conga/ricci/common/File.cpp 2006/12/12 13:26:23 1.1.2.3
@@ -51,7 +51,7 @@
bool rw)
{
if (access(filepath.c_str(), R_OK))
- throw String("missing file ") + filepath;
+ throw String("unable to read file ") + filepath;
ios_base::openmode mode = ios_base::in;
if (rw)
mode |= ios_base::out;
--- conga/ricci/docs/storage_api.html 2006/10/16 20:26:48 1.5
+++ conga/ricci/docs/storage_api.html 2006/12/12 13:26:24 1.5.2.1
@@ -374,6 +374,30 @@
especially unmount error if content fails to unmount</P>
</UL>
</UL>
+ <LI><P>enable_clustered_lvm</P>
+ <UL>
+ <P>Enable LVM's cluster locking.
+ It is an error to call this function if lvm2-cluster
+ is not installed.
+ </P>
+ <P>No input variables.
+ </P>
+ <P>No output variables.</P>
+ <P>On failure:<BR>- No special errors defined, only generic ones
+ might get returned.</P>
+ </UL>
+ <LI><P>disable_clustered_lvm</P>
+ <UL>
+ <P>Disable LVM's cluster locking.
+ It is an error to call this function if lvm2-cluster
+ is not installed.
+ </P>
+ <P>No input variables.
+ </P>
+ <P>No output variables.</P>
+ <P>On failure:<BR>- No special errors defined, only generic ones
+ might get returned.</P>
+ </UL>
<P ALIGN=LEFT><BR><BR>
</P>
</BODY>
--- conga/ricci/modules/storage/LV.cpp 2006/09/26 01:16:13 1.6
+++ conga/ricci/modules/storage/LV.cpp 2006/12/12 13:26:24 1.6.2.1
@@ -45,7 +45,7 @@
// if VG is marked as clustered, but cluster locking is not available, throw
- if (!LVM::clustered_available() &&
+ if (!LVM::clustered_enabled() &&
bd_temp.props.get("clustered").get_bool())
throw ClvmdError();
@@ -183,7 +183,7 @@
{
// if VG is marked as clustered, but cluster locking is not available, throw
if (_props.get("clustered").get_bool() &&
- !LVM::clustered_available())
+ !LVM::clustered_enabled())
throw ClvmdError();
// snapshots neither resize nor replace content, see LV()
@@ -247,7 +247,7 @@
{
// if VG is marked as clustered, but cluster locking is not available, throw
if (_props.get("clustered").get_bool() &&
- !LVM::clustered_available())
+ !LVM::clustered_enabled())
throw ClvmdError();
content->remove();
--- conga/ricci/modules/storage/LVM.cpp 2006/11/29 18:26:53 1.7.2.2
+++ conga/ricci/modules/storage/LVM.cpp 2006/12/12 13:26:24 1.7.2.3
@@ -83,6 +83,9 @@
+static String LVMCONF_PATH("/usr/sbin/lvmconf");
+
+
// pvs
static String PVS_OPTIONS = "pv_name,vg_name,pv_size,pv_free,pv_attr,pv_fmt,pv_uuid,vg_extent_size";
static unsigned int PVS_NAME_IDX = 0;
@@ -437,7 +440,7 @@
const list<String>& pv_paths)
{
if (clustered &&
- !clustered_available())
+ !clustered_enabled())
throw ClvmdError();
vector<String> args;
@@ -520,7 +523,7 @@
bool clustered)
{
if (clustered &&
- !clustered_available())
+ !clustered_enabled())
throw ClvmdError();
vector<String> args;
@@ -642,12 +645,39 @@
bool
-LVM::clustered_available()
+LVM::clustered_enabled()
{
String locking_type = get_locking_type();
return (locking_type == "2" || locking_type == "3");
}
+void
+LVM::enable_clustered()
+{
+ String out, err;
+ int status;
+ vector<String> args;
+ args.push_back("--enable-cluster");
+ if (utils::execute(LVMCONF_PATH, args, out, err, status, false))
+ throw command_not_found_error_msg(LVMCONF_PATH);
+ if (status != 0)
+ throw String("Failed to enable LVM's clustered locking");
+}
+
+void
+LVM::disable_clustered()
+{
+ String out, err;
+ int status;
+ vector<String> args;
+ args.push_back("--disable-cluster");
+ if (utils::execute(LVMCONF_PATH, args, out, err, status, false))
+ throw command_not_found_error_msg(LVMCONF_PATH);
+ if (status != 0)
+ throw String("Failed to disable LVM's clustered locking");
+}
+
+
bool
cluster_quorate()
{
@@ -714,7 +744,7 @@
void
LVM::check_locking()
{
- if (clustered_available()) {
+ if (clustered_enabled()) {
if (!cluster_quorate())
throw ClusterNotQuorateError();
--- conga/ricci/modules/storage/LVM.h 2006/08/10 22:53:09 1.4
+++ conga/ricci/modules/storage/LVM.h 2006/12/12 13:26:24 1.4.2.1
@@ -73,9 +73,11 @@
static void lvreduce(const String& path, long long new_size);
static void lvextend(const String& path, long long new_size);
- static void check_locking();
- static bool clustered_available();
static bool vg_clustered(const String& vgname);
+ static void check_locking();
+ static bool clustered_enabled();
+ static void enable_clustered();
+ static void disable_clustered();
};
--- conga/ricci/modules/storage/PV.cpp 2006/08/10 22:53:09 1.4
+++ conga/ricci/modules/storage/PV.cpp 2006/12/12 13:26:24 1.4.2.1
@@ -94,7 +94,7 @@
// if VG is marked as clustered, but cluster locking is not available, throw
- if (!LVM::clustered_available() &&
+ if (!LVM::clustered_enabled() &&
LVM::vg_clustered(vgname))
throw ClvmdError();
@@ -121,7 +121,7 @@
// if VG is marked as clustered, but cluster locking is not available, throw
- if (!LVM::clustered_available() &&
+ if (!LVM::clustered_enabled() &&
(LVM::vg_clustered(vgname_old) ||
LVM::vg_clustered(vgname_new)))
throw ClvmdError();
@@ -153,7 +153,7 @@
// if VG is marked as clustered, but cluster locking is not available, throw
- if (!LVM::clustered_available() &&
+ if (!LVM::clustered_enabled() &&
LVM::vg_clustered(vgname))
throw ClvmdError();
--- conga/ricci/modules/storage/StorageModule.cpp 2006/10/16 20:26:48 1.5
+++ conga/ricci/modules/storage/StorageModule.cpp 2006/12/12 13:26:24 1.5.2.1
@@ -24,6 +24,7 @@
#include "StorageModule.h"
#include "MapperFactory.h"
#include "BDFactory.h"
+#include "LVM.h"
using namespace std;
@@ -47,6 +48,9 @@
static VarMap modify_bd(const VarMap& args);
static VarMap remove_bd(const VarMap& args);
+static VarMap enable_clustered_lvm(const VarMap& args);
+static VarMap disable_clustered_lvm(const VarMap& args);
+
static ApiFcnMap build_fcn_map();
@@ -80,6 +84,9 @@
api_1_0["modify_bd"] = modify_bd;
api_1_0["remove_bd"] = remove_bd;
+ api_1_0["enable_clustered_lvm"] = enable_clustered_lvm;
+ api_1_0["disable_clustered_lvm"] = disable_clustered_lvm;
+
ApiFcnMap api_fcn_map;
api_fcn_map["1.0"] = api_1_0;
@@ -463,3 +470,21 @@
return temp_list;
}
+
+VarMap
+enable_clustered_lvm(const VarMap& args)
+{
+ LVM::enable_clustered();
+
+ VarMap ret;
+ return ret;
+}
+
+VarMap
+disable_clustered_lvm(const VarMap& args)
+{
+ LVM::disable_clustered();
+
+ VarMap ret;
+ return ret;
+}
--- conga/ricci/modules/storage/VG.cpp 2006/10/16 15:32:25 1.8
+++ conga/ricci/modules/storage/VG.cpp 2006/12/12 13:26:24 1.8.2.1
@@ -207,7 +207,7 @@
{
// if VG is marked as clustered, but cluster locking is not available, throw
if (_props.get("clustered").get_bool() &&
- !LVM::clustered_available())
+ !LVM::clustered_enabled())
throw ClvmdError();
String vgname;
@@ -252,7 +252,7 @@
{
// if VG is marked as clustered, but cluster locking is not available, throw
if (_props.get("clustered").get_bool() &&
- !LVM::clustered_available())
+ !LVM::clustered_enabled())
throw ClvmdError();
String vgname = _props.get("vgname").get_string();
@@ -278,7 +278,7 @@
bool clustered = temp.props.get("clustered").get_bool();
if (clustered &&
- !LVM::clustered_available())
+ !LVM::clustered_enabled())
throw ClvmdError();
try {
/cvs/cluster/conga/ricci/test_suite/storage/disable_clustered_lvm.xml,v --> standard output
revision 1.1.2.1
--- conga/ricci/test_suite/storage/disable_clustered_lvm.xml
+++ - 2006-12-12 13:26:29.527944000 +0000
@@ -0,0 +1,13 @@
+<?xml version="1.0" ?>
+<ricci version="1.0" function="process_batch" async="false">
+<batch>
+
+<module name="storage">
+<request sequence="1254" API_version="1.0">
+<function_call name="disable_clustered_lvm" />
+</request>
+</module>
+
+</batch>
+</ricci>
+
/cvs/cluster/conga/ricci/test_suite/storage/enable_clustered_lvm.xml,v --> standard output
revision 1.1.2.1
--- conga/ricci/test_suite/storage/enable_clustered_lvm.xml
+++ - 2006-12-12 13:26:29.742621000 +0000
@@ -0,0 +1,13 @@
+<?xml version="1.0" ?>
+<ricci version="1.0" function="process_batch" async="false">
+<batch>
+
+<module name="storage">
+<request sequence="1254" API_version="1.0">
+<function_call name="enable_clustered_lvm" />
+</request>
+</module>
+
+</batch>
+</ricci>
+

* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: rmccabe @ 2009-01-26 17:01 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2009-01-26 17:01:22
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros validate_create_gulm.js
luci/plone-custom: conga_ajax.js update_hostinfo.js
validate_auth.js
luci/site/luci/Extensions: HelperFunctions.py LuciDB.py
cluster_adapters.py
Log message:
Fix problems that prevented MSIE from working correctly.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.107&r2=1.45.2.108
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.37&r2=1.90.2.38
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_create_gulm.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.4&r2=1.1.4.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/conga_ajax.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.3&r2=1.2.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/update_hostinfo.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/plone-custom/validate_auth.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/HelperFunctions.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.8&r2=1.4.2.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.10&r2=1.1.4.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.47&r2=1.120.2.48
--- conga/conga.spec.in.in 2008/09/29 17:11:13 1.45.2.107
+++ conga/conga.spec.in.in 2009/01/26 17:01:19 1.45.2.108
@@ -323,6 +323,11 @@
### changelog ###
%changelog
+* Thu Jan 22 2009 Ryan McCabe <rmccabe@redhat.com> 0.12.2-1
+- Fix bz452704 (Unable to add existing RHEL4 clusters with RHEL5.2 Luci)
+- Fix bz467464 (adding qdisk to existing cluster fails to update cman entry in cluster.conf)
+- Fix bz460038 (Conga fs resource error when editing service)
+
* Thu Sep 25 2008 Ryan McCabe <rmccabe@redhat.com> 0.12.1-7
- Fix a typo in the fix for bz459562
--- conga/luci/cluster/form-macros 2008/08/04 21:49:30 1.90.2.37
+++ conga/luci/cluster/form-macros 2009/01/26 17:01:21 1.90.2.38
@@ -231,6 +231,7 @@
<script type="text/javascript" src="/luci/homebase/validate_cluster_add.js"></script>
<script type="text/javascript" src="validate_create_gulm.js"></script>
<script type="text/javascript" src="update_hostinfo.js"></script>
+ <script type="text/javascript" src="validate_auth.js"></script>
<script type="text/javascript">
set_page_title('Luci ??? cluster ??? Deploy a cluster');
--- conga/luci/cluster/validate_create_gulm.js 2008/06/13 18:37:46 1.1.4.4
+++ conga/luci/cluster/validate_create_gulm.js 2009/01/26 17:01:21 1.1.4.5
@@ -67,12 +67,16 @@
function validate_cluster_create(form) {
try {
if (!form.cluster_os.value) {
- get_system_info(null, null, 1);
+ var err_div = document.getElementById('errmsgsdiv');
+ var ret_div = document.getElementById('retmsgsdiv');
+
+ validate_auth_form(form, ret_div, err_div);
}
} catch (e) {
}
if (form.cluster_os.value) {
+ form.pagetype.value = '6';
if (form.lockmanager && !form.lockmanager[0].checked) {
var errors = check_gulm_lkserv();
if (error_dialog(errors))
--- conga/luci/plone-custom/conga_ajax.js 2008/01/23 04:44:31 1.2.2.3
+++ conga/luci/plone-custom/conga_ajax.js 2009/01/26 17:01:21 1.2.2.4
@@ -12,15 +12,24 @@
function get_xmlhttp_obj() {
xmlHttp_object = false;
- if (!xmlHttp_object && typeof XMLHttpRequest != 'undefined') {
+ if (!xmlHttp_object || typeof XMLHttpRequest != 'undefined') {
try {
xmlHttp_object = new XMLHttpRequest();
+ if (!xmlHttp_object || typeof XMLHttpRequest != 'undefined') {
+ throw "no XMLHttpRequest";
+ }
} catch (e0) {
try {
xmlHttp_object = new ActiveXObject("Msxml2.XMLHTTP");
+ if (!xmlHttp_object || typeof XMLHttpRequest != 'undefined') {
+ throw "no Msxml2.XMLHTTP";
+ }
} catch (e) {
try {
xmlHttp_object = new ActiveXObject("Microsoft.XMLHTTP");
+ if (!xmlHttp_object || typeof XMLHttpRequest != 'undefined') {
+ throw "No Microsoft.XMLHTTP";
+ }
} catch (e2) {
xmlHttp_object = false;
}
--- conga/luci/plone-custom/update_hostinfo.js 2008/06/13 18:37:46 1.1.2.2
+++ conga/luci/plone-custom/update_hostinfo.js 2009/01/26 17:01:21 1.1.2.3
@@ -108,7 +108,7 @@
var pwd_elem = document.getElementById('__SYSTEM' + offset + ':Passwd');
var fp_elem = document.getElementById('__SYSTEM' + offset + 'Fingerprint');
try {
- cluster_os = cur_node_props['OS'];
+ cluster_os = cur_node_props['os'];
} catch (e) {
}
--- conga/luci/plone-custom/validate_auth.js 2008/01/23 04:44:32 1.1.2.1
+++ conga/luci/plone-custom/validate_auth.js 2009/01/26 17:01:21 1.1.2.2
@@ -11,7 +11,7 @@
var msg_div_id = null;
function validate_auth_form(form, msgarea_id, errarea_id) {
- var url = '/luci/cluster?pagetype=1003';
+ var url = '/luci/cluster/index_html?pagetype=1003'
var errors = [];
var systems_list = get_systems_list(errors);
@@ -32,6 +32,7 @@
msg_div_id = msgarea_id;
err_div_id = errarea_id;
+ form.pagetype.value = '1003';
initiate_async_post(form, url, validate_auth_callback);
}
@@ -59,18 +60,18 @@
try {
obj = obj.responseXML.getElementsByTagName('dict')[0];
} catch (e) {
- alert('Received a malformed response from the luci server.');
+ alert('Received a malformed response from the luci server:' + e);
return;
}
var elem = obj.getElementsByTagName('dict');
var node_props = [];
for (var i = 0 ; i < elem.length ; i++) {
+ var cur_node_props = [];
var elem_type = elem[i].getAttribute('name');
if (elem_type != 'clusternodes') {
continue;
}
- var cur_node_props = [];
var var_elem = elem[i].getElementsByTagName('var');
for (var j = 0 ; j < var_elem.length ; j++) {
try {
@@ -81,6 +82,7 @@
continue;
}
}
+
try {
node_props[cur_node_props['hostname']] = cur_node_props;
} catch (e) {
@@ -91,6 +93,7 @@
var errors = [];
var msgs = [];
var systems_list = get_systems_list_offsets(errors);
+ var os_str = null;
for (var i = 0 ; i < systems_list.length ; i++) {
var cur_node_props = node_props[systems_list[i][0]];
@@ -114,6 +117,29 @@
has_valid_fp = true;
}
+ try {
+ os_str = cur_node_props['os'];
+ if (os_str !== null) {
+ var os_in = document.getElementById('cluster_os');
+ if (os_in !== null) {
+ var os_version = 'rhel5';
+ if (os_str.match(/Nahant/)) {
+ var lsdiv = document.getElementById('lockserver_div');
+ if (lsdiv !== null) {
+ var ls_cell = document.getElementById('lockserver_dialog');
+ if (ls_cell !== null) {
+ lsdiv.id = null;
+ ls_cell.appendChild(lsdiv);
+ }
+ }
+ os_version = 'rhel4';
+ }
+ os_in.value = os_version;
+ }
+ }
+ } catch (e1) {
+ }
+
if (cur_node_props['authed'] != 'True') {
errors.push('Authentication for host ' + cur_sys_name + ' failed.');
clr_form_err(addr_elem);
@@ -129,7 +155,6 @@
}
}
} catch (e) {
- alert(e);
}
}
--- conga/luci/site/luci/Extensions/HelperFunctions.py 2008/07/23 19:55:41 1.4.2.8
+++ conga/luci/site/luci/Extensions/HelperFunctions.py 2009/01/26 17:01:21 1.4.2.9
@@ -9,10 +9,9 @@
import threading
def resolveOSType(os_str):
- if not os_str or os_str.find('Tikanga') != (-1) or os_str.find('Zod') != (-1) or os_str.find('Moonshine') != (-1) or os_str.find('Werewolf') != (-1) or os_str.find('Sulphur') != (-1):
- return 'rhel5'
- else:
+ if os_str and os_str.find('Nahant') != (-1):
return 'rhel4'
+ return 'rhel5'
def add_commas(self, str1, str2):
return '%s; %s' % (str1, str2)
@@ -179,7 +178,7 @@
s = {
'hostname' : hostname,
- 'OS' : OS,
+ 'os' : OS,
'cluname' : cluname,
'key_fp' : key_fp,
'clualias' : cluali,
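
For context, the simplified resolveOSType() above now keys only on the RHEL4 codename 'Nahant' and treats everything else, including unknown strings, as RHEL5, so newer release codenames no longer need to be enumerated. A quick usage sketch; the release strings are assumed /etc/redhat-release style examples:

    def resolveOSType(os_str):
        if os_str and os_str.find('Nahant') != (-1):
            return 'rhel4'
        return 'rhel5'

    print resolveOSType('Red Hat Enterprise Linux AS release 4 (Nahant Update 7)')  # rhel4
    print resolveOSType('Red Hat Enterprise Linux Server release 5.3 (Tikanga)')    # rhel5
    print resolveOSType(None)                                                       # rhel5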
--- conga/luci/site/luci/Extensions/LuciDB.py 2008/08/04 21:49:30 1.1.4.10
+++ conga/luci/site/luci/Extensions/LuciDB.py 2009/01/26 17:01:22 1.1.4.11
@@ -865,7 +865,6 @@
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('GRA8: cluster name is none for %s' \
% ricci_hostname)
- raise Exception, '%s not in a cluster' % ricci_hostname
cur_alias = str(clu_info[1]).strip().lower()
if not cur_alias:
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2008/09/17 06:29:54 1.120.2.47
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2009/01/26 17:01:22 1.120.2.48
@@ -183,7 +183,7 @@
cluster_os = cur_host_os
add_cluster['cluster_os'] = cur_host_os
- elif cluster_os != cur_host_os:
+ elif False and cluster_os != cur_host_os:
cur_system['errors'] = True
incomplete = True
@@ -304,8 +304,6 @@
% (request['URL'], CLUSTER_CONFIG, clustername))
def validateAddClusterNode(self, request):
- import time
-
try:
request.SESSION.delete('add_node')
except:
@@ -479,7 +477,7 @@
continue
cur_host_os = resolveOSType(rc.os())
- if cluster_os is not None and cluster_os != cur_host_os:
+ if False and cluster_os is not None and cluster_os != cur_host_os:
cur_system['errors'] = True
incomplete = True
@@ -1198,8 +1196,7 @@
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('VP1: no handler for page type %d' % pagetype)
return (False, {})
- else:
- return formValidators[pagetype](self, request)
+ return formValidators[pagetype](self, request)
def clusterTaskProcess(self, model, request):
fvar = GetReqVars(request, [ 'task', 'clustername', 'URL' ])

* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: rmccabe @ 2007-11-06 19:58 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2007-11-06 19:58:01
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros
luci/site/luci/Extensions: LuciClusterInfo.py
cluster_adapters.py
Log message:
Fix bz337041: Add option to not fail back service
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&r1=1.88&r2=1.89
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.215&r2=1.216
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&r1=1.15&r2=1.16
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.273&r2=1.274
--- conga/conga.spec.in.in 2007/10/09 21:41:46 1.88
+++ conga/conga.spec.in.in 2007/11/06 19:58:00 1.89
@@ -298,6 +298,8 @@
- Fixed bz249619 (luci "Manage Systems and Clusters" display of clusters and storage systems is cascaded)
- Fixed bz315631 (conga doesn't handle the cluster restart operation properly)
- Fixed bz320411 (conga should use the node name in cluster.conf and not FQDN for fencing)
+- Fixed bz253727 (RFE: graphical view should be default in partiton tables view)
+- Fixed bz337041 (Add option to not fail-back service)
* Mon Aug 27 2007 Ryan McCabe <rmccabe@redhat.com> 0.10.0-6
- Fixed bz253783
--- conga/luci/cluster/form-macros 2007/09/27 21:42:15 1.215
+++ conga/luci/cluster/form-macros 2007/11/06 19:58:00 1.216
@@ -4867,6 +4867,13 @@
</strong>
</div>
+ <tal:block tal:define="nofailback python:fdom.get('nofailback')">
+ <div class="formordered"
+ tal:condition="python:nofailback is True">
+ <h4>Services running in this failover domain will not be failed back.
+ </div>
+ </tal:block>
+
<div class="fdomrestricted">
<h4>This Failover Domain is <span tal:replace="python: fdom['restricted'] == True and 'Restricted' or 'Unrestricted'"/></h4>
</div>
@@ -4930,7 +4937,7 @@
<table class="systemsTable" width="100%">
<thead class="systemsTable">
<tr class="systemsTable">
- <td><strong>Failover Domain Name</strong></td>
+ <td width="60%"><strong>Failover Domain Name</strong></td>
<td>
<input type="text" name="name"
tal:attributes="value fdom/name | nothing" />
@@ -4952,6 +4959,13 @@
</td>
</tr>
<tr class="systemsTable">
+ <td>Do not fail back services in this domain</td>
+ <td>
+ <input type="checkbox" name="nofailback"
+ tal:attributes="checked python: (fdom and 'nofailback' in fdom and fdom['nofailback'] == '1') and 'checked' or ''" />
+ </td>
+ </tr>
+ <tr class="systemsTable">
<td class="systemsTable" colspan="2">
<p></p>
<p class="reshdr">Failover domain membership</p>
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py 2007/09/21 16:06:52 1.15
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py 2007/11/06 19:58:00 1.16
@@ -453,6 +453,13 @@
else:
fhash['prioritized'] = '0'
+
+ nofailback = fdom.getAttribute('nofailback')
+ if nofailback is not None and (nofailback == 'true' or nofailback == '1'):
+ fhash['nofailback'] = '1'
+ else:
+ fhash['nofailback'] = '0'
+
restricted_attr = fdom.getAttribute('restricted')
if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
fhash['restricted'] = '1'
@@ -504,6 +511,12 @@
else:
fdom_map['restricted'] = False
+ nofailback = fdom.getAttribute('nofailback')
+ if nofailback is not None and (nofailback == 'true' or nofailback == '1'):
+ fdom_map['nofailback'] = True
+ else:
+ fdom_map['nofailback'] = False
+
nodelist = list()
for node in fdom.getChildren():
nodesmap = {}
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/10/09 21:31:05 1.273
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/11/06 19:58:00 1.274
@@ -1931,6 +1931,12 @@
except:
restricted = False
+ nofailback = False
+ try:
+ nofailback = request.form.has_key('nofailback')
+ except:
+ nofailback = False
+
oldname = fvar['oldname']
if oldname is None or oldname != name:
@@ -1964,6 +1970,11 @@
else:
fdom.addAttribute('restricted', '0')
+ if nofailback:
+ fdom.addAttribute('nofailback', '1')
+ else:
+ fdom.addAttribute('nofailback', '0')
+
for i in model.getNodeNames():
if request.form.has_key(i):
fdn = FailoverDomainNode()
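
The net effect on cluster.conf is a nofailback attribute on each failoverdomain element. A hedged sketch of the resulting fragment, parsed with minidom; the attribute name comes from the adapter code above, while the surrounding element layout and node name are illustrative:

    from xml.dom import minidom

    conf = minidom.parseString(
        '<failoverdomains>'
        '<failoverdomain name="fd1" ordered="0" restricted="1" nofailback="1">'
        '<failoverdomainnode name="node1.example.com" priority="1"/>'
        '</failoverdomain>'
        '</failoverdomains>')
    fdom = conf.getElementsByTagName('failoverdomain')[0]
    print fdom.getAttribute('nofailback')  # '1': services are not failed back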

* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: rmccabe @ 2007-09-21 3:24 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL4
Changes by: rmccabe at sourceware.org 2007-09-21 03:24:12
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros
luci/site/luci/Extensions: FenceHandler.py
Log message:
Fix bz286951
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.67.2.16&r2=1.67.2.17
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.176.2.21&r2=1.176.2.22
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/FenceHandler.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.17.2.3&r2=1.17.2.4
--- conga/conga.spec.in.in 2007/08/29 01:02:00 1.67.2.16
+++ conga/conga.spec.in.in 2007/09/21 03:24:12 1.67.2.17
@@ -319,7 +319,7 @@
%changelog
-* Tue Aug 28 2007 Ryan McCabe <rmccabe@redhat.com> 0.11.0-1
+* Thu Sep 20 2007 Ryan McCabe <rmccabe@redhat.com> 0.11.0-1
- Merge in fixes from the RHEL5 code base.
- Fix bz238727 (Conga provides no way to remove a dead node from a cluster)
- Fix bz241414 (Installation using Conga shows "error" in message during reboot cycle.)
@@ -330,9 +330,10 @@
- Fix bz253906 (Quorum disk page: Error when trying to continue w/o a heuristic)
- Fix bz253784
- Fix bz253901 (No node IDs generated on new RHEL4 / DLM cluster)
+- Fix bz286951 (conga passes fence_scsi nodename, where as it accepts only node)
- Related: bz241417
- Resolves: bz238727, bz241414, bz238656, bz245200, bz227723
-- Resolves: bz253905, bz253906, bz253784, bz253901
+- Resolves: bz253905, bz253906, bz253784, bz253901, bz286951
* Tue Apr 30 2007 Ryan McCabe <rmccabe@redhat.com> 0.9.1-9
- Fix bz238656 (conga does not set the "nodename" attribute for manual fencing)
--- conga/luci/cluster/form-macros 2007/08/24 21:55:42 1.176.2.21
+++ conga/luci/cluster/form-macros 2007/09/21 03:24:12 1.176.2.22
@@ -3017,8 +3017,8 @@
<tr>
<td>Node name</td>
<td>
- <input type="text" name="nodename" disabled="disabled"
- tal:attributes="value request/nodename | nothing" />
+ <input type="text" name="node" disabled="disabled"
+ tal:attributes="value request/node | nothing" />
</td>
</tr>
</table>
--- conga/luci/site/luci/Extensions/FenceHandler.py 2007/08/09 21:28:52 1.17.2.3
+++ conga/luci/site/luci/Extensions/FenceHandler.py 2007/09/21 03:24:12 1.17.2.4
@@ -124,7 +124,7 @@
'fence_egenera': ['lpan', 'pserver'],
'fence_bullpap': ['domain'],
'fence_xvm': ['domain'],
- 'fence_scsi': ['nodename'],
+ 'fence_scsi': ['node'],
'fence_ilo': [],
'fence_ipmilan': [],
'fence_drac': [],
@@ -1118,10 +1118,10 @@
errors = list()
try:
- nodename = form['nodename'].strip()
+ nodename = form['node'].strip()
if not nodename:
raise Exception, 'blank'
- fenceinst.addAttribute('nodename', nodename)
+ fenceinst.addAttribute('node', nodename)
except Exception, e:
errors.append(FI_PROVIDE_NODENAME)
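
In cluster.conf terms, fence_scsi device instances now carry a node attribute rather than nodename. A small minidom sketch; only the attribute name comes from the hunk above, the rest is illustrative:

    from xml.dom import minidom

    doc = minidom.parseString('<device name="scsi_fence"/>')
    doc.documentElement.setAttribute('node', 'node1.example.com')  # was 'nodename'
    print doc.documentElement.toxml()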

* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: rmccabe @ 2007-08-27 18:38 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2007-08-27 18:38:00
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros validate_config_multicast.js
luci/site/luci/Extensions: LuciClusterInfo.py
cluster_adapters.py
luci/site/luci/Extensions/ClusterModel: ModelBuilder.py
luci/site/luci/var: Data.fs
Log message:
- Fix 253994: Cannot specify multicast address for a cluster
- Update the luci Zope DB
- Update the changelog
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.58&r2=1.45.2.59
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.30&r2=1.90.2.31
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_config_multicast.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.3.2.1&r2=1.3.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.7&r2=1.1.4.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.38&r2=1.120.2.39
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.4&r2=1.1.4.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/var/Data.fs.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.24&r2=1.15.2.25
--- conga/conga.spec.in.in 2007/08/23 04:57:49 1.45.2.58
+++ conga/conga.spec.in.in 2007/08/27 18:36:43 1.45.2.59
@@ -311,9 +311,11 @@
### changelog ###
%changelog
* Wed Aug 22 2007 Ryan McCabe <rmccabe@redhat.com> 0.10.0-6
-- Fixed bz253783 (ricci is vulnerable to a connect DoS attack)
+- Fixed bz253783
- Fixed bz253914 (conga doesn't allow you to reuse nfs export and nfs client resources)
-- Resolves: bz253783, bz253914
+- Fixed bz254038 (Impossible to set many valid quorum disk configurations via conga)
+- Fixed bz253994 (Cannot specify multicast address for a cluster)
+- Resolves: bz253783, bz253914, bz254038, bz253994
* Mon Aug 20 2007 Ryan McCabe <rmccabe@redhat.com> 0.10.0-5
- Fixed bz249291 (delete node task fails to do all items listed in the help document)
--- conga/luci/cluster/form-macros 2007/08/09 20:26:11 1.90.2.30
+++ conga/luci/cluster/form-macros 2007/08/27 18:36:45 1.90.2.31
@@ -1082,7 +1082,7 @@
<tr class="systemsTable">
<td class="systemsTable">
<input type="radio" name="mcast" value="False"
- onClick="disable_mcast('mcast_address');"
+ onClick="disable_mcast('mcast_address', 'mcast_interface');"
tal:attributes="checked python: clusterinfo['is_mcast'] != 'True'"
/>
<tal:block tal:condition="python:os_version == 'rhel4'">
@@ -1097,7 +1097,7 @@
<tr class="systemsTable">
<td class="systemsTable">
<input type="radio" name="mcast" value="True"
- onClick="enable_mcast('mcast_address');"
+ onClick="enable_mcast('mcast_address', 'mcast_interface');"
tal:attributes="checked python: clusterinfo['is_mcast'] == 'True'"
/>
@@ -1115,11 +1115,23 @@
Multicast address
</td>
<td class="systemsTable">
- <input type="text" name="mcast_address" id="mcast_address"
+ <input type="text"
+ name="mcast_address" id="mcast_address"
tal:attributes="
- disabled python: clusterinfo['is_mcast'] != 'True' and '1' or '0';
- value python: clusterinfo['is_mcast'] == 'True' and clusterinfo['mcast_addr'] or '';"
- />
+ disabled not:clusterinfo/mcast_addr;
+ value clusterinfo/mcast_addr |nothing" />
+ </td>
+ </tr>
+ <tr class="systemsTable">
+ <td class="systemsTable">
+ Multicast network interface <span tal:condition="python:os_version != 'rhel4'">(optional)</span>
+ </td>
+ <td class="systemsTable">
+ <input type="text"
+ name="mcast_interface" id="mcast_interface"
+ tal:attributes="
+ disabled not:clusterinfo/mcast_addr;
+ value clusterinfo/mcast_interface |nothing" />
</td>
</tr>
</tbody>
--- conga/luci/cluster/validate_config_multicast.js 2007/08/08 21:18:46 1.3.2.1
+++ conga/luci/cluster/validate_config_multicast.js 2007/08/27 18:36:45 1.3.2.2
@@ -8,22 +8,36 @@
*/
var prev_mcast_str = '';
+var prev_mcast_if = '';
-function disable_mcast(addrId) {
- addrObj = document.getElementById(addrId);
- if (!addrObj || addrObj.disabled)
- return;
- addrObj.disabled = true;
- prev_mcast_str = addrObj.value;
- addrObj.value = '';
+function disable_mcast(addrId, ifId) {
+ var addrObj = document.getElementById(addrId);
+ if (addrObj) {
+ prev_mcast_str = addrObj.value;
+ addrObj.disabled = true;
+ addrObj.value = '';
+ }
+
+ var ifObj = document.getElementById(ifId);
+ if (ifObj) {
+ prev_mcast_if = ifObj.value;
+ ifObj.disabled = true;
+ ifObj.value = '';
+ }
}
-function enable_mcast(addrId) {
- addrObj = document.getElementById(addrId);
- if (!addrObj || !addrObj.disabled)
- return;
- addrObj.disabled = false;
- addrObj.value = prev_mcast_str;
+function enable_mcast(addrId, ifId) {
+ var addrObj = document.getElementById(addrId);
+ if (addrObj) {
+ addrObj.disabled = false;
+ addrObj.value = prev_mcast_str;
+ }
+
+ var ifObj = document.getElementById(ifId);
+ if (ifObj) {
+ ifObj.disabled = false;
+ ifObj.value = prev_mcast_if;
+ }
}
function validate_form(form) {
@@ -52,6 +66,7 @@
clr_form_err(form.mcast[0]);
clr_form_err(form.mcast[1]);
clr_form_err(form.mcast_address);
+ clr_form_err(form.mcast_interface);
}
if (mcast == 1) {
@@ -66,6 +81,15 @@
}
clr_form_err(form.mcast_address);
}
+
+ if (form.cluster_version && form.cluster_version.value == 'rhel4') {
+ if (!form.mcast_interface || str_is_blank(form.mcast_interface.value)) {
+ set_form_err(form.mcast_interface);
+ errors.push('No multicast interface was given.');
+ } else {
+ clr_form_err(form.mcast_interface);
+ }
+ }
}
if (error_dialog(errors))
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py 2007/08/08 21:14:38 1.1.4.7
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py 2007/08/27 18:36:45 1.1.4.8
@@ -618,10 +618,12 @@
is_mcast = model.isMulticast()
if is_mcast:
clumap['mcast_addr'] = model.getMcastAddr()
+ clumap['mcast_interface'] = model.getMcastInterface()
clumap['is_mcast'] = 'True'
else:
+ clumap['mcast_addr'] = None
+ clumap['mcast_interface'] = None
clumap['is_mcast'] = 'False'
- clumap['mcast_addr'] = '1.2.3.4'
clumap['gulm'] = False
else:
#-------------
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/08/23 19:09:21 1.120.2.38
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/08/27 18:36:45 1.120.2.39
@@ -974,6 +974,14 @@
errors.append('An invalid multicast selection was made')
return (False, {'errors': errors})
+ mcast_interface = None
+ if form.has_key('mcast_interface'):
+ mcast_interface = form['mcast_interface'].strip()
+
+ if mcast_manual is True and form.has_key('cluster_version') and form['cluster_version'].strip() == 'rhel4' and not mcast_interface:
+ errors.append('No multicast interface was specified')
+ return (False, {'errors': errors})
+
if mcast_manual is True:
import socket
try:
@@ -991,13 +999,14 @@
else:
addr_str = None
- if (addr_str is None and mcast_manual is not True) or (mcast_manual is True and addr_str == model.getMcastAddr()):
- errors.append('No multicast configuration changes were made')
- return (False, {'errors': errors})
-
try:
- model.usesMulticast = True
- model.mcast_address = addr_str
+ if not addr_str:
+ if mcast_interface:
+ errors.append('A multicast interface was specified, but no multicast address was given')
+ return (False, {'errors': errors})
+ model.del_cluster_multicast()
+ else:
+ model.set_cluster_multicast(addr_str, mcast_if=mcast_interface)
except Exception, e:
if LUCI_DEBUG_MODE is True:
luci_log.debug('Error updating mcast properties: %r %s' \
--- conga/luci/site/luci/Extensions/ClusterModel/ModelBuilder.py 2007/07/30 05:06:47 1.1.4.4
+++ conga/luci/site/luci/Extensions/ClusterModel/ModelBuilder.py 2007/08/27 18:36:46 1.1.4.5
@@ -121,6 +121,8 @@
self.filename = filename
self.lock_type = DLM_TYPE
self.mcast_address = mcast_addr
+ self.mcast_interface = None
+ self.mcast_ptr = None
self.cluster_ptr = None
self.GULM_ptr = None
self.CMAN_ptr = None
@@ -269,7 +271,10 @@
if self.usesMulticast is True:
mcast = Multicast()
mcast.addAttribute("addr", self.mcast_address)
+ if self.mcast_interface is not None:
+ mcast.addAttribute("interface", self.mcast_interface)
cman.addChild(mcast)
+ self.mcast_ptr = mcast
fds = FenceDevices()
obj_tree.addChild(fds)
@@ -513,8 +518,9 @@
self.clusternodes_ptr.addChild(clusternode)
if self.usesMulticast is True:
mcast = Multicast()
- mcast.addAttribute("addr", self.mcast_address)
- mcast.addAttribute("interface", "eth0") #eth0 is the default
+ mcast.addAttribute('addr', self.mcast_address)
+ if self.mcast_interface is not None:
+ mcast.addAttribute('interface', self.mcast_interface)
clusternode.addChild(mcast)
self.isModified = True
@@ -676,17 +682,23 @@
children = self.CMAN_ptr.getChildren()
for child in children:
if child.getTagName() == MCAST_STR:
+ self.mcast_ptr = child
addr = child.getAttribute("addr")
if addr is not None:
self.mcast_address = addr
- return
else: #What a mess! a multicast tag, but no addr attribute
self.mcast_address = ""
- return
+ mcastif = child.getAttribute("interface")
+ if mcastif is not None:
+ self.mcast_interface = mcastif
+ return
def getMcastAddr(self):
return self.mcast_address
+ def getMcastInterface(self):
+ return self.mcast_interface
+
def isQuorumd(self):
return self.usesQuorumd
@@ -871,6 +883,8 @@
self.usesMulticast = None
self.mcast_address = None
+ self.mcast_interface = None
+ self.mcast_ptr = None
#reset self.lock_type
self.lock_type = GULM_TYPE
@@ -914,61 +928,84 @@
#set modified
self.isModified = True
- def swap_multicast_state(self, address=None):
- if self.usesMulticast is True:
- #First, eliminate <multicast> tag
- if self.CMAN_ptr is not None:
- children = self.CMAN_ptr.getChildren()
- if len(children) > 0:
- for child in children:
- if child.getTagName() == MCAST_STR:
- self.CMAN_ptr.removeChild(child)
- break
- found_one = True
- while found_one is True:
- found_one = False
- nodes = self.clusternodes_ptr.getChildren()
- for node in nodes:
- node_children = node.getChildren()
- for node_child in node_children:
- if node_child.getTagName() == MCAST_STR:
- node.removeChild(node_child)
- found_one = True
- break
- if found_one is True:
- break
+ def set_nodes_multicast(self, mcast_addr, mcast_if=None):
+ for node in self.getNodes():
+ new_mcast_tag = True
+ mcast = None
+
+ for node_child in node.getChildren():
+ if node_child.getTagName() == MCAST_STR:
+ mcast = node_child
+ new_mcast_tag = False
+ break
- self.usesMulticast = False
- self.mcast_address = None
- self.isModified = True
+ if mcast is None:
+ mcast = Multicast()
+
+ mcast.addAttribute('addr', mcast_addr)
+ if mcast_if:
+ mcast.addAttribute('interface', mcast_if)
+ else:
+ try:
+ mcast.removeAttribute('interface')
+ except:
+ pass
+ if new_mcast_tag is True:
+ node.addChild(mcast)
+
+ def set_cluster_multicast(self, mcast_addr, mcast_if=None):
+ if not self.CMAN_ptr:
+ return False
+ if self.mcast_ptr is None:
+ mcast = Multicast()
+ self.CMAN_ptr.addChild(mcast)
+ self.mcast_ptr = mcast
else:
- if self.CMAN_ptr is not None:
- mcast = Multicast()
- mcast.addAttribute("addr", address)
- self.CMAN_ptr.addChild(mcast)
+ mcast = self.mcast_ptr
- has_one = False
- nodes = self.getNodes()
- for node in nodes:
- has_one = False
- node_children = node.getChildren()
- for node_child in node_children:
- if node_child.getTagName() == MCAST_STR:
- has_one = True
- break
- if has_one is False:
- mcast = Multicast()
- mcast.addAttribute("addr", address)
- mcast.addAttribute("interface", "eth0")
- node.addChild(mcast)
+ mcast.addAttribute('addr', mcast_addr)
+ if mcast_if is not None:
+ mcast.addAttribute('interface', mcast_if)
+ else:
+ try:
+ mcast.removeAttribute('interface')
+ except:
+ pass
+ self.mcast_address = mcast_addr
+ self.mcast_interface = mcast_if
+ self.usesMulticast = True
+ self.set_nodes_multicast(mcast_addr, mcast_if=mcast_if)
+ self.isModified = True
- self.mcast_address = address
- self.usesMulticast = True
- self.isModified = True
+ def del_nodes_multicast(self):
+ for node in self.getNodes():
+ for node_child in node.getChildren():
+ if node_child.getTagName() == MCAST_STR:
+ node.removeChild(node_child)
+ break
+
+ def del_cluster_multicast(self):
+ if self.CMAN_ptr is None:
+ return False
+ for child in self.CMAN_ptr.getChildren():
+ if child.getTagName() == MCAST_STR:
+ self.CMAN_ptr.removeChild(child)
+
+ self.mcast_ptr = None
+ self.usesMulticast = False
+ self.mcast_address = None
+ self.mcast_interface = None
+ self.del_nodes_multicast()
+ self.isModified = True
+ def swap_multicast_state(self, address=None, mcast_if=None):
+ if self.usesMulticast is True:
+ self.del_cluster_multicast()
+ else:
+ self.set_cluster_multicast(address, mcast_if)
def check_fence_daemon(self):
if self.GULM_ptr is None and self.fence_daemon_ptr is None:
Binary files /cvs/cluster/conga/luci/site/luci/var/Data.fs 2007/08/23 04:57:49 1.15.2.24 and /cvs/cluster/conga/luci/site/luci/var/Data.fs 2007/08/27 18:36:46 1.15.2.25 differ
rcsdiff: /cvs/cluster/conga/luci/site/luci/var/Data.fs: diff failed
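
The ModelBuilder changes above boil down to maintaining a single <multicast> child under <cman> (mirrored on each cluster node), with the interface attribute now optional instead of hardwired to eth0. A sketch of the fragment set_cluster_multicast() produces; the address is an assumed example:

    from xml.dom import minidom

    doc = minidom.parseString('<cman/>')
    mcast = doc.createElement('multicast')
    mcast.setAttribute('addr', '239.192.1.1')  # assumed example address
    mcast.setAttribute('interface', 'eth0')    # only set when the optional field is given
    doc.documentElement.appendChild(mcast)
    print doc.documentElement.toxml()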

* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: rmccabe @ 2007-08-23 18:47 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL4
Changes by: rmccabe at sourceware.org 2007-08-23 18:47:34
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros validate_config_qdisk.js
Log message:
Fix 253905: Quorum disk page: Minimum score does not need to be required
Fix 253904: [minor] Quorum disk page: Interval + tko should be together
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.67.2.14&r2=1.67.2.15
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.176.2.18&r2=1.176.2.19
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/validate_config_qdisk.js.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.5.2.2&r2=1.5.2.3
--- conga/conga.spec.in.in 2007/08/11 04:22:03 1.67.2.14
+++ conga/conga.spec.in.in 2007/08/23 18:47:32 1.67.2.15
@@ -326,8 +326,11 @@
- Fix bz238656 (conga does not set the "nodename" attribute for manual fencing)
- Fix bz245200 (Conga needs to support Internet Explorer 6.0 and later)
- Fix bz227723 (Entering bad password when creating a new cluster = UnboundLocalError: local variable 'e' referenced before)
+- Fix bz253904 (Quorum disk page: Interval + tko should be together)
+- Fix bz253905 (Quorum disk page: Minimum score does not need to be required)
- Related: bz241417
- Resolves: bz238727, bz241414, bz238656, bz245200, bz227723
+- Resolves: bz253904, bz253905
* Tue Apr 30 2007 Ryan McCabe <rmccabe@redhat.com> 0.9.1-9
- Fix bz238656 (conga does not set the "nodename" attribute for manual fencing)
--- conga/luci/cluster/form-macros 2007/08/09 21:28:50 1.176.2.18
+++ conga/luci/cluster/form-macros 2007/08/23 18:47:34 1.176.2.19
@@ -1194,18 +1194,18 @@
</tr>
<tr class="systemsTable">
- <td class="systemsTable">Votes</td>
+ <td class="systemsTable">TKO</td>
<td class="systemsTable">
- <input type="text" name="votes"
- tal:attributes="value clusterinfo/votes | nothing" />
+ <input type="text" name="tko"
+ tal:attributes="value clusterinfo/tko | nothing" />
</td>
</tr>
<tr class="systemsTable">
- <td class="systemsTable">TKO</td>
+ <td class="systemsTable">Votes</td>
<td class="systemsTable">
- <input type="text" name="tko"
- tal:attributes="value clusterinfo/tko | nothing" />
+ <input type="text" name="votes"
+ tal:attributes="value clusterinfo/votes | nothing" />
</td>
</tr>
--- conga/luci/cluster/validate_config_qdisk.js 2007/08/09 21:28:50 1.5.2.2
+++ conga/luci/cluster/validate_config_qdisk.js 2007/08/23 18:47:34 1.5.2.3
@@ -180,17 +180,6 @@
clr_form_err(form.interval);
}
- if (!form.votes || str_is_blank(form.votes.value)) {
- errors.push('No votes setting was given.');
- set_form_err(form.votes);
- } else {
- if (!is_valid_int(form.votes.value, 1, null)) {
- errors.push('Votes values must be greater than 0.');
- set_form_err(form.votes);
- } else
- clr_form_err(form.votes);
- }
-
if (!form.tko || str_is_blank(form.tko.value)) {
errors.push('No TKO setting was given.');
set_form_err(form.tko);
@@ -202,15 +191,15 @@
clr_form_err(form.tko);
}
- if (!form.min_score || str_is_blank(form.min_score.value)) {
- errors.push('No minimum score setting was given.');
- set_form_err(form.min_score);
+ if (!form.votes || str_is_blank(form.votes.value)) {
+ errors.push('No votes setting was given.');
+ set_form_err(form.votes);
} else {
- if (!is_valid_int(form.min_score.value, 1, null)) {
- errors.push('Minimum score values must be greater than 0.');
- set_form_err(form.min_score);
+ if (!is_valid_int(form.votes.value, 1, null)) {
+ errors.push('Votes values must be greater than 0.');
+ set_form_err(form.votes);
} else
- clr_form_err(form.min_score);
+ clr_form_err(form.votes);
}
var no_dev = !form.device || str_is_blank(form.device.value);
@@ -226,6 +215,23 @@
if (err)
errors = errors.concat(err);
}
+
+ if (hnum > 1) {
+ if (!form.min_score || str_is_blank(form.min_score.value)) {
+ errors.push('No minimum score setting was given.');
+ set_form_err(form.min_score);
+ } else {
+ if (!is_valid_int(form.min_score.value, 1, null)) {
+ errors.push('Minimum score values must be greater than 0.');
+ set_form_err(form.min_score);
+ } else
+ clr_form_err(form.min_score);
+ }
+ } else {
+ clr_form_err(form.min_score);
+ }
+ } else {
+ clr_form_err(form.min_score);
}
}

* [Cluster-devel] conga ./conga.spec.in.in luci/cluster/form-mac ...
From: rmccabe @ 2006-12-08 20:47 UTC
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2006-12-08 20:47:37
Modified files:
. : conga.spec.in.in
luci/cluster : form-macros
luci/homebase : validate_cluster_add.js
luci/site/luci/Extensions: cluster_adapters.py
Log message:
- More fence fixes
- Fix for most of the "add node fails" bug
- Fix for bz218964
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&r1=1.57&r2=1.58
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.125&r2=1.126
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&r1=1.4&r2=1.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.176&r2=1.177
--- conga/conga.spec.in.in 2006/12/06 23:03:35 1.57
+++ conga/conga.spec.in.in 2006/12/08 20:47:37 1.58
@@ -284,7 +284,7 @@
%changelog
-* Wed Dec 06 2006 2006 Stanko Kupcevic <kupcevic@redhat.com> 0.9.1-2
+* Wed Dec 06 2006 Stanko Kupcevic <kupcevic@redhat.com> 0.9.1-2
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX UPDATE NOT RELEASED YET XXXXXXXXXXXXXXXXXXX
--- conga/luci/cluster/form-macros 2006/12/06 22:44:18 1.125
+++ conga/luci/cluster/form-macros 2006/12/08 20:47:37 1.126
@@ -1325,7 +1325,7 @@
<td>Hostname</td>
<td>
<input name="hostname" type="text"
- tal:attributes="value cur_fencedev/hostname | nothing" />
+ tal:attributes="value cur_fencedev/hostname | cur_fencedev/ipaddr | nothing" />
</td>
</tr>
<tr>
@@ -2299,9 +2299,7 @@
global nodestatus python: here.getClusterStatus(request, ricci_agent);
global nodeinfo python: here.getNodeInfo(modelb, nodestatus, request);
global status_class python: 'node_' + (nodeinfo['nodestate'] == '0' and 'active' or (nodeinfo['nodestate'] == '1' and 'inactive' or 'unknown'));
- global cluster_node_status_str python: (nodeinfo['nodestate'] == '0' and 'Cluster member' or (nodeinfo['nodestate'] == '1' and 'Currently not a cluster participant' or 'This node is not responding'));
- global fenceinfo python: here.getFenceInfo(modelb, request);
- global fencedevinfo python: here.getFencesInfo(modelb, request)"
+ global cluster_node_status_str python: (nodeinfo['nodestate'] == '0' and 'Cluster member' or (nodeinfo['nodestate'] == '1' and 'Currently not a cluster participant' or 'This node is not responding'))"
/>
<table class="cluster node" width="100%">
@@ -2443,6 +2441,17 @@
<tal:block metal:use-macro="here/form-macros/macros/fence-form-list" />
</div>
+ <tal:block tal:define="
+ global fenceinfo python: here.getFenceInfo(modelb, request);
+ global fencedevinfo python: here.getFencesInfo(modelb, request)" />
+
+ <div>
+ fenceinfo:
+ <span tal:replace="fenceinfo" /><br/>
+ fencedevinfo:
+ <span tal:replace="fencedevinfo" />
+ </div>
+
<div class="invisible" id="shared_fence_devices">
<tal:block tal:repeat="cur_fencedev fencedevinfo/fencedevs">
<tal:block metal:use-macro="here/form-macros/macros/shared-fence-device-list" />
@@ -2494,33 +2503,57 @@
</tr>
<tr class="cluster node info_top fence">
- <td class="cluster node fence_main fence"><div class="fence_container">
- <div id="fence_list_level1">
- <tal:comment tal:replace="nothing">
- XXX - fill in any existing fence devices for this node
- and update the counter number for this level
- </tal:comment>
+ <td class="cluster node fence_main fence">
+ <div class="fence_container">
+ <div id="fence_list_level1" tal:define="global cur_fence_num python: 0">
+ <tal:block tal:condition="exists: fenceinfo/level1">
+ <tal:block tal:repeat="cur_fencedev fenceinfo/level1">
+ <tal:block tal:define="
+ cur_fence_type cur_fencedev/agent | nothing;
+ cur_fence_level python: 1;">
+ <div tal:attributes="id python: 'fence1_' + str(cur_fence_num)">
+ <tal:block
+ metal:use-macro="here/form-macros/macros/fencedev-cond-ladder" />
+ </div>
+ </tal:block>
+ <tal:block tal:define="global cur_fence_num python: cur_fence_num + 1" />
+ </tal:block>
+ </tal:block>
+ <tal:block
+ tal:replace="structure python: '<script type='+chr(0x22)+'text/javascript'+chr(0x22)+'>num_fences_level[0] = ' + str(cur_fence_num) + ';</script>'" />
</div>
<div class="fence_control">
<input type="button" value="Add a fence to this level"
onclick="add_node_fence_device(1);" />
</div>
- </div></td>
+ </div>
+ </td>
- <td class="cluster node fence_main fence"><div class="fence_container">
- <div id="fence_list_level2">
- <tal:comment tal:replace="nothing">
- XXX - fill in any existing fence devices for this node
- and update the counter number for this level
- </tal:comment>
+ <td class="cluster node fence_main fence">
+ <div class="fence_container">
+ <div id="fence_list_level2" tal:define="global cur_fence_num python: 0">
+ <tal:block tal:condition="exists: fenceinfo/level2">
+ <tal:block tal:repeat="cur_fencedev fenceinfo/level2">
+ <tal:block tal:define="cur_fence_type cur_fencedev/agent | nothing">
+ <div tal:attributes="id python: 'fence2_' + str(cur_fence_num)">
+ <tal:block
+ metal:use-macro="here/form-macros/macros/fencedev-cond-ladder" />
+ </div>
+ </tal:block>
+ <tal:block tal:define="global cur_fence_num python: cur_fence_num + 1" />
+ </tal:block>
+ </tal:block>
+ <tal:block
+ tal:replace="structure python: '<script type='+chr(0x22)+'text/javascript'+chr(0x22)+'>num_fences_level[1] = ' + str(cur_fence_num) + ';</script>'" />
</div>
<div class="fence_control">
<input type="button" value="Add a fence to this level"
onclick="add_node_fence_device(2)" />
</div>
- </div></td>
+ </div>
+ </td>
</tr>
</tbody>
</table>
@@ -2671,6 +2704,7 @@
<form name="adminform" action="" method="post">
<input name="numStorage" type="hidden" value="1" />
<input name="pagetype" type="hidden" value="15" />
+ <input name="addnode" type="hidden" value="1" />
<input type="hidden" name="clusterName"
tal:attributes="
value request/form/clusterName | request/clustername | nothing"
@@ -3430,20 +3464,7 @@
</tal:block>
</div>
-<div metal:define-macro="fencedev-form">
- <h2>Fence Device Form</h2>
-
- <div class="cluster fencedev">
- <tal:block tal:define="
- global cur_fencename request/fencename | nothing;
- global cur_cluster request/clustername | nothing;
- global cur_fence_type python: 'fence_apc'"/>
-
- <span tal:condition="cur_fencename">
- <span tal:define="global cur_fencedev python:here.getFence(modelb,request);
- global cur_fence_type cur_fencedev/agent"/>
- </span>
-
+<div metal:define-macro="fencedev-cond-ladder">
<tal:block tal:condition="python: cur_fence_type == 'fence_apc'">
<tal:block metal:use-macro="here/form-macros/macros/fence-form-apc" />
</tal:block>
@@ -3515,10 +3536,26 @@
<tal:block tal:condition="python: cur_fence_type == 'fence_manual'">
<tal:block metal:use-macro="here/form-macros/macros/fence-form-manual" />
</tal:block>
+</div>
- <div class="fence_submit">
- <input class="hbInput" type="button" value="Submit" name="Submit" />
- </div>
+
+<div metal:define-macro="fencedev-form">
+ <h2>Fence Device Form</h2>
+
+ <div class="cluster fencedev">
+ <tal:block tal:define="
+ global cur_fencename request/fencename | nothing;
+ global cur_cluster request/clustername | nothing;
+ global cur_fence_type python: 'fence_apc'"/>
+
+ <span tal:condition="cur_fencename">
+ <span tal:define="
+ global cur_fencedev python:here.getFence(modelb,request);
+ global cur_fence_type cur_fencedev/agent" />
+ </span>
+
+ <tal:block
+ metal:use-macro="here/form-macros/macros/fencedev-cond-ladder" />
</div>
</div>
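Two things worth noting in the form-macros hunks above. First, the hostname field now falls back through a TAL path alternation (cur_fencedev/hostname | cur_fencedev/ipaddr | nothing), so fence device entries keyed by IP address still populate the field. Second, the chr(0x22) concatenation in the two new tal:replace expressions is simply a way to emit a double-quote character without colliding with the attribute's own quoting; the rendered page carries a one-line inline script telling the client-side JavaScript how many fence devices each level already has. A small Python snippet reproducing what the level-1 expression builds (the count of 2 is purely illustrative):

    # chr(0x22) is the double-quote character; cur_fence_num is whatever
    # the tal:repeat loop counted (2 here, for illustration only).
    cur_fence_num = 2
    markup = ('<script type=' + chr(0x22) + 'text/javascript' + chr(0x22) +
              '>num_fences_level[0] = ' + str(cur_fence_num) + ';</script>')
    print(markup)
    # -> <script type="text/javascript">num_fences_level[0] = 2;</script>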
--- conga/luci/homebase/validate_cluster_add.js 2006/09/27 22:49:09 1.4
+++ conga/luci/homebase/validate_cluster_add.js 2006/12/08 20:47:37 1.5
@@ -29,7 +29,13 @@
if (error_dialog(errors))
return (-1);
- if (confirm('Add the cluster \"' + clusterName + '\" to the Luci management interface?'))
+ var confirm_str = '';
+ if (form.addnode)
+ confirm_str = 'Add node' + (added_storage.length > 1 ? 's' : '') + ' to the \"' + clusterName + '\" cluster?';
+ else
+ confirm_str = 'Add the cluster \"' + clusterName + '\" to the Luci management interface?';
+
+ if (confirm(confirm_str))
form.submit();
return (0);
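The hidden addnode input added to the node form is what the revised validate_cluster_add.js keys on: the same validator now serves both "create cluster" and "add node" submissions and words the confirmation prompt accordingly. A rough Python rendering of the added branch (the wrapper function is illustrative; added_storage mirrors the JavaScript array of the same name):

    def confirm_string(cluster_name, is_addnode, added_storage):
        # Sketch of the branch added to validate_cluster_add.js.
        if is_addnode:
            plural = 's' if len(added_storage) > 1 else ''
            return 'Add node%s to the "%s" cluster?' % (plural, cluster_name)
        return ('Add the cluster "%s" to the Luci management interface?'
                % cluster_name)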
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/06 22:44:18 1.176
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/08 20:47:37 1.177
@@ -280,6 +280,7 @@
if 'clusterName' in request.form:
clusterName = str(request.form['clusterName'])
else:
+ luci_log.debug_verbose('vACN00: no cluster name was given')
return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
rhn_dl = 1
@@ -301,8 +302,9 @@
try:
numStorage = int(request.form['numStorage'])
if numStorage < 1:
- raise
- except:
+ raise Exception, 'no nodes were added'
+ except Exception, e:
+ luci_log.debug_verbose('vACN0: %s: %s' % (clusterName, str(e)))
errors.append('You must specify at least one node to add to the cluster')
return (False, {'errors': [ errors ], 'requestResults': requestResults })
@@ -313,34 +315,56 @@
try:
nodeList = cluster_properties['nodeList']
if len(nodeList) < 1:
- raise
- except:
+ raise Exception, 'no cluster nodes'
+ except Exception, e:
+ luci_log.debug_verbose('vACN1: %s: %s' % (clusterName, str(e)))
errors.append('You must specify at least one valid node to add to the cluster')
+ clusterObj = None
try:
clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
cluster_os = clusterObj.manage_getProperty('cluster_os')
if not cluster_os:
- luci_log.debug('The cluster OS property is missing for cluster ' + clusterName)
- raise Exception, 'no cluster OS was found.'
+ raise Exception, 'no cluster OS was found in DB for %s' % clusterName
+ except Exception, e:
+ luci_log.debug_verbose('vACN2: %s: %s' % (clusterName, str(e)))
try:
- if len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0:
- raise Exception, 'different operating systems were detected.'
- except:
+ rc = getRicciAgent(self, clusterName)
+ if not rc:
+ raise Exception, 'cannot find a ricci agent for %s' % clusterName
+ cluster_os = getClusterOS(self, rc)['os']
+ if clusterObj is None:
+ try:
+ clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+ except:
+ pass
+
+ try:
+ clusterObj.manage_addProperty('cluster_os', cluster_os, 'string')
+ except:
+ pass
+ except Exception, e:
+ luci_log.debug_verbose('vACN3: %s: %s' % (clusterName, str(e)))
nodeUnauth(nodeList)
+ cluster_os = None
cluster_properties['isComplete'] = False
- errors.append('Cluster nodes must be running compatible operating systems.')
- except:
+ errors.append('Unable to determine the cluster OS for the ' + clusterName + ' cluster.')
+
+ try:
+ if cluster_os is None:
+ raise Exception, 'no cluster OS found for %s' % clusterName
+ if len(filter(lambda x: x['os'] != cluster_os, nodeList)) > 0:
+ raise Exception, 'different operating systems were detected.'
+ except Exception, e:
+ luci_log.debug_verbose('vACN4: %s: %s' % (clusterName, str(e)))
nodeUnauth(nodeList)
cluster_properties['isComplete'] = False
- errors.append('Unable to determine the cluster OS for the ' + clusterName + ' cluster.')
+ errors.append('Cluster nodes must be running compatible operating systems.')
if not cluster_properties['isComplete']:
return (False, {'errors': errors, 'requestResults': cluster_properties})
- i = 0
- while i < len(nodeList):
- clunode = nodeList[i]
+ for clunode in nodeList:
try:
batchNode = addClusterNodeBatch(clunode['os'],
clusterName,
@@ -350,9 +374,11 @@
False,
rhn_dl)
if not batchNode:
- raise
- del nodeList[i]
- except:
+ raise Exception, 'batchnode is None'
+ clunode['batchnode'] = batchNode
+ except Exception, e:
+ luci_log.debug_verbose('vACN5: node add for %s failed: %s' \
+ % (clunode['host'], str(e)))
clunode['errors'] = True
nodeUnauth(nodeList)
cluster_properties['isComplete'] = False
@@ -363,37 +389,42 @@
error = createClusterSystems(self, clusterName, nodeList)
if error:
+ luci_log.debug_verbose('vACN5a: %s: %s' % (clusterName, str(error)))
nodeUnauth(nodeList)
cluster_properties['isComplete'] = False
errors.append(error)
return (False, {'errors': errors, 'requestResults': cluster_properties})
batch_id_map = {}
- for i in nodeList:
- clunode = nodeList[i]
+ for clunode in nodeList:
success = True
try:
rc = RicciCommunicator(clunode['host'])
+ if not rc:
+ raise Exception, 'rc is None'
except Exception, e:
- luci_log.info('Unable to connect to the ricci daemon on host %s: %s'% (clunode['host'], str(e)))
+ nodeUnauth([clunode['host']])
success = False
+ luci_log.info('vACN6: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
if success:
try:
- resultNode = rc.process_batch(batchNode, async=True)
+ resultNode = rc.process_batch(clunode['batchnode'], async=True)
batch_id_map[clunode['host']] = resultNode.getAttribute('batch_id')
- except:
+ except Exception, e:
+ nodeUnauth([clunode['host']])
success = False
+ luci_log.info('vACN7: %s: %s' % (clunode['host'], str(e)))
if not success:
- nodeUnauth(nodeList)
cluster_properties['isComplete'] = False
errors.append('An error occurred while attempting to add cluster node \"' + clunode['host'] + '\"')
- return (False, {'errors': errors, 'requestResults': cluster_properties})
- messages.append('Cluster join initiated for host \"' + clunode['host'] + '\"')
buildClusterCreateFlags(self, batch_id_map, clusterName)
+ if len(errors) > 0:
+ return (False, {'errors': errors, 'requestResults': cluster_properties})
+
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clusterName + '&busyfirst=true')
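Taken together, the cluster_adapters.py hunks replace index-based iteration and all-or-nothing aborts with a per-node loop: each node's prepared batch is stored on its dict as clunode['batchnode'], a connection or submission failure de-authenticates only the failing node, and errors accumulate so every failure is reported after buildClusterCreateFlags runs. A condensed, hypothetical sketch of the resulting submission loop (Python 2, to match the tree; RicciCommunicator and nodeUnauth are the existing luci helpers, the wrapper itself is illustrative):

    def submit_node_batches(node_list, batch_id_map):
        # Sketch only -- assumes the luci Extensions helpers
        # (RicciCommunicator, nodeUnauth) are importable in this scope.
        errors = []
        for clunode in node_list:
            try:
                rc = RicciCommunicator(clunode['host'])
                if not rc:
                    raise Exception, 'rc is None'
                resultNode = rc.process_batch(clunode['batchnode'], async=True)
                batch_id_map[clunode['host']] = resultNode.getAttribute('batch_id')
            except Exception, e:
                # De-auth only the node that failed, then keep going.
                nodeUnauth([clunode['host']])
                errors.append('An error occurred while attempting to add '
                              'cluster node "%s": %s'
                              % (clunode['host'], str(e)))
        return errors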