* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2006-10-14 18:00 kupcevic
0 siblings, 0 replies; 7+ messages in thread
From: kupcevic @ 2006-10-14 18:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: kupcevic at sourceware.org 2006-10-14 18:00:02
Modified files:
ricci/modules/cluster/clumon/src/daemon: Communicator.cpp
Monitor.cpp Peer.cpp
main.cpp
Log message:
modclusterd: exit as soon as possible, after exit signal is caught:
- make all sockets non-blocking
- make long waits interruptible
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Communicator.cpp.diff?cvsroot=cluster&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&r1=1.8&r2=1.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Peer.cpp.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/main.cpp.diff?cvsroot=cluster&r1=1.3&r2=1.4
--- conga/ricci/modules/cluster/clumon/src/daemon/Communicator.cpp 2006/08/10 22:53:08 1.3
+++ conga/ricci/modules/cluster/clumon/src/daemon/Communicator.cpp 2006/10/14 18:00:02 1.4
@@ -55,6 +55,7 @@
_serv_sock(_port),
_delivery_point(delivery_point)
{
+ _serv_sock.nonblocking(true);
_connect_time = time_sec();
_rand_state = time_mil();
log(String("Communicator created, port ") + _port, LogCommunicator);
@@ -186,7 +187,7 @@
}
// process events
- for (unsigned int i=0; i<socks_num; i++) {
+ for (unsigned int i=0; i<socks_num && !shouldStop(); i++) {
poll_fd& poll_info = poll_data[i];
// server socket
@@ -194,6 +195,7 @@
if (poll_info.revents & POLLIN) {
try {
ClientSocket sock = _serv_sock.accept();
+ sock.nonblocking(true);
String hostname;
for (vector<String>::iterator iter = names.begin();
iter != names.end();
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2006/10/13 09:36:16 1.8
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2006/10/14 18:00:02 1.9
@@ -132,8 +132,12 @@
log(msg, LogTime);
// wait some time
- struct pollfd nothing;
- poll(&nothing, 0, 5000);
+ for (int i=0; i<10; i++) {
+ if (shouldStop())
+ break;
+ struct pollfd nothing;
+ poll(&nothing, 0, 500);
+ }
}
log("Stopping communicator", LogCommunicator);
_comm.stop();
--- conga/ricci/modules/cluster/clumon/src/daemon/Peer.cpp 2006/08/10 22:53:08 1.2
+++ conga/ricci/modules/cluster/clumon/src/daemon/Peer.cpp 2006/10/14 18:00:02 1.3
@@ -40,14 +40,18 @@
_hostname(hostname),
_in(new String()),
_out(new String())
-{}
+{
+ _sock->nonblocking(true);
+}
Peer::Peer(const String& hostname, unsigned short port) :
_sock(new ClientSocket(hostname, port)),
_hostname(hostname),
_in(new String()),
_out(new String())
-{}
+{
+ _sock->nonblocking(true);
+}
Peer::~Peer()
{}
--- conga/ricci/modules/cluster/clumon/src/daemon/main.cpp 2006/10/13 09:36:16 1.3
+++ conga/ricci/modules/cluster/clumon/src/daemon/main.cpp 2006/10/14 18:00:02 1.4
@@ -102,6 +102,7 @@
log("started");
try {
ServerSocket server(MONITORING_CLIENT_SOCKET);
+ server.nonblocking(true);
Monitor monitor(COMMUNICATION_PORT);
if (!foreground && (geteuid() == 0))
@@ -169,7 +170,7 @@
}
// process events
- for (unsigned int i=0; i<socks_num; i++) {
+ for (unsigned int i=0; i<socks_num && !shutdown_pending; i++) {
poll_fd& poll_info = poll_data[i];
// server socket
@@ -177,6 +178,7 @@
if (poll_info.revents & POLLIN) {
try {
ClientSocket sock = server.accept();
+ sock.nonblocking(true);
clients[sock.get_sock()] = ClientInfo(sock);
} catch ( ... ) {}
}
@@ -187,7 +189,9 @@
if (poll_info.revents & POLLIN) {
ClientInfo& info = clients[poll_info.fd];
try {
- info.str = monitor.request(info.sock.recv());
+ String msg = info.sock.recv();
+ if (msg.size())
+ info.str = monitor.request(msg);
} catch ( ... ) {
clients.erase(poll_info.fd);
}
^ permalink raw reply [flat|nested] 7+ messages in thread* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2007-11-21 17:39 rmccabe
0 siblings, 0 replies; 7+ messages in thread
From: rmccabe @ 2007-11-21 17:39 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL4
Changes by: rmccabe at sourceware.org 2007-11-21 17:39:39
Modified files:
ricci/modules/cluster/clumon/src/daemon: Monitor.cpp
Log message:
Fix obvious error
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.13.2.2&r2=1.13.2.3
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/10/10 18:10:37 1.13.2.2
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/11/21 17:39:39 1.13.2.3
@@ -811,10 +811,8 @@
String ret;
cman_handle_t ch = cman_init(NULL);
- if (ch == NULL) {
- cman_finish(ch);
+ if (ch == NULL)
throw String("quorum not found");
- }
if (cman_is_quorate(ch))
ret = "Quorate";
^ permalink raw reply [flat|nested] 7+ messages in thread* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2007-11-21 17:41 rmccabe
0 siblings, 0 replies; 7+ messages in thread
From: rmccabe @ 2007-11-21 17:41 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2007-11-21 17:41:21
Modified files:
ricci/modules/cluster/clumon/src/daemon: Monitor.cpp
Log message:
Fix obvious error
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&r1=1.19&r2=1.20
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/10/09 19:58:30 1.19
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/11/21 17:41:21 1.20
@@ -811,10 +811,8 @@
String ret;
cman_handle_t ch = cman_init(NULL);
- if (ch == NULL) {
- cman_finish(ch);
+ if (ch == NULL)
throw String("quorum not found");
- }
if (cman_is_quorate(ch))
ret = "Quorate";
^ permalink raw reply [flat|nested] 7+ messages in thread* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2007-11-21 17:42 rmccabe
0 siblings, 0 replies; 7+ messages in thread
From: rmccabe @ 2007-11-21 17:42 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2007-11-21 17:42:36
Modified files:
ricci/modules/cluster/clumon/src/daemon: Monitor.cpp
Log message:
Fix obvious error
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.10.2.5&r2=1.10.2.6
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/10/22 19:32:07 1.10.2.5
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/11/21 17:42:36 1.10.2.6
@@ -811,10 +811,8 @@
String ret;
cman_handle_t ch = cman_init(NULL);
- if (ch == NULL) {
- cman_finish(ch);
+ if (ch == NULL)
throw String("quorum not found");
- }
if (cman_is_quorate(ch))
ret = "Quorate";
^ permalink raw reply [flat|nested] 7+ messages in thread* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2007-12-05 16:46 rmccabe
0 siblings, 0 replies; 7+ messages in thread
From: rmccabe @ 2007-12-05 16:46 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2007-12-05 16:46:16
Modified files:
ricci/modules/cluster/clumon/src/daemon: Monitor.cpp
Log message:
Still return cluster.conf info when cman isn't running
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&r1=1.20&r2=1.21
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/11/21 17:41:21 1.20
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/12/05 16:46:16 1.21
@@ -80,8 +80,8 @@
msg_arrived(my_nodename, msg);
_cluster = merge_data(clustername);
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
- } catch ( ... ) {}
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
+ } catch (...) { }
}
void
@@ -110,7 +110,7 @@
_cluster = merge_data(clustername);
}
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
MutexLocker l(_mutex);
_cluster = counting_auto_ptr<Cluster>();
} catch ( ... ) {
@@ -153,9 +153,9 @@
try {
return cluster2xml(*_cluster) + "\n";
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
return def;
- } catch ( ... ) {
+ } catch (...) {
return def;
}
}
@@ -192,7 +192,7 @@
}
}
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
} catch ( ... ) {}
}
@@ -221,61 +221,69 @@
try {
cluster.set_attr("minQuorum", probe_quorum());
- } catch ( ... ) {}
+ } catch (...) {}
cluster.set_attr("cluster_version", _cl_version);
- // insert current node info
- const vector<String> clustered_nodes = this->clustered_nodes();
- for (list<XMLObject>::const_iterator
- iter = cluster.children().begin() ;
- iter != cluster.children().end() ;
- iter++)
- {
- XMLObject& kid = (XMLObject&) *iter;
- if (kid.tag() == "node") {
- String name(kid.get_attr("name"));
- if (name == nodename) {
- // insert info about this node -> self
- kid.set_attr("uptime", uptime());
- }
+ try {
+ // insert current node info
+ const vector<String> clustered_nodes = this->clustered_nodes();
+ for (list<XMLObject>::const_iterator
+ iter = cluster.children().begin() ;
+ iter != cluster.children().end() ;
+ iter++)
+ {
+ XMLObject& kid = (XMLObject&) *iter;
+ if (kid.tag() == "node") {
+ String name(kid.get_attr("name"));
+ if (name == nodename) {
+ // insert info about this node -> self
+ kid.set_attr("uptime", uptime());
+ }
- if (find(clustered_nodes.begin(), clustered_nodes.end(), name) !=
- clustered_nodes.end())
- {
- kid.set_attr("online", "true");
- kid.set_attr("clustered", "true");
+ if (find(clustered_nodes.begin(), clustered_nodes.end(), name) !=
+ clustered_nodes.end())
+ {
+ kid.set_attr("online", "true");
+ kid.set_attr("clustered", "true");
+ }
}
}
- }
+ } catch (String e) {
+ log(String(__FILE__) + ":" + __LINE__ + ": caught exception: " + e, LogCommunicator);
+ } catch (...) { }
// insert current service info
- const vector<XMLObject> services_info = this->services_info();
- for (vector<XMLObject>::const_iterator
- iter_i = services_info.begin() ;
- iter_i != services_info.end() ;
- iter_i++)
- {
- const XMLObject& service = *iter_i;
- for (list<XMLObject>::const_iterator
- iter_c = cluster.children().begin() ;
- iter_c != cluster.children().end() ;
- iter_c++)
+ try {
+ const vector<XMLObject> services_info = this->services_info();
+ for (vector<XMLObject>::const_iterator
+ iter_i = services_info.begin() ;
+ iter_i != services_info.end() ;
+ iter_i++)
{
- XMLObject& kid = (XMLObject&) *iter_c;
- if (kid.tag() == "service") {
- if (kid.get_attr("name") == service.get_attr("name")) {
- for (map<String, String>::const_iterator
+ const XMLObject& service = *iter_i;
+ for (list<XMLObject>::const_iterator
+ iter_c = cluster.children().begin() ;
+ iter_c != cluster.children().end() ;
+ iter_c++)
+ {
+ XMLObject& kid = (XMLObject&) *iter_c;
+ if (kid.tag() == "service") {
+ if (kid.get_attr("name") == service.get_attr("name")) {
+ for (map<String, String>::const_iterator
iter = service.attrs().begin() ;
iter != service.attrs().end() ;
iter++)
- {
- kid.set_attr(iter->first, iter->second);
+ {
+ kid.set_attr(iter->first, iter->second);
+ }
}
}
}
}
- }
+ } catch (String e) {
+ log(String(__FILE__) + ":" + __LINE__ + ": caught exception: " + e, LogCommunicator);
+ } catch (...) { }
// ** return values **
@@ -289,6 +297,7 @@
vector<String>::iterator iter = find(nodes.begin(), nodes.end(), nodename);
if (iter != nodes.end())
nodes.erase(iter);
+
return nodes;
}
@@ -593,7 +602,7 @@
}
} catch (...) {
free(node_array);
- throw;
+ throw String("error getting node names");
}
free(node_array);
} else if (_cl_version == "4") {
@@ -789,7 +798,7 @@
}
}
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
} catch ( ... ) {}
return services;
@@ -893,7 +902,7 @@
merge_xmls(const XMLObject& what, const XMLObject& with)
{
if (what.tag() != with.tag())
- throw String("merge_xmls(): tag mismatch");
+ throw String("merge_xmls(): tag mismatch: \"" + what.tag() + "\" \"" + with.tag() + "\"");
XMLObject new_xml(what.tag());
for (map<String, String>::const_iterator
^ permalink raw reply [flat|nested] 7+ messages in thread* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2007-12-05 17:36 rmccabe
0 siblings, 0 replies; 7+ messages in thread
From: rmccabe @ 2007-12-05 17:36 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2007-12-05 17:36:24
Modified files:
ricci/modules/cluster/clumon/src/daemon: Monitor.cpp
Log message:
Still return cluster.conf info when cman isn't running
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.10.2.6&r2=1.10.2.7
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/11/21 17:42:36 1.10.2.6
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2007/12/05 17:36:24 1.10.2.7
@@ -80,8 +80,8 @@
msg_arrived(my_nodename, msg);
_cluster = merge_data(clustername);
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
- } catch ( ... ) {}
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
+ } catch (...) { }
}
void
@@ -110,7 +110,7 @@
_cluster = merge_data(clustername);
}
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
MutexLocker l(_mutex);
_cluster = counting_auto_ptr<Cluster>();
} catch ( ... ) {
@@ -153,9 +153,9 @@
try {
return cluster2xml(*_cluster) + "\n";
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
return def;
- } catch ( ... ) {
+ } catch (...) {
return def;
}
}
@@ -192,7 +192,7 @@
}
}
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
} catch ( ... ) {}
}
@@ -221,61 +221,69 @@
try {
cluster.set_attr("minQuorum", probe_quorum());
- } catch ( ... ) {}
+ } catch (...) {}
cluster.set_attr("cluster_version", _cl_version);
- // insert current node info
- const vector<String> clustered_nodes = this->clustered_nodes();
- for (list<XMLObject>::const_iterator
- iter = cluster.children().begin() ;
- iter != cluster.children().end() ;
- iter++)
- {
- XMLObject& kid = (XMLObject&) *iter;
- if (kid.tag() == "node") {
- String name(kid.get_attr("name"));
- if (name == nodename) {
- // insert info about this node -> self
- kid.set_attr("uptime", uptime());
- }
+ try {
+ // insert current node info
+ const vector<String> clustered_nodes = this->clustered_nodes();
+ for (list<XMLObject>::const_iterator
+ iter = cluster.children().begin() ;
+ iter != cluster.children().end() ;
+ iter++)
+ {
+ XMLObject& kid = (XMLObject&) *iter;
+ if (kid.tag() == "node") {
+ String name(kid.get_attr("name"));
+ if (name == nodename) {
+ // insert info about this node -> self
+ kid.set_attr("uptime", uptime());
+ }
- if (find(clustered_nodes.begin(), clustered_nodes.end(), name) !=
- clustered_nodes.end())
- {
- kid.set_attr("online", "true");
- kid.set_attr("clustered", "true");
+ if (find(clustered_nodes.begin(), clustered_nodes.end(), name) !=
+ clustered_nodes.end())
+ {
+ kid.set_attr("online", "true");
+ kid.set_attr("clustered", "true");
+ }
}
}
- }
+ } catch (String e) {
+ log(String(__FILE__) + ":" + __LINE__ + ": caught exception: " + e, LogCommunicator);
+ } catch (...) { }
// insert current service info
- const vector<XMLObject> services_info = this->services_info();
- for (vector<XMLObject>::const_iterator
- iter_i = services_info.begin() ;
- iter_i != services_info.end() ;
- iter_i++)
- {
- const XMLObject& service = *iter_i;
- for (list<XMLObject>::const_iterator
- iter_c = cluster.children().begin() ;
- iter_c != cluster.children().end() ;
- iter_c++)
+ try {
+ const vector<XMLObject> services_info = this->services_info();
+ for (vector<XMLObject>::const_iterator
+ iter_i = services_info.begin() ;
+ iter_i != services_info.end() ;
+ iter_i++)
{
- XMLObject& kid = (XMLObject&) *iter_c;
- if (kid.tag() == "service") {
- if (kid.get_attr("name") == service.get_attr("name")) {
- for (map<String, String>::const_iterator
+ const XMLObject& service = *iter_i;
+ for (list<XMLObject>::const_iterator
+ iter_c = cluster.children().begin() ;
+ iter_c != cluster.children().end() ;
+ iter_c++)
+ {
+ XMLObject& kid = (XMLObject&) *iter_c;
+ if (kid.tag() == "service") {
+ if (kid.get_attr("name") == service.get_attr("name")) {
+ for (map<String, String>::const_iterator
iter = service.attrs().begin() ;
iter != service.attrs().end() ;
iter++)
- {
- kid.set_attr(iter->first, iter->second);
+ {
+ kid.set_attr(iter->first, iter->second);
+ }
}
}
}
}
- }
+ } catch (String e) {
+ log(String(__FILE__) + ":" + __LINE__ + ": caught exception: " + e, LogCommunicator);
+ } catch (...) { }
// ** return values **
@@ -289,6 +297,7 @@
vector<String>::iterator iter = find(nodes.begin(), nodes.end(), nodename);
if (iter != nodes.end())
nodes.erase(iter);
+
return nodes;
}
@@ -593,7 +602,7 @@
}
} catch (...) {
free(node_array);
- throw;
+ throw String("error getting node names");
}
free(node_array);
} else if (_cl_version == "4") {
@@ -789,7 +798,7 @@
}
}
} catch (String e) {
- log(__LINE__ + ": caught exception: " + e, LogCommunicator);
+ log(String(__FILE__) + ":" + __LINE__ + String(": caught exception: ") + e, LogCommunicator);
} catch ( ... ) {}
return services;
@@ -893,7 +902,7 @@
merge_xmls(const XMLObject& what, const XMLObject& with)
{
if (what.tag() != with.tag())
- throw String("merge_xmls(): tag mismatch");
+ throw String("merge_xmls(): tag mismatch: \"" + what.tag() + "\" \"" + with.tag() + "\"");
XMLObject new_xml(what.tag());
for (map<String, String>::const_iterator
^ permalink raw reply [flat|nested] 7+ messages in thread* [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon ...
@ 2008-05-12 15:15 rmccabe
0 siblings, 0 replies; 7+ messages in thread
From: rmccabe @ 2008-05-12 15:15 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2008-05-12 15:15:23
Modified files:
ricci/modules/cluster/clumon/src/daemon: Monitor.cpp
Log message:
Fix type error
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.10.2.8&r2=1.10.2.9
--- conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2008/01/17 17:38:38 1.10.2.8
+++ conga/ricci/modules/cluster/clumon/src/daemon/Monitor.cpp 2008/05/12 15:15:23 1.10.2.9
@@ -845,7 +845,7 @@
if (ch != NULL) {
int ret;
cman_version_t cman_version;
- char *clu_version = "";
+ String clu_version = "";
ret = cman_get_version(ch, &cman_version);
if (ret >= 0) {
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2008-05-12 15:15 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-10-14 18:00 [Cluster-devel] conga/ricci/modules/cluster/clumon/src/daemon kupcevic
-- strict thread matches above, loose matches on Subject: below --
2007-11-21 17:39 rmccabe
2007-11-21 17:41 rmccabe
2007-11-21 17:42 rmccabe
2007-12-05 16:46 rmccabe
2007-12-05 17:36 rmccabe
2008-05-12 15:15 rmccabe
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).