From: lhh@sourceware.org <lhh@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] cluster/rgmanager ChangeLog include/rg_locks.h ...
Date: 28 Jan 2008 19:27:21 -0000
Message-ID: <20080128192721.9096.qmail@sourceware.org>

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL4
Changes by:	lhh@sourceware.org	2008-01-28 19:27:20

Modified files:
	rgmanager      : ChangeLog 
	rgmanager/include: rg_locks.h 
	rgmanager/src/daemons: groups.c main.c rg_locks.c 
	rgmanager/src/utils: clulog.c 

Log message:
	Fix #430538 - unbounded status/clustat thread counts cause timeout errors
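
	In outline: each status/clustat request got its own thread with no
	cap, so a burst of requests could pile up enough threads to starve
	real work and trip timeouts.  The fix gates status threads behind a
	mutex-protected counter and rejects overflow with RG_EAGAIN.  A
	minimal sketch of that pattern (names here are illustrative; the
	committed functions are rg_inc_status()/rg_dec_status() in the
	rg_locks.c hunk below):

	    #include <pthread.h>

	    static pthread_mutex_t gate_mutex = PTHREAD_MUTEX_INITIALIZER;
	    static int status_count = 0;
	    static int status_max   = 5;   /* default cap, matching the patch */

	    /* Claim a status slot; fail fast instead of queueing. */
	    static int status_slot_get(void)
	    {
	            int ret = -1;

	            pthread_mutex_lock(&gate_mutex);
	            if (status_count < status_max) {
	                    ++status_count;
	                    ret = 0;
	            }
	            pthread_mutex_unlock(&gate_mutex);
	            return ret;            /* -1 => caller replies RG_EAGAIN */
	    }

	    /* Release the slot when the status thread exits. */
	    static void status_slot_put(void)
	    {
	            pthread_mutex_lock(&gate_mutex);
	            if (--status_count < 0)
	                    status_count = 0;
	            pthread_mutex_unlock(&gate_mutex);
	    }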

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/ChangeLog.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.5.2.34&r2=1.5.2.35
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/rg_locks.h.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/groups.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.8.2.22&r2=1.8.2.23
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/main.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.9.2.23&r2=1.9.2.24
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_locks.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4.2.3&r2=1.4.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clulog.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1.2.3&r2=1.1.2.4

--- cluster/rgmanager/ChangeLog	2007/10/26 20:27:39	1.5.2.34
+++ cluster/rgmanager/ChangeLog	2008/01/28 19:27:19	1.5.2.35
@@ -1,3 +1,10 @@
+2008-01-28 Lon Hohberger <lhh@redhat.com>
+	* include/rg_locks.h, src/daemons/groups.c, rg_locks.c:
+	fix #430538 - unbounded status / clustat thread counts cause
+	timeout errors
+	* src/daemons/main.c, src/utils/clulog.c: Incorrect ccs API 
+	return code processing results in unwanted log messages
+
 2007-10-26 Lon Hohberger <lhh@redhat.com>
 	* src/daemons/main.c, src/utils/clustat.c, clusvcadm.c: Call
 	msg_set_nodeid() to ensure we route from the right IP on a 
--- cluster/rgmanager/include/rg_locks.h	2006/05/12 21:28:30	1.1.2.1
+++ cluster/rgmanager/include/rg_locks.h	2008/01/28 19:27:20	1.1.2.2
@@ -1,3 +1,20 @@
+/*
+  Copyright Red Hat, Inc. 2004-2007
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License version 2 as published
+  by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to the
+  Free Software Foundation, Inc.,  675 Mass Ave, Cambridge, 
+  MA 02139, USA.
+*/
 #ifndef __RG_LOCKS_H
 #define __RG_LOCKS_H
 
@@ -20,6 +37,10 @@
 int rg_set_uninitialized(void);
 int rg_wait_initialized(void);
 
+int rg_inc_status(void);
+int rg_dec_status(void);
+int rg_set_statusmax(int max);
+
 int ccs_lock(void);
 int ccs_unlock(int fd);
 
--- cluster/rgmanager/src/daemons/groups.c	2007/11/14 19:03:37	1.8.2.22
+++ cluster/rgmanager/src/daemons/groups.c	2008/01/28 19:27:20	1.8.2.23
@@ -43,6 +43,7 @@
 
 pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_rwlock_t resource_lock = PTHREAD_RWLOCK_INITIALIZER;
+pthread_mutex_t status_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 
 struct status_arg {
@@ -750,6 +751,14 @@
 
 	free(arg);
 
+	/* See if we have a slot... */
+	if (rg_inc_status() < 0) {
+		/* Too many outstanding status checks.  try again later. */
+		msg_send_simple(fd, RG_FAIL, RG_EAGAIN, 0);
+		msg_close(fd);
+		pthread_exit(NULL);
+	}
+
 	pthread_rwlock_rdlock(&resource_lock);
 
 	list_do(&_resources, res) {
@@ -768,8 +777,10 @@
 	msg_receive_timeout(fd, &hdr, sizeof(hdr), 10);
 
 	msg_close(fd);
+	
+	rg_dec_status();
 
-	return NULL;
+	pthread_exit(NULL);
 }
 
 
@@ -887,6 +898,10 @@
 	char *name;
 	rg_state_t svcblk;
 
+	/* Only one status thread at a time, please! */
+	if (pthread_mutex_trylock(&status_mutex) != 0)
+		pthread_exit(NULL);
+
 	pthread_rwlock_rdlock(&resource_lock);
 	list_do(&_tree, curr) {
 
@@ -911,8 +926,9 @@
 	} while (!list_done(&_tree, curr));
 
 	pthread_rwlock_unlock(&resource_lock);
+	pthread_mutex_unlock(&status_mutex);
 
-	return NULL;
+	pthread_exit(NULL);
 }
 
 
@@ -1164,6 +1180,12 @@
 		pthread_mutex_unlock(&config_mutex);
 	}
 
+	if (ccs_get(fd, "/cluster/rm/@statusmax", &val) == 0) {
+		if (strlen(val))
+			rg_set_statusmax(atoi(val));
+		free(val);
+	}
+
 	clulog(LOG_DEBUG, "Building Resource Trees\n");
 	/* About to update the entire resource tree... */
 	if (load_resources(fd, &reslist, &rulelist) != 0) {
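
The ccs_get() call added above makes the cap configurable from
cluster.conf via the /cluster/rm/@statusmax attribute.  A hypothetical
fragment (attribute placement follows from the XPath; the name and
version values are made up, and rg_set_statusmax() below clamps
anything under 3 up to 3):

    <cluster name="example" config_version="1">
      <rm statusmax="10">
        <!-- failover domains, resources, services ... -->
      </rm>
    </cluster>
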
--- cluster/rgmanager/src/daemons/main.c	2007/10/26 20:15:37	1.9.2.23
+++ cluster/rgmanager/src/daemons/main.c	2008/01/28 19:27:20	1.9.2.24
@@ -653,10 +653,10 @@
 	char *v;
 	char internal = 0;
 
-	if (ccsfd == -1) {
+	if (ccsfd < 0) {
 		internal = 1;
 		ccsfd = ccs_connect();
-		if (ccsfd == -1)
+		if (ccsfd < 0)
 			return -1;
 	}
 
--- cluster/rgmanager/src/daemons/rg_locks.c	2006/05/12 21:28:31	1.4.2.3
+++ cluster/rgmanager/src/daemons/rg_locks.c	2008/01/28 19:27:20	1.4.2.4
@@ -33,11 +33,20 @@
 static int __rg_threadcnt = 0;
 static int __rg_initialized = 0;
 
-static pthread_mutex_t locks_mutex = PTHREAD_MUTEX_INITIALIZER;
+static int _rg_statuscnt = 0;
+static int _rg_statusmax = 5; /* XXX */
+
 static pthread_cond_t unlock_cond = PTHREAD_COND_INITIALIZER;
 static pthread_cond_t zero_cond = PTHREAD_COND_INITIALIZER;
 static pthread_cond_t init_cond = PTHREAD_COND_INITIALIZER;
+
+#ifdef WRAP_LOCKS
+static pthread_mutex_t locks_mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+static pthread_mutex_t _ccs_mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+#else
+static pthread_mutex_t locks_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t _ccs_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 #ifdef NO_CCS
 static xmlDocPtr ccs_doc = NULL;
@@ -264,6 +273,48 @@
 
 
 int
+rg_set_statusmax(int max)
+{
+	int old;
+	
+	if (max <= 3)
+		max = 3;
+	
+	pthread_mutex_lock(&locks_mutex);
+	old = _rg_statusmax;
+	_rg_statusmax = max;
+	pthread_mutex_unlock(&locks_mutex);
+	return old;
+}
+
+
+int
+rg_inc_status(void)
+{
+	pthread_mutex_lock(&locks_mutex);
+	if (_rg_statuscnt >= _rg_statusmax) {
+		pthread_mutex_unlock(&locks_mutex);
+		return -1;
+	}
+	++_rg_statuscnt;
+	pthread_mutex_unlock(&locks_mutex);
+	return 0;
+}
+
+
+int
+rg_dec_status(void)
+{
+	pthread_mutex_lock(&locks_mutex);
+	--_rg_statuscnt;
+	if (_rg_statuscnt < 0)
+		_rg_statuscnt = 0;
+	pthread_mutex_unlock(&locks_mutex);
+	return 0;
+}
+
+
+int
 rg_wait_threads(void)
 {
 	pthread_mutex_lock(&locks_mutex);
@@ -272,4 +323,3 @@
 	pthread_mutex_unlock(&locks_mutex);
 	return 0;
 }
-
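
A side note on the WRAP_LOCKS hunk above:
PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP is a glibc extension that turns
lock misuse into an error return instead of a silent deadlock.  A
standalone illustration, not part of the patch:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;

    int main(void)
    {
            pthread_mutex_lock(&m);
            /* A second lock from the same thread returns EDEADLK
               rather than hanging; unlocking from a non-owner thread
               would return EPERM. */
            if (pthread_mutex_lock(&m) == EDEADLK)
                    printf("relock caught\n");
            pthread_mutex_unlock(&m);
            return 0;
    }
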
--- cluster/rgmanager/src/utils/clulog.c	2007/05/03 15:02:47	1.1.2.3
+++ cluster/rgmanager/src/utils/clulog.c	2008/01/28 19:27:20	1.1.2.4
@@ -54,10 +54,10 @@
 	char *v;
 	char internal = 0;
 
-	if (ccsfd == -1) {
+	if (ccsfd < 0) {
 		internal = 1;
 		ccsfd = ccs_connect();
-		if (ccsfd == -1)
+		if (ccsfd < 0)
 			return -1;
 	}
 


