* [Cluster-devel] cluster/group/daemon cpg.c joinleave.c main.c
@ 2007-09-07 19:17 teigland
From: teigland @ 2007-09-07 19:17 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-09-07 19:17:27

Modified files:
	group/daemon   : cpg.c joinleave.c main.c 

Log message:
	Do nodedown events when the confchg for the groupd cpg arrives,
	instead of when the per-group cpg confchg's arrive.  This way all
	nodes should agree on the ordering of confchg's with respect to
	messages, since all messages go through the groupd cpg.
	
	This should fix bz 258121, but I can't reproduce anything like that
	bug to verify.
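
A minimal, self-contained C sketch of this ordering idea (the stand-in
types, names with a _sketch suffix, and list handling are hypothetical
and much simpler than the real groupd structures; the control flow
mirrors the hunk added to process_groupd_confchg() below):

/* Every nodedown is handled from the single groupd cpg's confchg.
   Since all groupd messages also flow through that one cpg, each
   daemon sees this confchg at the same position in the totally
   ordered message stream and so applies the nodedowns to its groups
   in the same agreed order as every other daemon. */

enum { REASON_NODEDOWN = 1, REASON_PROCDOWN = 2 };	/* stand-ins */

struct left_entry { int nodeid; int reason; };
struct group      { int id; struct group *next; };

static struct group *gd_groups_sketch;		/* all managed groups */
static struct left_entry saved_left_sketch[32];	/* members that left */
static int saved_left_count_sketch;

static void process_node_down_sketch(struct group *g, int nodeid)
{
	/* remove nodeid from g's member list and queue recovery */
	(void)g; (void)nodeid;
}

static void process_groupd_confchg_sketch(void)
{
	struct group *g;
	int i;

	for (i = 0; i < saved_left_count_sketch; i++) {
		if (saved_left_sketch[i].reason != REASON_NODEDOWN &&
		    saved_left_sketch[i].reason != REASON_PROCDOWN)
			continue;
		/* apply the nodedown to every group at this agreed point */
		for (g = gd_groups_sketch; g; g = g->next)
			process_node_down_sketch(g, saved_left_sketch[i].nodeid);
	}
}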

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/cpg.c.diff?cvsroot=cluster&r1=1.38&r2=1.39
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/joinleave.c.diff?cvsroot=cluster&r1=1.21&r2=1.22
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/main.c.diff?cvsroot=cluster&r1=1.58&r2=1.59

--- cluster/group/daemon/cpg.c	2007/01/05 18:49:00	1.38
+++ cluster/group/daemon/cpg.c	2007/09/07 19:17:27	1.39
@@ -40,13 +40,11 @@
 	event_t *ev, *ev_safe;
 	int no_rev = 0;
 
-	log_group(g, "process_node_down %d", nodeid);
-
 	node = find_group_node(g, nodeid);
-	if (!node) {
-		log_error(g, "process_node_down: no member %d", nodeid);
+	if (!node)
 		return;
-	}
+
+	log_group(g, "process_node_down %d", nodeid);
 
 	list_del(&node->list);
 	g->memb_count--;
@@ -187,6 +185,7 @@
 
 void process_groupd_confchg(void)
 {
+	group_t *g;
 	struct recovery_set *rs;
 	int i, found = 0;
 	uint32_t gid;
@@ -248,6 +247,18 @@
 		}
 		groupd_down(saved_left[i].nodeid);
 	}
+
+	/* we call process_node_down from here, instead of from the other cpg
+	   confchg's because we want everyone to see the same order of
+	   confchg's with respect to messages.  see bz 258121 */
+
+	for (i = 0; i < saved_left_count; i++) {
+		if (saved_left[i].reason == CPG_REASON_NODEDOWN ||
+		    saved_left[i].reason == CPG_REASON_PROCDOWN) {
+			list_for_each_entry(g, &gd_groups, list)
+				process_node_down(g, saved_left[i].nodeid);
+		}
+	}
 }
 
 void copy_groupd_data(group_data_t *data)
@@ -389,7 +400,7 @@
 			break;
 		case CPG_REASON_NODEDOWN:
 		case CPG_REASON_PROCDOWN:
-			process_node_down(g, saved_left[i].nodeid);
+			/* process_node_down(g, saved_left[i].nodeid); */
 			break;
 		default:
 			log_error(g, "unknown leave reason %d node %d",
--- cluster/group/daemon/joinleave.c	2007/08/28 16:51:39	1.21
+++ cluster/group/daemon/joinleave.c	2007/09/07 19:17:27	1.22
@@ -135,10 +135,11 @@
 		return -EBUSY;
 	}
 
-	if (g->app->current_event &&
-	    g->app->current_event->nodeid == our_nodeid) {
+	ev = g->app->current_event;
+
+	if (ev && ev->nodeid == our_nodeid) {
 		log_error(g, "leave: busy event %llx state %s",
-			  ev->id, ev_state_str(g->app->current_event));
+			  ev->id, ev_state_str(ev));
 		return -EAGAIN;
 	}
 
--- cluster/group/daemon/main.c	2007/08/20 14:50:30	1.58
+++ cluster/group/daemon/main.c	2007/09/07 19:17:27	1.59
@@ -238,6 +238,7 @@
 		type = NODE_FAILED;
 	else {
 		/* report error */
+		type = -1;
 	}
 
 	/* start <name> <event_nr> <type> <count> <memb0> <memb1>... */




* [Cluster-devel] cluster/group/daemon cpg.c joinleave.c main.c
@ 2007-09-07 19:22 teigland
From: teigland @ 2007-09-07 19:22 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2007-09-07 19:22:08

Modified files:
	group/daemon   : cpg.c joinleave.c main.c 

Log message:
	Do nodedown events when the confchg for the groupd cpg arrives,
	instead of when the per-group cpg confchg's arrive.  This way all
	nodes should agree on the ordering of confchg's with respect to
	messages, since all messages go through the groupd cpg.
	
	This should fix bz 258121, but I can't reproduce anything like that
	bug to verify.
	
	Also, set oom_adj so the daemon is less likely to be killed in an
	OOM condition.
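
For context, a standalone sketch of that OOM adjustment (the function
name oom_protect here is hypothetical; the patch itself adds set_oom_adj
to main.c below).  On kernels of this era, /proc/self/oom_adj accepts
values -16..15, plus -17 to disable OOM killing for the process
entirely; writing -16 makes the daemon the least likely OOM victim
without disabling the killer.  Newer kernels expose
/proc/self/oom_score_adj (range -1000..1000) instead.

#include <stdio.h>

/* Lower this process's OOM-killer priority.  Failure is ignored on
   purpose: a missing file or a permission error just means no OOM
   protection, which is not fatal for the daemon. */
static void oom_protect(int adj)
{
	FILE *fp = fopen("/proc/self/oom_adj", "w");

	if (!fp)
		return;
	fprintf(fp, "%i", adj);
	fclose(fp);
}

/* called early in main(), e.g. oom_protect(-16); */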

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/cpg.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.36.2.2&r2=1.36.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/joinleave.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.19.2.1&r2=1.19.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/main.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.51.2.6&r2=1.51.2.7

--- cluster/group/daemon/cpg.c	2007/01/05 18:50:01	1.36.2.2
+++ cluster/group/daemon/cpg.c	2007/09/07 19:22:08	1.36.2.3
@@ -40,13 +40,11 @@
 	event_t *ev, *ev_safe;
 	int no_rev = 0;
 
-	log_group(g, "process_node_down %d", nodeid);
-
 	node = find_group_node(g, nodeid);
-	if (!node) {
-		log_error(g, "process_node_down: no member %d", nodeid);
+	if (!node)
 		return;
-	}
+
+	log_group(g, "process_node_down %d", nodeid);
 
 	list_del(&node->list);
 	g->memb_count--;
@@ -187,6 +185,7 @@
 
 void process_groupd_confchg(void)
 {
+	group_t *g;
 	struct recovery_set *rs;
 	int i, found = 0;
 	uint32_t gid;
@@ -248,6 +247,18 @@
 		}
 		groupd_down(saved_left[i].nodeid);
 	}
+
+	/* we call process_node_down from here, instead of from the other cpg
+	   confchg's because we want everyone to see the same order of
+	   confchg's with respect to messages.  see bz 258121 */
+
+	for (i = 0; i < saved_left_count; i++) {
+		if (saved_left[i].reason == CPG_REASON_NODEDOWN ||
+		    saved_left[i].reason == CPG_REASON_PROCDOWN) {
+			list_for_each_entry(g, &gd_groups, list)
+				process_node_down(g, saved_left[i].nodeid);
+		}
+	}
 }
 
 void copy_groupd_data(group_data_t *data)
@@ -389,7 +400,7 @@
 			break;
 		case CPG_REASON_NODEDOWN:
 		case CPG_REASON_PROCDOWN:
-			process_node_down(g, saved_left[i].nodeid);
+			/* process_node_down(g, saved_left[i].nodeid); */
 			break;
 		default:
 			log_error(g, "unknown leave reason %d node %d",
--- cluster/group/daemon/joinleave.c	2007/08/31 14:26:04	1.19.2.1
+++ cluster/group/daemon/joinleave.c	2007/09/07 19:22:08	1.19.2.2
@@ -131,10 +131,11 @@
 		return -EBUSY;
 	}
 
-	if (g->app->current_event &&
-	    g->app->current_event->nodeid == our_nodeid) {
+	ev = g->app->current_event;
+
+	if (ev && ev->nodeid == our_nodeid) {
 		log_error(g, "leave: busy event %llx state %s",
-			  ev->id, ev_state_str(g->app->current_event));
+			  ev->id, ev_state_str(ev));
 		return -EAGAIN;
 	}
 
--- cluster/group/daemon/main.c	2007/01/11 17:59:25	1.51.2.6
+++ cluster/group/daemon/main.c	2007/09/07 19:22:08	1.51.2.7
@@ -238,6 +238,7 @@
 		type = NODE_FAILED;
 	else {
 		/* report error */
+		type = -1;
 	}
 
 	/* start <name> <event_nr> <type> <count> <memb0> <memb1>... */
@@ -904,6 +905,18 @@
 	}
 }
 
+void set_oom_adj(int val)
+{
+	FILE *fp;
+
+	fp = fopen("/proc/self/oom_adj", "w");
+	if (!fp)
+		return;
+
+	fprintf(fp, "%i", val);
+	fclose(fp);
+}
+
 void set_scheduler(void)
 {
 	struct sched_param sched_param;
@@ -939,8 +952,7 @@
 		write(fd, now_ascii, strlen(now_ascii));
 		write(fd, " groupd segfault log follows:\n", 30);
 		close(fd);
-	}
-	else
+	} else
 		perror(LOG_FILE);
 	if (sig == SIGSEGV)
 		exit(0);
@@ -960,10 +972,12 @@
 
 	signal(SIGSEGV, bail_with_log);
 	signal(SIGUSR1, bail_with_log);
+
 	if (!groupd_debug_opt)
 		daemonize();
 
 	set_scheduler();
+	set_oom_adj(-16);
 
 	pollfd = malloc(NALLOC * sizeof(struct pollfd));
 	if (!pollfd)


