From mboxrd@z Thu Jan  1 00:00:00 1970
From: teigland@sourceware.org
Date: 7 Sep 2007 19:22:09 -0000
Subject: [Cluster-devel] cluster/group/daemon cpg.c joinleave.c main.c
Message-ID: <20070907192209.16153.qmail@sourceware.org>
List-Id:
To: cluster-devel.redhat.com
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2007-09-07 19:22:08

Modified files:
	group/daemon   : cpg.c joinleave.c main.c

Log message:
	Process nodedown events when the confchg for the groupd cpg arrives,
	instead of when the per-group cpg confchg's arrive.  This means all
	nodes should see an agreed ordering of confchg's and messages, since
	all messages go through the groupd cpg.  This should fix bz 258121,
	but I can't reproduce anything like that bug to verify it.
	Also, set oom_adj to avoid being killed in an OOM condition.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/cpg.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.36.2.2&r2=1.36.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/joinleave.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.19.2.1&r2=1.19.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/main.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.51.2.6&r2=1.51.2.7

--- cluster/group/daemon/cpg.c	2007/01/05 18:50:01	1.36.2.2
+++ cluster/group/daemon/cpg.c	2007/09/07 19:22:08	1.36.2.3
@@ -40,13 +40,11 @@
 	event_t *ev, *ev_safe;
 	int no_rev = 0;
 
-	log_group(g, "process_node_down %d", nodeid);
-
 	node = find_group_node(g, nodeid);
-	if (!node) {
-		log_error(g, "process_node_down: no member %d", nodeid);
+	if (!node)
 		return;
-	}
+
+	log_group(g, "process_node_down %d", nodeid);
 
 	list_del(&node->list);
 	g->memb_count--;
@@ -187,6 +185,7 @@
 
 void process_groupd_confchg(void)
 {
+	group_t *g;
 	struct recovery_set *rs;
 	int i, found = 0;
 	uint32_t gid;
@@ -248,6 +247,18 @@
 		}
 		groupd_down(saved_left[i].nodeid);
 	}
+
+	/* we call process_node_down from here, instead of from the other cpg
+	   confchg's because we want everyone to see the same order of
+	   confchg's with respect to messages. see bz 258121 */
+
+	for (i = 0; i < saved_left_count; i++) {
+		if (saved_left[i].reason == CPG_REASON_NODEDOWN ||
+		    saved_left[i].reason == CPG_REASON_PROCDOWN) {
+			list_for_each_entry(g, &gd_groups, list)
+				process_node_down(g, saved_left[i].nodeid);
+		}
+	}
 }
 
 void copy_groupd_data(group_data_t *data)
@@ -389,7 +400,7 @@
 		break;
 	case CPG_REASON_NODEDOWN:
 	case CPG_REASON_PROCDOWN:
-		process_node_down(g, saved_left[i].nodeid);
+		/* process_node_down(g, saved_left[i].nodeid); */
 		break;
 	default:
 		log_error(g, "unknown leave reason %d node %d",
--- cluster/group/daemon/joinleave.c	2007/08/31 14:26:04	1.19.2.1
+++ cluster/group/daemon/joinleave.c	2007/09/07 19:22:08	1.19.2.2
@@ -131,10 +131,11 @@
 		return -EBUSY;
 	}
 
-	if (g->app->current_event &&
-	    g->app->current_event->nodeid == our_nodeid) {
+	ev = g->app->current_event;
+
+	if (ev && ev->nodeid == our_nodeid) {
 		log_error(g, "leave: busy event %llx state %s",
-			  ev->id, ev_state_str(g->app->current_event));
+			  ev->id, ev_state_str(ev));
 		return -EAGAIN;
 	}
 
--- cluster/group/daemon/main.c	2007/01/11 17:59:25	1.51.2.6
+++ cluster/group/daemon/main.c	2007/09/07 19:22:08	1.51.2.7
@@ -238,6 +238,7 @@
 		type = NODE_FAILED;
 	else {
 		/* report error */
+		type = -1;
 	}
 	/* start ...
 	 */
@@ -904,6 +905,18 @@
 	}
 }
 
+void set_oom_adj(int val)
+{
+	FILE *fp;
+
+	fp = fopen("/proc/self/oom_adj", "w");
+	if (!fp)
+		return;
+
+	fprintf(fp, "%i", val);
+	fclose(fp);
+}
+
 void set_scheduler(void)
 {
 	struct sched_param sched_param;
@@ -939,8 +952,7 @@
 		write(fd, now_ascii, strlen(now_ascii));
 		write(fd, " groupd segfault log follows:\n", 30);
 		close(fd);
-	}
-	else
+	} else
 		perror(LOG_FILE);
 	if (sig == SIGSEGV)
 		exit(0);
@@ -960,10 +972,12 @@
 	signal(SIGSEGV, bail_with_log);
 	signal(SIGUSR1, bail_with_log);
 
+
 	if (!groupd_debug_opt)
 		daemonize();
 
 	set_scheduler();
+	set_oom_adj(-16);
 
 	pollfd = malloc(NALLOC * sizeof(struct pollfd));
 	if (!pollfd)
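
For anyone following the reasoning in the log message, here is a minimal,
self-contained sketch of the ordering argument.  It is not groupd code and
all names in it (struct event, dispatch, the group names, and so on) are
invented for illustration: since every message already travels through the
single groupd cpg, driving nodedown handling from that cpg's confchg means
every node applies the nodedown at the same point in one agreed event
stream, instead of at whatever point each per-group cpg happens to deliver
its own confchg.

/* ordering-sketch.c: illustration only, not groupd code.  Every node is
   assumed to consume the same totally ordered event stream from the
   single groupd cpg. */

#include <stdio.h>

#define MAX_GROUPS 2

enum ev_type { EV_MESSAGE, EV_CONFCHG_NODEDOWN };

struct event {
	enum ev_type type;
	int nodeid;		/* message sender, or the node that died */
};

struct group {
	const char *name;
	int msg_count;		/* messages seen before the nodedown */
};

static struct group groups[MAX_GROUPS] = { { "fenced", 0 }, { "dlm", 0 } };

static void process_node_down(struct group *g, int nodeid)
{
	printf("%s: node %d down after %d messages\n",
	       g->name, nodeid, g->msg_count);
}

/* Because the stream is the same on every node, every node makes these
   calls in the same order for every group. */
static void dispatch(const struct event *ev)
{
	int i;

	for (i = 0; i < MAX_GROUPS; i++) {
		if (ev->type == EV_MESSAGE)
			groups[i].msg_count++;
		else
			process_node_down(&groups[i], ev->nodeid);
	}
}

int main(void)
{
	/* the agreed order delivered by the groupd cpg */
	const struct event stream[] = {
		{ EV_MESSAGE, 1 },
		{ EV_MESSAGE, 2 },
		{ EV_CONFCHG_NODEDOWN, 2 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(stream) / sizeof(stream[0]); i++)
		dispatch(&stream[i]);
	return 0;
}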
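
And for reference on the oom_adj half of the change, a standalone sketch of
the same trick.  The values and the fallback order here are my own choices,
not taken from the patch: kernels from 2.6.36 onward replaced
/proc/self/oom_adj (range -17..15) with /proc/self/oom_score_adj (range
-1000..1000), and oom_adj was eventually removed, so a daemon doing this
today would try the newer file first.

/* oom-sketch.c: illustration only, not the groupd code. */

#include <stdio.h>

static int write_proc_int(const char *path, int val)
{
	FILE *fp = fopen(path, "w");

	if (!fp)
		return -1;
	fprintf(fp, "%i", val);
	return fclose(fp);
}

/* Make this process an unattractive target for the OOM killer. */
void set_oom_protection(void)
{
	/* newer interface; -900 is a guess at a value roughly comparable
	   to oom_adj -16 on the old scale */
	if (write_proc_int("/proc/self/oom_score_adj", -900) == 0)
		return;

	/* older kernels only have oom_adj, as in the patch above */
	write_proc_int("/proc/self/oom_adj", -16);
}

int main(void)
{
	set_oom_protection();
	return 0;
}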