cluster-devel.redhat.com archive mirror
From: teigland@sourceware.org <teigland@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] cluster/group/daemon app.c cpg.c gd_internal.h
Date: 5 Jan 2007 18:49:02 -0000
Message-ID: <20070105184902.22567.qmail@sourceware.org>

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-01-05 18:49:00

Modified files:
	group/daemon   : app.c cpg.c gd_internal.h 

Log message:
	groupd creates a uint32 global id for each group.  It doesn't
	use the ids itself, but provides them to each registered app to
	use if it wants.  (The dlm and gfs each use the global id in
	messages to distinguish between different lockspaces/filesystems.)
	groupd's method of creating these gids (local counter in the
	upper 16 bits | local nodeid in the lower 16 bits) can result
	in duplicate gids in the cluster given a somewhat uncommon
	sequence of events.
	bz 221629
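
	For reference, the gid layout used in the patch below packs the
	node's local counter into the upper 16 bits and its nodeid into
	the lower 16 bits.  A minimal sketch of how such a gid could be
	composed and decomposed, and how a duplicate can arise after a
	restart (helper names here are illustrative, not groupd's API):

	#include <stdint.h>
	#include <stdio.h>

	/* illustrative helpers only; groupd builds gids inline */
	static uint32_t make_gid(uint16_t counter, uint16_t nodeid)
	{
		return ((uint32_t)counter << 16) | nodeid;
	}

	static uint16_t gid_nodeid(uint32_t gid)
	{
		return gid & 0x0000FFFF;
	}

	static uint16_t gid_counter(uint32_t gid)
	{
		return (gid >> 16) & 0x0000FFFF;
	}

	int main(void)
	{
		/* node 2 creates its first two groups */
		uint32_t a = make_gid(1, 2);	/* 0x00010002 */
		uint32_t b = make_gid(2, 2);	/* 0x00020002 */

		/* if node 2 restarts, its counter resets to 0; without
		   being told its old maximum it would hand out
		   0x00010002 again while group 'a' may still exist */
		printf("a=%x node %u counter %u\n", a, gid_nodeid(a), gid_counter(a));
		printf("b=%x node %u counter %u\n", b, gid_nodeid(b), gid_counter(b));
		return 0;
	}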

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/app.c.diff?cvsroot=cluster&r1=1.53&r2=1.54
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/cpg.c.diff?cvsroot=cluster&r1=1.37&r2=1.38
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/daemon/gd_internal.h.diff?cvsroot=cluster&r1=1.45&r2=1.46

--- cluster/group/daemon/app.c	2006/12/01 20:26:01	1.53
+++ cluster/group/daemon/app.c	2007/01/05 18:49:00	1.54
@@ -21,6 +21,8 @@
 		return "recover";
 	case MSG_APP_INTERNAL:
 		return "internal";
+	case MSG_GLOBAL_ID:
+		return "global_id";
 	}
 	return "unknown";
 }
--- cluster/group/daemon/cpg.c	2006/12/01 20:26:01	1.37
+++ cluster/group/daemon/cpg.c	2007/01/05 18:49:00	1.38
@@ -149,10 +149,47 @@
 	queue_app_leave(g, nodeid);
 }
 
+static uint32_t max_global_id(uint32_t add_nodeid)
+{
+	group_t *g;
+	uint32_t nodeid, counter, max_counter = 0, max_gid = 0;
+
+	list_for_each_entry(g, &gd_groups, list) {
+		nodeid = g->global_id & 0x0000FFFF;
+		counter = (g->global_id >> 16) & 0x0000FFFF;
+		if (nodeid != add_nodeid)
+			continue;
+		if (!max_counter || counter > max_counter) {
+			max_counter = counter;
+			max_gid = g->global_id;
+		}
+	}
+	return max_gid;
+}
+
+static int send_gid(uint32_t gid)
+{
+	group_t g;
+	msg_t msg;
+
+	/* just so log_group will work */
+	memset(&g, 0, sizeof(group_t));
+	strcpy(g.name, "groupd");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.ms_type = MSG_GLOBAL_ID;
+	msg.ms_global_id = gid;
+
+	msg_bswap_out(&msg);
+
+	return send_message_groupd(&g, &msg, sizeof(msg), MSG_GLOBAL_ID);
+}
+
 void process_groupd_confchg(void)
 {
 	struct recovery_set *rs;
 	int i, found = 0;
+	uint32_t gid;
 
 	log_debug("groupd confchg total %d left %d joined %d",
 		  saved_member_count, saved_left_count, saved_joined_count);
@@ -167,6 +204,23 @@
 		}
 	}
 
+	if (!groupd_joined)
+		goto next;
+
+	/* find any groups that were created in the past by a new node
+	   and send it the id it used so it can initialize global_id_counter
+	   to avoid creating a new group with a duplicate id */
+
+	for (i = 0; i < saved_joined_count; i++) {
+		gid = max_global_id(saved_joined[i].nodeid);
+		if (!gid)
+			continue;
+		log_debug("joined node %d had old max gid %x",
+			  saved_joined[i].nodeid, gid);
+		send_gid(gid);
+	}
+
+ next:
 	if (found)
 		groupd_joined = 1;
 	else
@@ -235,12 +289,26 @@
 	msg_t *msg = (msg_t *) data;
 	char *buf;
 	char name[MAX_NAMELEN+1];
+	uint32_t to_nodeid, counter;
 	int len;
 
 	memset(&name, 0, sizeof(name));
 
 	msg_bswap_in(msg);
 
+	if (msg->ms_type == MSG_GLOBAL_ID) {
+		to_nodeid = msg->ms_global_id & 0x0000FFFF;
+		counter = (msg->ms_global_id >> 16) & 0x0000FFFF;
+
+		if (to_nodeid == our_nodeid) {
+			log_debug("recv global_id %x from %u cur counter %u",
+			  	  msg->ms_global_id, nodeid, global_id_counter);
+			if (counter > global_id_counter)
+				global_id_counter = counter;
+		}
+		return;
+	}
+
 	if (handle == groupd_handle) {
 		memcpy(&name, &msg->ms_name, MAX_NAMELEN);
 
--- cluster/group/daemon/gd_internal.h	2006/12/01 20:26:01	1.45
+++ cluster/group/daemon/gd_internal.h	2007/01/05 18:49:00	1.46
@@ -189,6 +189,7 @@
 #define MSG_APP_STARTED        2
 #define MSG_APP_RECOVER        3
 #define MSG_APP_INTERNAL       4
+#define MSG_GLOBAL_ID          5
 
 #define MSG_VER_MAJOR          1
 #define MSG_VER_MINOR          0
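
Taken together, the fix works like this: when a node joins the groupd
cpg, each existing member scans its group list for gids whose low 16
bits match the joiner's nodeid and sends the one with the highest
counter as a MSG_GLOBAL_ID message; the joiner then raises its
global_id_counter past that value before creating new groups.  Below is
a standalone sketch of that counter-recovery step, using a stand-in
array of gids in place of groupd's real group_t list:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the existing gids in the cluster; in groupd this comes
   from walking gd_groups */
static uint32_t existing_gids[] = { 0x00010002, 0x00030002, 0x00010005 };
#define NGIDS (sizeof(existing_gids) / sizeof(existing_gids[0]))

/* highest gid previously created by add_nodeid, or 0 if none
   (mirrors the logic of max_global_id() in cpg.c) */
static uint32_t max_global_id(uint32_t add_nodeid)
{
	uint32_t gid, nodeid, counter, max_counter = 0, max_gid = 0;
	unsigned int i;

	for (i = 0; i < NGIDS; i++) {
		gid = existing_gids[i];
		nodeid = gid & 0x0000FFFF;
		counter = (gid >> 16) & 0x0000FFFF;
		if (nodeid != add_nodeid)
			continue;
		if (!max_counter || counter > max_counter) {
			max_counter = counter;
			max_gid = gid;
		}
	}
	return max_gid;
}

int main(void)
{
	uint32_t our_nodeid = 2;	/* the node that just rejoined */
	uint32_t global_id_counter = 0;	/* reset by the restart */
	uint32_t gid, counter;

	/* what an existing member would send us in MSG_GLOBAL_ID */
	gid = max_global_id(our_nodeid);
	counter = (gid >> 16) & 0x0000FFFF;

	/* the receive path: only ever raise the counter */
	if (counter > global_id_counter)
		global_id_counter = counter;

	printf("old max gid %x, counter now %u\n", gid, global_id_counter);
	return 0;
}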


