cluster-devel.redhat.com archive mirror
* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-10-16 14:44 teigland
From: teigland @ 2006-10-16 14:44 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-10-16 14:44:02

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	A node that was just added would incorrectly conclude that the node
	after it needed to do first mounter recovery.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.19&r2=1.20
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.19&r2=1.20

--- cluster/group/gfs_controld/lock_dlm.h	2006/10/13 20:00:02	1.19
+++ cluster/group/gfs_controld/lock_dlm.h	2006/10/16 14:44:02	1.20
@@ -149,6 +149,7 @@
 	int			first_mount_pending_stop;
 	int			first_mounter;
 	int			first_mounter_done;
+	int			global_first_recover_done;
 	int			emulate_first_mounter;
 	int			wait_first_done;
 	int			low_nodeid;
--- cluster/group/gfs_controld/recover.c	2006/10/13 20:00:02	1.19
+++ cluster/group/gfs_controld/recover.c	2006/10/16 14:44:02	1.20
@@ -822,9 +822,22 @@
 		goto out;
 	}
 
+	/* when we received our journals, no one was flagged with OPT_RECOVER
+	   which means no first mounter recovery is needed or is current */
+
+	if (mg->global_first_recover_done) {
+		log_group(mg, "assign_journal: global_firsts_recover_done");
+		goto out;
+	}
+
 	/* no one has done kernel mount successfully and no one is doing first
 	   mounter recovery, the new node gets to try first mounter recovery */
 
+	log_group(mg, "kernel_mount_done %d kernel_mount_error %d "
+		      "first_mounter %d first_mounter_done %d",
+		      mg->kernel_mount_done, mg->kernel_mount_error,
+		      mg->first_mounter, mg->first_mounter_done);
+
 	log_group(mg, "assign_journal: memb %d gets OPT_RECOVER", new->nodeid);
 	new->opts |= MEMB_OPT_RECOVER;
 
@@ -1007,6 +1020,7 @@
 	struct mg_member *memb, *memb2;
 	struct gdlm_header *hd;
 	int *ids, count, i, nodeid, jid, opts;
+	int current_first_recover = 0;
 
 	hd = (struct gdlm_header *)buf;
 
@@ -1048,8 +1062,16 @@
 			else if (opts & MEMB_OPT_SPECT)
 				memb->spectator = 1;
 		}
+
+		if (opts & MEMB_OPT_RECOVER)
+			current_first_recover = 1;
 	}
 
+	/* FIXME: use global_first_recover_done more widely instead of
+	   as a single special case */
+	if (!current_first_recover)
+		mg->global_first_recover_done = 1;
+
 	process_saved_mount_status(mg);
 
 	/* we delay processing any options messages from new mounters



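For reference, a compile-able sketch of the decision this change introduces, using simplified stand-in structs (the MEMB_OPT_RECOVER value and the helper names below are illustrative assumptions, not the real gfs_controld definitions): after the saved options from every member have been scanned, a node only hands OPT_RECOVER to a newly added mounter if first mounter recovery has neither completed nor is currently in progress.

#include <stdio.h>

/* Simplified stand-ins for gfs_controld's mountgroup/member structs;
   the flag value is an assumption chosen only for illustration. */
#define MEMB_OPT_RECOVER 0x1

struct member { int opts; };
struct group  { int global_first_recover_done; };

/* After receiving the saved options for all members, mark the group if
   no member is currently flagged for first mounter recovery. */
static void scan_members(struct group *g, struct member *membs, int count)
{
	int current_first_recover = 0;
	int i;

	for (i = 0; i < count; i++)
		if (membs[i].opts & MEMB_OPT_RECOVER)
			current_first_recover = 1;

	if (!current_first_recover)
		g->global_first_recover_done = 1;
}

/* Journal assignment for a newly added node: skip first mounter
   recovery entirely once it is known to be globally done. */
static int needs_first_mounter_recovery(struct group *g)
{
	return !g->global_first_recover_done;
}

int main(void)
{
	struct member membs[2] = { { 0 }, { 0 } };   /* nobody recovering */
	struct group g = { 0 };

	scan_members(&g, membs, 2);
	printf("assign OPT_RECOVER to new node? %s\n",
	       needs_first_mounter_recovery(&g) ? "yes" : "no");
	return 0;
}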

* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-10-23 15:44 teigland
From: teigland @ 2006-10-23 15:44 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-10-23 15:44:34

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	Patch from Abhi to fix a case where a node's mount is rejected by
	other group members, causing gfs_controld on the mounter to leave
	the group immediately.  It was sometimes leaving before its join had
	even finished, which caused groupd to reject the leave, so we need
	to wait for the join to complete before issuing the leave.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.22&r2=1.23

--- cluster/group/gfs_controld/lock_dlm.h	2006/10/16 14:44:02	1.20
+++ cluster/group/gfs_controld/lock_dlm.h	2006/10/23 15:44:33	1.21
@@ -138,6 +138,7 @@
 	int			mount_client_fd;
 	int			mount_client_notified;
 	int			mount_client_delay;
+	int                     group_leave_on_finish;
 	int			remount_client;
 	int			init;
 	int			got_our_options;
--- cluster/group/gfs_controld/recover.c	2006/10/16 17:12:10	1.22
+++ cluster/group/gfs_controld/recover.c	2006/10/23 15:44:33	1.23
@@ -1933,6 +1933,9 @@
 {
 	char buf[MAXLINE];
 	int rv, error = 0;
+	struct mg_member *memb;
+	
+	memb = find_memb_nodeid(mg, our_nodeid);
 
 	memset(buf, 0, MAXLINE);
 
@@ -1963,9 +1966,15 @@
 
 	if (error) {
 		log_group(mg, "leaving due to mount error: %s", mg->error_msg);
-		group_leave(gh, mg->name);
-	} else
+		if (memb->finished)
+			group_leave(gh, mg->name);
+		else {
+			log_group(mg, "delay leave until after join");
+			mg->group_leave_on_finish = 1;
+		}
+	} else {
 		mg->mount_client_notified = 1;
+	}
 }
 
 void ping_kernel_mount(char *table)
@@ -2192,6 +2201,13 @@
 	list_for_each_entry(memb, &mg->members, list)
 		memb->finished = 1;
 
+	if (mg->group_leave_on_finish) {
+		log_group(mg, "leaving group after delay for join to finish");
+		group_leave(gh, mg->name);
+		mg->group_leave_on_finish = 0;
+		return 0;
+	}
+
 	if (mg->needs_recovery) {
 		log_group(mg, "finish: leave locks blocked for needs_recovery");
 		leave_blocked = 1;



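A minimal sketch of the deferral pattern this patch uses, with stub types and a stub group_leave() standing in for libgroup (the real code looks up memb->finished for our own nodeid): if the mount fails while our join is still pending, the leave is only recorded, and it is issued later from the finish handler.

#include <stdio.h>

/* Sketch only: simplified state for deferring a group leave until the
   join has finished; group_leave() is a stand-in for the libgroup call. */
struct group {
	int finished;              /* our join has completed */
	int group_leave_on_finish; /* leave requested while join pending */
};

static void group_leave(const char *name)
{
	printf("group_leave(%s)\n", name);
}

/* Mount failed: only leave right away if our own join already finished,
   otherwise groupd would reject the leave. */
static void mount_failed(struct group *g, const char *name)
{
	if (g->finished)
		group_leave(name);
	else
		g->group_leave_on_finish = 1;
}

/* Called when groupd reports the join as finished. */
static void do_finish(struct group *g, const char *name)
{
	g->finished = 1;
	if (g->group_leave_on_finish) {
		g->group_leave_on_finish = 0;
		group_leave(name);
	}
}

int main(void)
{
	struct group g = { 0, 0 };

	mount_failed(&g, "testfs");  /* join still pending: leave deferred */
	do_finish(&g, "testfs");     /* leave issued here instead */
	return 0;
}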

* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-12-19  1:42 rpeterso
From: rpeterso @ 2006-12-19  1:42 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	rpeterso at sourceware.org	2006-12-19 01:42:38

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	Resolves: bz 218560: multiple mount points fail with gfs and gfs2

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.24&r2=1.25
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.24&r2=1.25

--- cluster/group/gfs_controld/lock_dlm.h	2006/12/05 22:19:17	1.24
+++ cluster/group/gfs_controld/lock_dlm.h	2006/12/19 01:42:37	1.25
@@ -113,6 +113,11 @@
         } \
 }
 
+struct mountpoint {
+	struct list_head	list;
+	char			dir[MAXNAME+1];
+};
+
 struct mountgroup {
 	struct list_head	list;
 	uint32_t		id;
@@ -124,7 +129,7 @@
 	char			name[MAXNAME+1];
 	char			table[MAXNAME+1];
 	char			type[5];
-	char			dir[PATH_MAX+1];
+	struct list_head mntpoints;
 	char			options[MAX_OPTIONS_LEN+1];
 	char			dev[PATH_MAX+1];
 
--- cluster/group/gfs_controld/recover.c	2006/12/05 22:19:17	1.24
+++ cluster/group/gfs_controld/recover.c	2006/12/19 01:42:37	1.25
@@ -28,6 +28,8 @@
 void start_spectator_init_2(struct mountgroup *mg);
 void start_spectator_2(struct mountgroup *mg);
 void notify_mount_client(struct mountgroup *mg);
+int do_finish(struct mountgroup *mg);
+int do_terminate(struct mountgroup *mg);
 
 int set_sysfs(struct mountgroup *mg, char *field, int val)
 {
@@ -1468,10 +1470,13 @@
 struct mountgroup *find_mg_dir(char *dir)
 {
 	struct mountgroup *mg;
+	struct mountpoint *mt_point;
 
 	list_for_each_entry(mg, &mounts, list) {
-		if (!strcmp(mg->dir, dir))
-			return mg;
+		list_for_each_entry(mt_point, &mg->mntpoints, list) {
+			if (!strcmp(mt_point->dir, dir))
+				return mg;
+		}
 	}
 	return NULL;
 }
@@ -1496,7 +1501,8 @@
 int do_mount(int ci, char *dir, char *type, char *proto, char *table,
 	     char *options, char *dev, struct mountgroup **mg_ret)
 {
-	struct mountgroup *mg;
+	struct mountgroup *mg, *new_mg = NULL;
+	struct mountpoint *mp;
 	char table2[MAXLINE];
 	char *cluster = NULL, *name = NULL;
 	int rv;
@@ -1538,24 +1544,33 @@
 		goto fail;
 	}
 
-	mg = find_mg(name);
-	if (mg) {
-		rv = -EEXIST;
+	/* Allocate and populate a new mountpoint entry */
+	mp = malloc(sizeof(struct mountpoint));
+	if (!mp) {
+		rv = -ENOMEM;
 		goto fail;
 	}
+	strncpy(mp->dir, dir, sizeof(mp->dir));
 
-	mg = create_mg(name);
+	/* Check if we already have a mount group or need a new one */
+	mg = find_mg(name);
 	if (!mg) {
-		rv = -ENOMEM;
-		goto fail;
+		mg = new_mg = create_mg(name);
+		if (!mg) {
+			free(mp);
+			rv = -ENOMEM;
+			goto fail;
+		}
+		strncpy(mg->type, type, sizeof(mg->type));
+		strncpy(mg->table, table, sizeof(mg->table));
+		strncpy(mg->options, options, sizeof(mg->options));
+		strncpy(mg->dev, dev, sizeof(mg->dev));
+		INIT_LIST_HEAD(&mg->mntpoints);
 	}
 
 	mg->mount_client = ci;
-	strncpy(mg->dir, dir, sizeof(mg->dir));
-	strncpy(mg->type, type, sizeof(mg->type));
-	strncpy(mg->table, table, sizeof(mg->table));
-	strncpy(mg->options, options, sizeof(mg->options));
-	strncpy(mg->dev, dev, sizeof(mg->dev));
+	/* Add the mount point to the list in the mountgroup. */
+	list_add(&mp->list, &mg->mntpoints);
 
 	if (strlen(cluster) != strlen(clustername) ||
 	    strlen(cluster) == 0 || strcmp(cluster, clustername)) {
@@ -1566,38 +1581,42 @@
 	} else
 		log_group(mg, "cluster name matches: %s", clustername);
 
-	if (strstr(options, "spectator")) {
-		log_group(mg, "spectator mount");
-		mg->spectator = 1;
-	} else {
-		if (!we_are_in_fence_domain()) {
-			rv = -EINVAL;
-			log_error("mount: not in default fence domain");
-			goto fail;
+	if (new_mg) {
+		if (strstr(options, "spectator")) {
+			log_group(mg, "spectator mount");
+			mg->spectator = 1;
+		} else {
+			if (!we_are_in_fence_domain()) {
+				rv = -EINVAL;
+				log_error("mount: not in default fence domain");
+				goto fail;
+			}
+		}
+
+		if (!mg->spectator && strstr(options, "rw"))
+			mg->rw = 1;
+		else if (strstr(options, "ro")) {
+			if (mg->spectator) {
+				rv = -EINVAL;
+				log_error("mount: readonly invalid with spectator");
+				goto fail;
+			}
+			mg->readonly = 1;
 		}
-	}
 
-	if (!mg->spectator && strstr(options, "rw"))
-		mg->rw = 1;
-	else if (strstr(options, "ro")) {
-		if (mg->spectator) {
+		if (strlen(options) > MAX_OPTIONS_LEN-1) {
 			rv = -EINVAL;
-			log_error("mount: readonly invalid with spectator");
+			log_error("mount: options too long %d", strlen(options));
 			goto fail;
 		}
-		mg->readonly = 1;
+		list_add(&mg->list, &mounts);
 	}
-
-	if (strlen(options) > MAX_OPTIONS_LEN-1) {
-		rv = -EINVAL;
-		log_error("mount: options too long %d", strlen(options));
-		goto fail;
-	}
-
-	list_add(&mg->list, &mounts);
 	*mg_ret = mg;
 
-	group_join(gh, name);
+	if (new_mg)
+		group_join(gh, name);
+	else
+		notify_mount_client(mg);
 	return 0;
 
  fail:
@@ -1883,14 +1902,27 @@
 int do_unmount(int ci, char *dir, int mnterr)
 {
 	struct mountgroup *mg;
+	struct mountpoint *mt_point, *safe;
 
 	list_for_each_entry(mg, &withdrawn_mounts, list) {
-		if (!strcmp(mg->dir, dir)) {
+		int is_withdrawn = FALSE;
+
+		list_for_each_entry(mt_point, &mg->mntpoints, list) {
+			if (!strcmp(mt_point->dir, dir)) {
+				is_withdrawn = TRUE;
+				break;
+			}
+		}
+		if (is_withdrawn) {
+			list_for_each_entry_safe(mt_point, safe, &mg->mntpoints, list) {
+				list_del(&mt_point->list);
+				free(mt_point);
+			}
 			log_group(mg, "unmount withdrawn fs");
 			list_del(&mg->list);
 			free(mg);
-			return 0;
 		}
+		return 0;
 	}
 
 	mg = find_mg_dir(dir);
@@ -1910,7 +1942,6 @@
 			mg->kernel_mount_error = mnterr;
 			mg->kernel_mount_done = 1;
 		}
-		goto out;
 	}
 
 	if (mg->withdraw) {
@@ -1918,16 +1949,26 @@
 		return -1;
 	}
 
+	/* Delete this mount point out of the list */
+	list_for_each_entry(mt_point, &mg->mntpoints, list) {
+		if (!strcmp(mt_point->dir, dir)) {
+			list_del(&mt_point->list);
+			free(mt_point);
+			break;
+		}
+	}
 	/* Check to see if we're waiting for a kernel recovery_done to do a
 	   start_done().  If so, call the start_done() here because we won't be
 	   getting anything else from gfs-kernel which is now gone. */
 
-	if (need_kernel_recovery_done(mg)) {
+	if (!mg->kernel_mount_error &&
+		list_empty(&mg->mntpoints) && need_kernel_recovery_done(mg)) {
 		log_group(mg, "do_unmount: fill in start_done");
 		start_done(mg);
 	}
- out:
-	group_leave(gh, mg->name);
+
+	if (list_empty(&mg->mntpoints))
+		group_leave(gh, mg->name);
 	return 0;
 }
 



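A self-contained sketch of the multiple-mount-point scheme in this change, using a plain singly linked list instead of the kernel-style list_head the real code uses (names such as MAXNAME and group_leave() are borrowed only for illustration): each mountgroup keeps a list of mount points, do_mount() adds to an existing group instead of failing with -EEXIST, and group_leave() is only issued once the last mount point is removed.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAXNAME 255

struct mountpoint {
	struct mountpoint *next;
	char dir[MAXNAME + 1];
};

struct mountgroup {
	char name[MAXNAME + 1];
	struct mountpoint *mntpoints;
};

/* Stand-in for the libgroup leave call. */
static void group_leave(const char *name)
{
	printf("group_leave(%s)\n", name);
}

/* do_mount() side: attach another mount point to an existing group. */
static int add_mountpoint(struct mountgroup *mg, const char *dir)
{
	struct mountpoint *mp = malloc(sizeof(*mp));

	if (!mp)
		return -1;
	snprintf(mp->dir, sizeof(mp->dir), "%s", dir);
	mp->next = mg->mntpoints;
	mg->mntpoints = mp;
	return 0;
}

/* do_unmount() side: drop one mount point; leave the group only when
   the list becomes empty. */
static void del_mountpoint(struct mountgroup *mg, const char *dir)
{
	struct mountpoint **pp = &mg->mntpoints;

	while (*pp) {
		if (!strcmp((*pp)->dir, dir)) {
			struct mountpoint *mp = *pp;
			*pp = mp->next;
			free(mp);
			break;
		}
		pp = &(*pp)->next;
	}
	if (!mg->mntpoints)
		group_leave(mg->name);
}

int main(void)
{
	struct mountgroup mg = { "testfs", NULL };

	add_mountpoint(&mg, "/mnt/a");
	add_mountpoint(&mg, "/mnt/b");
	del_mountpoint(&mg, "/mnt/a");  /* group stays joined */
	del_mountpoint(&mg, "/mnt/b");  /* last one: group_leave() */
	return 0;
}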

* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-12-19  1:46 rpeterso
From: rpeterso @ 2006-12-19  1:46 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	rpeterso at sourceware.org	2006-12-19 01:46:47

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	Resolves: bz 218560: multiple mount points fail with gfs and gfs2

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21.2.3&r2=1.21.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.23.2.1&r2=1.23.2.2

--- cluster/group/gfs_controld/lock_dlm.h	2006/12/05 22:24:29	1.21.2.3
+++ cluster/group/gfs_controld/lock_dlm.h	2006/12/19 01:46:47	1.21.2.4
@@ -113,6 +113,11 @@
         } \
 }
 
+struct mountpoint {
+	struct list_head	list;
+	char			dir[MAXNAME+1];
+};
+
 struct mountgroup {
 	struct list_head	list;
 	uint32_t		id;
@@ -124,7 +129,7 @@
 	char			name[MAXNAME+1];
 	char			table[MAXNAME+1];
 	char			type[5];
-	char			dir[PATH_MAX+1];
+	struct list_head mntpoints;
 	char			options[MAX_OPTIONS_LEN+1];
 	char			dev[PATH_MAX+1];
 
--- cluster/group/gfs_controld/recover.c	2006/12/05 22:24:29	1.23.2.1
+++ cluster/group/gfs_controld/recover.c	2006/12/19 01:46:47	1.23.2.2
@@ -28,6 +28,8 @@
 void start_spectator_init_2(struct mountgroup *mg);
 void start_spectator_2(struct mountgroup *mg);
 void notify_mount_client(struct mountgroup *mg);
+int do_finish(struct mountgroup *mg);
+int do_terminate(struct mountgroup *mg);
 
 int set_sysfs(struct mountgroup *mg, char *field, int val)
 {
@@ -1468,10 +1470,13 @@
 struct mountgroup *find_mg_dir(char *dir)
 {
 	struct mountgroup *mg;
+	struct mountpoint *mt_point;
 
 	list_for_each_entry(mg, &mounts, list) {
-		if (!strcmp(mg->dir, dir))
-			return mg;
+		list_for_each_entry(mt_point, &mg->mntpoints, list) {
+			if (!strcmp(mt_point->dir, dir))
+				return mg;
+		}
 	}
 	return NULL;
 }
@@ -1496,7 +1501,8 @@
 int do_mount(int ci, char *dir, char *type, char *proto, char *table,
 	     char *options, char *dev, struct mountgroup **mg_ret)
 {
-	struct mountgroup *mg;
+	struct mountgroup *mg, *new_mg = NULL;
+	struct mountpoint *mp;
 	char table2[MAXLINE];
 	char *cluster = NULL, *name = NULL;
 	int rv;
@@ -1538,24 +1544,33 @@
 		goto fail;
 	}
 
-	mg = find_mg(name);
-	if (mg) {
-		rv = -EEXIST;
+	/* Allocate and populate a new mountpoint entry */
+	mp = malloc(sizeof(struct mountpoint));
+	if (!mp) {
+		rv = -ENOMEM;
 		goto fail;
 	}
+	strncpy(mp->dir, dir, sizeof(mp->dir));
 
-	mg = create_mg(name);
+	/* Check if we already have a mount group or need a new one */
+	mg = find_mg(name);
 	if (!mg) {
-		rv = -ENOMEM;
-		goto fail;
+		mg = new_mg = create_mg(name);
+		if (!mg) {
+			free(mp);
+			rv = -ENOMEM;
+			goto fail;
+		}
+		strncpy(mg->type, type, sizeof(mg->type));
+		strncpy(mg->table, table, sizeof(mg->table));
+		strncpy(mg->options, options, sizeof(mg->options));
+		strncpy(mg->dev, dev, sizeof(mg->dev));
+		INIT_LIST_HEAD(&mg->mntpoints);
 	}
 
 	mg->mount_client = ci;
-	strncpy(mg->dir, dir, sizeof(mg->dir));
-	strncpy(mg->type, type, sizeof(mg->type));
-	strncpy(mg->table, table, sizeof(mg->table));
-	strncpy(mg->options, options, sizeof(mg->options));
-	strncpy(mg->dev, dev, sizeof(mg->dev));
+	/* Add the mount point to the list in the mountgroup. */
+	list_add(&mp->list, &mg->mntpoints);
 
 	if (strlen(cluster) != strlen(clustername) ||
 	    strlen(cluster) == 0 || strcmp(cluster, clustername)) {
@@ -1566,38 +1581,42 @@
 	} else
 		log_group(mg, "cluster name matches: %s", clustername);
 
-	if (strstr(options, "spectator")) {
-		log_group(mg, "spectator mount");
-		mg->spectator = 1;
-	} else {
-		if (!we_are_in_fence_domain()) {
-			rv = -EINVAL;
-			log_error("mount: not in default fence domain");
-			goto fail;
+	if (new_mg) {
+		if (strstr(options, "spectator")) {
+			log_group(mg, "spectator mount");
+			mg->spectator = 1;
+		} else {
+			if (!we_are_in_fence_domain()) {
+				rv = -EINVAL;
+				log_error("mount: not in default fence domain");
+				goto fail;
+			}
+		}
+
+		if (!mg->spectator && strstr(options, "rw"))
+			mg->rw = 1;
+		else if (strstr(options, "ro")) {
+			if (mg->spectator) {
+				rv = -EINVAL;
+				log_error("mount: readonly invalid with spectator");
+				goto fail;
+			}
+			mg->readonly = 1;
 		}
-	}
 
-	if (!mg->spectator && strstr(options, "rw"))
-		mg->rw = 1;
-	else if (strstr(options, "ro")) {
-		if (mg->spectator) {
+		if (strlen(options) > MAX_OPTIONS_LEN-1) {
 			rv = -EINVAL;
-			log_error("mount: readonly invalid with spectator");
+			log_error("mount: options too long %d", strlen(options));
 			goto fail;
 		}
-		mg->readonly = 1;
+		list_add(&mg->list, &mounts);
 	}
-
-	if (strlen(options) > MAX_OPTIONS_LEN-1) {
-		rv = -EINVAL;
-		log_error("mount: options too long %d", strlen(options));
-		goto fail;
-	}
-
-	list_add(&mg->list, &mounts);
 	*mg_ret = mg;
 
-	group_join(gh, name);
+	if (new_mg)
+		group_join(gh, name);
+	else
+		notify_mount_client(mg);
 	return 0;
 
  fail:
@@ -1883,14 +1902,27 @@
 int do_unmount(int ci, char *dir, int mnterr)
 {
 	struct mountgroup *mg;
+	struct mountpoint *mt_point, *safe;
 
 	list_for_each_entry(mg, &withdrawn_mounts, list) {
-		if (!strcmp(mg->dir, dir)) {
+		int is_withdrawn = FALSE;
+
+		list_for_each_entry(mt_point, &mg->mntpoints, list) {
+			if (!strcmp(mt_point->dir, dir)) {
+				is_withdrawn = TRUE;
+				break;
+			}
+		}
+		if (is_withdrawn) {
+			list_for_each_entry_safe(mt_point, safe, &mg->mntpoints, list) {
+				list_del(&mt_point->list);
+				free(mt_point);
+			}
 			log_group(mg, "unmount withdrawn fs");
 			list_del(&mg->list);
 			free(mg);
-			return 0;
 		}
+		return 0;
 	}
 
 	mg = find_mg_dir(dir);
@@ -1910,7 +1942,6 @@
 			mg->kernel_mount_error = mnterr;
 			mg->kernel_mount_done = 1;
 		}
-		goto out;
 	}
 
 	if (mg->withdraw) {
@@ -1918,16 +1949,26 @@
 		return -1;
 	}
 
+	/* Delete this mount point out of the list */
+	list_for_each_entry(mt_point, &mg->mntpoints, list) {
+		if (!strcmp(mt_point->dir, dir)) {
+			list_del(&mt_point->list);
+			free(mt_point);
+			break;
+		}
+	}
 	/* Check to see if we're waiting for a kernel recovery_done to do a
 	   start_done().  If so, call the start_done() here because we won't be
 	   getting anything else from gfs-kernel which is now gone. */
 
-	if (need_kernel_recovery_done(mg)) {
+	if (!mg->kernel_mount_error &&
+		list_empty(&mg->mntpoints) && need_kernel_recovery_done(mg)) {
 		log_group(mg, "do_unmount: fill in start_done");
 		start_done(mg);
 	}
- out:
-	group_leave(gh, mg->name);
+
+	if (list_empty(&mg->mntpoints))
+		group_leave(gh, mg->name);
 	return 0;
 }
 




* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-12-19  1:48 rpeterso
From: rpeterso @ 2006-12-19  1:48 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL50
Changes by:	rpeterso at sourceware.org	2006-12-19 01:48:53

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	Resolves: bz 218560: multiple mount points fail with gfs and gfs2

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.21.4.3&r2=1.21.4.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.23.4.1&r2=1.23.4.2

--- cluster/group/gfs_controld/lock_dlm.h	2006/12/05 22:24:37	1.21.4.3
+++ cluster/group/gfs_controld/lock_dlm.h	2006/12/19 01:48:53	1.21.4.4
@@ -113,6 +113,11 @@
         } \
 }
 
+struct mountpoint {
+	struct list_head	list;
+	char			dir[MAXNAME+1];
+};
+
 struct mountgroup {
 	struct list_head	list;
 	uint32_t		id;
@@ -124,7 +129,7 @@
 	char			name[MAXNAME+1];
 	char			table[MAXNAME+1];
 	char			type[5];
-	char			dir[PATH_MAX+1];
+	struct list_head mntpoints;
 	char			options[MAX_OPTIONS_LEN+1];
 	char			dev[PATH_MAX+1];
 
--- cluster/group/gfs_controld/recover.c	2006/12/05 22:24:37	1.23.4.1
+++ cluster/group/gfs_controld/recover.c	2006/12/19 01:48:53	1.23.4.2
@@ -28,6 +28,8 @@
 void start_spectator_init_2(struct mountgroup *mg);
 void start_spectator_2(struct mountgroup *mg);
 void notify_mount_client(struct mountgroup *mg);
+int do_finish(struct mountgroup *mg);
+int do_terminate(struct mountgroup *mg);
 
 int set_sysfs(struct mountgroup *mg, char *field, int val)
 {
@@ -1468,10 +1470,13 @@
 struct mountgroup *find_mg_dir(char *dir)
 {
 	struct mountgroup *mg;
+	struct mountpoint *mt_point;
 
 	list_for_each_entry(mg, &mounts, list) {
-		if (!strcmp(mg->dir, dir))
-			return mg;
+		list_for_each_entry(mt_point, &mg->mntpoints, list) {
+			if (!strcmp(mt_point->dir, dir))
+				return mg;
+		}
 	}
 	return NULL;
 }
@@ -1496,7 +1501,8 @@
 int do_mount(int ci, char *dir, char *type, char *proto, char *table,
 	     char *options, char *dev, struct mountgroup **mg_ret)
 {
-	struct mountgroup *mg;
+	struct mountgroup *mg, *new_mg = NULL;
+	struct mountpoint *mp;
 	char table2[MAXLINE];
 	char *cluster = NULL, *name = NULL;
 	int rv;
@@ -1538,24 +1544,33 @@
 		goto fail;
 	}
 
-	mg = find_mg(name);
-	if (mg) {
-		rv = -EEXIST;
+	/* Allocate and populate a new mountpoint entry */
+	mp = malloc(sizeof(struct mountpoint));
+	if (!mp) {
+		rv = -ENOMEM;
 		goto fail;
 	}
+	strncpy(mp->dir, dir, sizeof(mp->dir));
 
-	mg = create_mg(name);
+	/* Check if we already have a mount group or need a new one */
+	mg = find_mg(name);
 	if (!mg) {
-		rv = -ENOMEM;
-		goto fail;
+		mg = new_mg = create_mg(name);
+		if (!mg) {
+			free(mp);
+			rv = -ENOMEM;
+			goto fail;
+		}
+		strncpy(mg->type, type, sizeof(mg->type));
+		strncpy(mg->table, table, sizeof(mg->table));
+		strncpy(mg->options, options, sizeof(mg->options));
+		strncpy(mg->dev, dev, sizeof(mg->dev));
+		INIT_LIST_HEAD(&mg->mntpoints);
 	}
 
 	mg->mount_client = ci;
-	strncpy(mg->dir, dir, sizeof(mg->dir));
-	strncpy(mg->type, type, sizeof(mg->type));
-	strncpy(mg->table, table, sizeof(mg->table));
-	strncpy(mg->options, options, sizeof(mg->options));
-	strncpy(mg->dev, dev, sizeof(mg->dev));
+	/* Add the mount point to the list in the mountgroup. */
+	list_add(&mp->list, &mg->mntpoints);
 
 	if (strlen(cluster) != strlen(clustername) ||
 	    strlen(cluster) == 0 || strcmp(cluster, clustername)) {
@@ -1566,38 +1581,42 @@
 	} else
 		log_group(mg, "cluster name matches: %s", clustername);
 
-	if (strstr(options, "spectator")) {
-		log_group(mg, "spectator mount");
-		mg->spectator = 1;
-	} else {
-		if (!we_are_in_fence_domain()) {
-			rv = -EINVAL;
-			log_error("mount: not in default fence domain");
-			goto fail;
+	if (new_mg) {
+		if (strstr(options, "spectator")) {
+			log_group(mg, "spectator mount");
+			mg->spectator = 1;
+		} else {
+			if (!we_are_in_fence_domain()) {
+				rv = -EINVAL;
+				log_error("mount: not in default fence domain");
+				goto fail;
+			}
+		}
+
+		if (!mg->spectator && strstr(options, "rw"))
+			mg->rw = 1;
+		else if (strstr(options, "ro")) {
+			if (mg->spectator) {
+				rv = -EINVAL;
+				log_error("mount: readonly invalid with spectator");
+				goto fail;
+			}
+			mg->readonly = 1;
 		}
-	}
 
-	if (!mg->spectator && strstr(options, "rw"))
-		mg->rw = 1;
-	else if (strstr(options, "ro")) {
-		if (mg->spectator) {
+		if (strlen(options) > MAX_OPTIONS_LEN-1) {
 			rv = -EINVAL;
-			log_error("mount: readonly invalid with spectator");
+			log_error("mount: options too long %d", strlen(options));
 			goto fail;
 		}
-		mg->readonly = 1;
+		list_add(&mg->list, &mounts);
 	}
-
-	if (strlen(options) > MAX_OPTIONS_LEN-1) {
-		rv = -EINVAL;
-		log_error("mount: options too long %d", strlen(options));
-		goto fail;
-	}
-
-	list_add(&mg->list, &mounts);
 	*mg_ret = mg;
 
-	group_join(gh, name);
+	if (new_mg)
+		group_join(gh, name);
+	else
+		notify_mount_client(mg);
 	return 0;
 
  fail:
@@ -1883,14 +1902,27 @@
 int do_unmount(int ci, char *dir, int mnterr)
 {
 	struct mountgroup *mg;
+	struct mountpoint *mt_point, *safe;
 
 	list_for_each_entry(mg, &withdrawn_mounts, list) {
-		if (!strcmp(mg->dir, dir)) {
+		int is_withdrawn = FALSE;
+
+		list_for_each_entry(mt_point, &mg->mntpoints, list) {
+			if (!strcmp(mt_point->dir, dir)) {
+				is_withdrawn = TRUE;
+				break;
+			}
+		}
+		if (is_withdrawn) {
+			list_for_each_entry_safe(mt_point, safe, &mg->mntpoints, list) {
+				list_del(&mt_point->list);
+				free(mt_point);
+			}
 			log_group(mg, "unmount withdrawn fs");
 			list_del(&mg->list);
 			free(mg);
-			return 0;
 		}
+		return 0;
 	}
 
 	mg = find_mg_dir(dir);
@@ -1910,7 +1942,6 @@
 			mg->kernel_mount_error = mnterr;
 			mg->kernel_mount_done = 1;
 		}
-		goto out;
 	}
 
 	if (mg->withdraw) {
@@ -1918,16 +1949,26 @@
 		return -1;
 	}
 
+	/* Delete this mount point out of the list */
+	list_for_each_entry(mt_point, &mg->mntpoints, list) {
+		if (!strcmp(mt_point->dir, dir)) {
+			list_del(&mt_point->list);
+			free(mt_point);
+			break;
+		}
+	}
 	/* Check to see if we're waiting for a kernel recovery_done to do a
 	   start_done().  If so, call the start_done() here because we won't be
 	   getting anything else from gfs-kernel which is now gone. */
 
-	if (need_kernel_recovery_done(mg)) {
+	if (!mg->kernel_mount_error &&
+		list_empty(&mg->mntpoints) && need_kernel_recovery_done(mg)) {
 		log_group(mg, "do_unmount: fill in start_done");
 		start_done(mg);
 	}
- out:
-	group_leave(gh, mg->name);
+
+	if (list_empty(&mg->mntpoints))
+		group_leave(gh, mg->name);
 	return 0;
 }
 




* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-12-19 17:06 teigland
From: teigland @ 2006-12-19 17:06 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-12-19 17:06:00

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	revert last checkin

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.25&r2=1.26
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.25&r2=1.26

--- cluster/group/gfs_controld/lock_dlm.h	2006/12/19 01:42:37	1.25
+++ cluster/group/gfs_controld/lock_dlm.h	2006/12/19 17:05:59	1.26
@@ -113,11 +113,6 @@
         } \
 }
 
-struct mountpoint {
-	struct list_head	list;
-	char			dir[MAXNAME+1];
-};
-
 struct mountgroup {
 	struct list_head	list;
 	uint32_t		id;
@@ -129,7 +124,7 @@
 	char			name[MAXNAME+1];
 	char			table[MAXNAME+1];
 	char			type[5];
-	struct list_head mntpoints;
+	char			dir[PATH_MAX+1];
 	char			options[MAX_OPTIONS_LEN+1];
 	char			dev[PATH_MAX+1];
 
--- cluster/group/gfs_controld/recover.c	2006/12/19 01:42:37	1.25
+++ cluster/group/gfs_controld/recover.c	2006/12/19 17:05:59	1.26
@@ -28,8 +28,6 @@
 void start_spectator_init_2(struct mountgroup *mg);
 void start_spectator_2(struct mountgroup *mg);
 void notify_mount_client(struct mountgroup *mg);
-int do_finish(struct mountgroup *mg);
-int do_terminate(struct mountgroup *mg);
 
 int set_sysfs(struct mountgroup *mg, char *field, int val)
 {
@@ -1470,13 +1468,10 @@
 struct mountgroup *find_mg_dir(char *dir)
 {
 	struct mountgroup *mg;
-	struct mountpoint *mt_point;
 
 	list_for_each_entry(mg, &mounts, list) {
-		list_for_each_entry(mt_point, &mg->mntpoints, list) {
-			if (!strcmp(mt_point->dir, dir))
-				return mg;
-		}
+		if (!strcmp(mg->dir, dir))
+			return mg;
 	}
 	return NULL;
 }
@@ -1501,8 +1496,7 @@
 int do_mount(int ci, char *dir, char *type, char *proto, char *table,
 	     char *options, char *dev, struct mountgroup **mg_ret)
 {
-	struct mountgroup *mg, *new_mg = NULL;
-	struct mountpoint *mp;
+	struct mountgroup *mg;
 	char table2[MAXLINE];
 	char *cluster = NULL, *name = NULL;
 	int rv;
@@ -1544,33 +1538,24 @@
 		goto fail;
 	}
 
-	/* Allocate and populate a new mountpoint entry */
-	mp = malloc(sizeof(struct mountpoint));
-	if (!mp) {
-		rv = -ENOMEM;
+	mg = find_mg(name);
+	if (mg) {
+		rv = -EEXIST;
 		goto fail;
 	}
-	strncpy(mp->dir, dir, sizeof(mp->dir));
 
-	/* Check if we already have a mount group or need a new one */
-	mg = find_mg(name);
+	mg = create_mg(name);
 	if (!mg) {
-		mg = new_mg = create_mg(name);
-		if (!mg) {
-			free(mp);
-			rv = -ENOMEM;
-			goto fail;
-		}
-		strncpy(mg->type, type, sizeof(mg->type));
-		strncpy(mg->table, table, sizeof(mg->table));
-		strncpy(mg->options, options, sizeof(mg->options));
-		strncpy(mg->dev, dev, sizeof(mg->dev));
-		INIT_LIST_HEAD(&mg->mntpoints);
+		rv = -ENOMEM;
+		goto fail;
 	}
 
 	mg->mount_client = ci;
-	/* Add the mount point to the list in the mountgroup. */
-	list_add(&mp->list, &mg->mntpoints);
+	strncpy(mg->dir, dir, sizeof(mg->dir));
+	strncpy(mg->type, type, sizeof(mg->type));
+	strncpy(mg->table, table, sizeof(mg->table));
+	strncpy(mg->options, options, sizeof(mg->options));
+	strncpy(mg->dev, dev, sizeof(mg->dev));
 
 	if (strlen(cluster) != strlen(clustername) ||
 	    strlen(cluster) == 0 || strcmp(cluster, clustername)) {
@@ -1581,42 +1566,38 @@
 	} else
 		log_group(mg, "cluster name matches: %s", clustername);
 
-	if (new_mg) {
-		if (strstr(options, "spectator")) {
-			log_group(mg, "spectator mount");
-			mg->spectator = 1;
-		} else {
-			if (!we_are_in_fence_domain()) {
-				rv = -EINVAL;
-				log_error("mount: not in default fence domain");
-				goto fail;
-			}
-		}
-
-		if (!mg->spectator && strstr(options, "rw"))
-			mg->rw = 1;
-		else if (strstr(options, "ro")) {
-			if (mg->spectator) {
-				rv = -EINVAL;
-				log_error("mount: readonly invalid with spectator");
-				goto fail;
-			}
-			mg->readonly = 1;
+	if (strstr(options, "spectator")) {
+		log_group(mg, "spectator mount");
+		mg->spectator = 1;
+	} else {
+		if (!we_are_in_fence_domain()) {
+			rv = -EINVAL;
+			log_error("mount: not in default fence domain");
+			goto fail;
 		}
+	}
 
-		if (strlen(options) > MAX_OPTIONS_LEN-1) {
+	if (!mg->spectator && strstr(options, "rw"))
+		mg->rw = 1;
+	else if (strstr(options, "ro")) {
+		if (mg->spectator) {
 			rv = -EINVAL;
-			log_error("mount: options too long %d", strlen(options));
+			log_error("mount: readonly invalid with spectator");
 			goto fail;
 		}
-		list_add(&mg->list, &mounts);
+		mg->readonly = 1;
 	}
+
+	if (strlen(options) > MAX_OPTIONS_LEN-1) {
+		rv = -EINVAL;
+		log_error("mount: options too long %d", strlen(options));
+		goto fail;
+	}
+
+	list_add(&mg->list, &mounts);
 	*mg_ret = mg;
 
-	if (new_mg)
-		group_join(gh, name);
-	else
-		notify_mount_client(mg);
+	group_join(gh, name);
 	return 0;
 
  fail:
@@ -1902,27 +1883,14 @@
 int do_unmount(int ci, char *dir, int mnterr)
 {
 	struct mountgroup *mg;
-	struct mountpoint *mt_point, *safe;
 
 	list_for_each_entry(mg, &withdrawn_mounts, list) {
-		int is_withdrawn = FALSE;
-
-		list_for_each_entry(mt_point, &mg->mntpoints, list) {
-			if (!strcmp(mt_point->dir, dir)) {
-				is_withdrawn = TRUE;
-				break;
-			}
-		}
-		if (is_withdrawn) {
-			list_for_each_entry_safe(mt_point, safe, &mg->mntpoints, list) {
-				list_del(&mt_point->list);
-				free(mt_point);
-			}
+		if (!strcmp(mg->dir, dir)) {
 			log_group(mg, "unmount withdrawn fs");
 			list_del(&mg->list);
 			free(mg);
+			return 0;
 		}
-		return 0;
 	}
 
 	mg = find_mg_dir(dir);
@@ -1942,6 +1910,7 @@
 			mg->kernel_mount_error = mnterr;
 			mg->kernel_mount_done = 1;
 		}
+		goto out;
 	}
 
 	if (mg->withdraw) {
@@ -1949,26 +1918,16 @@
 		return -1;
 	}
 
-	/* Delete this mount point out of the list */
-	list_for_each_entry(mt_point, &mg->mntpoints, list) {
-		if (!strcmp(mt_point->dir, dir)) {
-			list_del(&mt_point->list);
-			free(mt_point);
-			break;
-		}
-	}
 	/* Check to see if we're waiting for a kernel recovery_done to do a
 	   start_done().  If so, call the start_done() here because we won't be
 	   getting anything else from gfs-kernel which is now gone. */
 
-	if (!mg->kernel_mount_error &&
-		list_empty(&mg->mntpoints) && need_kernel_recovery_done(mg)) {
+	if (need_kernel_recovery_done(mg)) {
 		log_group(mg, "do_unmount: fill in start_done");
 		start_done(mg);
 	}
-
-	if (list_empty(&mg->mntpoints))
-		group_leave(gh, mg->name);
+ out:
+	group_leave(gh, mg->name);
 	return 0;
 }
 




* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-12-19 17:07 teigland
From: teigland @ 2006-12-19 17:07 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2006-12-19 17:07:12

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	revert last checkin

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21.2.4&r2=1.21.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.23.2.2&r2=1.23.2.3

--- cluster/group/gfs_controld/lock_dlm.h	2006/12/19 01:46:47	1.21.2.4
+++ cluster/group/gfs_controld/lock_dlm.h	2006/12/19 17:07:12	1.21.2.5
@@ -113,11 +113,6 @@
         } \
 }
 
-struct mountpoint {
-	struct list_head	list;
-	char			dir[MAXNAME+1];
-};
-
 struct mountgroup {
 	struct list_head	list;
 	uint32_t		id;
@@ -129,7 +124,7 @@
 	char			name[MAXNAME+1];
 	char			table[MAXNAME+1];
 	char			type[5];
-	struct list_head mntpoints;
+	char			dir[PATH_MAX+1];
 	char			options[MAX_OPTIONS_LEN+1];
 	char			dev[PATH_MAX+1];
 
--- cluster/group/gfs_controld/recover.c	2006/12/19 01:46:47	1.23.2.2
+++ cluster/group/gfs_controld/recover.c	2006/12/19 17:07:12	1.23.2.3
@@ -28,8 +28,6 @@
 void start_spectator_init_2(struct mountgroup *mg);
 void start_spectator_2(struct mountgroup *mg);
 void notify_mount_client(struct mountgroup *mg);
-int do_finish(struct mountgroup *mg);
-int do_terminate(struct mountgroup *mg);
 
 int set_sysfs(struct mountgroup *mg, char *field, int val)
 {
@@ -1470,13 +1468,10 @@
 struct mountgroup *find_mg_dir(char *dir)
 {
 	struct mountgroup *mg;
-	struct mountpoint *mt_point;
 
 	list_for_each_entry(mg, &mounts, list) {
-		list_for_each_entry(mt_point, &mg->mntpoints, list) {
-			if (!strcmp(mt_point->dir, dir))
-				return mg;
-		}
+		if (!strcmp(mg->dir, dir))
+			return mg;
 	}
 	return NULL;
 }
@@ -1501,8 +1496,7 @@
 int do_mount(int ci, char *dir, char *type, char *proto, char *table,
 	     char *options, char *dev, struct mountgroup **mg_ret)
 {
-	struct mountgroup *mg, *new_mg = NULL;
-	struct mountpoint *mp;
+	struct mountgroup *mg;
 	char table2[MAXLINE];
 	char *cluster = NULL, *name = NULL;
 	int rv;
@@ -1544,33 +1538,24 @@
 		goto fail;
 	}
 
-	/* Allocate and populate a new mountpoint entry */
-	mp = malloc(sizeof(struct mountpoint));
-	if (!mp) {
-		rv = -ENOMEM;
+	mg = find_mg(name);
+	if (mg) {
+		rv = -EEXIST;
 		goto fail;
 	}
-	strncpy(mp->dir, dir, sizeof(mp->dir));
 
-	/* Check if we already have a mount group or need a new one */
-	mg = find_mg(name);
+	mg = create_mg(name);
 	if (!mg) {
-		mg = new_mg = create_mg(name);
-		if (!mg) {
-			free(mp);
-			rv = -ENOMEM;
-			goto fail;
-		}
-		strncpy(mg->type, type, sizeof(mg->type));
-		strncpy(mg->table, table, sizeof(mg->table));
-		strncpy(mg->options, options, sizeof(mg->options));
-		strncpy(mg->dev, dev, sizeof(mg->dev));
-		INIT_LIST_HEAD(&mg->mntpoints);
+		rv = -ENOMEM;
+		goto fail;
 	}
 
 	mg->mount_client = ci;
-	/* Add the mount point to the list in the mountgroup. */
-	list_add(&mp->list, &mg->mntpoints);
+	strncpy(mg->dir, dir, sizeof(mg->dir));
+	strncpy(mg->type, type, sizeof(mg->type));
+	strncpy(mg->table, table, sizeof(mg->table));
+	strncpy(mg->options, options, sizeof(mg->options));
+	strncpy(mg->dev, dev, sizeof(mg->dev));
 
 	if (strlen(cluster) != strlen(clustername) ||
 	    strlen(cluster) == 0 || strcmp(cluster, clustername)) {
@@ -1581,42 +1566,38 @@
 	} else
 		log_group(mg, "cluster name matches: %s", clustername);
 
-	if (new_mg) {
-		if (strstr(options, "spectator")) {
-			log_group(mg, "spectator mount");
-			mg->spectator = 1;
-		} else {
-			if (!we_are_in_fence_domain()) {
-				rv = -EINVAL;
-				log_error("mount: not in default fence domain");
-				goto fail;
-			}
-		}
-
-		if (!mg->spectator && strstr(options, "rw"))
-			mg->rw = 1;
-		else if (strstr(options, "ro")) {
-			if (mg->spectator) {
-				rv = -EINVAL;
-				log_error("mount: readonly invalid with spectator");
-				goto fail;
-			}
-			mg->readonly = 1;
+	if (strstr(options, "spectator")) {
+		log_group(mg, "spectator mount");
+		mg->spectator = 1;
+	} else {
+		if (!we_are_in_fence_domain()) {
+			rv = -EINVAL;
+			log_error("mount: not in default fence domain");
+			goto fail;
 		}
+	}
 
-		if (strlen(options) > MAX_OPTIONS_LEN-1) {
+	if (!mg->spectator && strstr(options, "rw"))
+		mg->rw = 1;
+	else if (strstr(options, "ro")) {
+		if (mg->spectator) {
 			rv = -EINVAL;
-			log_error("mount: options too long %d", strlen(options));
+			log_error("mount: readonly invalid with spectator");
 			goto fail;
 		}
-		list_add(&mg->list, &mounts);
+		mg->readonly = 1;
 	}
+
+	if (strlen(options) > MAX_OPTIONS_LEN-1) {
+		rv = -EINVAL;
+		log_error("mount: options too long %d", strlen(options));
+		goto fail;
+	}
+
+	list_add(&mg->list, &mounts);
 	*mg_ret = mg;
 
-	if (new_mg)
-		group_join(gh, name);
-	else
-		notify_mount_client(mg);
+	group_join(gh, name);
 	return 0;
 
  fail:
@@ -1902,27 +1883,14 @@
 int do_unmount(int ci, char *dir, int mnterr)
 {
 	struct mountgroup *mg;
-	struct mountpoint *mt_point, *safe;
 
 	list_for_each_entry(mg, &withdrawn_mounts, list) {
-		int is_withdrawn = FALSE;
-
-		list_for_each_entry(mt_point, &mg->mntpoints, list) {
-			if (!strcmp(mt_point->dir, dir)) {
-				is_withdrawn = TRUE;
-				break;
-			}
-		}
-		if (is_withdrawn) {
-			list_for_each_entry_safe(mt_point, safe, &mg->mntpoints, list) {
-				list_del(&mt_point->list);
-				free(mt_point);
-			}
+		if (!strcmp(mg->dir, dir)) {
 			log_group(mg, "unmount withdrawn fs");
 			list_del(&mg->list);
 			free(mg);
+			return 0;
 		}
-		return 0;
 	}
 
 	mg = find_mg_dir(dir);
@@ -1942,6 +1910,7 @@
 			mg->kernel_mount_error = mnterr;
 			mg->kernel_mount_done = 1;
 		}
+		goto out;
 	}
 
 	if (mg->withdraw) {
@@ -1949,26 +1918,16 @@
 		return -1;
 	}
 
-	/* Delete this mount point out of the list */
-	list_for_each_entry(mt_point, &mg->mntpoints, list) {
-		if (!strcmp(mt_point->dir, dir)) {
-			list_del(&mt_point->list);
-			free(mt_point);
-			break;
-		}
-	}
 	/* Check to see if we're waiting for a kernel recovery_done to do a
 	   start_done().  If so, call the start_done() here because we won't be
 	   getting anything else from gfs-kernel which is now gone. */
 
-	if (!mg->kernel_mount_error &&
-		list_empty(&mg->mntpoints) && need_kernel_recovery_done(mg)) {
+	if (need_kernel_recovery_done(mg)) {
 		log_group(mg, "do_unmount: fill in start_done");
 		start_done(mg);
 	}
-
-	if (list_empty(&mg->mntpoints))
-		group_leave(gh, mg->name);
+ out:
+	group_leave(gh, mg->name);
 	return 0;
 }
 




* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2006-12-19 17:07 teigland
From: teigland @ 2006-12-19 17:07 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL50
Changes by:	teigland at sourceware.org	2006-12-19 17:07:22

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	revert last checkin

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.21.4.4&r2=1.21.4.5
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.23.4.2&r2=1.23.4.3

--- cluster/group/gfs_controld/lock_dlm.h	2006/12/19 01:48:53	1.21.4.4
+++ cluster/group/gfs_controld/lock_dlm.h	2006/12/19 17:07:22	1.21.4.5
@@ -113,11 +113,6 @@
         } \
 }
 
-struct mountpoint {
-	struct list_head	list;
-	char			dir[MAXNAME+1];
-};
-
 struct mountgroup {
 	struct list_head	list;
 	uint32_t		id;
@@ -129,7 +124,7 @@
 	char			name[MAXNAME+1];
 	char			table[MAXNAME+1];
 	char			type[5];
-	struct list_head mntpoints;
+	char			dir[PATH_MAX+1];
 	char			options[MAX_OPTIONS_LEN+1];
 	char			dev[PATH_MAX+1];
 
--- cluster/group/gfs_controld/recover.c	2006/12/19 01:48:53	1.23.4.2
+++ cluster/group/gfs_controld/recover.c	2006/12/19 17:07:22	1.23.4.3
@@ -28,8 +28,6 @@
 void start_spectator_init_2(struct mountgroup *mg);
 void start_spectator_2(struct mountgroup *mg);
 void notify_mount_client(struct mountgroup *mg);
-int do_finish(struct mountgroup *mg);
-int do_terminate(struct mountgroup *mg);
 
 int set_sysfs(struct mountgroup *mg, char *field, int val)
 {
@@ -1470,13 +1468,10 @@
 struct mountgroup *find_mg_dir(char *dir)
 {
 	struct mountgroup *mg;
-	struct mountpoint *mt_point;
 
 	list_for_each_entry(mg, &mounts, list) {
-		list_for_each_entry(mt_point, &mg->mntpoints, list) {
-			if (!strcmp(mt_point->dir, dir))
-				return mg;
-		}
+		if (!strcmp(mg->dir, dir))
+			return mg;
 	}
 	return NULL;
 }
@@ -1501,8 +1496,7 @@
 int do_mount(int ci, char *dir, char *type, char *proto, char *table,
 	     char *options, char *dev, struct mountgroup **mg_ret)
 {
-	struct mountgroup *mg, *new_mg = NULL;
-	struct mountpoint *mp;
+	struct mountgroup *mg;
 	char table2[MAXLINE];
 	char *cluster = NULL, *name = NULL;
 	int rv;
@@ -1544,33 +1538,24 @@
 		goto fail;
 	}
 
-	/* Allocate and populate a new mountpoint entry */
-	mp = malloc(sizeof(struct mountpoint));
-	if (!mp) {
-		rv = -ENOMEM;
+	mg = find_mg(name);
+	if (mg) {
+		rv = -EEXIST;
 		goto fail;
 	}
-	strncpy(mp->dir, dir, sizeof(mp->dir));
 
-	/* Check if we already have a mount group or need a new one */
-	mg = find_mg(name);
+	mg = create_mg(name);
 	if (!mg) {
-		mg = new_mg = create_mg(name);
-		if (!mg) {
-			free(mp);
-			rv = -ENOMEM;
-			goto fail;
-		}
-		strncpy(mg->type, type, sizeof(mg->type));
-		strncpy(mg->table, table, sizeof(mg->table));
-		strncpy(mg->options, options, sizeof(mg->options));
-		strncpy(mg->dev, dev, sizeof(mg->dev));
-		INIT_LIST_HEAD(&mg->mntpoints);
+		rv = -ENOMEM;
+		goto fail;
 	}
 
 	mg->mount_client = ci;
-	/* Add the mount point to the list in the mountgroup. */
-	list_add(&mp->list, &mg->mntpoints);
+	strncpy(mg->dir, dir, sizeof(mg->dir));
+	strncpy(mg->type, type, sizeof(mg->type));
+	strncpy(mg->table, table, sizeof(mg->table));
+	strncpy(mg->options, options, sizeof(mg->options));
+	strncpy(mg->dev, dev, sizeof(mg->dev));
 
 	if (strlen(cluster) != strlen(clustername) ||
 	    strlen(cluster) == 0 || strcmp(cluster, clustername)) {
@@ -1581,42 +1566,38 @@
 	} else
 		log_group(mg, "cluster name matches: %s", clustername);
 
-	if (new_mg) {
-		if (strstr(options, "spectator")) {
-			log_group(mg, "spectator mount");
-			mg->spectator = 1;
-		} else {
-			if (!we_are_in_fence_domain()) {
-				rv = -EINVAL;
-				log_error("mount: not in default fence domain");
-				goto fail;
-			}
-		}
-
-		if (!mg->spectator && strstr(options, "rw"))
-			mg->rw = 1;
-		else if (strstr(options, "ro")) {
-			if (mg->spectator) {
-				rv = -EINVAL;
-				log_error("mount: readonly invalid with spectator");
-				goto fail;
-			}
-			mg->readonly = 1;
+	if (strstr(options, "spectator")) {
+		log_group(mg, "spectator mount");
+		mg->spectator = 1;
+	} else {
+		if (!we_are_in_fence_domain()) {
+			rv = -EINVAL;
+			log_error("mount: not in default fence domain");
+			goto fail;
 		}
+	}
 
-		if (strlen(options) > MAX_OPTIONS_LEN-1) {
+	if (!mg->spectator && strstr(options, "rw"))
+		mg->rw = 1;
+	else if (strstr(options, "ro")) {
+		if (mg->spectator) {
 			rv = -EINVAL;
-			log_error("mount: options too long %d", strlen(options));
+			log_error("mount: readonly invalid with spectator");
 			goto fail;
 		}
-		list_add(&mg->list, &mounts);
+		mg->readonly = 1;
 	}
+
+	if (strlen(options) > MAX_OPTIONS_LEN-1) {
+		rv = -EINVAL;
+		log_error("mount: options too long %d", strlen(options));
+		goto fail;
+	}
+
+	list_add(&mg->list, &mounts);
 	*mg_ret = mg;
 
-	if (new_mg)
-		group_join(gh, name);
-	else
-		notify_mount_client(mg);
+	group_join(gh, name);
 	return 0;
 
  fail:
@@ -1902,27 +1883,14 @@
 int do_unmount(int ci, char *dir, int mnterr)
 {
 	struct mountgroup *mg;
-	struct mountpoint *mt_point, *safe;
 
 	list_for_each_entry(mg, &withdrawn_mounts, list) {
-		int is_withdrawn = FALSE;
-
-		list_for_each_entry(mt_point, &mg->mntpoints, list) {
-			if (!strcmp(mt_point->dir, dir)) {
-				is_withdrawn = TRUE;
-				break;
-			}
-		}
-		if (is_withdrawn) {
-			list_for_each_entry_safe(mt_point, safe, &mg->mntpoints, list) {
-				list_del(&mt_point->list);
-				free(mt_point);
-			}
+		if (!strcmp(mg->dir, dir)) {
 			log_group(mg, "unmount withdrawn fs");
 			list_del(&mg->list);
 			free(mg);
+			return 0;
 		}
-		return 0;
 	}
 
 	mg = find_mg_dir(dir);
@@ -1942,6 +1910,7 @@
 			mg->kernel_mount_error = mnterr;
 			mg->kernel_mount_done = 1;
 		}
+		goto out;
 	}
 
 	if (mg->withdraw) {
@@ -1949,26 +1918,16 @@
 		return -1;
 	}
 
-	/* Delete this mount point out of the list */
-	list_for_each_entry(mt_point, &mg->mntpoints, list) {
-		if (!strcmp(mt_point->dir, dir)) {
-			list_del(&mt_point->list);
-			free(mt_point);
-			break;
-		}
-	}
 	/* Check to see if we're waiting for a kernel recovery_done to do a
 	   start_done().  If so, call the start_done() here because we won't be
 	   getting anything else from gfs-kernel which is now gone. */
 
-	if (!mg->kernel_mount_error &&
-		list_empty(&mg->mntpoints) && need_kernel_recovery_done(mg)) {
+	if (need_kernel_recovery_done(mg)) {
 		log_group(mg, "do_unmount: fill in start_done");
 		start_done(mg);
 	}
-
-	if (list_empty(&mg->mntpoints))
-		group_leave(gh, mg->name);
+ out:
+	group_leave(gh, mg->name);
 	return 0;
 }
 




* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2007-09-04 19:22 teigland
From: teigland @ 2007-09-04 19:22 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-09-04 19:22:52

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	Reject mount attempts on an fs that's still in the process of unmounting.
	This regressed 8 months ago due to the bz 218560 changes.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.28&r2=1.29
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.32&r2=1.33

--- cluster/group/gfs_controld/lock_dlm.h	2007/06/12 20:04:41	1.28
+++ cluster/group/gfs_controld/lock_dlm.h	2007/09/04 19:22:52	1.29
@@ -164,6 +164,7 @@
 	int			low_nodeid;
 	int			master_nodeid;
 	int			save_plocks;
+	int			reject_mounts;
 
 	uint64_t		cp_handle;
 	time_t			last_checkpoint_time;
--- cluster/group/gfs_controld/recover.c	2007/07/11 17:01:23	1.32
+++ cluster/group/gfs_controld/recover.c	2007/09/04 19:22:52	1.33
@@ -1617,7 +1617,13 @@
 
 	mg = find_mg(name);
 	if (mg) {
-		rv = add_another_mountpoint(mg, dir, dev, ci);
+		if (mg->reject_mounts) {
+			/* fs is being unmounted */
+			rv = -ESTALE;
+			log_error("mount: reject mount due to unmount");
+		} else {
+			rv = add_another_mountpoint(mg, dir, dev, ci);
+		}
 		goto out;
 	}
 
@@ -2030,6 +2036,7 @@
 	}
 
  out:
+	mg->reject_mounts = 1;
 	group_leave(gh, mg->name);
 	return 0;
 }



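A small sketch of the reject_mounts gate, with simplified types (only -ESTALE and the flag name come from the commit; the surrounding structure is illustrative): do_unmount() sets the flag before initiating the group leave, and do_mount() then refuses to attach another mount point to the dying mountgroup.

#include <errno.h>
#include <stdio.h>

struct mountgroup {
	int reject_mounts;   /* set once unmount/group_leave has begun */
};

/* do_mount() path for an already existing group. */
static int try_add_mountpoint(struct mountgroup *mg)
{
	if (mg->reject_mounts)
		return -ESTALE;  /* fs is being unmounted */
	/* add_another_mountpoint(mg, dir, dev, ci) in the real code */
	return 0;
}

/* do_unmount() tail: once the leave is initiated, new mounts must not
   attach to the dying mountgroup. */
static void begin_unmount(struct mountgroup *mg)
{
	mg->reject_mounts = 1;
	/* group_leave(gh, mg->name) follows in the real code */
}

int main(void)
{
	struct mountgroup mg = { 0 };

	begin_unmount(&mg);
	printf("mount while unmounting -> %d\n", try_add_mountpoint(&mg));
	return 0;
}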

* [Cluster-devel] cluster/group/gfs_controld lock_dlm.h recover.c
@ 2007-09-04 19:27 teigland
From: teigland @ 2007-09-04 19:27 UTC
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2007-09-04 19:27:34

Modified files:
	group/gfs_controld: lock_dlm.h recover.c 

Log message:
	Reject mount attempts on an fs that's still in the process of unmounting.
	This regressed 8 months ago due to the bz 218560 changes.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21.2.7&r2=1.21.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.23.2.9&r2=1.23.2.10

--- cluster/group/gfs_controld/lock_dlm.h	2007/06/12 20:05:12	1.21.2.7
+++ cluster/group/gfs_controld/lock_dlm.h	2007/09/04 19:27:34	1.21.2.8
@@ -164,6 +164,7 @@
 	int			low_nodeid;
 	int			master_nodeid;
 	int			save_plocks;
+	int			reject_mounts;
 
 	uint64_t		cp_handle;
 	time_t			last_checkpoint_time;
--- cluster/group/gfs_controld/recover.c	2007/07/19 20:23:16	1.23.2.9
+++ cluster/group/gfs_controld/recover.c	2007/09/04 19:27:34	1.23.2.10
@@ -1617,7 +1617,13 @@
 
 	mg = find_mg(name);
 	if (mg) {
-		rv = add_another_mountpoint(mg, dir, dev, ci);
+		if (mg->reject_mounts) {
+			/* fs is being unmounted */
+			rv = -ESTALE;
+			log_error("mount: reject mount due to unmount");
+		} else {
+			rv = add_another_mountpoint(mg, dir, dev, ci);
+		}
 		goto out;
 	}
 
@@ -2030,6 +2036,7 @@
 	}
 
  out:
+	mg->reject_mounts = 1;
 	group_leave(gh, mg->name);
 	return 0;
 }



