cluster-devel.redhat.com archive mirror
 help / color / mirror / Atom feed
* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-02 19:23 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-02 19:23 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-02 19:23:42

Modified files:
	group/gfs_controld: plock.c 

Log message:
	do byte-swapping

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.4&r2=1.5

--- cluster/group/gfs_controld/plock.c	2006/08/02 18:27:57	1.4
+++ cluster/group/gfs_controld/plock.c	2006/08/02 19:23:41	1.5
@@ -87,6 +87,36 @@
 	struct gdlm_plock_info	info;
 };
 
+static void info_bswap_out(struct gdlm_plock_info *i)
+{
+	i->version[0]	= cpu_to_le32(i->version[0]);
+	i->version[1]	= cpu_to_le32(i->version[1]);
+	i->version[2]	= cpu_to_le32(i->version[2]);
+	i->pid		= cpu_to_le32(i->pid);
+	i->nodeid	= cpu_to_le32(i->nodeid);
+	i->rv		= cpu_to_le32(i->rv);
+	i->fsid		= cpu_to_le32(i->fsid);
+	i->number	= cpu_to_le64(i->number);
+	i->start	= cpu_to_le64(i->start);
+	i->end		= cpu_to_le64(i->end);
+	i->owner	= cpu_to_le64(i->owner);
+}
+
+static void info_bswap_in(struct gdlm_plock_info *i)
+{
+	i->version[0]	= le32_to_cpu(i->version[0]);
+	i->version[1]	= le32_to_cpu(i->version[1]);
+	i->version[2]	= le32_to_cpu(i->version[2]);
+	i->pid		= le32_to_cpu(i->pid);
+	i->nodeid	= le32_to_cpu(i->nodeid);
+	i->rv		= le32_to_cpu(i->rv);
+	i->fsid		= le32_to_cpu(i->fsid);
+	i->number	= le64_to_cpu(i->number);
+	i->start	= le64_to_cpu(i->start);
+	i->end		= le64_to_cpu(i->end);
+	i->owner	= le64_to_cpu(i->owner);
+}
+
 static int get_proc_number(const char *file, const char *name, uint32_t *number)
 {
 	FILE *fl;
@@ -267,14 +297,15 @@
 
 	info.nodeid = our_nodeid;
 
-	/* FIXME: do byte swapping */
-
 	hd = (struct gdlm_header *)buf;
 	hd->type = MSG_PLOCK;
 	hd->nodeid = our_nodeid;
 	hd->to_nodeid = 0;
 	memcpy(buf + sizeof(struct gdlm_header), &info, sizeof(info));
 
+	info_bswap_out((struct gdlm_plock_info *) buf +
+						  sizeof(struct gdlm_header));
+
 	rv = send_group_message(mg, len, buf);
 
 	free(buf);
@@ -755,7 +786,7 @@
 
 	memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
 
-	/* FIXME: do byte swapping */
+	info_bswap_in(&info);
 
 	log_group(mg, "receive_plock from %d op %d fs %x num %llx ex %d w %d",
 		  from, info.optype, info.fsid, info.number, info.ex,
@@ -847,11 +878,11 @@
 	pp = (struct pack_plock *) &section_buf;
 
 	list_for_each_entry(po, &r->locks, list) {
-		pp->start	= po->start;
-		pp->end		= po->end;
-		pp->owner	= po->owner;
-		pp->pid		= po->pid;
-		pp->nodeid	= po->nodeid;
+		pp->start	= cpu_to_le64(po->start);
+		pp->end		= cpu_to_le64(po->end);
+		pp->owner	= cpu_to_le64(po->owner);
+		pp->pid		= cpu_to_le32(po->pid);
+		pp->nodeid	= cpu_to_le32(po->nodeid);
 		pp->ex		= po->ex;
 		pp->waiter	= 0;
 		pp++;
@@ -859,11 +890,11 @@
 	}
 
 	list_for_each_entry(w, &r->waiters, list) {
-		pp->start	= w->info.start;
-		pp->end		= w->info.end;
-		pp->owner	= w->info.owner;
-		pp->pid		= w->info.pid;
-		pp->nodeid	= w->info.nodeid;
+		pp->start	= cpu_to_le64(w->info.start);
+		pp->end		= cpu_to_le64(w->info.end);
+		pp->owner	= cpu_to_le64(w->info.owner);
+		pp->pid		= cpu_to_le32(w->info.pid);
+		pp->nodeid	= cpu_to_le32(w->info.nodeid);
 		pp->ex		= w->info.ex;
 		pp->waiter	= 1;
 		pp++;
@@ -899,20 +930,20 @@
 	for (i = 0; i < count; i++) {
 		if (!pp->waiter) {
 			po = malloc(sizeof(struct posix_lock));
-			po->start	= pp->start;
-			po->end		= pp->end;
-			po->owner	= pp->owner;
-			po->pid		= pp->pid;
-			po->nodeid	= pp->nodeid;
+			po->start	= le64_to_cpu(pp->start);
+			po->end		= le64_to_cpu(pp->end);
+			po->owner	= le64_to_cpu(pp->owner);
+			po->pid		= le32_to_cpu(pp->pid);
+			po->nodeid	= le32_to_cpu(pp->nodeid);
 			po->ex		= pp->ex;
 			list_add_tail(&po->list, &r->locks);
 		} else {
 			w = malloc(sizeof(struct lock_waiter));
-			w->info.start	= pp->start;
-			w->info.end	= pp->end;
-			w->info.owner	= pp->owner;
-			w->info.pid	= pp->pid;
-			w->info.nodeid	= pp->nodeid;
+			w->info.start	= le64_to_cpu(pp->start);
+			w->info.end	= le64_to_cpu(pp->end);
+			w->info.owner	= le64_to_cpu(pp->owner);
+			w->info.pid	= le32_to_cpu(pp->pid);
+			w->info.nodeid	= le32_to_cpu(pp->nodeid);
 			w->info.ex	= pp->ex;
 			list_add_tail(&w->list, &r->waiters);
 		}



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-08 18:43 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-08 18:43 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-08 18:43:25

Modified files:
	group/gfs_controld: plock.c 

Log message:
	use the correct (global) handle when unlinking a checkpoint

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.8&r2=1.9

--- cluster/group/gfs_controld/plock.c	2006/08/07 16:57:50	1.8
+++ cluster/group/gfs_controld/plock.c	2006/08/08 18:43:25	1.9
@@ -1084,7 +1084,7 @@
 	log_group(mg, "unlink ckpt %llx", h);
 
  unlink_retry:
-	rv = saCkptCheckpointUnlink(h, name);
+	rv = saCkptCheckpointUnlink(ckpt_handle, name);
 	if (rv == SA_AIS_ERR_TRY_AGAIN) {
 		log_group(mg, "unlink ckpt retry");
 		sleep(1);



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-08 19:37 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-08 19:37 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-08 19:37:33

Modified files:
	group/gfs_controld: plock.c 

Log message:
	if a node has a saved ckpt when it unmounts, it needs to unlink it
	so another node can create a new ckpt for the next mounter

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.9&r2=1.10

--- cluster/group/gfs_controld/plock.c	2006/08/08 18:43:25	1.9
+++ cluster/group/gfs_controld/plock.c	2006/08/08 19:37:33	1.10
@@ -942,43 +942,6 @@
 	}
 }
 
-void purge_plocks(struct mountgroup *mg, int nodeid, int unmount)
-{
-	struct posix_lock *po, *po2;
-	struct lock_waiter *w, *w2;
-	struct resource *r, *r2;
-	int purged = 0;
-
-	list_for_each_entry_safe(r, r2, &mg->resources, list) {
-		list_for_each_entry_safe(po, po2, &r->locks, list) {
-			if (po->nodeid == nodeid || unmount) {
-				list_del(&po->list);
-				free(po);
-				purged++;
-			}
-		}
-
-		list_for_each_entry_safe(w, w2, &r->waiters, list) {
-			if (w->info.nodeid == nodeid || unmount) {
-				list_del(&w->list);
-				free(w);
-				purged++;
-			}
-		}
-
-		if (list_empty(&r->locks) && list_empty(&r->waiters)) {
-			list_del(&r->list);
-			free(r);
-		} else
-			do_waiters(mg, r);
-	}
-	
-	if (purged)
-		mg->last_plock_time = time(NULL);
-
-	log_group(mg, "purged %d plocks for %d", purged, nodeid);
-}
-
 void plock_exit(void)
 {
 	if (plocks_online)
@@ -1260,6 +1223,13 @@
 			sleep(1);
 			goto create_retry;
 		}
+		if (rv == SA_AIS_ERR_EXIST) {
+			/* this shouldn't happen in general */
+			log_group(mg, "store_plocks: clearing old ckpt");
+			saCkptCheckpointClose(h);
+			unlink_checkpoint(mg, &name);
+			goto open_retry;
+		}
 		if (rv != SA_AIS_OK) {
 			log_error("store_plocks: ckpt section create err %d %s",
 				  rv, mg->name);
@@ -1369,6 +1339,55 @@
 	saCkptCheckpointClose(h);
 }
 
+void purge_plocks(struct mountgroup *mg, int nodeid, int unmount)
+{
+	struct posix_lock *po, *po2;
+	struct lock_waiter *w, *w2;
+	struct resource *r, *r2;
+	int len, purged = 0;
+	SaNameT name;
+
+	list_for_each_entry_safe(r, r2, &mg->resources, list) {
+		list_for_each_entry_safe(po, po2, &r->locks, list) {
+			if (po->nodeid == nodeid || unmount) {
+				list_del(&po->list);
+				free(po);
+				purged++;
+			}
+		}
+
+		list_for_each_entry_safe(w, w2, &r->waiters, list) {
+			if (w->info.nodeid == nodeid || unmount) {
+				list_del(&w->list);
+				free(w);
+				purged++;
+			}
+		}
+
+		if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+			list_del(&r->list);
+			free(r);
+		} else
+			do_waiters(mg, r);
+	}
+	
+	if (purged)
+		mg->last_plock_time = time(NULL);
+
+	log_group(mg, "purged %d plocks for %d", purged, nodeid);
+
+	/* we may have a saved ckpt that we created for the last mounter,
+	   we need to unlink it so another node can create a new ckpt for
+	   the next mounter after we leave */
+
+	if (unmount && mg->cp_handle) {
+		len = snprintf(name.value, SA_MAX_NAME_LENGTH,
+			       "gfsplock.%s", mg->name);
+		name.length = len;
+		unlink_checkpoint(mg, &name);
+	}
+}
+
 int dump_plocks(char *name, int fd)
 {
 	struct mountgroup *mg;



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-14 20:15 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-14 20:15 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-14 20:15:25

Modified files:
	group/gfs_controld: plock.c 

Log message:
	remove a couple log_error's

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.12&r2=1.13

--- cluster/group/gfs_controld/plock.c	2006/08/09 19:35:26	1.12
+++ cluster/group/gfs_controld/plock.c	2006/08/14 20:15:25	1.13
@@ -1086,7 +1086,8 @@
 		goto out_close;
 	}
 	if (rv != SA_AIS_OK) {
-		log_error("unlink ckpt close error %d %s", rv, mg->name);
+		/* should this be log_error */
+		log_group(mg, "unlink ckpt close error %d", rv);
 		ret = -1;
 	}
 
@@ -1333,7 +1334,10 @@
 			  iov.readSize);
 		section_len = iov.readSize;
 
-		if (!section_len || section_len % sizeof(struct pack_plock)) {
+		if (!section_len)
+		       continue;
+
+		if (section_len % sizeof(struct pack_plock)) {
 			log_error("retrieve_plocks: bad section len %d %s",
 				  section_len, mg->name);
 			continue;



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-15 22:21 rpeterso
  0 siblings, 0 replies; 23+ messages in thread
From: rpeterso @ 2006-08-15 22:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	rpeterso at sourceware.org	2006-08-15 22:21:29

Modified files:
	group/gfs_controld: plock.c 

Log message:
	Fixed segfault in gfs_controld.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.13&r2=1.14

--- cluster/group/gfs_controld/plock.c	2006/08/14 20:15:25	1.13
+++ cluster/group/gfs_controld/plock.c	2006/08/15 22:21:29	1.14
@@ -826,8 +826,10 @@
 	if (in->nodeid == our_nodeid)
 		write_result(mg, in, rv);
 
-	do_waiters(mg, r);
-	put_resource(r);
+	if (r) {
+		do_waiters(mg, r);
+		put_resource(r);
+	}
 }
 
 static void do_get(struct mountgroup *mg, struct gdlm_plock_info *in)



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-16 17:05 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-16 17:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-16 17:05:24

Modified files:
	group/gfs_controld: plock.c 

Log message:
	change log_plock() to log_group() for packing/unpacking plocks in
	checkpoint

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.14&r2=1.15

--- cluster/group/gfs_controld/plock.c	2006/08/15 22:21:29	1.14
+++ cluster/group/gfs_controld/plock.c	2006/08/16 17:05:24	1.15
@@ -987,7 +987,7 @@
 
 	section_len = count * sizeof(struct pack_plock);
 
-	log_plock(mg, "pack %llx count %d", r->number, count);
+	log_group(mg, "plock pack %llx count %d", r->number, count);
 }
 
 int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
@@ -1007,7 +1007,7 @@
 	INIT_LIST_HEAD(&r->waiters);
 	sscanf(numbuf, "r%llu", &r->number);
 
-	log_plock(mg, "unpack %llx count %d", r->number, count);
+	log_group(mg, "plock unpack %llx count %d", r->number, count);
 
 	pp = (struct pack_plock *) &section_buf;
 



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-16 19:30 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-16 19:30 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-16 19:30:37

Modified files:
	group/gfs_controld: plock.c 

Log message:
	after unlinking a ckpt, don't try to close it if we don't have it open,
	(no big problem, the close would just fail) and go back to syslogging
	ckpt close errors

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.15&r2=1.16

--- cluster/group/gfs_controld/plock.c	2006/08/16 17:05:24	1.15
+++ cluster/group/gfs_controld/plock.c	2006/08/16 19:30:36	1.16
@@ -1081,6 +1081,9 @@
 		 s.numberOfSections, s.memoryUsed);
 
  out_close:
+	if (!h)
+		goto out;
+
 	rv = saCkptCheckpointClose(h);
 	if (rv == SA_AIS_ERR_TRY_AGAIN) {
 		log_group(mg, "unlink ckpt close retry");
@@ -1088,11 +1091,12 @@
 		goto out_close;
 	}
 	if (rv != SA_AIS_OK) {
-		/* should this be log_error */
-		log_group(mg, "unlink ckpt close error %d", rv);
-		ret = -1;
+		log_error("unlink ckpt %llx close err %d %s", h, rv, mg->name);
+		/* should we return an error here and possibly cause
+		   store_plocks() to fail on this? */
+		/* ret = -1; */
 	}
-
+ out:
 	mg->cp_handle = 0;
 	return ret;
 }



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-08-17 19:39 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-08-17 19:39 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-08-17 19:39:17

Modified files:
	group/gfs_controld: plock.c 

Log message:
	change debug messages related to storing/retrieving plocks to/from
	checkpoints to see more details about the ckpt

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.16&r2=1.17

--- cluster/group/gfs_controld/plock.c	2006/08/16 19:30:36	1.16
+++ cluster/group/gfs_controld/plock.c	2006/08/17 19:39:17	1.17
@@ -986,8 +986,6 @@
 	}
 
 	section_len = count * sizeof(struct pack_plock);
-
-	log_group(mg, "plock pack %llx count %d", r->number, count);
 }
 
 int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
@@ -1007,8 +1005,6 @@
 	INIT_LIST_HEAD(&r->waiters);
 	sscanf(numbuf, "r%llu", &r->number);
 
-	log_group(mg, "plock unpack %llx count %d", r->number, count);
-
 	pp = (struct pack_plock *) &section_buf;
 
 	for (i = 0; i < count; i++) {
@@ -1220,6 +1216,9 @@
 
 		pack_section_buf(mg, r);
 
+		log_group(mg, "store_plocks: section size %u id %u \"%s\"",
+			  section_len, section_id.idLen, buf);
+
 	 create_retry:
 		rv = saCkptSectionCreate(h, &section_attr, &section_buf,
 					 section_len);
@@ -1265,6 +1264,7 @@
 	SaCkptIOVectorElementT iov;
 	SaNameT name;
 	SaAisErrorT rv;
+	char buf[32];
 	int len;
 
 	if (!plocks_online)
@@ -1323,6 +1323,11 @@
 		iov.dataSize = desc.sectionSize;
 		iov.dataOffset = 0;
 
+		memset(&buf, 0, 32);
+		snprintf(buf, 32, "%s", desc.sectionId.id);
+		log_group(mg, "retrieve_plocks: section size %llu id %u \"%s\"",
+			  iov.dataSize, iov.sectionId.idLen, buf);
+
 	 read_retry:
 		rv = saCkptCheckpointRead(h, &iov, 1, NULL);
 		if (rv == SA_AIS_ERR_TRY_AGAIN) {



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-10-09 21:51 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-10-09 21:51 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-10-09 21:51:49

Modified files:
	group/gfs_controld: plock.c 

Log message:
	if we get a plock request from the kernel when plocks are disabled,
	return -ENOSYS for the request

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.22&r2=1.23

--- cluster/group/gfs_controld/plock.c	2006/10/09 21:48:57	1.22
+++ cluster/group/gfs_controld/plock.c	2006/10/09 21:51:49	1.23
@@ -311,6 +311,11 @@
 
 	rv = read(control_fd, &info, sizeof(info));
 
+	if (!plocks_online) {
+		rv = -ENOSYS;
+		goto fail;
+	}
+
 	mg = find_mg_id(info.fsid);
 	if (!mg) {
 		log_debug("process_plocks: no mg id %x", info.fsid);



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-03 15:33 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-03 15:33 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-11-03 15:33:46

Modified files:
	group/gfs_controld: plock.c 

Log message:
	When a new master joins the mountgroup, it retrieves plocks from
	the ckpt created by the old master, then unlinks and closes the
	ckpt so it can create another new ckpt later.
	
	Bug found by sdake where the ckpt close following the unlink was
	being skipped because the ckpt handle wasn't being set.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.24&r2=1.25

--- cluster/group/gfs_controld/plock.c	2006/10/16 17:12:10	1.24
+++ cluster/group/gfs_controld/plock.c	2006/11/03 15:33:46	1.25
@@ -1389,6 +1389,7 @@
 	if (mg->low_nodeid == our_nodeid) {
 		/* we're the new low nodeid, will be master */
 		log_group(mg, "retrieve_plocks: unlink ckpt from old master");
+		mg->cp_handle = (uint64_t) h;
 		_unlink_checkpoint(mg, &name);
 	} else
 		saCkptCheckpointClose(h);



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-20 18:10 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-20 18:10 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-11-20 18:10:00

Modified files:
	group/gfs_controld: plock.c 

Log message:
	The plock rate limiting code should use the full timeval to measure
	the 1 sec limit interval instead of just the rough difference in
	tv_sec values.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.26&r2=1.27

--- cluster/group/gfs_controld/plock.c	2006/11/14 20:20:43	1.26
+++ cluster/group/gfs_controld/plock.c	2006/11/20 18:10:00	1.27
@@ -311,6 +311,21 @@
 	return control_fd;
 }
 
+static unsigned long time_diff_ms(struct timeval *begin, struct timeval *end)
+{
+	unsigned long a_us, b_us, c_us, s, us, ms;
+
+	a_us = begin->tv_sec * 1000000 + begin->tv_usec;
+	b_us = end->tv_sec * 1000000 + end->tv_usec;
+	c_us = b_us - a_us;
+
+	s = c_us / 1000000;
+	us = c_us % 1000000;
+	ms = us / 1000;
+
+	return (s * 1000 + ms);
+}
+
 int process_plocks(void)
 {
 	struct mountgroup *mg;
@@ -324,13 +339,13 @@
 	if (message_flow_control_on)
 		return 0;
 
-	/* do we want to do something a little more accurate than tv_sec? */
+	/* Every N ops we check how long it's taken to do those N ops.
+	   If it's less than 1000 ms, we don't take any more. */
 
-	/* limit plock rate within one second */
 	if (plock_rate_limit && plock_read_count &&
 	    !(plock_read_count % plock_rate_limit)) {
 		gettimeofday(&now, NULL);
-		if (now.tv_sec - plock_rate_last.tv_sec <= 0) {
+		if (time_diff_ms(&plock_rate_last, &now) < 1000) {
 			plock_rate_delays++;
 			return -EBUSY;
 		}



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-20 18:12 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-20 18:12 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2006-11-20 18:12:54

Modified files:
	group/gfs_controld: plock.c 

Log message:
	The plock rate limiting code should use the full timeval to measure
	the 1 sec limit interval instead of just the rough difference in
	tv_sec values.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25.2.1&r2=1.25.2.2

--- cluster/group/gfs_controld/plock.c	2006/11/14 20:33:32	1.25.2.1
+++ cluster/group/gfs_controld/plock.c	2006/11/20 18:12:54	1.25.2.2
@@ -311,6 +311,21 @@
 	return control_fd;
 }
 
+static unsigned long time_diff_ms(struct timeval *begin, struct timeval *end)
+{
+	unsigned long a_us, b_us, c_us, s, us, ms;
+
+	a_us = begin->tv_sec * 1000000 + begin->tv_usec;
+	b_us = end->tv_sec * 1000000 + end->tv_usec;
+	c_us = b_us - a_us;
+
+	s = c_us / 1000000;
+	us = c_us % 1000000;
+	ms = us / 1000;
+
+	return (s * 1000 + ms);
+}
+
 int process_plocks(void)
 {
 	struct mountgroup *mg;
@@ -324,13 +339,13 @@
 	if (message_flow_control_on)
 		return 0;
 
-	/* do we want to do something a little more accurate than tv_sec? */
+	/* Every N ops we check how long it's taken to do those N ops.
+	   If it's less than 1000 ms, we don't take any more. */
 
-	/* limit plock rate within one second */
 	if (plock_rate_limit && plock_read_count &&
 	    !(plock_read_count % plock_rate_limit)) {
 		gettimeofday(&now, NULL);
-		if (now.tv_sec - plock_rate_last.tv_sec <= 0) {
+		if (time_diff_ms(&plock_rate_last, &now) < 1000) {
 			plock_rate_delays++;
 			return -EBUSY;
 		}



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-20 18:13 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-20 18:13 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL50
Changes by:	teigland at sourceware.org	2006-11-20 18:13:03

Modified files:
	group/gfs_controld: plock.c 

Log message:
	The plock rate limiting code should use the full timeval to measure
	the 1 sec limit interval instead of just the rough difference in
	tv_sec values.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.25.4.1&r2=1.25.4.2

--- cluster/group/gfs_controld/plock.c	2006/11/14 21:30:59	1.25.4.1
+++ cluster/group/gfs_controld/plock.c	2006/11/20 18:13:02	1.25.4.2
@@ -311,6 +311,21 @@
 	return control_fd;
 }
 
+static unsigned long time_diff_ms(struct timeval *begin, struct timeval *end)
+{
+	unsigned long a_us, b_us, c_us, s, us, ms;
+
+	a_us = begin->tv_sec * 1000000 + begin->tv_usec;
+	b_us = end->tv_sec * 1000000 + end->tv_usec;
+	c_us = b_us - a_us;
+
+	s = c_us / 1000000;
+	us = c_us % 1000000;
+	ms = us / 1000;
+
+	return (s * 1000 + ms);
+}
+
 int process_plocks(void)
 {
 	struct mountgroup *mg;
@@ -324,13 +339,13 @@
 	if (message_flow_control_on)
 		return 0;
 
-	/* do we want to do something a little more accurate than tv_sec? */
+	/* Every N ops we check how long it's taken to do those N ops.
+	   If it's less than 1000 ms, we don't take any more. */
 
-	/* limit plock rate within one second */
 	if (plock_rate_limit && plock_read_count &&
 	    !(plock_read_count % plock_rate_limit)) {
 		gettimeofday(&now, NULL);
-		if (now.tv_sec - plock_rate_last.tv_sec <= 0) {
+		if (time_diff_ms(&plock_rate_last, &now) < 1000) {
 			plock_rate_delays++;
 			return -EBUSY;
 		}



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-20 21:28 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-20 21:28 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-11-20 21:28:54

Modified files:
	group/gfs_controld: plock.c 

Log message:
	use timersub() macro to subtract timevals instead of coding it

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.28&r2=1.29

--- cluster/group/gfs_controld/plock.c	2006/11/20 21:07:18	1.28
+++ cluster/group/gfs_controld/plock.c	2006/11/20 21:28:53	1.29
@@ -313,17 +313,9 @@
 
 static unsigned long time_diff_ms(struct timeval *begin, struct timeval *end)
 {
-	unsigned long a_us, b_us, c_us, s, us, ms;
-
-	a_us = begin->tv_sec * 1000000 + begin->tv_usec;
-	b_us = end->tv_sec * 1000000 + end->tv_usec;
-	c_us = b_us - a_us;
-
-	s = c_us / 1000000;
-	us = c_us % 1000000;
-	ms = us / 1000;
-
-	return (s * 1000 + ms);
+	struct timeval result;
+	timersub(end, begin, &result);
+	return (result.tv_sec * 1000) + (result.tv_usec / 1000);
 }
 
 int process_plocks(void)



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-20 21:29 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-20 21:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2006-11-20 21:29:41

Modified files:
	group/gfs_controld: plock.c 

Log message:
	use timersub() macro to subtract timevals instead of coding it

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25.2.3&r2=1.25.2.4

--- cluster/group/gfs_controld/plock.c	2006/11/20 21:10:28	1.25.2.3
+++ cluster/group/gfs_controld/plock.c	2006/11/20 21:29:40	1.25.2.4
@@ -313,17 +313,9 @@
 
 static unsigned long time_diff_ms(struct timeval *begin, struct timeval *end)
 {
-	unsigned long a_us, b_us, c_us, s, us, ms;
-
-	a_us = begin->tv_sec * 1000000 + begin->tv_usec;
-	b_us = end->tv_sec * 1000000 + end->tv_usec;
-	c_us = b_us - a_us;
-
-	s = c_us / 1000000;
-	us = c_us % 1000000;
-	ms = us / 1000;
-
-	return (s * 1000 + ms);
+	struct timeval result;
+	timersub(end, begin, &result);
+	return (result.tv_sec * 1000) + (result.tv_usec / 1000);
 }
 
 int process_plocks(void)



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2006-11-20 21:29 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2006-11-20 21:29 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL50
Changes by:	teigland at sourceware.org	2006-11-20 21:29:50

Modified files:
	group/gfs_controld: plock.c 

Log message:
	use timersub() macro to subtract timevals instead of coding it

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.25.4.3&r2=1.25.4.4

--- cluster/group/gfs_controld/plock.c	2006/11/20 21:10:36	1.25.4.3
+++ cluster/group/gfs_controld/plock.c	2006/11/20 21:29:50	1.25.4.4
@@ -313,17 +313,9 @@
 
 static unsigned long time_diff_ms(struct timeval *begin, struct timeval *end)
 {
-	unsigned long a_us, b_us, c_us, s, us, ms;
-
-	a_us = begin->tv_sec * 1000000 + begin->tv_usec;
-	b_us = end->tv_sec * 1000000 + end->tv_usec;
-	c_us = b_us - a_us;
-
-	s = c_us / 1000000;
-	us = c_us % 1000000;
-	ms = us / 1000;
-
-	return (s * 1000 + ms);
+	struct timeval result;
+	timersub(end, begin, &result);
+	return (result.tv_sec * 1000) + (result.tv_usec / 1000);
 }
 
 int process_plocks(void)



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2007-06-08 21:30 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2007-06-08 21:30 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-06-08 21:30:54

Modified files:
	group/gfs_controld: plock.c 

Log message:
	Return 1 or 0 GETLK result to the kernel for conflict/no-conflict.
	We were always returning 0 before.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.30&r2=1.31

--- cluster/group/gfs_controld/plock.c	2006/11/21 17:28:09	1.30
+++ cluster/group/gfs_controld/plock.c	2007/06/08 21:30:54	1.31
@@ -915,9 +915,9 @@
 		goto out;
 
 	if (is_conflict(r, in, 1))
-		in->rv = 1;
+		rv = 1;
 	else
-		in->rv = 0;
+		rv = 0;
  out:
 	write_result(mg, in, rv);
 }



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2007-06-08 21:31 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2007-06-08 21:31 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2007-06-08 21:31:56

Modified files:
	group/gfs_controld: plock.c 

Log message:
	Return 1 or 0 GETLK result to the kernel for conflict/no-conflict.
	We were always returning 0 before.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25.2.5&r2=1.25.2.6

--- cluster/group/gfs_controld/plock.c	2006/11/21 17:28:46	1.25.2.5
+++ cluster/group/gfs_controld/plock.c	2007/06/08 21:31:56	1.25.2.6
@@ -915,9 +915,9 @@
 		goto out;
 
 	if (is_conflict(r, in, 1))
-		in->rv = 1;
+		rv = 1;
 	else
-		in->rv = 0;
+		rv = 0;
  out:
 	write_result(mg, in, rv);
 }



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2007-11-29 21:27 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2007-11-29 21:27 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-11-29 21:27:58

Modified files:
	group/gfs_controld: plock.c 

Log message:
	Testing revealed a couple more races I hadn't expected.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.34&r2=1.35

--- cluster/group/gfs_controld/plock.c	2007/11/28 20:49:08	1.34
+++ cluster/group/gfs_controld/plock.c	2007/11/29 21:27:58	1.35
@@ -1404,8 +1404,12 @@
 	}
 
 	if (r->owner != 0) {
-		/* shouldn't happen */
-		log_error("receive_drop from %d r %llx owner %d", from,
+		/* - A sent drop, B sent drop, receive drop A, C sent own,
+		     receive drop B (this warning on C, owner -1)
+	   	   - A sent drop, B sent drop, receive drop A, A sent own,
+		     receive own A, receive drop B (this warning on all,
+		     owner A) */
+		log_debug("receive_drop from %d r %llx owner %d", from,
 			  (unsigned long long)r->number, r->owner);
 		return;
 	}



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2007-11-30 16:20 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2007-11-30 16:20 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-11-30 16:20:56

Modified files:
	group/gfs_controld: plock.c 

Log message:
	change some log messages

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.35&r2=1.36

--- cluster/group/gfs_controld/plock.c	2007/11/29 21:27:58	1.35
+++ cluster/group/gfs_controld/plock.c	2007/11/30 16:20:55	1.36
@@ -971,6 +971,9 @@
 	   - A sends drop, B sends plock, receive drop, receive plock.
 	   This is addressed above.
 
+	   - A sends drop, B sends plock, receive drop, B reads plock
+	   and sends own, receive plock, on B we find owner of -1.
+
 	   - A sends drop, B sends two plocks, receive drop, receive plocks.
 	   Receiving the first plock is the previous case, receiving the
 	   second plock will find r with owner of -1.
@@ -983,23 +986,29 @@
 	   last case below; receiving a plock from ourself and finding
 	   we're the owner of r. */
 
-	/* may want to supress this if some of them are common enough */
-	if (r->owner)
-		log_error("receive_plock from %d r %llx owner %d", from,
-			  (unsigned long long)info.number, r->owner);
-
 	if (!r->owner) {
 		__receive_plock(mg, &info, from, r);
 
 	} else if (r->owner == -1) {
+		log_debug("receive_plock from %d r %llx owner %d", from,
+			  (unsigned long long)info.number, r->owner);
+
 		if (from == our_nodeid)
 			save_pending_plock(mg, r, &info);
 
 	} else if (r->owner != our_nodeid) {
+		/* might happen, if frequent change to log_debug */
+		log_error("receive_plock from %d r %llx owner %d", from,
+			  (unsigned long long)info.number, r->owner);
+
 		if (from == our_nodeid)
 			save_pending_plock(mg, r, &info);
 
 	} else if (r->owner == our_nodeid) {
+		/* might happen, if frequent change to log_debug */
+		log_error("receive_plock from %d r %llx owner %d", from,
+			  (unsigned long long)info.number, r->owner);
+
 		if (from == our_nodeid)
 			__receive_plock(mg, &info, from, r);
 	}



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2008-01-21 20:17 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2008-01-21 20:17 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	teigland at sourceware.org	2008-01-21 20:17:44

Modified files:
	group/gfs_controld: plock.c 

Log message:
	bz 429546
	
	Fix an alignment problem with ppc64.  Things work if we do the
	byte-swapping on the original structure and then copy it into the
	final buffer, instead of copying first and then trying to do the
	byte-swapping at an offset within the send buffer.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25.2.8&r2=1.25.2.9

--- cluster/group/gfs_controld/plock.c	2007/12/05 22:11:32	1.25.2.8
+++ cluster/group/gfs_controld/plock.c	2008/01/21 20:17:44	1.25.2.9
@@ -1044,13 +1044,14 @@
 	}
 	memset(buf, 0, len);
 
+	info_bswap_out(in);
+
 	hd = (struct gdlm_header *)buf;
 	hd->type = msg_type;
 	hd->nodeid = our_nodeid;
 	hd->to_nodeid = 0;
 
 	memcpy(buf + sizeof(struct gdlm_header), in, sizeof(*in));
-	info_bswap_out((struct gdlm_plock_info *) buf + sizeof(*hd));
 
 	rv = send_group_message(mg, len, buf);
 



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2008-01-21 20:19 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2008-01-21 20:19 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL51
Changes by:	teigland at sourceware.org	2008-01-21 20:19:08

Modified files:
	group/gfs_controld: plock.c 

Log message:
	bz 429546
	
	Fix an alignment problem with ppc64.  Things work if we do the
	byte-swapping on the original structure and then copy it into the
	final buffer, instead of copying first and then trying to do the
	byte-swapping at an offset within the send buffer.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL51&r1=1.25.2.6&r2=1.25.2.6.2.1

--- cluster/group/gfs_controld/plock.c	2007/06/08 21:31:56	1.25.2.6
+++ cluster/group/gfs_controld/plock.c	2008/01/21 20:19:08	1.25.2.6.2.1
@@ -399,15 +399,14 @@
 
 	info.nodeid = our_nodeid;
 
+	info_bswap_out(&info);
+
 	hd = (struct gdlm_header *)buf;
 	hd->type = MSG_PLOCK;
 	hd->nodeid = our_nodeid;
 	hd->to_nodeid = 0;
 	memcpy(buf + sizeof(struct gdlm_header), &info, sizeof(info));
 
-	info_bswap_out((struct gdlm_plock_info *) buf +
-						  sizeof(struct gdlm_header));
-
 	rv = send_group_message(mg, len, buf);
 
 	free(buf);



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [Cluster-devel] cluster/group/gfs_controld plock.c
@ 2008-01-21 20:21 teigland
  0 siblings, 0 replies; 23+ messages in thread
From: teigland @ 2008-01-21 20:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2008-01-21 20:21:08

Modified files:
	group/gfs_controld: plock.c 

Log message:
	bz 429546
	
	Fix an alignment problem with ppc64.  Things work if we do the
	byte-swapping on the original structure and then copy it into the
	final buffer, instead of copying first and then trying to do the
	byte-swapping at an offset within the send buffer.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.36&r2=1.37

--- cluster/group/gfs_controld/plock.c	2007/11/30 16:20:55	1.36
+++ cluster/group/gfs_controld/plock.c	2008/01/21 20:21:08	1.37
@@ -1044,13 +1044,14 @@
 	}
 	memset(buf, 0, len);
 
+	info_bswap_out(in);
+
 	hd = (struct gdlm_header *)buf;
 	hd->type = msg_type;
 	hd->nodeid = our_nodeid;
 	hd->to_nodeid = 0;
 
 	memcpy(buf + sizeof(struct gdlm_header), in, sizeof(*in));
-	info_bswap_out((struct gdlm_plock_info *) buf + sizeof(*hd));
 
 	rv = send_group_message(mg, len, buf);
 



^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2008-01-21 20:21 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-11-20 18:10 [Cluster-devel] cluster/group/gfs_controld plock.c teigland
  -- strict thread matches above, loose matches on Subject: below --
2008-01-21 20:21 teigland
2008-01-21 20:19 teigland
2008-01-21 20:17 teigland
2007-11-30 16:20 teigland
2007-11-29 21:27 teigland
2007-06-08 21:31 teigland
2007-06-08 21:30 teigland
2006-11-20 21:29 teigland
2006-11-20 21:29 teigland
2006-11-20 21:28 teigland
2006-11-20 18:13 teigland
2006-11-20 18:12 teigland
2006-11-03 15:33 teigland
2006-10-09 21:51 teigland
2006-08-17 19:39 teigland
2006-08-16 19:30 teigland
2006-08-16 17:05 teigland
2006-08-15 22:21 rpeterso
2006-08-14 20:15 teigland
2006-08-08 19:37 teigland
2006-08-08 18:43 teigland
2006-08-02 19:23 teigland

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).