linux-kernel.vger.kernel.org archive mirror
* [PATCH]IPC locks breaking down with RCU
@ 2002-10-18  0:14 mingming cao
  2002-10-20 13:14 ` Hugh Dickins
  0 siblings, 1 reply; 51+ messages in thread
From: mingming cao @ 2002-10-18  0:14 UTC (permalink / raw)
  To: torvalds, akpm, Hugh Dickins, linux-kernel; +Cc: cmm, dipankar

[-- Attachment #1: Type: text/plain, Size: 1197 bytes --]

Hi Linus,

This is the latest version of the ipc lock patch.  It breaks the three
global IPC locks down into one lock per IPC ID, and also addresses the
cache line bouncing problem introduced by the original patch.  The
original post can be found at:
http://marc.theaimsgroup.com/?l=linux-kernel&m=102980357802682&w=2
The original patch broke down the global IPC locks, but added another
layer of locking to protect the IPC ID array against resizing.  Some
concern was raised that this read/write lock could cause cache line
bouncing.

Since the write lock is only taken when the array is dynamically resized,
RCU is a perfect fit for this situation.  It reduces the possible lock
contention in applications where IPC resources are heavily used, without
introducing cache line bouncing.
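
To summarize, here is the reader fast path, condensed from the patch
below (kernel-style sketch only, not a standalone compile unit): a lookup
takes rcu_read_lock() plus the per-id spinlock and never touches a global
lock, while grow_ary() publishes the resized array with wmb() and hands
the old one to call_rcu() so readers still walking it remain safe.

	struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
	{
		struct kern_ipc_perm *out;
		int lid = id % SEQ_MULTIPLIER;

		rcu_read_lock();		/* pin the current entries[] array */
		if (lid >= ids->size) {
			rcu_read_unlock();
			return NULL;
		}
		rmb();				/* pairs with wmb() in grow_ary() */
		out = ids->entries[lid].p;
		if (out == NULL) {
			rcu_read_unlock();
			return NULL;
		}
		spin_lock(&out->lock);		/* per-id lock replaces ids->ary */
		return out;
	}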

Besides the RCU changes, the patch also removes the redundant
ipc_lockall() and ipc_unlockall(), as suggested by Hugh Dickins.

The patch is against the 2.5.43 kernel.  It requires Dipankar Sarma's
read_barrier_depends RCU helper patch:
http://marc.theaimsgroup.com/?l=linux-kernel&m=103479438017486&w=2

We tested the ipc lock patch with OracleApps, and it gave us the best numbers.
Please include.

Mingming Cao

[-- Attachment #2: ipclock-rcu-2543.patch --]
[-- Type: text/plain, Size: 5233 bytes --]

Binary files linux-2.5.43/arch/i386/boot/compressed/vmlinux.bin.gz and linux-2.5.43-ipc/arch/i386/boot/compressed/vmlinux.bin.gz differ
diff -urN -X dontdiff linux-2.5.43/include/linux/ipc.h linux-2.5.43-ipc/include/linux/ipc.h
--- linux-2.5.43/include/linux/ipc.h	Tue Oct 15 20:26:43 2002
+++ linux-2.5.43-ipc/include/linux/ipc.h	Wed Oct 16 09:48:28 2002
@@ -56,6 +56,7 @@
 /* used by in-kernel data structures */
 struct kern_ipc_perm
 {
+	spinlock_t	lock;
 	key_t		key;
 	uid_t		uid;
 	gid_t		gid;
diff -urN -X dontdiff linux-2.5.43/ipc/shm.c linux-2.5.43-ipc/ipc/shm.c
--- linux-2.5.43/ipc/shm.c	Tue Oct 15 20:28:22 2002
+++ linux-2.5.43-ipc/ipc/shm.c	Wed Oct 16 09:48:28 2002
@@ -38,8 +38,6 @@
 
 #define shm_lock(id)	((struct shmid_kernel*)ipc_lock(&shm_ids,id))
 #define shm_unlock(id)	ipc_unlock(&shm_ids,id)
-#define shm_lockall()	ipc_lockall(&shm_ids)
-#define shm_unlockall()	ipc_unlockall(&shm_ids)
 #define shm_get(id)	((struct shmid_kernel*)ipc_get(&shm_ids,id))
 #define shm_buildid(id, seq) \
 	ipc_buildid(&shm_ids, id, seq)
@@ -409,14 +407,12 @@
 
 		memset(&shm_info,0,sizeof(shm_info));
 		down(&shm_ids.sem);
-		shm_lockall();
 		shm_info.used_ids = shm_ids.in_use;
 		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
 		shm_info.shm_tot = shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
 		err = shm_ids.max_id;
-		shm_unlockall();
 		up(&shm_ids.sem);
 		if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
 			return -EFAULT;
diff -urN -X dontdiff linux-2.5.43/ipc/util.c linux-2.5.43-ipc/ipc/util.c
--- linux-2.5.43/ipc/util.c	Tue Oct 15 20:27:54 2002
+++ linux-2.5.43-ipc/ipc/util.c	Wed Oct 16 09:48:28 2002
@@ -92,8 +92,10 @@
 {
 	int id;
 	struct kern_ipc_perm* p;
+	int max_id = ids->max_id;
 
-	for (id = 0; id <= ids->max_id; id++) {
+	read_barrier_depends();
+	for (id = 0; id <= max_id; id++) {
 		p = ids->entries[id].p;
 		if(p==NULL)
 			continue;
@@ -106,8 +108,8 @@
 static int grow_ary(struct ipc_ids* ids, int newsize)
 {
 	struct ipc_id* new;
-	struct ipc_id* old;
 	int i;
+	struct rcu_ipc_array *arg = NULL;
 
 	if(newsize > IPCMNI)
 		newsize = IPCMNI;
@@ -121,14 +123,19 @@
 	for(i=ids->size;i<newsize;i++) {
 		new[i].p = NULL;
 	}
+	arg = (struct rcu_ipc_array *) kmalloc(sizeof(*arg), GFP_KERNEL);
+	if(arg == NULL)
+		return ids->size;
+	arg->entries = ids->entries;
+	arg->size = ids->size;
+	
 	spin_lock(&ids->ary);
-
-	old = ids->entries;
 	ids->entries = new;
-	i = ids->size;
+	wmb();
 	ids->size = newsize;
 	spin_unlock(&ids->ary);
-	ipc_free(old, sizeof(struct ipc_id)*i);
+
+	call_rcu(&arg->rh, ipc_free_callback, arg);
 	return ids->size;
 }
 
@@ -166,7 +173,9 @@
 	if(ids->seq > ids->seq_max)
 		ids->seq = 0;
 
-	spin_lock(&ids->ary);
+	new->lock = SPIN_LOCK_UNLOCKED;
+	rcu_read_lock();
+	spin_lock(&new->lock);
 	ids->entries[id].p = new;
 	return id;
 }
@@ -188,6 +197,7 @@
 	int lid = id % SEQ_MULTIPLIER;
 	if(lid >= ids->size)
 		BUG();
+	rmb();
 	p = ids->entries[lid].p;
 	ids->entries[lid].p = NULL;
 	if(p==NULL)
@@ -239,7 +249,12 @@
 	else
 		kfree(ptr);
 }
-
+static void ipc_free_callback(void * arg)
+{
+	struct rcu_ipc_array *a = (struct rcu_ipc_array *)arg;
+	ipc_free(a->entries, a->size);
+	kfree(arg);
+}
 /**
  *	ipcperms	-	check IPC permissions
  *	@ipcp: IPC permission set
diff -urN -X dontdiff linux-2.5.43/ipc/util.h linux-2.5.43-ipc/ipc/util.h
--- linux-2.5.43/ipc/util.h	Tue Oct 15 20:28:24 2002
+++ linux-2.5.43-ipc/ipc/util.h	Wed Oct 16 09:48:28 2002
@@ -4,6 +4,7 @@
  *
  * ipc helper functions (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
  */
+#include <linux/rcupdate.h>
 
 #define USHRT_MAX 0xffff
 #define SEQ_MULTIPLIER	(IPCMNI)
@@ -12,6 +13,12 @@
 void msg_init (void);
 void shm_init (void);
 
+struct rcu_ipc_array {
+	struct rcu_head rh;
+	struct ipc_id* entries;
+	int size;
+};
+
 struct ipc_ids {
 	int size;
 	int in_use;
@@ -44,11 +51,7 @@
  */
 void* ipc_alloc(int size);
 void ipc_free(void* ptr, int size);
-
-extern inline void ipc_lockall(struct ipc_ids* ids)
-{
-	spin_lock(&ids->ary);
-}
+void ipc_free_callback(void* arg);
 
 extern inline struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
 {
@@ -56,32 +59,43 @@
 	int lid = id % SEQ_MULTIPLIER;
 	if(lid >= ids->size)
 		return NULL;
-
+	rmb();
 	out = ids->entries[lid].p;
 	return out;
 }
 
-extern inline void ipc_unlockall(struct ipc_ids* ids)
-{
-	spin_unlock(&ids->ary);
-}
 extern inline struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
 {
 	struct kern_ipc_perm* out;
 	int lid = id % SEQ_MULTIPLIER;
-	if(lid >= ids->size)
-		return NULL;
 
-	spin_lock(&ids->ary);
+	rcu_read_lock();
+	if(lid >= ids->size) {
+		rcu_read_unlock();
+		return NULL;
+	}
+	rmb();
 	out = ids->entries[lid].p;
-	if(out==NULL)
-		spin_unlock(&ids->ary);
+	if(out==NULL) {
+		rcu_read_unlock();
+		return NULL;
+	}
+	spin_lock(&out->lock);
 	return out;
 }
 
 extern inline void ipc_unlock(struct ipc_ids* ids, int id)
 {
-	spin_unlock(&ids->ary);
+	int lid = id % SEQ_MULTIPLIER;
+	struct kern_ipc_perm* out;
+
+        if(lid >= ids->size)
+		return;
+	rmb();	
+	out = ids->entries[lid].p;
+	if (out)
+		spin_unlock(&out->lock);
+	rcu_read_unlock();
 }
 
 extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq)

^ permalink raw reply	[flat|nested] 51+ messages in thread
* Re: [PATCH]updated ipc lock patch
@ 2002-10-25 17:20 Cliff White
  0 siblings, 0 replies; 51+ messages in thread
From: Cliff White @ 2002-10-25 17:20 UTC (permalink / raw)
  To: linux-kernel, lse-tech


> mingming cao wrote:
> > 
> > Hi Andrew,
> > 
> > Here is the updated ipc lock patch:
> 
> Well I can get you a bit of testing and attention, but I'm afraid
> my knowledge of the IPC code is negligible.
> 
> So to be able to commend this change to Linus I'd have to rely on
> assurances from people who _do_ understand IPC (Hugh?) and on lots
> of testing.
> 
> So yes, I'll include it, and would solicit success reports from
> people who are actually exercising that code path, thanks.
> 
> > http://www.osdl.org/projects/dbt1prfrns/results/mingming/index.html
> 
> DBT1 is really interesting, and I'm glad the OSDL team have
> put it together.  If people would only stop sending me patches
> I'd be using it ;)
> 
Thank you very much for that :)

> Could someone please help explain the results?  Comparing, say,
> http://www.osdl.org/projects/dbt1prfrns/results/mingming/run.2cpu.42-mm2.r5/index.html
> and
> http://www.osdl.org/projects/dbt1prfrns/results/mingming/run.18.r5/index.html
> 
> It would appear that 2.5 completely smoked 2.4 on response time,
> yet the overall bogotransactions/sec is significantly lower.
> What should we conclude from this?

Whoa - we ran these 5 times for an average.  The 2.5 run you picked was the
'off' run - it has the worst results.  You will notice that on this run there
are a large number of errors which didn't happen on the other runs - this
lowered the BT/sec number.  Use one of the other 2.5 runs and you'll see
something more sensible (say, 42-mm2.r3).  Unfortunately, on average, 2.4
still beats 2.5 on both response time and BTs.

                          2.5.42-mm2    2.5.42-mm2-ipclock   2.4.18
 Average over 5 runs      85.0 BT       89.8 BT              96.92 BT
 Std Deviation 5 runs     7.4 BT        1.0 BT               2.07 BT
 Average of best 4 runs   88.15 BT      90.2 BT              97.2 BT
 Std Deviation 4 runs     2.8 BT        0.5 BT               2.3 BT
> 
One other place to start comparing is the system information at the bottom
of the page.  Some points (might be minor):
CPU statistics:
	2.4.18 - %idle averages around 1.5%, %system swings between 3-7%, %nice
	steady at ~3.6%
	2.5.42-mm2 - %idle 0.0% all through the run, %system steady at ~6%, %nice
	up at ~5.5%
Swap (sar -r):
	Very slight differences - we consumed ~98% of the memory in both cases;
	2.4 swapped a little bit more (28%) than 2.5 (26%).
We also include profile data for both the load and run phases (profile=2).

> Also I see:
> 
> 	14.7 minute duration
> and
> 	Time for DBT run 19:36
> 
> What is the 14.7 minutes referring to?
> 
The 14.7 minute time comes from the workload driver log, which is parsed to
get the response numbers.  The 'Time for' stamps come from the master driver
script, and include some of the workload startup and shutdown time.  The
workload driver waits a bit to be sure things are stable before the official
run data is collected.  The script timestamp waits until the run clients are
dead.  So there's always a bit of a delta between the two.

> Also:
> 
> 	2.5: Time for key creation 1:27
> 	2.4: Time for key creation 14:24
> versus:
> 	2.5: Time for table creation 16:48
> 	2.4: Time for table creation 8:58
>  
	
	
This is a Mystery Question - we don't have an answer; we were hoping _you_
would see something :)
Table creation involves sequential inserts of data from a flat file into an
SAP DB B-tree on a devspace.  Our devspace is a raw device, so we're doing
raw I/O, plus some processing.  This op is write-intensive.
'Key creation' is establishing a foreign key column constraint on various
tables.  For each table, it examines every row, looks up (via a B-tree index
lookup) the column value in a second table to find the specific primary key
that matches the column value in the first table.  So again, some I/O and a
bit of processing.  Key creation (foreign key) is read-intensive.
Also interesting is the delta in index creation:
	2.5 Time for index creation 27:58
	2.4 Time for index creation 17:21
Index creation requires a read of the table, a sort, then creation of a
B-tree index.  Both the index and table creates build a B-tree for SAP DB
(both run slower on 2.5) - the table creation does no sorting.
We also notice that the times for both index and key creation vary a bit more
across runs with the -mm2 kernel, as shown by the standard deviation across
the runs.  mingming's kernel (2.5.42-mm2-ipclock) and 2.4.18 are a bit more
consistent.  (We threw out -mm2 run 5 for this average, due to the errors.)

Results are: average time [std dev]
Action           2.4.18         2.5.42-mm2     2.5.42-mm2-ipclock
table create     8:55  [0:04]   19:03 [2:40]   19:39 [0:50]
index create     17:17 [0:11]   25:19 [5:31]   28:05 [0:02]
key create       14:23 [0:16]   15:21 [6:37]   18:46 [0:17]

Also interesting is -mm2 run 2 - foreign key creation took 5:26 and the run
completed with no errors... why so fast, and only that one time?
It is an ongoing mystery.  We Just Don't Know Why Right Now.
We are working on better data capture of db/run errors, and we'd love to hear
suggestions on improving the instrumentation.


> So it's all rather confusing.  Masses of numbers usually _are_
> confusing.  What really adds tons of value to such an exercise is
> for the person who ran the test to write up some conclusions. 

Yes, agreed.  We don't yet know enough to map from test results to an exact
kernel area.  We just added a database expert to the staff (Mary Edie
Meredith), so we intend to get better.  We'll probably be nagging you a bit,
and again we very much appreciate all suggestions.

> To tell the developers what went well, what went poorly, what areas
> to focus on, etc.  To use your own judgement to tell us what to
> zoom in on.
> 
> Is that something which could be added?
> 
It is something we are working on adding.  
cliffw




------- End of Forwarded Message




^ permalink raw reply	[flat|nested] 51+ messages in thread
[parent not found: <Pine.LNX.4.44.0210270748560.1704-100000@localhost.localdomain>]
* Re: [PATCH]updated ipc lock patch
@ 2002-10-28  1:15 Rusty Russell
  2002-10-28  1:35 ` Davide Libenzi
  0 siblings, 1 reply; 51+ messages in thread
From: Rusty Russell @ 2002-10-28  1:15 UTC (permalink / raw)
  To: Hugh Dickins; +Cc: mingming cao, Andrew Morton, linux-kernel

> diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal working-2.5.44-mm4-ipc-rcu/include/linux/msg.h working-2.5.44-mm4-ipc-rcu-fix/include/linux/msg.h
> --- working-2.5.44-mm4-ipc-rcu/include/linux/msg.h	2002-07-21 17:43:10.000000000 +1000
> +++ working-2.5.44-mm4-ipc-rcu-fix/include/linux/msg.h	2002-10-28 11:12:54.000000000 +1100

Oops.  That patch had some fluff in msg.h and sem.h.  Delete those, or
just use this instead (still against Mingming's mm4 "ignore kmalloc
failure" patch):

diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal working-2.5.44-mm4-ipc-rcu/ipc/util.c working-2.5.44-mm4-ipc-rcu-fix/ipc/util.c
--- working-2.5.44-mm4-ipc-rcu/ipc/util.c	2002-10-28 11:08:35.000000000 +1100
+++ working-2.5.44-mm4-ipc-rcu-fix/ipc/util.c	2002-10-28 12:01:09.000000000 +1100
@@ -213,21 +213,49 @@ struct kern_ipc_perm* ipc_rmid(struct ip
 	return p;
 }
 
+struct ipc_rcu_kmalloc
+{
+	struct rcu_head rcu;
+	/* "void *" makes sure alignment of following data is sane. */
+	void *data[0];
+};
+
+struct ipc_rcu_vmalloc
+{
+	struct rcu_head rcu;
+	struct work_struct work;
+	/* "void *" makes sure alignment of following data is sane. */
+	void *data[0];
+};
+
+static inline int use_vmalloc(int size)
+{
+	/* Too big for a single page? */
+	if (sizeof(struct ipc_rcu_kmalloc) + size > PAGE_SIZE)
+		return 1;
+	return 0;
+}
+
 /**
  *	ipc_alloc	-	allocate ipc space
  *	@size: size desired
  *
  *	Allocate memory from the appropriate pools and return a pointer to it.
- *	NULL is returned if the allocation fails
+ *	NULL is returned if the allocation fails.  This can be freed with
+ *	ipc_free (to free immediately) or ipc_rcu_free (to free once safe).
  */
- 
 void* ipc_alloc(int size)
 {
 	void* out;
-	if(size > PAGE_SIZE)
-		out = vmalloc(size);
-	else
-		out = kmalloc(size, GFP_KERNEL);
+	/* We prepend the allocation with the rcu struct, and
+           workqueue if necessary (for vmalloc). */
+	if (use_vmalloc(size)) {
+		out = vmalloc(sizeof(struct ipc_rcu_vmalloc) + size);
+		if (out) out += sizeof(struct ipc_rcu_vmalloc);
+	} else {
+		out = kmalloc(sizeof(struct ipc_rcu_kmalloc)+size, GFP_KERNEL);
+		if (out) out += sizeof(struct ipc_rcu_kmalloc);
+	}
 	return out;
 }
 
@@ -242,48 +270,36 @@ void* ipc_alloc(int size)
  
 void ipc_free(void* ptr, int size)
 {
-	if(size > PAGE_SIZE)
-		vfree(ptr);
+	if (use_vmalloc(size))
+		vfree(ptr - sizeof(struct ipc_rcu_vmalloc));
 	else
-		kfree(ptr);
+		kfree(ptr - sizeof(struct ipc_rcu_kmalloc));
 }
 
 /* 
  * Since RCU callback function is called in bh,
  * we need to defer the vfree to schedule_work
  */
-static void ipc_free_scheduled(void* arg)
+static void ipc_schedule_free(void *arg)
 {
-	struct rcu_ipc_free *a = (struct rcu_ipc_free *)arg;
-	vfree(a->ptr);
-	kfree(a);
-}
+	struct ipc_rcu_vmalloc *free = arg;
 
-static void ipc_free_callback(void* arg)
-{
-	struct rcu_ipc_free *a = (struct rcu_ipc_free *)arg;
-	/* 
-	 * if data is vmalloced, then we need to delay the free
-	 */
-	if (a->size > PAGE_SIZE) {
-		INIT_WORK(&a->work, ipc_free_scheduled, arg);
-		schedule_work(&a->work);
-	} else {
-		kfree(a->ptr);
-		kfree(a);
-	}
+	INIT_WORK(&free->work, vfree, free);
+	schedule_work(&free->work);
 }
 
 void ipc_rcu_free(void* ptr, int size)
 {
-	struct rcu_ipc_free* arg;
-
-	arg = (struct rcu_ipc_free *) kmalloc(sizeof(*arg), GFP_KERNEL);
-	if (arg == NULL)
-		return;
-	arg->ptr = ptr;
-	arg->size = size;
-	call_rcu(&arg->rcu_head, ipc_free_callback, arg);
+	if (use_vmalloc(size)) {
+		struct ipc_rcu_vmalloc *free;
+		free = ptr - sizeof(*free);
+		call_rcu(&free->rcu, ipc_schedule_free, free);
+	} else {
+		struct ipc_rcu_kmalloc *free;
+		free = ptr - sizeof(*free);
+		/* kfree takes a "const void *" so gcc warns.  So we cast. */
+		call_rcu(&free->rcu, (void (*)(void *))kfree, free);
+	}
 }
 
 /**
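
If the pointer arithmetic above looks odd, it's the usual prepend-a-header
trick: allocate the rcu_head (plus work_struct, for vmalloc) together with
the data, hand the caller a pointer just past the header, and step back to
the header when freeing.  A userspace toy version of the same trick, purely
illustrative (made-up names, plain malloc/free, no RCU):

	#include <stdio.h>
	#include <stdlib.h>

	struct hdr {
		size_t size;	/* stands in for the rcu_head/work_struct */
	};

	static void *hdr_alloc(size_t size)
	{
		struct hdr *h = malloc(sizeof(*h) + size);

		if (!h)
			return NULL;
		h->size = size;
		return h + 1;		/* caller only ever sees the payload */
	}

	static void hdr_free(void *payload)
	{
		struct hdr *h = (struct hdr *)payload - 1;	/* recover header */

		printf("freeing %zu payload bytes\n", h->size);
		free(h);
	}

	int main(void)
	{
		char *buf = hdr_alloc(32);

		if (!buf)
			return 1;
		snprintf(buf, 32, "hello");
		printf("payload says: %s\n", buf);
		hdr_free(buf);
		return 0;
	}

The data[0] members in the kernel structs above exist to keep the payload
alignment sane; the toy just relies on malloc's alignment.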

--
  Anyone who quotes me in their sig is an idiot. -- Rusty Russell.

^ permalink raw reply	[flat|nested] 51+ messages in thread

end of thread, other threads: [~2002-10-31 17:49 UTC | newest]

Thread overview: 51+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2002-10-18  0:14 [PATCH]IPC locks breaking down with RCU mingming cao
2002-10-20 13:14 ` Hugh Dickins
2002-10-20 17:27   ` Hugh Dickins
2002-10-21 18:11     ` mingming cao
2002-10-21 19:00       ` Hugh Dickins
2002-10-24 21:49         ` [PATCH]updated ipc lock patch mingming cao
2002-10-24 22:29           ` Andrew Morton
2002-10-24 22:56             ` Hugh Dickins
2002-10-24 23:30               ` Andrew Morton
2002-10-24 23:59                 ` Hugh Dickins
2002-10-25  0:35                   ` [Lse-tech] " Rick Lindsley
2002-10-25  1:07                     ` Robert Love
2002-10-25  0:07                 ` mingming cao
2002-10-25  0:24                   ` Andrew Morton
2002-10-25  4:18                 ` Rusty Russell
2002-10-25  5:53                   ` mingming cao
2002-10-25  7:27                     ` Rusty Russell
2002-10-25  5:36                 ` Manfred Spraul
2002-10-25 16:53                 ` Rik van Riel
2002-10-24 23:23             ` mingming cao
2002-10-25 14:21               ` [Lse-tech] " Paul Larson
2002-10-25 17:17                 ` mingming cao
2002-10-25 18:20                   ` Paul Larson
2002-10-25 18:51                     ` mingming cao
2002-10-25 19:06                       ` Paul Larson
2002-10-25 20:14                         ` mingming cao
2002-10-25 20:23                       ` Manfred Spraul
2002-10-25  0:38             ` Cliff White
2002-10-31 17:52             ` [Lse-tech] Re: [PATCH]updated ipc lock patch [PERFORMANCE RESULTS] Bill Hartner
2002-10-21 19:18       ` [PATCH]IPC locks breaking down with RCU Dipankar Sarma
2002-10-21 19:36         ` Hugh Dickins
2002-10-21 19:41         ` mingming cao
2002-10-21 20:14           ` Dipankar Sarma
2002-10-21 18:07   ` mingming cao
  -- strict thread matches above, loose matches on Subject: below --
2002-10-25 17:20 [PATCH]updated ipc lock patch Cliff White
     [not found] <Pine.LNX.4.44.0210270748560.1704-100000@localhost.localdomain>
2002-10-28  1:06 ` Rusty Russell
2002-10-28 14:21   ` Hugh Dickins
2002-10-28 21:47     ` Rusty Russell
2002-10-29  0:26       ` Hugh Dickins
2002-10-29  2:51         ` Rusty Russell
2002-10-28 20:00   ` Dipankar Sarma
2002-10-28 21:41     ` Rusty Russell
2002-10-29  6:11       ` Dipankar Sarma
2002-10-28 22:07     ` mingming cao
2002-10-29  1:06       ` Rusty Russell
2002-10-28  1:15 Rusty Russell
2002-10-28  1:35 ` Davide Libenzi
2002-10-28  4:10   ` Rusty Russell
2002-10-28 17:08     ` Davide Libenzi
2002-10-28 22:39       ` Rusty Russell
2002-10-28 23:52         ` Davide Libenzi
