* [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
@ 2010-04-09  4:13 Changli Gao
  2010-04-09  4:50 ` Eric Dumazet
  2010-04-13 10:09 ` Patrick McHardy
  0 siblings, 2 replies; 22+ messages in thread
From: Changli Gao @ 2010-04-09  4:13 UTC
  To: Patrick McHardy; +Cc: netfilter-devel, xiaosuo

Use a hash table to speed up entry finding.

If verdicts aren't received in order, a linked list is inefficient; a
hash table performs better.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
---
 include/linux/netfilter/nfnetlink_queue.h |    1 +
 net/netfilter/nfnetlink_queue.c            |  118 ++++++++++++++++++++++++----
 2 files changed, 105 insertions(+), 14 deletions(-)

diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 2455fe5..77b1566 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -83,6 +83,7 @@ enum nfqnl_attr_config {
 	NFQA_CFG_CMD,			/* nfqnl_msg_config_cmd */
 	NFQA_CFG_PARAMS,		/* nfqnl_msg_config_params */
 	NFQA_CFG_QUEUE_MAXLEN,		/* __u32 */
+	NFQA_CFG_QUEUE_HTBLSIZ,		/* __u32 */
 	__NFQA_CFG_MAX
 };
 #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index e70a6ef..82bec94 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -28,6 +28,7 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_queue.h>
 #include <linux/list.h>
+#include <linux/flex_array.h>
 #include <net/sock.h>
 #include <net/netfilter/nf_queue.h>
 
@@ -37,7 +38,8 @@
 #include "../bridge/br_private.h"
 #endif
 
-#define NFQNL_QMAX_DEFAULT 1024
+#define NFQNL_QMAX_DEFAULT	1024
+#define NFQNL_QHTBLSIZ_DEFAULT	1
 
 struct nfqnl_instance {
 	struct hlist_node hlist;		/* global list of queues */
@@ -49,6 +51,7 @@ struct nfqnl_instance {
 	unsigned int queue_total;
 	unsigned int queue_dropped;
 	unsigned int queue_user_dropped;
+	unsigned int queue_htblsiz;
 
 	unsigned int id_sequence;		/* 'sequence' of pkt ids */
 
@@ -57,7 +60,7 @@ struct nfqnl_instance {
 
 	spinlock_t lock;
 
-	struct list_head queue_list;		/* packets in queue */
+	struct flex_array *fa;
 };
 
 typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
@@ -91,7 +94,7 @@ static struct nfqnl_instance *
 instance_create(u_int16_t queue_num, int pid)
 {
 	struct nfqnl_instance *inst;
-	unsigned int h;
+	unsigned int h, i;
 	int err;
 
 	spin_lock(&instances_lock);
@@ -112,11 +115,24 @@ instance_create(u_int16_t queue_num, int pid)
 	inst->copy_range = 0xfffff;
 	inst->copy_mode = NFQNL_COPY_NONE;
 	spin_lock_init(&inst->lock);
-	INIT_LIST_HEAD(&inst->queue_list);
+	inst->queue_htblsiz = NFQNL_QHTBLSIZ_DEFAULT;
+	inst->fa = flex_array_alloc(sizeof(struct list_head),
+				    inst->queue_htblsiz,
+				    __GFP_ZERO | GFP_ATOMIC);
+	if (inst->fa == NULL) {
+		err = -ENOMEM;
+		goto out_free_inst;
+	}
+	err = flex_array_prealloc(inst->fa, 0, inst->queue_htblsiz - 1,
+				  __GFP_ZERO | GFP_ATOMIC);
+	if (err != 0)
+		goto out_free_fa;
+	for (i = 0; i < inst->queue_htblsiz; i++)
+		INIT_LIST_HEAD((struct list_head *)flex_array_get(inst->fa, i));
 
 	if (!try_module_get(THIS_MODULE)) {
 		err = -EAGAIN;
-		goto out_free;
+		goto out_free_fa;
 	}
 
 	h = instance_hashfn(queue_num);
@@ -126,7 +142,9 @@ instance_create(u_int16_t queue_num, int pid)
 
 	return inst;
 
-out_free:
+out_free_fa:
+	flex_array_free(inst->fa);
+out_free_inst:
 	kfree(inst);
 out_unlock:
 	spin_unlock(&instances_lock);
@@ -143,6 +161,7 @@ instance_destroy_rcu(struct rcu_head *head)
 						   rcu);
 
 	nfqnl_flush(inst, NULL, 0);
+	flex_array_free(inst->fa);
 	kfree(inst);
 	module_put(THIS_MODULE);
 }
@@ -162,21 +181,32 @@ instance_destroy(struct nfqnl_instance *inst)
 	spin_unlock(&instances_lock);
 }
 
+static inline struct list_head *nfqnl_head_get(struct nfqnl_instance *queue,
+					       unsigned int id)
+{
+	return flex_array_get(queue->fa, id % queue->queue_htblsiz);
+}
+
 static inline void
 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
 {
-       list_add_tail(&entry->list, &queue->queue_list);
-       queue->queue_total++;
+	struct list_head *head;
+
+	head = nfqnl_head_get(queue, entry->id);
+	list_add_tail(&entry->list, head);
+	queue->queue_total++;
 }
 
 static struct nf_queue_entry *
 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
 {
 	struct nf_queue_entry *entry = NULL, *i;
+	struct list_head *head;
 
 	spin_lock_bh(&queue->lock);
 
-	list_for_each_entry(i, &queue->queue_list, list) {
+	head = nfqnl_head_get(queue, id);
+	list_for_each_entry(i, head, list) {
 		if (i->id == id) {
 			entry = i;
 			break;
@@ -197,13 +227,22 @@ static void
 nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 {
 	struct nf_queue_entry *entry, *next;
+	unsigned int i, total;
+	struct list_head *head;
 
 	spin_lock_bh(&queue->lock);
-	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
-		if (!cmpfn || cmpfn(entry, data)) {
-			list_del(&entry->list);
-			queue->queue_total--;
-			nf_reinject(entry, NF_DROP);
+	total = queue->queue_total;
+	for (i = 0; i < queue->queue_htblsiz; i++) {
+		if (total < 1)
+			break;
+		head = flex_array_get(queue->fa, i);
+		list_for_each_entry_safe(entry, next, head, list) {
+			if (!cmpfn || cmpfn(entry, data)) {
+				list_del(&entry->list);
+				queue->queue_total--;
+				nf_reinject(entry, NF_DROP);
+			}
+			--total;
 		}
 	}
 	spin_unlock_bh(&queue->lock);
@@ -686,6 +725,46 @@ static const struct nf_queue_handler nfqh = {
 	.outfn	= &nfqnl_enqueue_packet,
 };
 
+static int nfqnl_htbl_resize(struct nfqnl_instance *queue, unsigned int size)
+{
+	struct flex_array *fa;
+	unsigned int i, total;
+	struct list_head *h;
+	struct nf_queue_entry *entry, *next;
+
+	if (size < 1)
+		return -EINVAL;
+
+	fa = flex_array_alloc(sizeof(struct list_head), size,
+			      __GFP_ZERO | GFP_ATOMIC);
+	if (fa == NULL)
+		return -ENOMEM;
+	if (flex_array_prealloc(fa, 0, size - 1, __GFP_ZERO | GFP_ATOMIC)) {
+		flex_array_free(fa);
+		return -ENOMEM;
+	}
+	for (i = 0; i < size; i++)
+		INIT_LIST_HEAD((struct list_head *)flex_array_get(fa, i));
+	spin_lock_bh(&queue->lock);
+	swap(size, queue->queue_htblsiz);
+	swap(fa, queue->fa);
+	total = queue->queue_total;
+	for (i = 0; i < size; i++) {
+		if (total < 1)
+			break;
+		h = flex_array_get(fa, i);
+		list_for_each_entry_safe(entry, next, h, list) {
+			list_move_tail(&entry->list,
+				       nfqnl_head_get(queue, entry->id));
+			--total;
+		}
+	}
+	spin_unlock_bh(&queue->lock);
+	flex_array_free(fa);
+
+	return 0;
+}
+
 static int
 nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		  const struct nlmsghdr *nlh,
@@ -772,6 +851,17 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		spin_unlock_bh(&queue->lock);
 	}
 
+	if (nfqa[NFQA_CFG_QUEUE_HTBLSIZ]) {
+		__be32 *htblsiz;
+
+		if (!queue) {
+			ret = -ENODEV;
+			goto err_out_unlock;
+		}
+		htblsiz = nla_data(nfqa[NFQA_CFG_QUEUE_HTBLSIZ]);
+		ret = nfqnl_htbl_resize(queue, ntohl(*htblsiz));
+	}
+
 err_out_unlock:
 	rcu_read_unlock();
 	return ret;




* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-09  4:13 [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding Changli Gao
@ 2010-04-09  4:50 ` Eric Dumazet
  2010-04-13 10:09 ` Patrick McHardy
  1 sibling, 0 replies; 22+ messages in thread
From: Eric Dumazet @ 2010-04-09  4:50 UTC
  To: xiaosuo; +Cc: Patrick McHardy, netfilter-devel

On Friday 09 April 2010 at 12:13 +0800, Changli Gao wrote:
> Use a hash table to speed up entry finding.
> 
> If verdicts aren't received in order, a linked list is inefficient; a
> hash table performs better.
> 
> Signed-off-by: Changli Gao <xiaosuo@gmail.com>

You might add to the changelog that this would be the first use of
flex_array in the kernel.

>  
> +static inline struct list_head *nfqnl_head_get(struct nfqnl_instance *queue,
> +					       unsigned int id)
> +{
> +	return flex_array_get(queue->fa, id % queue->queue_htblsiz);
> +}
> +

A divide is still expensive on many arches, even in 2010.

When you repeatedly divide by the same unsigned int, it is better to
use a reciprocal divide.

See include/linux/reciprocal_div.h
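
Something like this would do, as a rough untested sketch (the cached
htblsiz_rec field is made up; reciprocal_value() and
reciprocal_divide() are the helpers from that header):

#include <linux/reciprocal_div.h>

/* At table (re)size time, cache the reciprocal once:
 *	queue->htblsiz_rec = reciprocal_value(queue->queue_htblsiz);
 */
static inline struct list_head *nfqnl_head_get(struct nfqnl_instance *queue,
					       unsigned int id)
{
	/* id % size == id - (id / size) * size; the divide is done as a
	 * multiply-and-shift against the cached reciprocal */
	u32 bucket = id - reciprocal_divide(id, queue->htblsiz_rec) *
			  queue->queue_htblsiz;

	return flex_array_get(queue->fa, bucket);
}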

I don't see how flex_array is useful if you preallocate all of its slots.

flex_array_get()/fa_element_to_part_nr() are monsters if you ask me,
with many divides. You could submit patches to flex_array to use a
reciprocal divide, and fa_element_to_part_nr() should be inlined, so
that flex_array_get() becomes a leaf function.

vmalloc() is way faster in my opinion. If not, vmalloc() should be
fixed.

ffffffff811b7174 <fa_element_to_part_nr>:
ffffffff811b7174:	55                   	push   %rbp
ffffffff811b7175:	48 63 3f             	movslq (%rdi),%rdi
ffffffff811b7178:	b8 00 10 00 00       	mov    $0x1000,%eax
ffffffff811b717d:	31 d2                	xor    %edx,%edx
ffffffff811b717f:	89 f6                	mov    %esi,%esi
ffffffff811b7181:	48 89 e5             	mov    %rsp,%rbp
ffffffff811b7184:	c9                   	leaveq 
ffffffff811b7185:	48 f7 f7             	div    %rdi
ffffffff811b7188:	31 d2                	xor    %edx,%edx
ffffffff811b718a:	48 89 c1             	mov    %rax,%rcx
ffffffff811b718d:	48 89 f0             	mov    %rsi,%rax
ffffffff811b7190:	48 f7 f1             	div    %rcx
ffffffff811b7193:	c3                   	retq   

ffffffff811b7194 <flex_array_get>:
ffffffff811b7194:	55                   	push   %rbp
ffffffff811b7195:	48 89 e5             	mov    %rsp,%rbp
ffffffff811b7198:	41 54                	push   %r12
ffffffff811b719a:	41 89 f4             	mov    %esi,%r12d
ffffffff811b719d:	53                   	push   %rbx
ffffffff811b719e:	48 89 fb             	mov    %rdi,%rbx
ffffffff811b71a1:	e8 ce ff ff ff       	callq  ffffffff811b7174 <fa_element_to_part_nr>
ffffffff811b71a6:	8b 53 04             	mov    0x4(%rbx),%edx
ffffffff811b71a9:	41 39 d4             	cmp    %edx,%r12d
ffffffff811b71ac:	73 35                	jae    ffffffff811b71e3 <flex_array_get+0x4f>
ffffffff811b71ae:	8b 0b                	mov    (%rbx),%ecx
ffffffff811b71b0:	0f af d1             	imul   %ecx,%edx
ffffffff811b71b3:	81 fa f8 0f 00 00    	cmp    $0xff8,%edx
ffffffff811b71b9:	77 2f                	ja     ffffffff811b71ea <flex_array_get+0x56>
ffffffff811b71bb:	48 83 c3 08          	add    $0x8,%rbx
ffffffff811b71bf:	48 63 f9             	movslq %ecx,%rdi
ffffffff811b71c2:	b8 00 10 00 00       	mov    $0x1000,%eax
ffffffff811b71c7:	31 d2                	xor    %edx,%edx
ffffffff811b71c9:	48 f7 f7             	div    %rdi
ffffffff811b71cc:	45 89 e4             	mov    %r12d,%r12d
ffffffff811b71cf:	31 d2                	xor    %edx,%edx
ffffffff811b71d1:	48 89 c6             	mov    %rax,%rsi
ffffffff811b71d4:	4c 89 e0             	mov    %r12,%rax
ffffffff811b71d7:	48 f7 f6             	div    %rsi
ffffffff811b71da:	0f af ca             	imul   %edx,%ecx
ffffffff811b71dd:	48 8d 04 0b          	lea    (%rbx,%rcx,1),%rax
ffffffff811b71e1:	eb 02                	jmp    ffffffff811b71e5 <flex_array_get+0x51>
ffffffff811b71e3:	31 c0                	xor    %eax,%eax
ffffffff811b71e5:	5b                   	pop    %rbx
ffffffff811b71e6:	41 5c                	pop    %r12
ffffffff811b71e8:	c9                   	leaveq 
ffffffff811b71e9:	c3                   	retq   
ffffffff811b71ea:	48 98                	cltq   
ffffffff811b71ec:	48 8b 5c c3 08       	mov    0x8(%rbx,%rax,8),%rbx
ffffffff811b71f1:	48 85 db             	test   %rbx,%rbx
ffffffff811b71f4:	75 c9                	jne    ffffffff811b71bf <flex_array_get+0x2b>
ffffffff811b71f6:	eb eb                	jmp    ffffffff811b71e3 <flex_array_get+0x4f>




* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-09  4:13 [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding Changli Gao
  2010-04-09  4:50 ` Eric Dumazet
@ 2010-04-13 10:09 ` Patrick McHardy
  2010-04-13 11:06   ` Changli Gao
  1 sibling, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-13 10:09 UTC
  To: xiaosuo; +Cc: netfilter-devel

Changli Gao wrote:
> Use a hash table to speed up entry finding.
> 
> If verdicts aren't received in order, a linked list is inefficient; a
> hash table performs better.

And what is the advantage of using flex arrays compared to a simple
open-coded hash table?


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 10:09 ` Patrick McHardy
@ 2010-04-13 11:06   ` Changli Gao
  2010-04-13 12:44     ` Eric Dumazet
  0 siblings, 1 reply; 22+ messages in thread
From: Changli Gao @ 2010-04-13 11:06 UTC
  To: Patrick McHardy; +Cc: netfilter-devel

On Tue, Apr 13, 2010 at 6:09 PM, Patrick McHardy <kaber@trash.net> wrote:
> Changli Gao wrote:
>> Use a hash table to speed up entry finding.
>>
>> If verdicts aren't received in order, a linked list is inefficient; a
>> hash table performs better.
>
> And what is the advantage of using flex arrays compared to a simple
> open-coded hash table?
>

From Documentation/flexible-arrays.txt

Large contiguous memory allocations can be unreliable in the Linux kernel.
Kernel programmers will sometimes respond to this problem by allocating
pages with vmalloc().  This solution is not ideal, though.  On 32-bit systems,
memory from vmalloc() must be mapped into a relatively small address space;
it's easy to run out.  On SMP systems, the page table changes required by
vmalloc() allocations can require expensive cross-processor interrupts on
all CPUs.  And, on all systems, use of space in the vmalloc() range
increases pressure on the translation lookaside buffer (TLB), reducing the
performance of the system.

Users may create lots of queues, each with a large hash table. A flex
array scales better than vmalloc() and kmalloc().
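
As a rough worked example of that claim (assuming 4K pages and a
16-byte struct list_head on 64-bit): a 65536-bucket table needs 1 MB.
kmalloc() would need a single order-8 contiguous allocation, and
vmalloc() needs 256 scattered pages plus vmalloc address space and
page-table updates, while flex_array satisfies the same table with 256
independent single-page part allocations reached through one page of
part pointers.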

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 11:06   ` Changli Gao
@ 2010-04-13 12:44     ` Eric Dumazet
  2010-04-13 13:02       ` Changli Gao
  2010-04-13 13:09       ` Changli Gao
  0 siblings, 2 replies; 22+ messages in thread
From: Eric Dumazet @ 2010-04-13 12:44 UTC
  To: Changli Gao; +Cc: Patrick McHardy, netfilter-devel

On Tuesday 13 April 2010 at 19:06 +0800, Changli Gao wrote:
> On Tue, Apr 13, 2010 at 6:09 PM, Patrick McHardy <kaber@trash.net> wrote:
> > Changli Gao wrote:
> >> Use a hash table to speed up entry finding.
> >>
> >> If verdicts aren't received in order, a linked list is inefficient; a
> >> hash table performs better.
> >
> > And what is the advantage of using flex arrays compared to a simple
> > open-coded hash table?
> >
> 
> From Documentation/flexible-arrays.txt
> 
> Large contiguous memory allocations can be unreliable in the Linux kernel.
> Kernel programmers will sometimes respond to this problem by allocating
> pages with vmalloc().  This solution is not ideal, though.  On 32-bit systems,
> memory from vmalloc() must be mapped into a relatively small address space;
> it's easy to run out.  On SMP systems, the page table changes required by
> vmalloc() allocations can require expensive cross-processor interrupts on
> all CPUs.  And, on all systems, use of space in the vmalloc() range
> increases pressure on the translation lookaside buffer (TLB), reducing the
> performance of the system.
> 
> Users may create lots of queues, each with a large hash table. A flex
> array scales better than vmalloc() and kmalloc().
> 

That's theory. And for sparse arrays, that advantage might hold.
In your case, you preallocate the whole array, using more RAM than vmalloc()...

In practice, an x86 CPU can access vmalloc()ed data much faster than
flex_array-managed data (take a look at all the expensive divides in
the flex_array code... :'( )

If vmalloc() happens to be slow, it can trivially be extended to use
huge pages too. It will happen one day, when/if necessary.
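
For the fully preallocated case we are discussing, the vmalloc()
version is about this simple (untested sketch; the helper name is
made up):

#include <linux/vmalloc.h>

static struct list_head *nfqnl_htbl_alloc(unsigned int size)
{
	struct list_head *head = vmalloc(size * sizeof(*head));
	unsigned int i;

	if (head == NULL)
		return NULL;
	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(&head[i]);
	return head;	/* vfree(head) on teardown */
}

The lookup is then a plain &head[id % size]: only the bucket modulus
remains, instead of the extra divides hidden inside flex_array_get().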





* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 12:44     ` Eric Dumazet
@ 2010-04-13 13:02       ` Changli Gao
  2010-04-13 13:09       ` Changli Gao
  1 sibling, 0 replies; 22+ messages in thread
From: Changli Gao @ 2010-04-13 13:02 UTC
  To: Eric Dumazet; +Cc: Patrick McHardy, netfilter-devel

On Tue, Apr 13, 2010 at 8:44 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Tuesday 13 April 2010 at 19:06 +0800, Changli Gao wrote:
>
> That's theory. And for sparse arrays, that advantage might hold.
> In your case, you preallocate the whole array, using more RAM than vmalloc()...
>
> In practice, an x86 CPU can access vmalloc()ed data much faster than
> flex_array-managed data (take a look at all the expensive divides in
> the flex_array code... :'( )
>
> If vmalloc() happens to be slow, it can trivially be extended to use
> huge pages too. It will happen one day, when/if necessary.
>

The current flex_array isn't well optimized. If it were well optimized,
would it be faster than vmalloc()?

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 12:44     ` Eric Dumazet
  2010-04-13 13:02       ` Changli Gao
@ 2010-04-13 13:09       ` Changli Gao
  2010-04-13 13:23         ` Eric Dumazet
  1 sibling, 1 reply; 22+ messages in thread
From: Changli Gao @ 2010-04-13 13:09 UTC
  To: Eric Dumazet; +Cc: Patrick McHardy, netfilter-devel

On Tue, Apr 13, 2010 at 8:44 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>
> That's theory. And for sparse arrays, that advantage might hold.
> In your case, you preallocate the whole array, using more RAM than vmalloc()...
>

I preallocate the whole array, because all of it is needed later anyway.

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 13:09       ` Changli Gao
@ 2010-04-13 13:23         ` Eric Dumazet
  2010-04-13 13:25           ` Patrick McHardy
  0 siblings, 1 reply; 22+ messages in thread
From: Eric Dumazet @ 2010-04-13 13:23 UTC
  To: Changli Gao; +Cc: Patrick McHardy, netfilter-devel

On Tuesday 13 April 2010 at 21:09 +0800, Changli Gao wrote:
> On Tue, Apr 13, 2010 at 8:44 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> >
> > That's theory. And for sparse arrays, that advantage might hold.
> > In your case, you preallocate the whole array, using more RAM than vmalloc()...
> >
> 
> I preallocate the whole array, because all of it is needed later anyway.
> 

Yes, that is why vmalloc() is perfect for this case: no extra memory for
management beyond one pointer for each page of memory.





* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 13:23         ` Eric Dumazet
@ 2010-04-13 13:25           ` Patrick McHardy
  2010-04-15  6:53             ` Changli Gao
  0 siblings, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-13 13:25 UTC
  To: Eric Dumazet; +Cc: Changli Gao, netfilter-devel

Eric Dumazet wrote:
> On Tuesday 13 April 2010 at 21:09 +0800, Changli Gao wrote:
>> On Tue, Apr 13, 2010 at 8:44 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>>> That's theory. And for sparse arrays, that advantage might hold.
>>> In your case, you preallocate the whole array, using more RAM than vmalloc()...
>>>
>> I preallocate the whole array, because all of it is needed later anyway.
>>
> 
> Yes, that is why vmalloc() is perfect for this case: no extra memory for
> management beyond one pointer for each page of memory.

I agree, if it works for conntrack, it certainly also works for
nfnetlink_queue.


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-13 13:25           ` Patrick McHardy
@ 2010-04-15  6:53             ` Changli Gao
  2010-04-15  8:23               ` Eric Dumazet
  2010-04-15 10:36               ` Patrick McHardy
  0 siblings, 2 replies; 22+ messages in thread
From: Changli Gao @ 2010-04-15  6:53 UTC
  To: Patrick McHardy; +Cc: Eric Dumazet, netfilter-devel

On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
>> Yes, that is why vmalloc() is perfect for this case: no extra memory for
>> management beyond one pointer for each page of memory.
>
> I agree, if it works for conntrack, it certainly also works for
> nfnetlink_queue.
>

I need to allocate memory in an atomic section, so vmalloc() can't be used. :(

Would a double-check mechanism work?
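
Something like this is what I mean (rough untested sketch;
nfqnl_htbl_alloc() and nfqnl_htbl_free() are made-up names for a
sleeping allocation and its teardown):

	/* no spinlock held yet, so the allocation may sleep */
	fa = nfqnl_htbl_alloc(size);	/* vmalloc() or GFP_KERNEL */
	if (fa == NULL)
		return -ENOMEM;

	spin_lock(&instances_lock);
	/* double check: is the instance still there after sleeping? */
	if (instance_lookup(queue_num) != queue) {
		spin_unlock(&instances_lock);
		nfqnl_htbl_free(fa);
		return -ENODEV;
	}
	/* ... still valid: swap the new table into the instance ... */
	spin_unlock(&instances_lock);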

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15  6:53             ` Changli Gao
@ 2010-04-15  8:23               ` Eric Dumazet
  2010-04-15  8:30                 ` Changli Gao
  2010-04-15 10:36               ` Patrick McHardy
  1 sibling, 1 reply; 22+ messages in thread
From: Eric Dumazet @ 2010-04-15  8:23 UTC
  To: Changli Gao; +Cc: Patrick McHardy, netfilter-devel

On Thursday 15 April 2010 at 14:53 +0800, Changli Gao wrote:
> On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
> >> Yes, that is why vmalloc() is perfect for this case: no extra memory for
> >> management beyond one pointer for each page of memory.
> >
> > I agree, if it works for conntrack, it certainly also works for
> > nfnetlink_queue.
> >
> 
> I need to allocate memory in an atomic section, so vmalloc() can't be used. :(
> 
> Would a double-check mechanism work?
> 

Allocate memory to set up a hash table in an atomic section?

Changli, you can forget this right now; this is not serious.

We are not going to use this thing, because your requirements are crazy.





* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15  8:23               ` Eric Dumazet
@ 2010-04-15  8:30                 ` Changli Gao
  0 siblings, 0 replies; 22+ messages in thread
From: Changli Gao @ 2010-04-15  8:30 UTC
  To: Eric Dumazet; +Cc: Patrick McHardy, netfilter-devel

On Thu, Apr 15, 2010 at 4:23 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Thursday 15 April 2010 at 14:53 +0800, Changli Gao wrote:
>> On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
>> >> Yes, that is why vmalloc() is perfect for this case: no extra memory for
>> >> management beyond one pointer for each page of memory.
>> >
>> > I agree, if it works for conntrack, it certainly also works for
>> > nfnetlink_queue.
>> >
>>
>> I need to allocate memory in an atomic section, so vmalloc() can't be used. :(
>>
>> Would a double-check mechanism work?
>>
>
> Allocate memory to set up a hash table in an atomic section?
>
> Changli, you can forget this right now; this is not serious.
>
> We are not going to use this thing, because your requirements are crazy.
>

OK, I'll keep it to myself. :)

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15  6:53             ` Changli Gao
  2010-04-15  8:23               ` Eric Dumazet
@ 2010-04-15 10:36               ` Patrick McHardy
  2010-04-15 14:35                 ` Changli Gao
  1 sibling, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-15 10:36 UTC
  To: Changli Gao; +Cc: Eric Dumazet, netfilter-devel

Changli Gao wrote:
> On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
>>> Yes, that is why vmalloc() is perfect for this case: no extra memory for
>>> management beyond one pointer for each page of memory.
>> I agree, if it works for conntrack, it certainly also works for
>> nfnetlink_queue.
>>
> 
> I need to allocate memory in an atomic section, so vmalloc() can't be used. :(

Why?


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 10:36               ` Patrick McHardy
@ 2010-04-15 14:35                 ` Changli Gao
  2010-04-15 14:40                   ` Patrick McHardy
  0 siblings, 1 reply; 22+ messages in thread
From: Changli Gao @ 2010-04-15 14:35 UTC
  To: Patrick McHardy; +Cc: Eric Dumazet, netfilter-devel

On Thu, Apr 15, 2010 at 6:36 PM, Patrick McHardy <kaber@trash.net> wrote:
> Changli Gao wrote:
>> On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
>>>> Yes, that is why vmalloc() is perfect for this case: no extra memory for
>>>> management beyond one pointer for each page of memory.
>>> I agree, if it works for conntrack, it certainly also works for
>>> nfnetlink_queue.
>>>
>>
>> I need to allocate memory in an atomic section, so vmalloc() can't be used. :(
>
> Why?
>

instance_create() is called in an RCU read-side critical section, and the
whole body of the function is protected by the spinlock
instances_lock. Both of these mean that memory allocation for queue
instances must be atomic.

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 14:35                 ` Changli Gao
@ 2010-04-15 14:40                   ` Patrick McHardy
  2010-04-15 14:46                     ` Patrick McHardy
  0 siblings, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-15 14:40 UTC
  To: Changli Gao; +Cc: Eric Dumazet, netfilter-devel

Changli Gao wrote:
> On Thu, Apr 15, 2010 at 6:36 PM, Patrick McHardy <kaber@trash.net> wrote:
>> Changli Gao wrote:
>>> On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
>>>>> Yes, that is why vmalloc() is perfect for this case: no extra memory for
>>>>> management beyond one pointer for each page of memory.
>>>> I agree, if it works for conntrack, it certainly also works for
>>>> nfnetlink_queue.
>>>>
>>> I need to allocate memory in an atomic section, so vmalloc() can't be used. :(
>> Why?
>>
> 
> instance_create() is called in an RCU read-side critical section, and the
> whole body of the function is protected by the spinlock
> instances_lock. Both of these mean that memory allocation for queue
> instances must be atomic.

That should be easily fixable. For the lookup we can add a reference
counter so we don't need the rcu read side critical section.
For creation the lock actually looks unnecessary since all nfnetlink
handlers run under nfnl_mutex, so we can't have concurrent creation
and removal of queuing instances. Well, we need it for list insertion
to avoid races with the seq file handlers, but we don't need it
before that.


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 14:40                   ` Patrick McHardy
@ 2010-04-15 14:46                     ` Patrick McHardy
  2010-04-15 15:01                       ` Changli Gao
  0 siblings, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-15 14:46 UTC
  To: Changli Gao; +Cc: Eric Dumazet, netfilter-devel

Patrick McHardy wrote:
> Changli Gao wrote:
>> On Thu, Apr 15, 2010 at 6:36 PM, Patrick McHardy <kaber@trash.net> wrote:
>>> Changli Gao wrote:
>>>> On Tue, Apr 13, 2010 at 9:25 PM, Patrick McHardy <kaber@trash.net> wrote:
>>>>>> Yes, that is why vmalloc() is perfect for this case: no extra memory for
>>>>>> management beyond one pointer for each page of memory.
>>>>> I agree, if it works for conntrack, it certainly also works for
>>>>> nfnetlink_queue.
>>>>>
>>>> I need to allocate memory in an atomic section, so vmalloc() can't be used. :(
>>> Why?
>>>
>> instance_create() is called in an RCU read-side critical section, and the
>> whole body of the function is protected by the spinlock
>> instances_lock. Both of these mean that memory allocation for queue
>> instances must be atomic.
> 
> That should be easily fixable. For the lookup we can add a reference
> counter so we don't need the rcu read side critical section.

Actually we don't even need that: since we're holding nfnl_mutex, the
instance can't go away.

> For creation the lock actually looks unnecessary since all nfnetlink
> handlers run under nfnl_mutex, so we can't have concurrent creation
> and removal of queuing instances. Well, we need it for list insertion
> to avoid races with the seq file handlers, but we don't need it
> before that.


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 14:46                     ` Patrick McHardy
@ 2010-04-15 15:01                       ` Changli Gao
  2010-04-15 15:05                         ` Patrick McHardy
  0 siblings, 1 reply; 22+ messages in thread
From: Changli Gao @ 2010-04-15 15:01 UTC
  To: Patrick McHardy; +Cc: Eric Dumazet, netfilter-devel

On Thu, Apr 15, 2010 at 10:46 PM, Patrick McHardy <kaber@trash.net> wrote:
> Patrick McHardy wrote:
>>
>> That should be easily fixable. For the lookup we can add a reference
>> counter so we don't need the rcu read side critical section.
>
> Actually we don't even need that: since we're holding nfnl_mutex, the
> instance can't go away.
>

I don't think so. A netlink queue instance may be destroyed by
nfqnl_rcv_nl_event(), and that function is called by netlink directly,
without nfnl_mutex held.

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 15:01                       ` Changli Gao
@ 2010-04-15 15:05                         ` Patrick McHardy
  2010-04-15 15:11                           ` Changli Gao
  0 siblings, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-15 15:05 UTC
  To: Changli Gao; +Cc: Eric Dumazet, netfilter-devel

Changli Gao wrote:
> On Thu, Apr 15, 2010 at 10:46 PM, Patrick McHardy <kaber@trash.net> wrote:
>> Patrick McHardy wrote:
>>> That should be easily fixable. For the lookup we can add a reference
>>> counter so we don't need the rcu read side critical section.
>> Actually we don't even need that: since we're holding nfnl_mutex, the
>> instance can't go away.
>>
> 
> I don't think so. A netlink queue instance may be destroyed by
> nfqnl_rcv_nl_event(), and that function is called by netlink directly,
> without nfnl_mutex held.

No, it's not:

static void nfnetlink_rcv(struct sk_buff *skb)
{
	nfnl_lock();
	netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
	nfnl_unlock();
}



* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 15:05                         ` Patrick McHardy
@ 2010-04-15 15:11                           ` Changli Gao
  2010-04-15 15:35                             ` Patrick McHardy
  0 siblings, 1 reply; 22+ messages in thread
From: Changli Gao @ 2010-04-15 15:11 UTC
  To: Patrick McHardy; +Cc: Eric Dumazet, netfilter-devel

On Thu, Apr 15, 2010 at 11:05 PM, Patrick McHardy <kaber@trash.net> wrote:
>
> No, it's not:
>
> static void nfnetlink_rcv(struct sk_buff *skb)
> {
>        nfnl_lock();
>        netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
>        nfnl_unlock();
> }
>
>

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

...

netlink_register_notifier(&nfqnl_rtnl_notifier);

...

int netlink_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&netlink_chain, nb);
}

...

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->pid) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
                                                .pid = nlk->pid,
                                          };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
        }

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 15:11                           ` Changli Gao
@ 2010-04-15 15:35                             ` Patrick McHardy
  2010-04-16 13:50                               ` Changli Gao
  0 siblings, 1 reply; 22+ messages in thread
From: Patrick McHardy @ 2010-04-15 15:35 UTC
  To: Changli Gao; +Cc: Eric Dumazet, netfilter-devel

Changli Gao wrote:
> static int
> nfqnl_rcv_nl_event(struct notifier_block *this,
>                    unsigned long event, void *ptr)
> {
>         struct netlink_notify *n = ptr;
> 
>         if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
>                 int i;
> 
>                 /* destroy all instances for this pid */
>                 spin_lock(&instances_lock);
>                 for (i = 0; i < INSTANCE_BUCKETS; i++) {
>                         struct hlist_node *tmp, *t2;
>                         struct nfqnl_instance *inst;
>                         struct hlist_head *head = &instance_table[i];
> 
>                         hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
>                                 if ((n->net == &init_net) &&
>                                     (n->pid == inst->peer_pid))
>                                         __instance_destroy(inst);
>                         }
>                 }
>                 spin_unlock(&instances_lock);
>         }
>         return NOTIFY_DONE;
> }
> 
> static struct notifier_block nfqnl_rtnl_notifier = {
>         .notifier_call  = nfqnl_rcv_nl_event,
> };
>

Ah, right. So call nfnl_lock() or convert the spinlock to a
mutex.


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-15 15:35                             ` Patrick McHardy
@ 2010-04-16 13:50                               ` Changli Gao
  2010-04-16 15:30                                 ` Patrick McHardy
  0 siblings, 1 reply; 22+ messages in thread
From: Changli Gao @ 2010-04-16 13:50 UTC
  To: Patrick McHardy; +Cc: Eric Dumazet, netfilter-devel

On Thu, Apr 15, 2010 at 11:35 PM, Patrick McHardy <kaber@trash.net> wrote:
> Changli Gao wrote:
>> static int
>> nfqnl_rcv_nl_event(struct notifier_block *this,
>>                    unsigned long event, void *ptr)
>> {
>>         struct netlink_notify *n = ptr;
>>
>>         if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
>>                 int i;
>>
>>                 /* destroy all instances for this pid */
>>                 spin_lock(&instances_lock);
>>                 for (i = 0; i < INSTANCE_BUCKETS; i++) {
>>                         struct hlist_node *tmp, *t2;
>>                         struct nfqnl_instance *inst;
>>                         struct hlist_head *head = &instance_table[i];
>>
>>                         hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
>>                                 if ((n->net == &init_net) &&
>>                                     (n->pid == inst->peer_pid))
>>                                         __instance_destroy(inst);
>>                         }
>>                 }
>>                 spin_unlock(&instances_lock);
>>         }
>>         return NOTIFY_DONE;
>> }
>>
>> static struct notifier_block nfqnl_rtnl_notifier = {
>>         .notifier_call  = nfqnl_rcv_nl_event,
>> };
>>
>
> Ah, right. So call nfnl_lock() or convert the spinlock to a
> mutex.
>

We can't simply convert the spinlock to a mutex. The notifier chain is
an atomic notifier chain.

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH 3/3] nfnetlink_queue: use hash table to speed up entry finding.
  2010-04-16 13:50                               ` Changli Gao
@ 2010-04-16 15:30                                 ` Patrick McHardy
  0 siblings, 0 replies; 22+ messages in thread
From: Patrick McHardy @ 2010-04-16 15:30 UTC
  To: Changli Gao; +Cc: Eric Dumazet, netfilter-devel

Changli Gao wrote:
> On Thu, Apr 15, 2010 at 11:35 PM, Patrick McHardy <kaber@trash.net> wrote:
>> Changli Gao wrote:
>>> static int
>>> nfqnl_rcv_nl_event(struct notifier_block *this,
>>>                    unsigned long event, void *ptr)
>>> {
>>>         struct netlink_notify *n = ptr;
>>>
>>>         if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
>>>                 int i;
>>>
>>>                 /* destroy all instances for this pid */
>>>                 spin_lock(&instances_lock);
>>>                 for (i = 0; i < INSTANCE_BUCKETS; i++) {
>>>                         struct hlist_node *tmp, *t2;
>>>                         struct nfqnl_instance *inst;
>>>                         struct hlist_head *head = &instance_table[i];
>>>
>>>                         hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
>>>                                 if ((n->net == &init_net) &&
>>>                                     (n->pid == inst->peer_pid))
>>>                                         __instance_destroy(inst);
>>>                         }
>>>                 }
>>>                 spin_unlock(&instances_lock);
>>>         }
>>>         return NOTIFY_DONE;
>>> }
>>>
>>> static struct notifier_block nfqnl_rtnl_notifier = {
>>>         .notifier_call  = nfqnl_rcv_nl_event,
>>> };
>>>
>> Ah, right. So call nfnl_lock() or convert the spinlock to a
>> mutex.
>>
> 
> We can't simply convert the spinlock to a mutex. The notifier chain is
> an atomic notifier chain.

Well, then use reference counting, redo the lookup, or whatever.
Really, this isn't that hard.
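
For example, with reference counting (rough sketch; the 'use' counter
and instance_put() are hypothetical here):

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (queue != NULL)
		atomic_inc(&queue->use);	/* pin the instance */
	rcu_read_unlock();
	if (queue == NULL)
		return -ENODEV;

	fa = vmalloc(...);	/* safe to sleep: the instance is pinned */
	...
	instance_put(queue);	/* atomic_dec_and_test() + free on last ref */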


