From: Patrick McHardy <kaber@trash.net>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, Patrick McHardy <kaber@trash.net>,
netfilter-devel@vger.kernel.org
Subject: netfilter 14/41: iptables: lock free counters
Date: Tue, 24 Mar 2009 15:03:25 +0100 (MET)
Message-ID: <20090324140321.31401.51310.sendpatchset@x2.localnet>
In-Reply-To: <20090324140302.31401.37732.sendpatchset@x2.localnet>
commit 784544739a25c30637397ace5489eeb6e15d7d49
Author: Stephen Hemminger <shemminger@vyatta.com>
Date: Fri Feb 20 10:35:32 2009 +0100
netfilter: iptables: lock free counters
The reader/writer lock in ip_tables is acquired in the critical path of
processing packets and is one of the reasons that just loading iptables can
cause a 20% performance loss. The rwlock serves two functions:
1) it prevents changes to table state (xt_replace) while the table is in use.
This is now handled by using RCU on the xt_table. When a table is
replaced, the new table(s) are put in place and the old table(s) are freed
after an RCU grace period.
2) it provides synchronization when accessing the counter values.
This is now handled by swapping in new table_info entries for each CPU,
then summing the old values, and putting the result back onto one
CPU. On a busy system this may cause sampling to occur at different
times on each CPU, but no packet/byte counts are lost in the process.
(Both the new read side and the snapshot sequence are sketched below.)
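
For reference, a condensed sketch of the new packet-path read side
(lifted from the ipt_do_table() hunk below, not a standalone compilable
unit; the arptables and ip6tables paths are structurally identical):

    rcu_read_lock();
    private    = rcu_dereference(table->private);
    table_base = rcu_dereference(private->entries[smp_processor_id()]);

    e = get_entry(table_base, private->hook_entry[hook]);
    /* ... rule traversal and per-CPU counter updates, unchanged ... */

    rcu_read_unlock();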
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Successfully tested on my dual quad-core machine too, but iptables only (no IPv6 here).
BTW, my new "tbench 8" result is 2450 MB/s (it was 2150 MB/s not so long ago).
Acked-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
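
For the counter snapshot, the sequence in alloc_counters() becomes, roughly
(condensed from the hunks below, error handling omitted; the same flow is
used in the arptables, iptables and ip6tables variants):

    info = xt_alloc_table_info(private->size);
    clone_counters(info, private);          /* copy rules, zero the counters */

    mutex_lock(&table->lock);
    xt_table_entry_swap_rcu(private, info); /* swap per-CPU entry pointers */
    synchronize_net();                      /* wait for RCU readers to finish */

    get_counters(info, counters);           /* sum the retired copy */
    put_counters(private, counters);        /* add totals back onto one CPU */
    mutex_unlock(&table->lock);

    xt_free_table_info(info);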
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 9fac88f..e8e08d0 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -353,7 +353,7 @@ struct xt_table
unsigned int valid_hooks;
/* Lock for the curtain */
- rwlock_t lock;
+ struct mutex lock;
/* Man behind the curtain... */
struct xt_table_info *private;
@@ -385,7 +385,7 @@ struct xt_table_info
/* ipt_entry tables: one per CPU */
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
- char *entries[1];
+ void *entries[1];
};
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
@@ -432,6 +432,8 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
+extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
+ struct xt_table_info *new);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b5db463..64a7c6c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -261,9 +261,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
indev = in ? in->name : nulldevname;
outdev = out ? out->name : nulldevname;
- read_lock_bh(&table->lock);
- private = table->private;
- table_base = (void *)private->entries[smp_processor_id()];
+ rcu_read_lock();
+ private = rcu_dereference(table->private);
+ table_base = rcu_dereference(private->entries[smp_processor_id()]);
+
e = get_entry(table_base, private->hook_entry[hook]);
back = get_entry(table_base, private->underflow[hook]);
@@ -335,7 +336,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
e = (void *)e + e->next_offset;
}
} while (!hotdrop);
- read_unlock_bh(&table->lock);
+
+ rcu_read_unlock();
if (hotdrop)
return NF_DROP;
@@ -738,11 +740,65 @@ static void get_counters(const struct xt_table_info *t,
}
}
-static inline struct xt_counters *alloc_counters(struct xt_table *table)
+
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct arpt_entry *e,
+ const struct xt_counters addme[],
+ unsigned int *i)
+{
+ ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+/* Take values from counters and add them back onto the current cpu */
+static void put_counters(struct xt_table_info *t,
+ const struct xt_counters counters[])
+{
+ unsigned int i, cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ i = 0;
+ ARPT_ENTRY_ITERATE(t->entries[cpu],
+ t->size,
+ add_counter_to_entry,
+ counters,
+ &i);
+ local_bh_enable();
+}
+
+static inline int
+zero_entry_counter(struct arpt_entry *e, void *arg)
+{
+ e->counters.bcnt = 0;
+ e->counters.pcnt = 0;
+ return 0;
+}
+
+static void
+clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
+{
+ unsigned int cpu;
+ const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
+
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ for_each_possible_cpu(cpu) {
+ memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
+ ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
+ zero_entry_counter, NULL);
+ }
+}
+
+static struct xt_counters *alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ struct xt_table_info *private = table->private;
+ struct xt_table_info *info;
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
@@ -752,14 +808,30 @@ static inline struct xt_counters *alloc_counters(struct xt_table *table)
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
- return ERR_PTR(-ENOMEM);
+ goto nomem;
+
+ info = xt_alloc_table_info(private->size);
+ if (!info)
+ goto free_counters;
- /* First, sum counters... */
- write_lock_bh(&table->lock);
- get_counters(private, counters);
- write_unlock_bh(&table->lock);
+ clone_counters(info, private);
+
+ mutex_lock(&table->lock);
+ xt_table_entry_swap_rcu(private, info);
+ synchronize_net(); /* Wait until smoke has cleared */
+
+ get_counters(info, counters);
+ put_counters(private, counters);
+ mutex_unlock(&table->lock);
+
+ xt_free_table_info(info);
return counters;
+
+ free_counters:
+ vfree(counters);
+ nomem:
+ return ERR_PTR(-ENOMEM);
}
static int copy_entries_to_user(unsigned int total_size,
@@ -1099,20 +1171,6 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK.
- */
-static inline int add_counter_to_entry(struct arpt_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
-
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
-
static int do_add_counters(struct net *net, void __user *user, unsigned int len,
int compat)
{
@@ -1172,13 +1230,14 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
goto free;
}
- write_lock_bh(&t->lock);
+ mutex_lock(&t->lock);
private = t->private;
if (private->number != num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
+ preempt_disable();
i = 0;
/* Choose the copy that is on our node */
loc_cpu_entry = private->entries[smp_processor_id()];
@@ -1187,8 +1246,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
add_counter_to_entry,
paddc,
&i);
+ preempt_enable();
unlock_up_free:
- write_unlock_bh(&t->lock);
+ mutex_unlock(&t->lock);
+
xt_table_unlock(t);
module_put(t->me);
free:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ef8b6ca..08cde5b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -347,10 +347,12 @@ ipt_do_table(struct sk_buff *skb,
mtpar.family = tgpar.family = NFPROTO_IPV4;
tgpar.hooknum = hook;
- read_lock_bh(&table->lock);
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- private = table->private;
- table_base = (void *)private->entries[smp_processor_id()];
+
+ rcu_read_lock();
+ private = rcu_dereference(table->private);
+ table_base = rcu_dereference(private->entries[smp_processor_id()]);
+
e = get_entry(table_base, private->hook_entry[hook]);
/* For return from builtin chain */
@@ -445,7 +447,7 @@ ipt_do_table(struct sk_buff *skb,
}
} while (!hotdrop);
- read_unlock_bh(&table->lock);
+ rcu_read_unlock();
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
@@ -924,13 +926,68 @@ get_counters(const struct xt_table_info *t,
counters,
&i);
}
+
+}
+
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct ipt_entry *e,
+ const struct xt_counters addme[],
+ unsigned int *i)
+{
+ ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+/* Take values from counters and add them back onto the current cpu */
+static void put_counters(struct xt_table_info *t,
+ const struct xt_counters counters[])
+{
+ unsigned int i, cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ i = 0;
+ IPT_ENTRY_ITERATE(t->entries[cpu],
+ t->size,
+ add_counter_to_entry,
+ counters,
+ &i);
+ local_bh_enable();
+}
+
+
+static inline int
+zero_entry_counter(struct ipt_entry *e, void *arg)
+{
+ e->counters.bcnt = 0;
+ e->counters.pcnt = 0;
+ return 0;
+}
+
+static void
+clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
+{
+ unsigned int cpu;
+ const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
+
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ for_each_possible_cpu(cpu) {
+ memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
+ IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
+ zero_entry_counter, NULL);
+ }
}
static struct xt_counters * alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ struct xt_table_info *private = table->private;
+ struct xt_table_info *info;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -939,14 +996,30 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
- return ERR_PTR(-ENOMEM);
+ goto nomem;
- /* First, sum counters... */
- write_lock_bh(&table->lock);
- get_counters(private, counters);
- write_unlock_bh(&table->lock);
+ info = xt_alloc_table_info(private->size);
+ if (!info)
+ goto free_counters;
+
+ clone_counters(info, private);
+
+ mutex_lock(&table->lock);
+ xt_table_entry_swap_rcu(private, info);
+ synchronize_net(); /* Wait until smoke has cleared */
+
+ get_counters(info, counters);
+ put_counters(private, counters);
+ mutex_unlock(&table->lock);
+
+ xt_free_table_info(info);
return counters;
+
+ free_counters:
+ vfree(counters);
+ nomem:
+ return ERR_PTR(-ENOMEM);
}
static int
@@ -1312,27 +1385,6 @@ do_replace(struct net *net, void __user *user, unsigned int len)
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct ipt_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
-#if 0
- duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
- *i,
- (long unsigned int)e->counters.pcnt,
- (long unsigned int)e->counters.bcnt,
- (long unsigned int)addme[*i].pcnt,
- (long unsigned int)addme[*i].bcnt);
-#endif
-
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
@@ -1393,13 +1445,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
goto free;
}
- write_lock_bh(&t->lock);
+ mutex_lock(&t->lock);
private = t->private;
if (private->number != num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
+ preempt_disable();
i = 0;
/* Choose the copy that is on our node */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
@@ -1408,8 +1461,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
add_counter_to_entry,
paddc,
&i);
+ preempt_enable();
unlock_up_free:
- write_unlock_bh(&t->lock);
+ mutex_unlock(&t->lock);
xt_table_unlock(t);
module_put(t->me);
free:
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index d64594b..34af7bb 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -382,10 +382,12 @@ ip6t_do_table(struct sk_buff *skb,
mtpar.family = tgpar.family = NFPROTO_IPV6;
tgpar.hooknum = hook;
- read_lock_bh(&table->lock);
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- private = table->private;
- table_base = (void *)private->entries[smp_processor_id()];
+
+ rcu_read_lock();
+ private = rcu_dereference(table->private);
+ table_base = rcu_dereference(private->entries[smp_processor_id()]);
+
e = get_entry(table_base, private->hook_entry[hook]);
/* For return from builtin chain */
@@ -483,7 +485,7 @@ ip6t_do_table(struct sk_buff *skb,
#ifdef CONFIG_NETFILTER_DEBUG
((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
#endif
- read_unlock_bh(&table->lock);
+ rcu_read_unlock();
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
@@ -964,11 +966,64 @@ get_counters(const struct xt_table_info *t,
}
}
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct ip6t_entry *e,
+ const struct xt_counters addme[],
+ unsigned int *i)
+{
+ ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+/* Take values from counters and add them back onto the current cpu */
+static void put_counters(struct xt_table_info *t,
+ const struct xt_counters counters[])
+{
+ unsigned int i, cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ i = 0;
+ IP6T_ENTRY_ITERATE(t->entries[cpu],
+ t->size,
+ add_counter_to_entry,
+ counters,
+ &i);
+ local_bh_enable();
+}
+
+static inline int
+zero_entry_counter(struct ip6t_entry *e, void *arg)
+{
+ e->counters.bcnt = 0;
+ e->counters.pcnt = 0;
+ return 0;
+}
+
+static void
+clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
+{
+ unsigned int cpu;
+ const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
+
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ for_each_possible_cpu(cpu) {
+ memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
+ IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
+ zero_entry_counter, NULL);
+ }
+}
+
static struct xt_counters *alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = table->private;
+ struct xt_table_info *private = table->private;
+ struct xt_table_info *info;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -977,14 +1032,28 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
- return ERR_PTR(-ENOMEM);
+ goto nomem;
+
+ info = xt_alloc_table_info(private->size);
+ if (!info)
+ goto free_counters;
+
+ clone_counters(info, private);
+
+ mutex_lock(&table->lock);
+ xt_table_entry_swap_rcu(private, info);
+ synchronize_net(); /* Wait until smoke has cleared */
+
+ get_counters(info, counters);
+ put_counters(private, counters);
+ mutex_unlock(&table->lock);
- /* First, sum counters... */
- write_lock_bh(&table->lock);
- get_counters(private, counters);
- write_unlock_bh(&table->lock);
+ xt_free_table_info(info);
- return counters;
+ free_counters:
+ vfree(counters);
+ nomem:
+ return ERR_PTR(-ENOMEM);
}
static int
@@ -1351,28 +1420,6 @@ do_replace(struct net *net, void __user *user, unsigned int len)
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static inline int
-add_counter_to_entry(struct ip6t_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
-#if 0
- duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
- *i,
- (long unsigned int)e->counters.pcnt,
- (long unsigned int)e->counters.bcnt,
- (long unsigned int)addme[*i].pcnt,
- (long unsigned int)addme[*i].bcnt);
-#endif
-
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
-
static int
do_add_counters(struct net *net, void __user *user, unsigned int len,
int compat)
@@ -1433,13 +1480,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
goto free;
}
- write_lock_bh(&t->lock);
+ mutex_lock(&t->lock);
private = t->private;
if (private->number != num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
+ preempt_disable();
i = 0;
/* Choose the copy that is on our node */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
@@ -1448,8 +1496,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
add_counter_to_entry,
paddc,
&i);
+ preempt_enable();
unlock_up_free:
- write_unlock_bh(&t->lock);
+ mutex_unlock(&t->lock);
xt_table_unlock(t);
module_put(t->me);
free:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bfbf521..bfcac92 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -625,6 +625,20 @@ void xt_free_table_info(struct xt_table_info *info)
}
EXPORT_SYMBOL(xt_free_table_info);
+void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo,
+ struct xt_table_info *newinfo)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *p = oldinfo->entries[cpu];
+ rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]);
+ newinfo->entries[cpu] = p;
+ }
+
+}
+EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu);
+
/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name)
@@ -671,21 +685,22 @@ xt_replace_table(struct xt_table *table,
struct xt_table_info *oldinfo, *private;
/* Do the substitution. */
- write_lock_bh(&table->lock);
+ mutex_lock(&table->lock);
private = table->private;
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
duprintf("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
- write_unlock_bh(&table->lock);
+ mutex_unlock(&table->lock);
*error = -EAGAIN;
return NULL;
}
oldinfo = private;
- table->private = newinfo;
+ rcu_assign_pointer(table->private, newinfo);
newinfo->initial_entries = oldinfo->initial_entries;
- write_unlock_bh(&table->lock);
+ mutex_unlock(&table->lock);
+ synchronize_net();
return oldinfo;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -719,7 +734,8 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
/* Simplifies replace_table code. */
table->private = bootstrap;
- rwlock_init(&table->lock);
+ mutex_init(&table->lock);
+
if (!xt_replace_table(table, 0, newinfo, &ret))
goto unlock;