* [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Changli Gao @ 2010-04-14 9:52 UTC (permalink / raw)
To: David S. Miller; +Cc: Eric Dumazet, netdev, Changli Gao
batch skb dequeueing from softnet input_pkt_queue
batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
contention and irq disabling/enabling.
Signed-off-by: Changli Gao <xiaosuo@gmail.com>
----
include/linux/netdevice.h | 1
net/core/dev.c | 56 ++++++++++++++++++++++++++++++++--------------
2 files changed, 40 insertions(+), 17 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d1a21b5..898bc62 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1335,6 +1335,7 @@ struct softnet_data {
struct call_single_data csd ____cacheline_aligned_in_smp;
#endif
struct sk_buff_head input_pkt_queue;
+ struct sk_buff_head processing_queue;
struct napi_struct backlog;
};
diff --git a/net/core/dev.c b/net/core/dev.c
index a10a216..c635a71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -131,6 +131,7 @@
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
+#include <linux/stop_machine.h>
#include "net-sysfs.h"
@@ -2332,6 +2333,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
{
struct softnet_data *queue;
unsigned long flags;
+ u32 qlen;
queue = &per_cpu(softnet_data, cpu);
@@ -2339,8 +2341,9 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
__get_cpu_var(netdev_rx_stat).total++;
rps_lock(queue);
- if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
- if (queue->input_pkt_queue.qlen) {
+ qlen = queue->input_pkt_queue.qlen + queue->processing_queue.qlen;
+ if (qlen <= netdev_max_backlog) {
+ if (qlen) {
enqueue:
__skb_queue_tail(&queue->input_pkt_queue, skb);
rps_unlock(queue);
@@ -2791,19 +2794,31 @@ int netif_receive_skb(struct sk_buff *skb)
EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending */
-static void flush_backlog(void *arg)
+static void __flush_backlog(struct sk_buff_head *head, struct net_device *dev)
{
- struct net_device *dev = arg;
- struct softnet_data *queue = &__get_cpu_var(softnet_data);
struct sk_buff *skb, *tmp;
- rps_lock(queue);
- skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+ skb_queue_walk_safe(head, skb, tmp) {
if (skb->dev == dev) {
- __skb_unlink(skb, &queue->input_pkt_queue);
+ __skb_unlink(skb, head);
kfree_skb(skb);
}
- rps_unlock(queue);
+ }
+}
+
+static int flush_backlog(void *arg)
+{
+ struct net_device *dev = arg;
+ struct softnet_data *queue;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ queue = &per_cpu(softnet_data, cpu);
+ __flush_backlog(&queue->input_pkt_queue, dev);
+ __flush_backlog(&queue->processing_queue, dev);
+ }
+
+ return 0;
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -3118,20 +3133,23 @@ static int process_backlog(struct napi_struct *napi, int quota)
local_irq_disable();
rps_lock(queue);
- skb = __skb_dequeue(&queue->input_pkt_queue);
- if (!skb) {
+ skb_queue_splice_tail_init(&queue->input_pkt_queue,
+ &queue->processing_queue);
+ if (skb_queue_empty(&queue->processing_queue)) {
__napi_complete(napi);
rps_unlock(queue);
local_irq_enable();
- break;
+ return work;
}
rps_unlock(queue);
local_irq_enable();
- __netif_receive_skb(skb);
- } while (++work < quota && jiffies == start_time);
-
- return work;
+ while ((skb = __skb_dequeue(&queue->processing_queue))) {
+ __netif_receive_skb(skb);
+ if (++work >= quota || jiffies != start_time)
+ return work;
+ }
+ } while (1);
}
/**
@@ -5027,7 +5045,7 @@ void netdev_run_todo(void)
dev->reg_state = NETREG_UNREGISTERED;
- on_each_cpu(flush_backlog, dev, 1);
+ stop_machine(flush_backlog, dev, NULL);
netdev_wait_allrefs(dev);
@@ -5487,6 +5505,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+ while ((skb = __skb_dequeue(&oldsd->processing_queue)))
+ netif_rx(skb);
+
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
netif_rx(skb);
@@ -5709,6 +5730,7 @@ static int __init net_dev_init(void)
queue = &per_cpu(softnet_data, i);
skb_queue_head_init(&queue->input_pkt_queue);
+ skb_queue_head_init(&queue->processing_queue);
queue->completion_queue = NULL;
INIT_LIST_HEAD(&queue->poll_list);
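[Editorial sketch, not part of the patch above: the pattern it introduces is to take the queue lock once, splice the whole backlog onto a private processing queue, and then do the per-skb work with IRQs enabled and the lock dropped. This is a simplified view of process_backlog() as patched; the outer loop, quota/jiffies handling and napi completion are elided.]

/* Simplified sketch of the batched dequeue pattern (illustration only). */
static int backlog_poll_sketch(struct softnet_data *queue, int quota)
{
	struct sk_buff *skb;
	int work = 0;

	local_irq_disable();
	rps_lock(queue);
	/* One lock acquisition moves every queued skb at once. */
	skb_queue_splice_tail_init(&queue->input_pkt_queue,
				   &queue->processing_queue);
	rps_unlock(queue);
	local_irq_enable();

	/* Per-skb work no longer touches the shared queue or toggles IRQs. */
	while (work < quota &&
	       (skb = __skb_dequeue(&queue->processing_queue)) != NULL) {
		__netif_receive_skb(skb);
		work++;
	}

	/* Leftovers stay in processing_queue, visible to flush_backlog()
	 * and counted by enqueue_to_backlog() on the next round. */
	return work;
}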
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: jamal @ 2010-04-14 11:58 UTC (permalink / raw)
To: Changli Gao; +Cc: David S. Miller, Eric Dumazet, netdev
On Wed, 2010-04-14 at 17:52 +0800, Changli Gao wrote:
> batch skb dequeueing from softnet input_pkt_queue
>
> batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
> contention and irq disabling/enabling.
>
> Signed-off-by: Changli Gao <xiaosuo@gmail.com>
It seems we are now going to generate a lot more IPIs with such a
change. At least this is what I am imagining.
CPU0: packet comes in,queue empty, generate an IPI to CPU1
CPU0: second packet comes in, enqueue
CPU1: grab two packets to process and run with them
CPU0: packet comes in,queue empty, generate an IPI to CPU1
..
...
.....
IPIs add to latency (refer to my other email). Did you test this
to reach some conclusion that it improves things, or was it just by
inspection?
cheers,
jamal
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Changli Gao @ 2010-04-14 12:13 UTC (permalink / raw)
To: hadi; +Cc: David S. Miller, Eric Dumazet, netdev
On Wed, Apr 14, 2010 at 7:58 PM, jamal <hadi@cyberus.ca> wrote:
>
> It seems we are now going to generate a lot more IPIs with such a
> change. At least this is what I am imagining.
> CPU0: packet comes in,queue empty, generate an IPI to CPU1
> CPU0: second packet comes in, enqueue
> CPU1: grab two packets to process and run with them
> CPU0: packet comes in,queue empty, generate an IPI to CPU1
No extra IPI is needed.
+ qlen = queue->input_pkt_queue.qlen + queue->processing_queue.qlen;
+ if (qlen <= netdev_max_backlog) {
+ if (qlen) {
the packets in processing_queue are counted too.
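[Editorial sketch of the point above; locking and the drop path are elided, and trigger_backlog_napi() is a hypothetical stand-in for the __napi_schedule()/IPI logic in the real enqueue_to_backlog(). The NAPI kick, and therefore any cross-CPU IPI, is only attempted when the combined length is zero.]

static int enqueue_to_backlog_sketch(struct softnet_data *queue,
				     struct sk_buff *skb)
{
	u32 qlen = queue->input_pkt_queue.qlen + queue->processing_queue.qlen;

	if (qlen > netdev_max_backlog)
		return NET_RX_DROP;		/* drop path elided */

	if (qlen == 0) {
		/* Target CPU looks idle: kick its backlog NAPI. Under RPS
		 * this is where an IPI to a remote CPU may be raised. */
		trigger_backlog_napi(queue);
	}
	/* qlen != 0 covers skbs parked in processing_queue as well, so a CPU
	 * that is still draining its batch is not signalled again. */
	__skb_queue_tail(&queue->input_pkt_queue, skb);
	return NET_RX_SUCCESS;
}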
>
> IPIs add to latency (refer to my other email). Did you test this
> to reach some conclusion that it improves things, or was it just by
> inspection?
>
:( only inspection.
--
Regards,
Changli Gao(xiaosuo@gmail.com)
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: jamal @ 2010-04-14 12:28 UTC (permalink / raw)
To: Changli Gao; +Cc: David S. Miller, Eric Dumazet, netdev
On Wed, 2010-04-14 at 20:13 +0800, Changli Gao wrote:
> On Wed, Apr 14, 2010 at 7:58 PM, jamal <hadi@cyberus.ca> wrote:
> No extra IPI is needed.
>
> + qlen = queue->input_pkt_queue.qlen + queue->processing_queue.qlen;
> + if (qlen <= netdev_max_backlog) {
> + if (qlen) {
>
> the packets in processing_queue are counted too.
Ok - Looks reasonable.
> > IPIs add to latency (refer to my other email). Did you test this
> > to reach some conclusion that it improves things, or was it just by
> > inspection?
> >
>
> :( only inspection.
I am probably being pushy, but one simple test for the latency of a single
flow is:
from machine 1, send ping -f
on the rps machine:
Base test: no rps on (a fresh boot with no sysctls should do fine)
Test 1: irq affinity on cpuX, rps to cpuY
Test 2: repeat test 1 with your change.
It should show no difference between test 1 and test 2. If it shows an
improvement, even better - but showing worse latency is bad.
cheers,
jamal
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Eric Dumazet @ 2010-04-14 15:20 UTC (permalink / raw)
To: Changli Gao; +Cc: David S. Miller, netdev
On Wednesday 14 April 2010 at 17:52 +0800, Changli Gao wrote:
> batch skb dequeueing from softnet input_pkt_queue
>
> batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
> contention and irq disabling/enabling.
>
> Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Adding stop_machine() with no explanation ?
No ack on my previous comments and suggestions, and still the same logic ?
Are we supposed to read the patch, test it, make some benches, correct bugs,
and say Amen ?
This is becoming silly, if you ask me.
This is a NACK of this patch, obviously.
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Changli Gao @ 2010-04-14 23:13 UTC (permalink / raw)
To: Eric Dumazet; +Cc: David S. Miller, netdev
On Wed, Apr 14, 2010 at 11:20 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Wednesday 14 April 2010 at 17:52 +0800, Changli Gao wrote:
>> batch skb dequeueing from softnet input_pkt_queue
>>
>> batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
>> contention and irq disabling/enabling.
>>
>> Signed-off-by: Changli Gao <xiaosuo@gmail.com>
>
> Adding stop_machine() with no explanation ?
stop_machine() is added to flush the processing_queue. Because the old
flush_backlog() runs in IRQ context, it can't touch things that are only
valid in softirq context.
>
> No ack from my previous comments, suggestions, and still same logic ?
In this patch, the volatile variable flush_processing_queue is removed,
and the corresponding lines are removed too.
Oh, I should splice the old message back in, as stop_machine is used
instead. So, the processing queue will be removed from softnet_data,
and a single int counter will be used instead to count the packets
which are being processed.
One issue you raised is the potential cache miss when summing the two
queue lengths in the enqueue function. It won't happen all the time; in
fact, I think it should happen seldom, and it is the hardware's
responsibility to cache frequently used data.
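[Editorial sketch of the counter idea described above; it was never posted as a patch. softnet_data would carry only an integer count of in-flight skbs (sd->processing is an assumed field name), while the skbs themselves live on a queue local to the poller. How flush_backlog() would reach those skbs at device unregister is exactly the open issue and is not solved here.]

static int process_backlog_counter_sketch(struct softnet_data *sd)
{
	struct sk_buff_head local;	/* on the stack, private to this poll */
	struct sk_buff *skb;
	int work = 0;

	__skb_queue_head_init(&local);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_splice_tail_init(&sd->input_pkt_queue, &local);
	sd->processing = skb_queue_len(&local);	/* assumed new int field */
	rps_unlock(sd);
	local_irq_enable();

	/* enqueue_to_backlog() would sum input_pkt_queue.qlen + processing,
	 * keeping the backlog limit and the "was empty" IPI check exact. */
	while ((skb = __skb_dequeue(&local)) != NULL) {
		__netif_receive_skb(skb);
		work++;		/* quota/jiffies handling elided */
	}

	local_irq_disable();
	rps_lock(sd);
	sd->processing = 0;
	rps_unlock(sd);
	local_irq_enable();

	return work;
}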
>
> Are we supposed to read patch, test it, make some benches, correct bugs,
> say Amen ?
>
OK, I'll test it.
--
Regards,
Changli Gao(xiaosuo@gmail.com)
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Eric Dumazet @ 2010-04-21 23:05 UTC (permalink / raw)
To: Changli Gao; +Cc: David S. Miller, netdev, Tom Herbert, jamal
On Wednesday 14 April 2010 at 17:52 +0800, Changli Gao wrote:
> batch skb dequeueing from softnet input_pkt_queue
>
> batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
> contention and irq disabling/enabling.
>
> Signed-off-by: Changli Gao <xiaosuo@gmail.com>
> ----
lock contention _is_ a problem, Jamal's tests can show it.
irq disabling/enabling is not, and it forces the use of the stop_machine() killer.
I suggest something very simple, like a small buffer (16 slots), so that
process_backlog() can batch 16 buffers at once.
The following patch is not tested, but it's late here and I need to sleep ;)
This is a RFC, not for inclusion, and based on current net-next-2.6 tree
[RFC] net: introduce a batch mode in process_backlog()
We see a lock contention on input_pkt_queue.lock in RPS benches.
As suggested by Changli Gao, we can batch several skbs at once in
process_backlog(), so that we dirty input_pkt_queue less often.
I chose to batch at most 16 skbs per round, and to place them in a
softnet_data zone where flush_backlog() can find them and eventually
free these skbs at device dismantle.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
---
include/linux/netdevice.h | 2 +
net/core/dev.c | 48 +++++++++++++++++++++++++++---------
2 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3c5ed5f..16da8db 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1383,11 +1383,13 @@ static inline int unregister_gifconf(unsigned int family)
/*
* Incoming packets are placed on per-cpu queues
*/
+#define SD_BATCH_SZ 16
struct softnet_data {
struct Qdisc *output_queue;
struct list_head poll_list;
struct sk_buff *completion_queue;
+ struct sk_buff *batch[SD_BATCH_SZ]; /* process_backlog() & flush_backlog() */
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
diff --git a/net/core/dev.c b/net/core/dev.c
index e904c47..2673ce0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2932,6 +2932,7 @@ static void flush_backlog(void *arg)
struct net_device *dev = arg;
struct softnet_data *sd = &__get_cpu_var(softnet_data);
struct sk_buff *skb, *tmp;
+ int i;
rps_lock(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
@@ -2941,6 +2942,13 @@ static void flush_backlog(void *arg)
input_queue_head_incr(sd);
}
rps_unlock(sd);
+ for (i = 0; i < ARRAY_SIZE(sd->batch); i++) {
+ skb = sd->batch[i];
+ if (skb && skb->dev == dev) {
+ kfree_skb(skb);
+ sd->batch[i] = NULL;
+ }
+ }
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -3245,29 +3253,47 @@ EXPORT_SYMBOL(napi_gro_frags);
static int process_backlog(struct napi_struct *napi, int quota)
{
- int work = 0;
+ int i, n, lim, work = 0;
struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ struct sk_buff *skb;
napi->weight = weight_p;
+ local_irq_disable();
+
do {
- struct sk_buff *skb;
+ lim = quota - work;
+ if (lim > ARRAY_SIZE(sd->batch))
+ lim = ARRAY_SIZE(sd->batch);
+ /* batch at most 16 buffers */
- local_irq_disable();
rps_lock(sd);
- skb = __skb_dequeue(&sd->input_pkt_queue);
- if (!skb) {
+ for (n = 0; n < lim; n++) {
+ sd->batch[n] = __skb_dequeue(&sd->input_pkt_queue);
+ if (!sd->batch[n])
+ break;
+ }
+ if (!sd->input_pkt_queue.qlen) {
__napi_complete(napi);
- rps_unlock(sd);
- local_irq_enable();
- break;
+ quota = 0;
}
- input_queue_head_incr(sd);
rps_unlock(sd);
- local_irq_enable();
- __netif_receive_skb(skb);
- } while (++work < quota);
+ /* Now process our batch */
+ for (i = 0; i < n; i++) {
+ skb = sd->batch[i];
+ /* flush_backlog() might have stolen this skb */
+ input_queue_head_incr(sd);
+ if (likely(skb)) {
+ sd->batch[i] = NULL;
+ local_irq_enable();
+ __netif_receive_skb(skb);
+ local_irq_disable();
+ }
+ }
+ work += n;
+ } while (work < quota);
+ local_irq_enable();
return work;
}
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Tom Herbert @ 2010-04-21 23:23 UTC (permalink / raw)
To: Eric Dumazet; +Cc: Changli Gao, David S. Miller, netdev, jamal
> do {
> - struct sk_buff *skb;
> + lim = quota - work;
> + if (lim > ARRAY_SIZE(sd->batch))
> + lim = ARRAY_SIZE(sd->batch);
> + /* batch at most 16 buffers */
>
How about just using two input_pkt_queues (define
input_pkt_queue[2])? One is used to enqueue from RPS, and one is being
processed by process_backlog. Then the only thing that needs to be done
under the lock in process_backlog is to switch the queues; something
like sd->current_input_pkt_queue ^= 1
Tom
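[Editorial sketch of the two-queue suggestion; input_pkt_queue[2] and current_input_pkt_queue are assumed fields, not existing ones. The lock only covers the index flip; the poller then drains a queue that producers no longer touch. napi_complete, leftover handling and flush_backlog interaction are left out.]

static int process_backlog_swap_sketch(struct softnet_data *sd, int quota)
{
	struct sk_buff_head *mine;
	struct sk_buff *skb;
	int work = 0;

	local_irq_disable();
	rps_lock(sd);
	/* Producers enqueue to input_pkt_queue[current]; flip the index so
	 * they start filling the other queue. */
	mine = &sd->input_pkt_queue[sd->current_input_pkt_queue];
	sd->current_input_pkt_queue ^= 1;
	rps_unlock(sd);
	local_irq_enable();

	/* "mine" is now private to this poller; no lock, no IRQ toggling. */
	while (work < quota && (skb = __skb_dequeue(mine)) != NULL) {
		__netif_receive_skb(skb);
		work++;
	}

	return work;
}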
> - local_irq_disable();
> rps_lock(sd);
> - skb = __skb_dequeue(&sd->input_pkt_queue);
> - if (!skb) {
> + for (n = 0; n < lim; n++) {
> + sd->batch[n] = __skb_dequeue(&sd->input_pkt_queue);
> + if (!sd->batch[n])
> + break;
> + }
> + if (!sd->input_pkt_queue.qlen) {
> __napi_complete(napi);
> - rps_unlock(sd);
> - local_irq_enable();
> - break;
> + quota = 0;
> }
> - input_queue_head_incr(sd);
> rps_unlock(sd);
> - local_irq_enable();
>
> - __netif_receive_skb(skb);
> - } while (++work < quota);
> + /* Now process our batch */
> + for (i = 0; i < n; i++) {
> + skb = sd->batch[i];
> + /* flush_backlog() might have stolen this skb */
> + input_queue_head_incr(sd);
> + if (likely(skb)) {
> + sd->batch[i] = NULL;
> + local_irq_enable();
> + __netif_receive_skb(skb);
> + local_irq_disable();
> + }
> + }
> + work += n;
> + } while (work < quota);
>
> + local_irq_enable();
> return work;
> }
>
>
>
>
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Changli Gao @ 2010-04-22 1:35 UTC (permalink / raw)
To: Tom Herbert; +Cc: Eric Dumazet, David S. Miller, netdev, jamal
On Thu, Apr 22, 2010 at 7:23 AM, Tom Herbert <therbert@google.com> wrote:
>
> How about just using two input_pkt_queue's (define
> input_pkt_queue[2])? One that is used to enqueue from RPS, and one
> that is being processed by process_backlog. Then the only thing that
> needs to be done under lock in process_backlog is to switch the
> queues; something like sd->current_input_pkt_queue ^= 1
>
It is a better idea, IMO.
--
Regards,
Changli Gao(xiaosuo@gmail.com)
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Changli Gao @ 2010-04-22 6:33 UTC (permalink / raw)
To: Eric Dumazet; +Cc: David S. Miller, netdev, Tom Herbert, jamal
On Thu, Apr 22, 2010 at 7:05 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Wednesday 14 April 2010 at 17:52 +0800, Changli Gao wrote:
>> batch skb dequeueing from softnet input_pkt_queue
>>
>> batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
>> contention and irq disabling/enabling.
>>
>> Signed-off-by: Changli Gao <xiaosuo@gmail.com>
>> ----
>
> lock contention _is_ a problem, Jamal tests can show it.
>
> irq disabling/enabling is not, and force to use stop_machine() killer.
>
Although irq disabling/enabling is not, we should do our best to make
the fast path as fast as possible, and because stop_machine() is used
in the slow path, I think we can afford its weight.
--
Regards,
Changli Gao(xiaosuo@gmail.com)
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: Eric Dumazet @ 2010-04-22 7:13 UTC (permalink / raw)
To: Changli Gao; +Cc: David S. Miller, netdev, Tom Herbert, jamal
On Thursday 22 April 2010 at 14:33 +0800, Changli Gao wrote:
> On Thu, Apr 22, 2010 at 7:05 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> > On Wednesday 14 April 2010 at 17:52 +0800, Changli Gao wrote:
> >> batch skb dequeueing from softnet input_pkt_queue
> >>
> >> batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
> >> contention and irq disabling/enabling.
> >>
> >> Signed-off-by: Changli Gao <xiaosuo@gmail.com>
> >> ----
> >
> > lock contention _is_ a problem, Jamal tests can show it.
> >
> > irq disabling/enabling is not, and force to use stop_machine() killer.
> >
>
> Although irq disabling/enabling is not, we should do our best to make
> the fast path as fast as possible, and because stop_machine() is used
> in the slow path, I think we can afford its weight.
>
>
No thanks, this is out of the question.
Talk to the ixiacom guys; some people set up/dismantle dozens of network
devices per second, on production machines.
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: David Miller @ 2010-04-22 7:17 UTC (permalink / raw)
To: eric.dumazet; +Cc: xiaosuo, netdev, therbert, hadi
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Thu, 22 Apr 2010 09:13:22 +0200
> No thanks, this is out of the question.
>
> Talk to ixiacom guys, some people settle/dismantle dozens of network
> device per second, on production machines.
Yes, ifup/ifdown performance is very important these days.
Recently we've had to do a lot of work to improve scalability and
latency in this area; let's not undo that.
* Re: [PATCH v3] net: batch skb dequeueing from softnet input_pkt_queue
From: jamal @ 2010-04-22 12:13 UTC (permalink / raw)
To: Eric Dumazet; +Cc: Changli Gao, David S. Miller, netdev, Tom Herbert
On Thu, 2010-04-22 at 01:05 +0200, Eric Dumazet wrote:
> [RFC] net: introduce a batch mode in process_backlog()
>
> We see a lock contention on input_pkt_queue.lock in RPS benches.
>
> As suggested by Changli Gao, we can batch several skbs at once in
> process_backlog(), so that we dirty input_pkt_queue less often.
>
Ok, so I grab the latest and greatest net-next and apply this before
testing? Let me know...
cheers,
jamal