* [PATCH 0/3] netpoll/netconsole fixes
@ 2006-10-19 17:15 Stephen Hemminger
2006-10-19 17:15 ` [PATCH 1/3] netpoll: initialize skb for UDP Stephen Hemminger
` (3 more replies)
0 siblings, 4 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-19 17:15 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
The netpoll transmit skb management is a mess: it has two
paths and its own Tx queue. These patches try to clean this up.
--
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 1/3] netpoll: initialize skb for UDP
2006-10-19 17:15 [PATCH 0/3] netpoll/netconsole fixes Stephen Hemminger
@ 2006-10-19 17:15 ` Stephen Hemminger
2006-10-20 6:58 ` David Miller
2006-10-19 17:15 ` [PATCH 2/3] netpoll: rework skb transmit queue Stephen Hemminger
` (2 subsequent siblings)
3 siblings, 1 reply; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-19 17:15 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
[-- Attachment #1: netpoll-protofix.patch --]
[-- Type: text/plain, Size: 1150 bytes --]
Need to fully initialize skb to keep lower layers and queueing happy.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- linux-2.6.orig/net/core/netpoll.c 2006-10-18 15:26:36.000000000 -0700
+++ linux-2.6/net/core/netpoll.c 2006-10-19 08:28:04.000000000 -0700
@@ -331,13 +331,13 @@
memcpy(skb->data, msg, len);
skb->len += len;
- udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
+ skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
udph->source = htons(np->local_port);
udph->dest = htons(np->remote_port);
udph->len = htons(udp_len);
udph->check = 0;
- iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
+ skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
/* iph->version = 4; iph->ihl = 5; */
put_unaligned(0x45, (unsigned char *)iph);
@@ -353,8 +353,8 @@
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-
- eth->h_proto = htons(ETH_P_IP);
+ skb->mac.raw = skb->data;
+ skb->protocol = eth->h_proto = htons(ETH_P_IP);
memcpy(eth->h_source, np->local_mac, 6);
memcpy(eth->h_dest, np->remote_mac, 6);
--
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-19 17:15 [PATCH 0/3] netpoll/netconsole fixes Stephen Hemminger
2006-10-19 17:15 ` [PATCH 1/3] netpoll: initialize skb for UDP Stephen Hemminger
@ 2006-10-19 17:15 ` Stephen Hemminger
2006-10-20 7:15 ` David Miller
2006-10-19 17:15 ` [PATCH 3/3] netpoll: use sk_buff_head for skb cache Stephen Hemminger
2006-10-20 6:57 ` [PATCH 0/3] netpoll/netconsole fixes Andrew Morton
3 siblings, 1 reply; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-19 17:15 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
[-- Attachment #1: netpoll-txq.patch --]
[-- Type: text/plain, Size: 5837 bytes --]
The original skb management for netpoll was a mess: it had two queue paths
and a callback. This changes it to have a per-instance transmit queue
and use a tasklet rather than a work queue for the congested case.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
---
drivers/net/netconsole.c | 1
include/linux/netpoll.h | 3 -
net/core/netpoll.c | 123 +++++++++++++++--------------------------------
3 files changed, 42 insertions(+), 85 deletions(-)
--- linux-2.6.orig/drivers/net/netconsole.c 2006-10-16 14:15:01.000000000 -0700
+++ linux-2.6/drivers/net/netconsole.c 2006-10-19 08:28:25.000000000 -0700
@@ -60,7 +60,6 @@
.local_port = 6665,
.remote_port = 6666,
.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .drop = netpoll_queue,
};
static int configured = 0;
--- linux-2.6.orig/include/linux/netpoll.h 2006-10-16 14:15:04.000000000 -0700
+++ linux-2.6/include/linux/netpoll.h 2006-10-19 08:28:25.000000000 -0700
@@ -27,11 +27,12 @@
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
- int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
+ struct sk_buff_head tx_q;
+ struct tasklet_struct tx_task;
};
void netpoll_poll(struct netpoll *np);
--- linux-2.6.orig/net/core/netpoll.c 2006-10-19 08:28:04.000000000 -0700
+++ linux-2.6/net/core/netpoll.c 2006-10-19 09:47:38.000000000 -0700
@@ -40,10 +40,6 @@
static int nr_skbs;
static struct sk_buff *skbs;
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
static atomic_t trapped;
#define NETPOLL_RX_ENABLED 1
@@ -56,49 +52,33 @@
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void netpoll_run(unsigned long arg)
{
- unsigned long flags;
+ struct net_device *dev = (struct net_device *) arg;
+ struct netpoll_info *npinfo = dev->npinfo;
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
-
- queue_depth--;
-
- spin_unlock_irqrestore(&queue_lock, flags);
+ while ((skb = skb_dequeue(&npinfo->tx_q))) {
+ int rc;
- dev_queue_xmit(skb);
- }
-}
-
-static DECLARE_WORK(send_queue, queue_process, NULL);
+ if (!netif_running(dev) || !netif_device_present(dev)) {
+ __kfree_skb(skb);
+ continue;
+ }
-void netpoll_queue(struct sk_buff *skb)
-{
- unsigned long flags;
+ netif_tx_lock(dev);
+ if (netif_queue_stopped(dev))
+ rc = NETDEV_TX_BUSY;
+ else
+ rc = dev->hard_start_xmit(skb, dev);
+ netif_tx_unlock(dev);
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
+ if (rc != NETDEV_TX_OK) {
+ skb_queue_head(&npinfo->tx_q, skb);
+ tasklet_schedule(&npinfo->tx_task);
+ break;
+ }
}
- WARN_ON(skb->protocol == 0);
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
-
- schedule_work(&send_queue);
}
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -232,6 +212,7 @@
static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
{
+ int once = 1, count = 0;
unsigned long flags;
struct sk_buff *skb = NULL;
@@ -254,6 +235,11 @@
}
if(!skb) {
+ count++;
+ if (once && (count == 1000000)) {
+ printk("out of netpoll skbs!\n");
+ once = 0;
+ }
netpoll_poll(np);
goto repeat;
}
@@ -265,51 +251,17 @@
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status;
- struct netpoll_info *npinfo;
+ struct net_device *dev = np->dev;
+ struct netpoll_info *npinfo = dev->npinfo;
- if (!np || !np->dev || !netif_device_present(np->dev) || !netif_running(np->dev)) {
- __kfree_skb(skb);
- return;
- }
-
- npinfo = np->dev->npinfo;
+ skb_queue_tail(&npinfo->tx_q, skb);
/* avoid recursion */
if (npinfo->poll_owner == smp_processor_id() ||
- np->dev->xmit_lock_owner == smp_processor_id()) {
- if (np->drop)
- np->drop(skb);
- else
- __kfree_skb(skb);
- return;
- }
-
- do {
- npinfo->tries--;
- netif_tx_lock(np->dev);
-
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- status = NETDEV_TX_BUSY;
- if (!netif_queue_stopped(np->dev))
- status = np->dev->hard_start_xmit(skb, np->dev);
-
- netif_tx_unlock(np->dev);
-
- /* success */
- if(!status) {
- npinfo->tries = MAX_RETRIES; /* reset */
- return;
- }
-
- /* transmit busy */
- netpoll_poll(np);
- udelay(50);
- } while (npinfo->tries > 0);
- __kfree_skb(skb);
+ dev->xmit_lock_owner == smp_processor_id())
+ tasklet_schedule(&npinfo->tx_task);
+ else
+ netpoll_run((unsigned long) dev);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -662,9 +614,10 @@
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->poll_lock);
npinfo->poll_owner = -1;
- npinfo->tries = MAX_RETRIES;
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
+ skb_queue_head_init(&npinfo->tx_q);
+ tasklet_init(&npinfo->tx_task, netpoll_run, (unsigned long) ndev);
} else
npinfo = ndev->npinfo;
@@ -772,6 +725,11 @@
npinfo->rx_np = NULL;
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->tx_q);
+
+ tasklet_kill(&npinfo->tx_task);
}
dev_put(np->dev);
}
@@ -799,4 +757,3 @@
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
-EXPORT_SYMBOL(netpoll_queue);
--
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 3/3] netpoll: use sk_buff_head for skb cache
2006-10-19 17:15 [PATCH 0/3] netpoll/netconsole fixes Stephen Hemminger
2006-10-19 17:15 ` [PATCH 1/3] netpoll: initialize skb for UDP Stephen Hemminger
2006-10-19 17:15 ` [PATCH 2/3] netpoll: rework skb transmit queue Stephen Hemminger
@ 2006-10-19 17:15 ` Stephen Hemminger
2006-10-20 6:57 ` [PATCH 0/3] netpoll/netconsole fixes Andrew Morton
3 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-19 17:15 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
[-- Attachment #1: netpoll-freelist.patch --]
[-- Type: text/plain, Size: 2842 bytes --]
The private skb cache should be managed with a normal sk_buff_head rather
than a DIY queue. If the pool is exhausted, don't print anything; that just
makes the problem worse. After a number of attempts, punt and drop
the message (callers already handle it).
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
---
net/core/netpoll.c | 55 +++++++++++++++++++++--------------------------------
1 file changed, 22 insertions(+), 33 deletions(-)
--- linux-2.6.orig/net/core/netpoll.c 2006-10-19 09:49:03.000000000 -0700
+++ linux-2.6/net/core/netpoll.c 2006-10-19 10:06:39.000000000 -0700
@@ -36,9 +36,7 @@
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
+static struct sk_buff_head skb_list;
static atomic_t trapped;
@@ -51,6 +49,7 @@
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
+static void refill_skbs(void);
static void netpoll_run(unsigned long arg)
{
@@ -79,6 +78,7 @@
break;
}
}
+ refill_skbs();
}
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -169,19 +169,14 @@
static void refill_skbs(void)
{
struct sk_buff *skb;
- unsigned long flags;
- spin_lock_irqsave(&skb_list_lock, flags);
- while (nr_skbs < MAX_SKBS) {
+ while (skb_queue_len(&skb_list) < MAX_SKBS) {
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
if (!skb)
break;
- skb->next = skbs;
- skbs = skb;
- nr_skbs++;
+ skb_queue_tail(&skb_list, skb);
}
- spin_unlock_irqrestore(&skb_list_lock, flags);
}
static void zap_completion_queue(void)
@@ -210,37 +205,24 @@
put_cpu_var(softnet_data);
}
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
- int once = 1, count = 0;
- unsigned long flags;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
+ int tries = 0;
zap_completion_queue();
-repeat:
- if (nr_skbs < MAX_SKBS)
- refill_skbs();
+repeat:
skb = alloc_skb(len, GFP_ATOMIC);
-
- if (!skb) {
- spin_lock_irqsave(&skb_list_lock, flags);
- skb = skbs;
- if (skb) {
- skbs = skb->next;
- skb->next = NULL;
- nr_skbs--;
- }
- spin_unlock_irqrestore(&skb_list_lock, flags);
- }
+ if (!skb)
+ skb = skb_dequeue(&skb_list);
if(!skb) {
- count++;
- if (once && (count == 1000000)) {
- printk("out of netpoll skbs!\n");
- once = 0;
- }
+ if (++tries > MAX_RETRIES)
+ return NULL;
+
netpoll_poll(np);
+ tasklet_schedule(&np->dev->npinfo->tx_task);
goto repeat;
}
@@ -589,6 +571,13 @@
return -1;
}
+static __init int netpoll_init(void)
+{
+ skb_queue_head_init(&skb_list);
+ return 0;
+}
+core_initcall(netpoll_init);
+
int netpoll_setup(struct netpoll *np)
{
struct net_device *ndev = NULL;
--
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 0/3] netpoll/netconsole fixes
2006-10-19 17:15 [PATCH 0/3] netpoll/netconsole fixes Stephen Hemminger
` (2 preceding siblings ...)
2006-10-19 17:15 ` [PATCH 3/3] netpoll: use sk_buff_head for skb cache Stephen Hemminger
@ 2006-10-20 6:57 ` Andrew Morton
2006-10-20 7:16 ` David Miller
3 siblings, 1 reply; 37+ messages in thread
From: Andrew Morton @ 2006-10-20 6:57 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: David Miller, netdev, linux-kernel
On Thu, 19 Oct 2006 10:15:41 -0700
Stephen Hemminger <shemminger@osdl.org> wrote:
> The netpoll transmit skb management is a mess: it has two
> paths and its own Tx queue. These patches try to clean this up.
>
I got a reject storm when applying the third patch, then screwed it up
rather than fixing it up.
A rediff and resend is needed, please. Make sure it's against the latest
-linus or -davem, thanks.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 1/3] netpoll: initialize skb for UDP
2006-10-19 17:15 ` [PATCH 1/3] netpoll: initialize skb for UDP Stephen Hemminger
@ 2006-10-20 6:58 ` David Miller
0 siblings, 0 replies; 37+ messages in thread
From: David Miller @ 2006-10-20 6:58 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Thu, 19 Oct 2006 10:15:42 -0700
> Need to fully initialize skb to keep lower layers and queueing happy.
>
> Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Applied, thanks Stephen.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-19 17:15 ` [PATCH 2/3] netpoll: rework skb transmit queue Stephen Hemminger
@ 2006-10-20 7:15 ` David Miller
2006-10-20 15:18 ` Stephen Hemminger
2006-10-20 15:40 ` Stephen Hemminger
0 siblings, 2 replies; 37+ messages in thread
From: David Miller @ 2006-10-20 7:15 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Thu, 19 Oct 2006 10:15:43 -0700
> The original skb management for netpoll was a mess: it had two queue paths
> and a callback. This changes it to have a per-instance transmit queue
> and use a tasklet rather than a work queue for the congested case.
>
> Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
I think you mis-diffed this one:
- WARN_ON(skb->protocol == 0);
That line doesn't exist in my copy of net/core/netpoll.c
even with your first patch applied.
Also, you forgot to remove the ->drop callback pointer
from struct netpoll, which you should do if it really
isn't used any more.
I think you might run into problems there, as I believe the netdump
stuff does make non-trivial use of the ->drop callback. Indeed, it
uses the ->drop callback for invoking a special
netpoll_start_netdump() function. I'm pretty sure ->drop was created
specifically to accommodate netdump.
So this is something else which will need to be worked out before we
can apply this patch.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 0/3] netpoll/netconsole fixes
2006-10-20 6:57 ` [PATCH 0/3] netpoll/netconsole fixes Andrew Morton
@ 2006-10-20 7:16 ` David Miller
0 siblings, 0 replies; 37+ messages in thread
From: David Miller @ 2006-10-20 7:16 UTC (permalink / raw)
To: akpm; +Cc: shemminger, netdev, linux-kernel
From: Andrew Morton <akpm@osdl.org>
Date: Thu, 19 Oct 2006 23:57:19 -0700
> I got a reject storm when applying the third patch, then screwed it up
> rather than fixing it up.
>
> A rediff and resend is needed, please. Make sure it's against the latest
> -linus or -davem, thanks.
And I got rejects on #2 :-)
His first patch is in my net-2.6 tree as of a few minutes
ago; that one was fine.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 7:15 ` David Miller
@ 2006-10-20 15:18 ` Stephen Hemminger
2006-10-20 19:24 ` David Miller
2006-10-20 15:40 ` Stephen Hemminger
1 sibling, 1 reply; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 15:18 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Fri, 20 Oct 2006 00:15:30 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Thu, 19 Oct 2006 10:15:43 -0700
>
> > The original skb management for netpoll was a mess: it had two queue paths
> > and a callback. This changes it to have a per-instance transmit queue
> > and use a tasklet rather than a work queue for the congested case.
> >
> > Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
>
> I think you mis-diffed this one:
>
> - WARN_ON(skb->protocol == 0);
>
> That line doesn't exist in my copy of net/core/netpoll.c
> even with your first patch applied.
>
> Also, you forgot to remove the ->drop callback pointer
> from struct netpoll, which you should do if it really
> isn't used any more.
>
> I think you might run into problems there, as I believe the netdump
> stuff does make non-trivial use of the ->drop callback. Indeed, it
> uses the ->drop callback for invoking a special
> netpoll_start_netdump() function. I'm pretty sure ->drop was created
> specifically to accommodate netdump.
>
Netdump is not in the tree, so I can't fix it. Also netdump is pretty
much superseded by kdump.
> So this is something else which will need to be worked out before we
> can apply this patch.
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 7:15 ` David Miller
2006-10-20 15:18 ` Stephen Hemminger
@ 2006-10-20 15:40 ` Stephen Hemminger
2006-10-20 19:27 ` David Miller
2006-10-20 20:42 ` David Miller
1 sibling, 2 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 15:40 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
Change netpoll to use an skb queue for transmit. This solves problems
where there are two Tx paths that can cause things to get out of order,
and different semantics when netconsole output goes through the fast and
slow paths.
The only user of the drop hook was netconsole, and I fixed that path.
This probably breaks netdump, but that is out of tree, so it needs
to fix itself.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
---
drivers/net/netconsole.c | 1
include/linux/netpoll.h | 3 +
net/core/netpoll.c | 109 ++++++++++++----------------------------------
3 files changed, 31 insertions(+), 82 deletions(-)
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index bf58db2..fad5bf2 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -60,7 +60,6 @@ static struct netpoll np = {
.local_port = 6665,
.remote_port = 6666,
.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .drop = netpoll_queue,
};
static int configured = 0;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 1efe60c..835ca0d 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -27,11 +27,12 @@ struct netpoll {
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
- int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
+ struct sk_buff_head tx_q;
+ struct tasklet_struct tx_task;
};
void netpoll_poll(struct netpoll *np);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9308af0..22332fd 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -40,10 +40,6 @@ static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
static atomic_t trapped;
#define NETPOLL_RX_ENABLED 1
@@ -56,50 +52,31 @@ #define MAX_SKB_SIZE \
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void netpoll_run(unsigned long arg)
{
- unsigned long flags;
+ struct net_device *dev = (struct net_device *) arg;
+ struct netpoll_info *npinfo = dev->npinfo;
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
-
- queue_depth--;
+ while ((skb = skb_dequeue(&npinfo->tx_q))) {
+ if (!netif_running(dev) || !netif_device_present(dev)) {
+ __kfree_skb(skb);
+ continue;
+ }
- spin_unlock_irqrestore(&queue_lock, flags);
+ netif_tx_lock(dev);
+ if (netif_queue_stopped(dev) ||
+ dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+ skb_queue_head(&npinfo->tx_q, skb);
+ netif_tx_unlock(dev);
+ tasklet_schedule(&npinfo->tx_task);
+ return;
+ }
- dev_queue_xmit(skb);
+ netif_tx_unlock(dev);
}
}
-static DECLARE_WORK(send_queue, queue_process, NULL);
-
-void netpoll_queue(struct sk_buff *skb)
-{
- unsigned long flags;
-
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
-
- schedule_work(&send_queue);
-}
-
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
unsigned short ulen, u32 saddr, u32 daddr)
{
@@ -270,50 +247,17 @@ repeat:
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status;
- struct netpoll_info *npinfo;
-
- if (!np || !np->dev || !netif_running(np->dev)) {
- __kfree_skb(skb);
- return;
- }
+ struct net_device *dev = np->dev;
+ struct netpoll_info *npinfo = dev->npinfo;
- npinfo = np->dev->npinfo;
+ skb_queue_tail(&npinfo->tx_q, skb);
/* avoid recursion */
if (npinfo->poll_owner == smp_processor_id() ||
- np->dev->xmit_lock_owner == smp_processor_id()) {
- if (np->drop)
- np->drop(skb);
- else
- __kfree_skb(skb);
+ dev->xmit_lock_owner == smp_processor_id())
return;
- }
- do {
- npinfo->tries--;
- netif_tx_lock(np->dev);
-
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- status = NETDEV_TX_BUSY;
- if (!netif_queue_stopped(np->dev))
- status = np->dev->hard_start_xmit(skb, np->dev);
-
- netif_tx_unlock(np->dev);
-
- /* success */
- if(!status) {
- npinfo->tries = MAX_RETRIES; /* reset */
- return;
- }
-
- /* transmit busy */
- netpoll_poll(np);
- udelay(50);
- } while (npinfo->tries > 0);
+ netpoll_run((unsigned long) dev);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -666,9 +610,10 @@ int netpoll_setup(struct netpoll *np)
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->poll_lock);
npinfo->poll_owner = -1;
- npinfo->tries = MAX_RETRIES;
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
+ skb_queue_head_init(&npinfo->tx_q);
+ tasklet_init(&npinfo->tx_task, netpoll_run, (unsigned long) ndev);
} else
npinfo = ndev->npinfo;
@@ -776,6 +721,11 @@ void netpoll_cleanup(struct netpoll *np)
npinfo->rx_np = NULL;
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->tx_q);
+
+ tasklet_kill(&npinfo->tx_task);
}
dev_put(np->dev);
}
@@ -803,4 +753,3 @@ EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
-EXPORT_SYMBOL(netpoll_queue);
--
1.4.2.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 15:18 ` Stephen Hemminger
@ 2006-10-20 19:24 ` David Miller
2006-10-20 19:25 ` Stephen Hemminger
0 siblings, 1 reply; 37+ messages in thread
From: David Miller @ 2006-10-20 19:24 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Fri, 20 Oct 2006 08:18:57 -0700
> Netdump is not in the tree, so I can't fix it. Also netdump is pretty
> much superseded by kdump.
Unless kdump is 100% ready, you can be sure vendors will ship netdump
for a little while longer. I think gratuitously breaking netdump is
not the best idea.
It's not like netdump is some binary blob you can't get the source
to easily. :-)
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 19:24 ` David Miller
@ 2006-10-20 19:25 ` Stephen Hemminger
2006-10-20 19:52 ` David Miller
0 siblings, 1 reply; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 19:25 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Fri, 20 Oct 2006 12:24:27 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Fri, 20 Oct 2006 08:18:57 -0700
>
> > Netdump is not in the tree, so I can't fix it. Also netdump is pretty
> > much superseded by kdump.
>
> Unless kdump is 100% ready, you can be sure vendors will ship netdump
> for a little while longer. I think gratuitously breaking netdump is
> not the best idea.
>
> It's not like netdump is some binary blob you can't get the source
> to easily. :-)
>
Sorry, but why should we treat out-of-tree vendor code any differently
than other out-of-tree code?
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 15:40 ` Stephen Hemminger
@ 2006-10-20 19:27 ` David Miller
2006-10-20 19:31 ` Stephen Hemminger
2006-10-20 20:42 ` David Miller
1 sibling, 1 reply; 37+ messages in thread
From: David Miller @ 2006-10-20 19:27 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Fri, 20 Oct 2006 08:40:15 -0700
> The only user of the drop hook was netconsole, and I fixed that path.
> This probably breaks netdump, but that is out of tree, so it needs
> to fix itself.
I believe that netdump needs to requeue things because dropping the
packet is simply not allowed, and the ->drop callback gives the
netdump code a way to handle things without actually dropping the
packet. If that's true, you can't just free the SKB on it.
Are you sure your new TX strategy can avoid such drops properly?
Please take a quick peek at the netdump code, it's available, and make
some reasonable effort to determine whether it can still work with
your new code.
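A sketch of what such a ->drop hook might look like; the retry queue and
how it is drained are assumptions for illustration, not netdump's actual
code:

static struct sk_buff_head retry_q;     /* assumed private retry queue */

/* Hypothetical ->drop hook in the netdump spirit: never free the
 * packet, park it for a later transmit attempt instead. */
static void requeue_drop(struct sk_buff *skb)
{
        skb_queue_tail(&retry_q, skb);
}

/* wired up at setup time: np.drop = requeue_drop; */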
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 19:27 ` David Miller
@ 2006-10-20 19:31 ` Stephen Hemminger
0 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 19:31 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Fri, 20 Oct 2006 12:27:53 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Fri, 20 Oct 2006 08:40:15 -0700
>
> > The only user of the drop hook was netconsole, and I fixed that path.
> > This probably breaks netdump, but that is out of tree, so it needs
> > to fix itself.
>
> I believe that netdump needs to requeue things because dropping the
> packet is simply not allowed, and the ->drop callback gives the
> netdump code a way to handle things without actually dropping the
> packet. If that's true, you can't just free the SKB on it.
>
> Are you sure your new TX strategy can avoid such drops properly?
Yes, it has a queue: if it can't send, it waits and retries.
>
> Please take a quick peek at the netdump code, it's available, and make
> some reasonable effort to determine whether it can still work with
> your new code.
Where? I'm not digging inside some RHEL rpm patch pile to find it.
--
Stephen Hemminger <shemminger@osdl.org>
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 19:25 ` Stephen Hemminger
@ 2006-10-20 19:52 ` David Miller
2006-10-20 20:14 ` Stephen Hemminger
2006-10-20 20:25 ` Stephen Hemminger
0 siblings, 2 replies; 37+ messages in thread
From: David Miller @ 2006-10-20 19:52 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Fri, 20 Oct 2006 12:25:27 -0700
> Sorry, but why should we treat out-of-tree vendor code any
> differently than other out-of-tree code?
I think what netdump was trying to do, provide a way to
requeue instead of fully dropping the SKB, is quite reasonable.
Don't you think?
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 19:52 ` David Miller
@ 2006-10-20 20:14 ` Stephen Hemminger
2006-10-20 20:25 ` Stephen Hemminger
1 sibling, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 20:14 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Fri, 20 Oct 2006 12:52:26 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Fri, 20 Oct 2006 12:25:27 -0700
>
> > Sorry, but why should we treat out-of-tree vendor code any
> > differently than other out-of-tree code?
>
> I think what netdump was trying to do, provide a way to
> requeue instead of fully dropping the SKB, is quite reasonable.
> Don't you think?
Yes, but the queued vs. non-queued stuff showed up out of order.
The queued messages go through the wrong Tx path, i.e. they end up
going into NIT etc. And since the deferred send uses a work queue,
it wouldn't work for last-gasp messages or netdump: getting
a work queue to run requires going back to the scheduler with processes
running... and it should use sk_buff_head instead
of roll-your-own queueing.
The other alternative would be to make the send logic non-blocking
and fully push retry to the caller.
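A sketch of that alternative; netpoll_xmit_once() is a made-up name, the
point being a single attempt with the status handed back to the caller:

/* Non-blocking send: one attempt under the tx lock; the caller
 * decides whether to requeue, retry, or drop on failure. */
static int netpoll_xmit_once(struct net_device *dev, struct sk_buff *skb)
{
        int rc = NETDEV_TX_BUSY;

        netif_tx_lock(dev);
        if (!netif_queue_stopped(dev))
                rc = dev->hard_start_xmit(skb, dev);
        netif_tx_unlock(dev);

        return rc;      /* NETDEV_TX_OK on success */
}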
I'll make a fix to netdump, if I can find it.
--
Stephen Hemminger <shemminger@osdl.org>
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 19:52 ` David Miller
2006-10-20 20:14 ` Stephen Hemminger
@ 2006-10-20 20:25 ` Stephen Hemminger
2006-10-21 5:00 ` Dave Jones
1 sibling, 1 reply; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 20:25 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Fri, 20 Oct 2006 12:52:26 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Fri, 20 Oct 2006 12:25:27 -0700
>
> > Sorry, but why should we treat out-of-tree vendor code any
> > differently than other out-of-tree code?
>
> I think what netdump was trying to do, provide a way to
> requeue instead of fully dropping the SKB, is quite reasonable.
> Don't you think?
Netdump doesn't even exist in the current Fedora source rpm.
I think Dave dropped it.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 15:40 ` Stephen Hemminger
2006-10-20 19:27 ` David Miller
@ 2006-10-20 20:42 ` David Miller
2006-10-20 20:48 ` Stephen Hemminger
1 sibling, 1 reply; 37+ messages in thread
From: David Miller @ 2006-10-20 20:42 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Fri, 20 Oct 2006 08:40:15 -0700
> -static void queue_process(void *p)
> +static void netpoll_run(unsigned long arg)
> {
...
> - spin_unlock_irqrestore(&queue_lock, flags);
> + netif_tx_lock(dev);
> + if (netif_queue_stopped(dev) ||
> + dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
> + skb_queue_head(&npinfo->tx_q, skb);
> + netif_tx_unlock(dev);
> + tasklet_schedule(&npinfo->tx_task);
> + return;
> + }
We really can't handle TX stopped this way from the netpoll_send_skb()
path. All that old retry logic in netpoll_send_skb() is really
necessary.
If we are in deep IRQ context, took an OOPS, and are trying to get a
netpoll packet out for the kernel log message, we have to try as hard
as possible to get the packet out then and there, even if that means
waiting some amount of time for netif_queue_stopped() to become false.
That is what the existing code is trying to do.
If you defer to a tasklet, the kernel state from the OOPS can be so
corrupted that the tasklet will never run and we'll never get the
netconsole message needed to debug the problem.
Also, if we tasklet schedule from the tasklet, we'll just keep looping
in the tasklet and never leave softirq context, which is also bad
behavior. Even in the tasklet, we should spin and poll when possible
like the current netpoll_send_skb() code does.
So we really can't apply this patch.
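(For reference, a minimal sketch of the spin-and-poll strategy described
above; it mirrors the logic the patch removes and is not verbatim tree
code:)

/* Keep servicing the NIC and retrying, bounded by a retry budget, so
 * an oops message can get out even if the TX queue is momentarily
 * stopped or the link is coming back up. */
for (tries = MAX_RETRIES; tries > 0; --tries) {
        int status = NETDEV_TX_BUSY;

        netif_tx_lock(np->dev);
        if (!netif_queue_stopped(np->dev))
                status = np->dev->hard_start_xmit(skb, np->dev);
        netif_tx_unlock(np->dev);

        if (status == NETDEV_TX_OK)
                return;

        netpoll_poll(np);       /* reap TX completions, poll the link */
        udelay(50);
}
__kfree_skb(skb);               /* retry budget exhausted: drop */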
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 20:42 ` David Miller
@ 2006-10-20 20:48 ` Stephen Hemminger
2006-10-20 21:01 ` Andi Kleen
2006-10-20 21:01 ` David Miller
0 siblings, 2 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 20:48 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Fri, 20 Oct 2006 13:42:09 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Fri, 20 Oct 2006 08:40:15 -0700
>
> > -static void queue_process(void *p)
> > +static void netpoll_run(unsigned long arg)
> > {
> ...
> > - spin_unlock_irqrestore(&queue_lock, flags);
> > + netif_tx_lock(dev);
> > + if (netif_queue_stopped(dev) ||
> > + dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
> > + skb_queue_head(&npinfo->tx_q, skb);
> > + netif_tx_unlock(dev);
> > + tasklet_schedule(&npinfo->tx_task);
> > + return;
> > + }
>
> We really can't handle TX stopped this way from the netpoll_send_skb()
> path. All that old retry logic in netpoll_send_skb() is really
> necessary.
>
> If we are in deep IRQ context, took an OOPS, and are trying to get a
> netpoll packet out for the kernel log message, we have to try as hard
> as possible to get the packet out then and there, even if that means
> waiting some amount of time for netif_queue_stopped() to become false.
>
But it also violates the assumptions of the network devices.
It calls the NAPI poll callback with IRQs disabled, and potentially doesn't
obey the semantics about only running on the same CPU as the
received packet.
> That is what the existing code is trying to do.
>
> If you defer to a tasklet, the kernel state from the OOPS can be so
> corrupted that the tasklet will never run and we'll never get the
> netconsole message needed to debug the problem.
So we can try once, and if that fails we have to defer to the tasklet.
We can't call NAPI, and we can't try to clean up; the device will need
an IRQ to get unblocked.
Or add another device callback just to handle that case.
> Also, if we tasklet schedule from the tasklet, we'll just keep looping
> in the tasklet and never leave softirq context, which is also bad
> behavior. Even in the tasklet, we should spin and poll when possible
> like the current netpoll_send_skb() code does.
>
> So we really can't apply this patch.
--
Stephen Hemminger <shemminger@osdl.org>
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 20:48 ` Stephen Hemminger
@ 2006-10-20 21:01 ` Andi Kleen
2006-10-20 21:08 ` David Miller
2006-10-20 21:01 ` David Miller
1 sibling, 1 reply; 37+ messages in thread
From: Andi Kleen @ 2006-10-20 21:01 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: David Miller, netdev, linux-kernel
> But it also violates the assumptions of the network devices.
> It calls the NAPI poll callback with IRQs disabled, and potentially doesn't
> obey the semantics about only running on the same CPU as the
> received packet.
netpoll always played a little fast'n'loose with various locking rules.
Also, the drivers themselves are often a little sloppy in the poll path.
That's fine for getting an oops out, but risky for normal kernel
messages, when the driver must still be working afterwards.
The standard console code makes this conditional on oops_in_progress:
it breaks locks when true, and otherwise it follows the safe
approach.
Perhaps it would be better to use different paths in netconsole too
depending on whether oops_in_progress is true or not.
e.g. if !oops_in_progress, use the standard output path;
else use the current path.
That would avoid breaking the driver during normal operation
and also have the advantage that the normal netconsole messages
would go through the queueing disciplines etc.
-Andi
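A sketch of the suggested split; netpoll_send_skb_direct() is a
hypothetical name standing in for the current direct-transmit path:

/* Only bypass the normal stack when this may be the last chance to
 * get a message out. */
static void netconsole_xmit(struct netpoll *np, struct sk_buff *skb)
{
        if (!oops_in_progress)
                dev_queue_xmit(skb);              /* normal path: qdisc, NIT */
        else
                netpoll_send_skb_direct(np, skb); /* last-gasp direct path */
}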
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 20:48 ` Stephen Hemminger
2006-10-20 21:01 ` Andi Kleen
@ 2006-10-20 21:01 ` David Miller
2006-10-20 22:30 ` [PATCH 1/3] netpoll: use sk_buff_head for txq Stephen Hemminger
` (2 more replies)
1 sibling, 3 replies; 37+ messages in thread
From: David Miller @ 2006-10-20 21:01 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Fri, 20 Oct 2006 13:48:26 -0700
> On Fri, 20 Oct 2006 13:42:09 -0700 (PDT)
> David Miller <davem@davemloft.net> wrote:
>
> > We really can't handle TX stopped this way from the netpoll_send_skb()
> > path. All that old retry logic in netpoll_send_skb() is really
> > necessary.
> >
> > If we are in deep IRQ context, took an OOPS, and are trying to get a
> > netpoll packet out for the kernel log message, we have to try as hard
> > as possible to get the packet out then and there, even if that means
> > waiting some amount of time for netif_queue_stopped() to become false.
>
> But it also violates the assumptions of the network devices.
> It calls the NAPI poll callback with IRQs disabled, and potentially doesn't
> obey the semantics about only running on the same CPU as the
> received packet.
Actually, all the locking here is fine; that's why it checks
poll_owner for the current smp_processor_id().
Otherwise it is safe to sit and spin waiting for link up/down or
TX full conditions to pass.
> So we can try once, and if that fails we have to defer to the tasklet.
Not true, we should spin and retry for some reasonable amount of time.
That "reasonable amount of time" should be the maximum of the time
it takes to free up space from a TX full condition and the time it
takes to bring the link up.
That is what the current code is trying to do, and you've erroneously
deleted all of that logic.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 21:01 ` Andi Kleen
@ 2006-10-20 21:08 ` David Miller
2006-10-20 21:16 ` Andi Kleen
0 siblings, 1 reply; 37+ messages in thread
From: David Miller @ 2006-10-20 21:08 UTC (permalink / raw)
To: ak; +Cc: shemminger, netdev, linux-kernel
From: Andi Kleen <ak@suse.de>
Date: Fri, 20 Oct 2006 23:01:29 +0200
> netpoll always played a little fast'n'loose with various locking rules.
The current code is fine, it never reenters ->poll, because it
maintains a "->poll_owner" which it checks in netpoll_send_skb()
before trying to call back into ->poll.
Every call to ->poll first sets ->poll_owner to the current cpu id.
netpoll_send_skb() aborts and does a drop if ->poll_owner is set to
the current smp_processor_id().
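In sketch form, assuming the 2.6-era ->poll signature (not the verbatim
tree code):

/* poll side (netpoll_poll): claim ownership around the ->poll call */
npinfo->poll_owner = smp_processor_id();
np->dev->poll(np->dev, &budget);
npinfo->poll_owner = -1;

/* send side (netpoll_send_skb): refuse to re-enter ->poll here */
if (npinfo->poll_owner == smp_processor_id()) {
        /* queue or drop instead of spinning into ->poll */
        return;
}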
I sometimes feel like I'm the only person actually reading the sources
and past threads on this topic before replying.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 21:08 ` David Miller
@ 2006-10-20 21:16 ` Andi Kleen
2006-10-20 21:41 ` Stephen Hemminger
0 siblings, 1 reply; 37+ messages in thread
From: Andi Kleen @ 2006-10-20 21:16 UTC (permalink / raw)
To: David Miller; +Cc: shemminger, netdev, linux-kernel
On Friday 20 October 2006 23:08, David Miller wrote:
> From: Andi Kleen <ak@suse.de>
> Date: Fri, 20 Oct 2006 23:01:29 +0200
>
> > netpoll always played a little fast'n'loose with various locking rules.
>
> The current code is fine, it never reenters ->poll, because it
> maintains a "->poll_owner" which it checks in netpoll_send_skb()
> before trying to call back into ->poll.
I was thinking more of reentry of the interrupt handler in
the driver etc. A lot of them also do printk, but that is fortunately
caught at a higher level.
-Andi
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 21:16 ` Andi Kleen
@ 2006-10-20 21:41 ` Stephen Hemminger
0 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 21:41 UTC (permalink / raw)
To: Andi Kleen; +Cc: David Miller, netdev, linux-kernel
On Fri, 20 Oct 2006 23:16:03 +0200
Andi Kleen <ak@suse.de> wrote:
> On Friday 20 October 2006 23:08, David Miller wrote:
> > From: Andi Kleen <ak@suse.de>
> > Date: Fri, 20 Oct 2006 23:01:29 +0200
> >
> > > netpoll always played a little fast'n'loose with various locking rules.
> >
> > The current code is fine, it never reenters ->poll, because it
> > maintains a "->poll_owner" which it checks in netpoll_send_skb()
> > before trying to call back into ->poll.
>
> I was thinking more of reentry of the interrupt handler in
> the driver etc. A lot of them also do printk, but that is fortunately
> caught at a higher level.
>
> -Andi
One problem is that with netconsole the NAPI poll routine can be
called with IRQs disabled. This means that calls to dev_kfree_skb()
are incorrect in this context. The driver would have to use dev_kfree_skb_any()
instead.
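For illustration, a TX-completion loop written the safe way;
get_completed_tx() is a hypothetical driver helper:

/* dev_kfree_skb_any() routes to dev_kfree_skb_irq() when called in
 * hard-IRQ or IRQs-off context, so it stays correct when the driver's
 * poll routine is invoked from netpoll with interrupts disabled. */
while ((skb = get_completed_tx(priv)) != NULL)
        dev_kfree_skb_any(skb);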
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 1/3] netpoll: use sk_buff_head for txq
2006-10-20 21:01 ` David Miller
@ 2006-10-20 22:30 ` Stephen Hemminger
2006-10-23 3:42 ` David Miller
2006-10-20 22:32 ` [PATCH 2/3] netpoll: use device xmit directly Stephen Hemminger
2006-10-20 22:35 ` [PATCH 3/3] netpoll: retry logic cleanup Stephen Hemminger
2 siblings, 1 reply; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 22:30 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
This is similar in spirit to the earlier patches, with fewer changes.
Get rid of the DIY queue for skbs used in netpoll, and add the missing
code to clean up the queue on shutdown.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -37,10 +37,7 @@
#define MAX_RETRIES 20000
static struct sk_buff_head netpoll_pool;
-
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
+static struct sk_buff_head netpoll_txq;
static atomic_t trapped;
@@ -56,44 +53,18 @@ static void arp_reply(struct sk_buff *sk
static void queue_process(void *p)
{
- unsigned long flags;
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
-
- queue_depth--;
-
- spin_unlock_irqrestore(&queue_lock, flags);
-
+ while ((skb = skb_dequeue(&netpoll_txq)))
dev_queue_xmit(skb);
- }
+
}
static DECLARE_WORK(send_queue, queue_process, NULL);
void netpoll_queue(struct sk_buff *skb)
{
- unsigned long flags;
-
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
+ skb_queue_tail(&netpoll_txq, skb);
schedule_work(&send_queue);
}
@@ -745,6 +716,7 @@ int netpoll_setup(struct netpoll *np)
}
static int __init netpoll_init(void) {
+ skb_queue_head_init(&netpoll_txq);
skb_queue_head_init(&netpoll_pool);
return 0;
}
@@ -752,20 +724,30 @@ core_initcall(netpoll_init);
void netpoll_cleanup(struct netpoll *np)
{
- struct netpoll_info *npinfo;
+ struct net_device *dev = np->dev;
+ struct sk_buff *skb, *next;
unsigned long flags;
- if (np->dev) {
- npinfo = np->dev->npinfo;
+ if (dev) {
+ struct netpoll_info *npinfo = dev->npinfo;
+
if (npinfo && npinfo->rx_np == np) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_np = NULL;
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
- dev_put(np->dev);
+ dev_put(dev);
+
+ spin_lock_irqsave(&netpoll_txq.lock, flags);
+ for (skb = (struct sk_buff *)netpoll_txq.next;
+ skb != (struct sk_buff *)&netpoll_txq; skb = next) {
+ next = skb->next;
+ if (skb->dev == dev)
+ skb_unlink(skb, &netpoll_txq);
+ }
+ spin_unlock_irqrestore(&netpoll_txq.lock, flags);
}
-
np->dev = NULL;
}
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 2/3] netpoll: use device xmit directly
2006-10-20 21:01 ` David Miller
2006-10-20 22:30 ` [PATCH 1/3] netpoll: use sk_buff_head for txq Stephen Hemminger
@ 2006-10-20 22:32 ` Stephen Hemminger
2006-10-20 22:35 ` [PATCH 3/3] netpoll: retry logic cleanup Stephen Hemminger
2 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 22:32 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
For most normal sends, the netpoll code was using dev->hard_start_xmit()
directly. If the device got busy, the skb would be processed later, but
that code path used dev_queue_xmit(), which would send the skb through
NIT and other stuff that netpoll doesn't want.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -51,17 +51,28 @@ static atomic_t trapped;
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
+static void queue_process(void *);
+static DECLARE_WORK(send_queue, queue_process, NULL);
+
static void queue_process(void *p)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&netpoll_txq)))
- dev_queue_xmit(skb);
+ while ((skb = skb_dequeue(&netpoll_txq))) {
+ struct net_device *dev = skb->dev;
+ netif_tx_lock_bh(dev);
+ if (netif_queue_stopped(dev) ||
+ dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+ skb_queue_head(&netpoll_txq, skb);
+ netif_tx_unlock_bh(dev);
+ schedule_delayed_work(&send_queue, 1);
+ break;
+ }
+ netif_tx_unlock_bh(dev);
+ }
}
-static DECLARE_WORK(send_queue, queue_process, NULL);
-
void netpoll_queue(struct sk_buff *skb)
{
skb_queue_tail(&netpoll_txq, skb);
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 3/3] netpoll: retry logic cleanup
2006-10-20 21:01 ` David Miller
2006-10-20 22:30 ` [PATCH 1/3] netpoll: use sk_buff_head for txq Stephen Hemminger
2006-10-20 22:32 ` [PATCH 2/3] netpoll: use device xmit directly Stephen Hemminger
@ 2006-10-20 22:35 ` Stephen Hemminger
2 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-20 22:35 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
Change the retry logic of netpoll. Always requeue if unable to send,
instead of dropping. Make the retry counter per-send rather than causing
mass migration when tries goes below zero. Since the callers are
either netpoll_send_arp or netpoll_send_udp, we know that np and np->dev
can't be NULL.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/include/linux/netpoll.h
+++ netpoll/include/linux/netpoll.h
@@ -27,7 +27,6 @@ struct netpoll {
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
- int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -232,50 +232,43 @@ static struct sk_buff * find_skb(struct
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status;
- struct netpoll_info *npinfo;
-
- if (!np || !np->dev || !netif_running(np->dev))
- goto free_skb;
+ struct net_device *dev = np->dev;
+ struct netpoll_info *npinfo = dev->npinfo;
+ int status, tries;
- npinfo = np->dev->npinfo;
+ if (!netif_running(dev) || !netif_device_present(dev))
+ __kfree_skb(skb);
/* avoid recursion */
if (npinfo->poll_owner == smp_processor_id() ||
- np->dev->xmit_lock_owner == smp_processor_id()) {
- if (np->drop)
- np->drop(skb);
- else
- __kfree_skb(skb);
- return;
- }
-
- do {
- npinfo->tries--;
- netif_tx_lock(np->dev);
+ dev->xmit_lock_owner == smp_processor_id())
+ goto busy;
+ for (tries = MAX_RETRIES; tries; --tries) {
/*
* network drivers do not expect to be called if the queue is
* stopped.
*/
- status = NETDEV_TX_BUSY;
- if (!netif_queue_stopped(np->dev))
- status = np->dev->hard_start_xmit(skb, np->dev);
-
- netif_tx_unlock(np->dev);
+ if (netif_queue_stopped(dev))
+ status = NETDEV_TX_BUSY;
+ else
+ status = dev->hard_start_xmit(skb, dev);
+ netif_tx_unlock(dev);
/* success */
- if(!status) {
- npinfo->tries = MAX_RETRIES; /* reset */
+ if(status == NETDEV_TX_OK)
return;
- }
/* transmit busy */
netpoll_poll(np);
udelay(50);
- } while (npinfo->tries > 0);
-free_skb:
- __kfree_skb(skb);
+ }
+
+busy:
+ if (np->drop)
+ np->drop(skb);
+ else
+ __kfree_skb(skb);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -628,7 +621,7 @@ int netpoll_setup(struct netpoll *np)
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->poll_lock);
npinfo->poll_owner = -1;
- npinfo->tries = MAX_RETRIES;
+
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
} else
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-20 20:25 ` Stephen Hemminger
@ 2006-10-21 5:00 ` Dave Jones
2006-10-21 6:38 ` David Miller
0 siblings, 1 reply; 37+ messages in thread
From: Dave Jones @ 2006-10-21 5:00 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: David Miller, netdev, linux-kernel
On Fri, Oct 20, 2006 at 01:25:32PM -0700, Stephen Hemminger wrote:
> On Fri, 20 Oct 2006 12:52:26 -0700 (PDT)
> David Miller <davem@davemloft.net> wrote:
>
> > From: Stephen Hemminger <shemminger@osdl.org>
> > Date: Fri, 20 Oct 2006 12:25:27 -0700
> >
> > > Sorry, but why should we treat out-of-tree vendor code any
> > > differently than out-of-tree other code.
> >
> > I think what netdump was trying to do, provide a way to
> > requeue instead of fully drop the SKB, is quite reasonable.
> > Don't you think?
>
>
> Netdump doesn't even exist in the current Fedora source rpm.
> I think Dave dropped it.
Indeed. Practically no-one cared about it, so it bit-rotted
really fast after we shipped RHEL4. That, along with the focus
shifting to making kdump work, seemed to kill it off over the last
12 months.
Dave
--
http://www.codemonkey.org.uk
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 2/3] netpoll: rework skb transmit queue
2006-10-21 5:00 ` Dave Jones
@ 2006-10-21 6:38 ` David Miller
0 siblings, 0 replies; 37+ messages in thread
From: David Miller @ 2006-10-21 6:38 UTC (permalink / raw)
To: davej; +Cc: shemminger, netdev, linux-kernel
From: Dave Jones <davej@redhat.com>
Date: Sat, 21 Oct 2006 01:00:16 -0400
> Practically no-one cared about it, so it bit-rotted really fast
> after we shipped RHEL4. That, along with the focus shifting to
> making kdump work, seemed to kill it off over the last 12 months.
Then we can truly kill off the ->drop() callback as part
of Stephen's patches.
Stephen, I'll review your new set over the weekend.
^ permalink raw reply [flat|nested] 37+ messages in thread
* Re: [PATCH 1/3] netpoll: use sk_buff_head for txq
2006-10-20 22:30 ` [PATCH 1/3] netpoll: use sk_buff_head for txq Stephen Hemminger
@ 2006-10-23 3:42 ` David Miller
[not found] ` <20061023115337.1f636ffb@dxpl.pdx.osdl.net>
` (2 more replies)
0 siblings, 3 replies; 37+ messages in thread
From: David Miller @ 2006-10-23 3:42 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Fri, 20 Oct 2006 15:30:27 -0700
> + spin_lock_irqsave(&netpoll_txq.lock, flags);
> + for (skb = (struct sk_buff *)netpoll_txq.next;
> + skb != (struct sk_buff *)&netpoll_txq; skb = next) {
> + next = skb->next;
> + if (skb->dev == dev)
> + skb_unlink(skb, &netpoll_txq);
> + }
> + spin_unlock_irqrestore(&netpoll_txq.lock, flags);
> }
All SKBs removed here will be leaked; nothing frees them up.
Since #2 and #3 depend upon this patch, I'll hold off on those
until you fix this bug.
Thanks.
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 4/5] netpoll: move drop hook inline
[not found] ` <20061023115337.1f636ffb@dxpl.pdx.osdl.net>
@ 2006-10-23 19:02 ` Stephen Hemminger
2006-10-23 19:03 ` [PATCH 3/5] netpoll: cleanup transmit retry logic Stephen Hemminger
1 sibling, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-23 19:02 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
Rather than have a separate drop callback hook, just put
the logic inline in netpoll. The code is cleaner, and netconsole
is the only user of netpoll.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/drivers/net/netconsole.c
+++ netpoll/drivers/net/netconsole.c
@@ -60,7 +60,6 @@ static struct netpoll np = {
.local_port = 6665,
.remote_port = 6666,
.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .drop = netpoll_queue,
};
static int configured = 0;
--- netpoll.orig/include/linux/netpoll.h
+++ netpoll/include/linux/netpoll.h
@@ -18,7 +18,6 @@ struct netpoll {
struct net_device *dev;
char dev_name[16], *name;
void (*rx_hook)(struct netpoll *, int, char *, int);
- void (*drop)(struct sk_buff *skb);
u32 local_ip, remote_ip;
u16 local_port, remote_port;
unsigned char local_mac[6], remote_mac[6];
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -91,13 +91,6 @@ static void queue_purge(struct net_devic
spin_unlock_irqrestore(&netpoll_txq.lock, flags);
}
-void netpoll_queue(struct sk_buff *skb)
-{
- skb_queue_tail(&netpoll_txq, skb);
-
- schedule_work(&send_queue);
-}
-
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
unsigned short ulen, u32 saddr, u32 daddr)
{
@@ -282,10 +275,10 @@ static void netpoll_send_skb(struct netp
udelay(50);
} while (--tries > 0);
- if (np->drop)
- np->drop(skb);
- else
- __kfree_skb(skb);
+ /* if transmitter is busy, try send later. */
+ skb_queue_tail(&netpoll_txq, skb);
+
+ schedule_work(&send_queue);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -784,4 +777,3 @@ EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
-EXPORT_SYMBOL(netpoll_queue);
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH 1/5] netpoll: use sk_buff_head for txq
2006-10-23 3:42 ` David Miller
[not found] ` <20061023115337.1f636ffb@dxpl.pdx.osdl.net>
@ 2006-10-23 19:02 ` Stephen Hemminger
2006-10-23 19:04 ` [PATCH 5/5] netpoll: interface cleanup Stephen Hemminger
2006-10-24 6:03 ` [PATCH 1/5] netpoll: use sk_buff_head for txq David Miller
[not found] ` <20061023115111.0d69846e@dxpl.pdx.osdl.net>
2 siblings, 2 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-23 19:02 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
This is the 3rd version of the netpoll patches. The first patch
switches from an open-coded skb list to using a sk_buff_head.
It also flushes packets from the queue when a device is removed from
netpoll.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -37,10 +37,7 @@
#define MAX_RETRIES 20000
static struct sk_buff_head netpoll_pool;
-
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
+static struct sk_buff_head netpoll_txq;
static atomic_t trapped;
@@ -56,44 +53,35 @@ static void arp_reply(struct sk_buff *sk
static void queue_process(void *p)
{
- unsigned long flags;
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
+ while ((skb = skb_dequeue(&netpoll_txq)))
+ dev_queue_xmit(skb);
- queue_depth--;
+}
- spin_unlock_irqrestore(&queue_lock, flags);
+static void queue_purge(struct net_device *dev)
+{
+ struct sk_buff *skb, *next;
+ unsigned long flags;
- dev_queue_xmit(skb);
+ spin_lock_irqsave(&netpoll_txq.lock, flags);
+ for (skb = (struct sk_buff *)netpoll_txq.next;
+ skb != (struct sk_buff *)&netpoll_txq; skb = next) {
+ next = skb->next;
+ if (skb->dev == dev) {
+ skb_unlink(skb, &netpoll_txq);
+ kfree_skb(skb);
+ }
}
+ spin_unlock_irqrestore(&netpoll_txq.lock, flags);
}
static DECLARE_WORK(send_queue, queue_process, NULL);
void netpoll_queue(struct sk_buff *skb)
{
- unsigned long flags;
-
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
+ skb_queue_tail(&netpoll_txq, skb);
schedule_work(&send_queue);
}
@@ -745,6 +733,7 @@ int netpoll_setup(struct netpoll *np)
}
static int __init netpoll_init(void) {
+ skb_queue_head_init(&netpoll_txq);
skb_queue_head_init(&netpoll_pool);
return 0;
}
@@ -752,20 +741,22 @@ core_initcall(netpoll_init);
void netpoll_cleanup(struct netpoll *np)
{
- struct netpoll_info *npinfo;
+ struct net_device *dev = np->dev;
unsigned long flags;
- if (np->dev) {
- npinfo = np->dev->npinfo;
+ if (dev) {
+ struct netpoll_info *npinfo = dev->npinfo;
+
if (npinfo && npinfo->rx_np == np) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_np = NULL;
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
- dev_put(np->dev);
- }
+ dev_put(dev);
+ queue_purge(dev);
+ }
np->dev = NULL;
}
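For readers less familiar with the sk_buff_head helpers this patch adopts,
a minimal sketch of the pattern (illustrative only, not part of the patch;
the example_ names are made up). The helpers take the queue's embedded
spinlock internally, which is what lets the patch delete the open-coded
queue_lock:

static struct sk_buff_head example_txq;

static void example_init(void)
{
	/* initializes both the list head and the embedded spinlock */
	skb_queue_head_init(&example_txq);
}

static void example_enqueue(struct sk_buff *skb)
{
	/* takes example_txq.lock with IRQs disabled internally */
	skb_queue_tail(&example_txq, skb);
}

static void example_drain(void)
{
	struct sk_buff *skb;

	/* skb_dequeue() returns NULL once the queue is empty */
	while ((skb = skb_dequeue(&example_txq)) != NULL)
		dev_queue_xmit(skb);
}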
* [PATCH 2/5] netpoll: cleanup queued transmit
[not found] ` <20061023115111.0d69846e@dxpl.pdx.osdl.net>
@ 2006-10-23 19:02 ` Stephen Hemminger
0 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-23 19:02 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
This patch changes the queued transmit path of netpoll to
use the same logic as the non-queued path. We don't want netpoll
messages going through the packet taps (NIT) or the network qdisc logic.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -51,13 +51,27 @@ static atomic_t trapped;
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
+static void queue_process(void *);
+static DECLARE_WORK(send_queue, queue_process, NULL);
+
static void queue_process(void *p)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&netpoll_txq)))
- dev_queue_xmit(skb);
+ while ((skb = skb_dequeue(&netpoll_txq))) {
+ struct net_device *dev = skb->dev;
+ netif_tx_lock_bh(dev);
+ if (netif_queue_stopped(dev) ||
+ dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+ skb_queue_head(&netpoll_txq, skb);
+ netif_tx_unlock_bh(dev);
+
+ schedule_delayed_work(&send_queue, 1);
+ return;
+ }
+ netif_tx_unlock_bh(dev);
+ }
}
static void queue_purge(struct net_device *dev)
@@ -77,8 +91,6 @@ static void queue_purge(struct net_devic
spin_unlock_irqrestore(&netpoll_txq.lock, flags);
}
-static DECLARE_WORK(send_queue, queue_process, NULL);
-
void netpoll_queue(struct sk_buff *skb)
{
skb_queue_tail(&netpoll_txq, skb);
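The effect of the change above: dev_queue_xmit() would feed each skb back
through the qdisc layer and the packet taps (NIT), while taking
netif_tx_lock_bh() and calling dev->hard_start_xmit() directly bypasses
both. A stripped-down sketch of the direct path (illustrative only,
2.6.18-era driver interface; example_direct_xmit is a made-up name):

static int example_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int status = NETDEV_TX_BUSY;

	netif_tx_lock_bh(dev);
	/* drivers must not be handed packets while their queue is stopped */
	if (!netif_queue_stopped(dev))
		status = dev->hard_start_xmit(skb, dev);
	netif_tx_unlock_bh(dev);

	return status;	/* NETDEV_TX_OK means the driver consumed the skb */
}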
* [PATCH 3/5] netpoll: cleanup transmit retry logic
[not found] ` <20061023115337.1f636ffb@dxpl.pdx.osdl.net>
2006-10-23 19:02 ` [PATCH 4/5] netpoll: move drop hook inline Stephen Hemminger
@ 2006-10-23 19:03 ` Stephen Hemminger
1 sibling, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-23 19:03 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
Change the netpoll transmit logic so that:
* retries are per attempt, not global; we don't want to start dropping
packets just because of a temporary blockage.
* the xmit_lock is acquired correctly
* if the device is not available, just drop
* if the send fails, always queue rather than drop
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/include/linux/netpoll.h
+++ netpoll/include/linux/netpoll.h
@@ -27,7 +27,6 @@ struct netpoll {
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
- int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
--- netpoll.orig/net/core/netpoll.c
+++ netpoll/net/core/netpoll.c
@@ -250,50 +250,42 @@ static struct sk_buff * find_skb(struct
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status;
- struct netpoll_info *npinfo;
-
- if (!np || !np->dev || !netif_running(np->dev))
- goto free_skb;
+ struct net_device *dev = np->dev;
+ int tries = MAX_RETRIES;
- npinfo = np->dev->npinfo;
+ do {
+ int status;
- /* avoid recursion */
- if (npinfo->poll_owner == smp_processor_id() ||
- np->dev->xmit_lock_owner == smp_processor_id()) {
- if (np->drop)
- np->drop(skb);
- else
+ /* if device is offline, give up */
+ if (!netif_running(dev) || !netif_device_present(dev)) {
__kfree_skb(skb);
- return;
- }
+ return;
+ }
- do {
- npinfo->tries--;
- netif_tx_lock(np->dev);
+ /* grab tx lock, but avoid recursion problems */
+ if (!netif_tx_trylock(dev))
+ break;
+
+ /* drivers do not expect to be called if queue is stopped. */
+ if (netif_queue_stopped(dev))
+ status = NETDEV_TX_BUSY;
+ else
+ status = dev->hard_start_xmit(skb, dev);
+ netif_tx_unlock(dev);
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- status = NETDEV_TX_BUSY;
- if (!netif_queue_stopped(np->dev))
- status = np->dev->hard_start_xmit(skb, np->dev);
-
- netif_tx_unlock(np->dev);
-
- /* success */
- if(!status) {
- npinfo->tries = MAX_RETRIES; /* reset */
+ /* successful send */
+ if (status == NETDEV_TX_OK)
return;
- }
- /* transmit busy */
+ /* transmit busy, maybe cleaning up will help */
netpoll_poll(np);
udelay(50);
- } while (npinfo->tries > 0);
-free_skb:
- __kfree_skb(skb);
+ } while (--tries > 0);
+
+ if (np->drop)
+ np->drop(skb);
+ else
+ __kfree_skb(skb);
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -646,7 +638,7 @@ int netpoll_setup(struct netpoll *np)
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->poll_lock);
npinfo->poll_owner = -1;
- npinfo->tries = MAX_RETRIES;
+
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
} else
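The netif_tx_trylock() above is what replaces the old
poll_owner/xmit_lock_owner recursion checks: netpoll can be entered from a
printk() issued while this CPU already holds the device's tx lock (for
instance, inside the driver's own hard_start_xmit), and an unconditional
netif_tx_lock() would then deadlock. A sketch of the guarded send
(illustrative only; the real patch falls through to the retry/drop path
instead of freeing):

static void example_send(struct net_device *dev, struct sk_buff *skb)
{
	/* trylock, not lock: the tx lock may already be held by
	 * this CPU if netpoll fired from inside the driver */
	if (!netif_tx_trylock(dev)) {
		__kfree_skb(skb);	/* sketch: real code retries/queues */
		return;
	}

	if (netif_queue_stopped(dev) ||
	    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK)
		__kfree_skb(skb);	/* skb was not consumed by the driver */
	netif_tx_unlock(dev);
}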
* [PATCH 5/5] netpoll: interface cleanup
2006-10-23 19:02 ` [PATCH 1/5] netpoll: use sk_buff_head for txq Stephen Hemminger
@ 2006-10-23 19:04 ` Stephen Hemminger
2006-10-24 6:03 ` [PATCH 1/5] netpoll: use sk_buff_head for txq David Miller
1 sibling, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-23 19:04 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
Trivial cleanup of the netpoll interface. Use constants
for sizes to make the usage clear.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
--- netpoll.orig/include/linux/netpoll.h
+++ netpoll/include/linux/netpoll.h
@@ -12,15 +12,14 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
-struct netpoll;
-
struct netpoll {
struct net_device *dev;
- char dev_name[16], *name;
+ char dev_name[IFNAMSIZ];
+ const char *name;
void (*rx_hook)(struct netpoll *, int, char *, int);
u32 local_ip, remote_ip;
u16 local_port, remote_port;
- unsigned char local_mac[6], remote_mac[6];
+ u8 local_mac[ETH_ALEN], remote_mac[ETH_ALEN];
};
struct netpoll_info {
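For reference, the constants substituted here are IFNAMSIZ (16) and
ETH_ALEN (6), so the structure layout is unchanged; only the intent
becomes explicit:

#include <linux/if.h>		/* IFNAMSIZ == 16 */
#include <linux/if_ether.h>	/* ETH_ALEN  == 6  */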
* Re: [PATCH 1/5] netpoll: use sk_buff_head for txq
2006-10-23 19:02 ` [PATCH 1/5] netpoll: use sk_buff_head for txq Stephen Hemminger
2006-10-23 19:04 ` [PATCH 5/5] netpoll: interface cleanup Stephen Hemminger
@ 2006-10-24 6:03 ` David Miller
2006-10-24 14:51 ` Stephen Hemminger
1 sibling, 1 reply; 37+ messages in thread
From: David Miller @ 2006-10-24 6:03 UTC (permalink / raw)
To: shemminger; +Cc: netdev, linux-kernel
From: Stephen Hemminger <shemminger@osdl.org>
Date: Mon, 23 Oct 2006 12:02:53 -0700
> + spin_lock_irqsave(&netpoll_txq.lock, flags);
> + for (skb = (struct sk_buff *)netpoll_txq.next;
> + skb != (struct sk_buff *)&netpoll_txq; skb = next) {
> + next = skb->next;
> + if (skb->dev == dev) {
> + skb_unlink(skb, &netpoll_txq);
> + kfree_skb(skb);
> + }
> }
> + spin_unlock_irqrestore(&netpoll_txq.lock, flags);
IRQs are disabled; I think we can't call kfree_skb() in such a
context.
That's why zap_completion_queue() has all of these funny
skb->destructor checks and such, all of this stuff potentially runs in
IRQ context.
* Re: [PATCH 1/5] netpoll: use sk_buff_head for txq
2006-10-24 6:03 ` [PATCH 1/5] netpoll: use sk_buff_head for txq David Miller
@ 2006-10-24 14:51 ` Stephen Hemminger
0 siblings, 0 replies; 37+ messages in thread
From: Stephen Hemminger @ 2006-10-24 14:51 UTC (permalink / raw)
To: David Miller; +Cc: netdev, linux-kernel
On Mon, 23 Oct 2006 23:03:50 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> From: Stephen Hemminger <shemminger@osdl.org>
> Date: Mon, 23 Oct 2006 12:02:53 -0700
>
> > + spin_lock_irqsave(&netpoll_txq.lock, flags);
> > + for (skb = (struct sk_buff *)netpoll_txq.next;
> > + skb != (struct sk_buff *)&netpoll_txq; skb = next) {
> > + next = skb->next;
> > + if (skb->dev == dev) {
> > + skb_unlink(skb, &netpoll_txq);
> > + kfree_skb(skb);
> > + }
> > }
> > + spin_unlock_irqrestore(&netpoll_txq.lock, flags);
>
> IRQs are disabled; I think we can't call kfree_skb() in such a
> context.
It is safe since the skbs only come from this code (no destructors).
>
> That's why zap_completion_queue() has all of these funny
> skb->destructor checks and such, all of this stuff potentially runs in
> IRQ context.
It should use __kfree_skb in the purge routine (like other places).
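Putting the two points together, the purge routine from patch 1/5 with the
fix discussed here would look roughly like this (untested sketch, combining
the posted queue_purge() with __kfree_skb()):

static void queue_purge(struct net_device *dev)
{
	struct sk_buff *skb, *next;
	unsigned long flags;

	spin_lock_irqsave(&netpoll_txq.lock, flags);
	for (skb = (struct sk_buff *)netpoll_txq.next;
	     skb != (struct sk_buff *)&netpoll_txq; skb = next) {
		next = skb->next;
		if (skb->dev == dev) {
			skb_unlink(skb, &netpoll_txq);
			/* netpoll's own skbs have no destructors, so
			 * freeing directly with IRQs off is safe */
			__kfree_skb(skb);
		}
	}
	spin_unlock_irqrestore(&netpoll_txq.lock, flags);
}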
Thread overview: 37+ messages
2006-10-19 17:15 [PATCH 0/3] netpoll/netconsole fixes Stephen Hemminger
2006-10-19 17:15 ` [PATCH 1/3] netpoll: initialize skb for UDP Stephen Hemminger
2006-10-20 6:58 ` David Miller
2006-10-19 17:15 ` [PATCH 2/3] netpoll: rework skb transmit queue Stephen Hemminger
2006-10-20 7:15 ` David Miller
2006-10-20 15:18 ` Stephen Hemminger
2006-10-20 19:24 ` David Miller
2006-10-20 19:25 ` Stephen Hemminger
2006-10-20 19:52 ` David Miller
2006-10-20 20:14 ` Stephen Hemminger
2006-10-20 20:25 ` Stephen Hemminger
2006-10-21 5:00 ` Dave Jones
2006-10-21 6:38 ` David Miller
2006-10-20 15:40 ` Stephen Hemminger
2006-10-20 19:27 ` David Miller
2006-10-20 19:31 ` Stephen Hemminger
2006-10-20 20:42 ` David Miller
2006-10-20 20:48 ` Stephen Hemminger
2006-10-20 21:01 ` Andi Kleen
2006-10-20 21:08 ` David Miller
2006-10-20 21:16 ` Andi Kleen
2006-10-20 21:41 ` Stephen Hemminger
2006-10-20 21:01 ` David Miller
2006-10-20 22:30 ` [PATCH 1/3] netpoll: use sk_buff_head for txq Stephen Hemminger
2006-10-23 3:42 ` David Miller
[not found] ` <20061023115337.1f636ffb@dxpl.pdx.osdl.net>
2006-10-23 19:02 ` [PATCH 4/5] netpoll: move drop hook inline Stephen Hemminger
2006-10-23 19:03 ` [PATCH 3/5] netpoll: cleanup transmit retry logic Stephen Hemminger
2006-10-23 19:02 ` [PATCH 1/5] netpoll: use sk_buff_head for txq Stephen Hemminger
2006-10-23 19:04 ` [PATCH 5/5] netpoll: interface cleanup Stephen Hemminger
2006-10-24 6:03 ` [PATCH 1/5] netpoll: use sk_buff_head for txq David Miller
2006-10-24 14:51 ` Stephen Hemminger
[not found] ` <20061023115111.0d69846e@dxpl.pdx.osdl.net>
2006-10-23 19:02 ` [PATCH 2/5] netpoll: cleanup queued transmit Stephen Hemminger
2006-10-20 22:32 ` [PATCH 2/3] netpoll: use device xmit directly Stephen Hemminger
2006-10-20 22:35 ` [PATCH 3/3] netpoll: retry logic cleanup Stephen Hemminger
2006-10-19 17:15 ` [PATCH 3/3] netpoll: use skb_buff_head for skb cache Stephen Hemminger
2006-10-20 6:57 ` [PATCH 0/3] netpoll/netconsole fixes Andrew Morton
2006-10-20 7:16 ` David Miller