* [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups
@ 2005-02-18 5:25 Matt Mackall
2005-02-18 5:25 ` [PATCH 1/6] netpoll: shorten carrier detect timeout Matt Mackall
2005-03-23 2:33 ` [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups David S. Miller
0 siblings, 2 replies; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
This patch series against -rc4 fixes up some recursion deadlocks in
netpoll and adds support for fallback to queueing. Various cleanups
along the way.
Holds up under load testing via ipt_LOG on a dual Opteron with tg3.
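For orientation, a minimal sketch of where the transmit path ends up once
the whole series is applied; this is a composite, not code from any single
patch (poll_owner arrives in 4/6, the ->drop hook and netpoll_queue in 5/6,
the xmit_lock_owner test in 6/6):

	/* Sketch only: the shape of netpoll_send_skb() after patches 4-6.
	 * If this CPU is already inside dev->poll() or already holds the
	 * device xmit_lock, transmitting now could deadlock, so fall back
	 * to the ->drop hook (queueing) or just free the skb. */
	static void netpoll_send_skb_sketch(struct netpoll *np, struct sk_buff *skb)
	{
		if (np->poll_owner == __smp_processor_id() ||
		    np->dev->xmit_lock_owner == __smp_processor_id()) {
			if (np->drop)
				np->drop(skb);	/* e.g. netpoll_queue from 5/6 */
			else
				__kfree_skb(skb);
			return;
		}
		/* ... otherwise take xmit_lock and hand the skb to the driver ... */
	}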
* [PATCH 1/6] netpoll: shorten carrier detect timeout
2005-02-18 5:25 [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups Matt Mackall
@ 2005-02-18 5:25 ` Matt Mackall
2005-02-18 5:25 ` [PATCH 2/6] netpoll: filter inlines Matt Mackall
2005-03-23 2:33 ` [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups David S. Miller
1 sibling, 1 reply; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
Shorten carrier detect timeout to 4 seconds.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: tiny-new/net/core/netpoll.c
===================================================================
--- tiny-new.orig/net/core/netpoll.c 2004-11-17 00:05:28.000000000 -0800
+++ tiny-new/net/core/netpoll.c 2004-12-02 11:51:15.775256063 -0800
@@ -584,7 +584,7 @@
rtnl_shunlock();
atleast = jiffies + HZ/10;
- atmost = jiffies + 10*HZ;
+ atmost = jiffies + 4*HZ;
while (!netif_carrier_ok(ndev)) {
if (time_after(jiffies, atmost)) {
printk(KERN_NOTICE
@@ -597,7 +597,7 @@
if (time_before(jiffies, atleast)) {
printk(KERN_NOTICE "%s: carrier detect appears flaky,"
- " waiting 10 seconds\n",
+ " waiting 4 seconds\n",
np->name);
while (time_before(jiffies, atmost))
cond_resched();
* [PATCH 2/6] netpoll: filter inlines
2005-02-18 5:25 ` [PATCH 1/6] netpoll: shorten carrier detect timeout Matt Mackall
@ 2005-02-18 5:25 ` Matt Mackall
2005-02-18 5:25 ` [PATCH 3/6] netpoll: add netpoll point to net_device Matt Mackall
0 siblings, 1 reply; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
Add netpoll rx helpers
Move the rx-path kfree_skb into __netpoll_rx
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rc4bk2/net/core/netpoll.c
===================================================================
--- rc4bk2.orig/net/core/netpoll.c 2005-02-14 10:34:12.000000000 -0800
+++ rc4bk2/net/core/netpoll.c 2005-02-14 17:10:34.000000000 -0800
@@ -368,7 +368,7 @@
netpoll_send_skb(np, send_skb);
}
-int netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb)
{
int proto, len, ulen;
struct iphdr *iph;
@@ -440,12 +440,18 @@
(char *)(uh+1),
ulen - sizeof(struct udphdr));
+ kfree_skb(skb);
return 1;
}
spin_unlock_irqrestore(&rx_list_lock, flags);
out:
- return atomic_read(&trapped);
+ if (atomic_read(&trapped)) {
+ kfree_skb(skb);
+ return 1;
+ }
+
+ return 0;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
Index: rc4bk2/include/linux/netpoll.h
===================================================================
--- rc4bk2.orig/include/linux/netpoll.h 2005-02-14 10:34:08.000000000 -0800
+++ rc4bk2/include/linux/netpoll.h 2005-02-14 17:10:34.000000000 -0800
@@ -30,7 +30,15 @@
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
-int netpoll_rx(struct sk_buff *skb);
+int __netpoll_rx(struct sk_buff *skb);
+#ifdef CONFIG_NETPOLL
+static inline int netpoll_rx(struct sk_buff *skb)
+{
+ return skb->dev->netpoll_rx && __netpoll_rx(skb);
+}
+#else
+#define netpoll_rx(a) 0
+#endif
#endif
Index: rc4bk2/net/core/dev.c
===================================================================
--- rc4bk2.orig/net/core/dev.c 2005-02-14 10:34:12.000000000 -0800
+++ rc4bk2/net/core/dev.c 2005-02-14 17:10:34.000000000 -0800
@@ -1427,13 +1427,10 @@
struct softnet_data *queue;
unsigned long flags;
-#ifdef CONFIG_NETPOLL
- if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
- kfree_skb(skb);
+ /* if netpoll wants it, pretend we never saw it */
+ if (netpoll_rx(skb))
return NET_RX_DROP;
- }
-#endif
-
+
if (!skb->stamp.tv_sec)
net_timestamp(&skb->stamp);
@@ -1629,12 +1626,9 @@
int ret = NET_RX_DROP;
unsigned short type;
-#ifdef CONFIG_NETPOLL
- if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
- kfree_skb(skb);
+ /* if we've gotten here through NAPI, check netpoll */
+ if (skb->dev->poll && netpoll_rx(skb))
return NET_RX_DROP;
- }
-#endif
if (!skb->stamp.tv_sec)
net_timestamp(&skb->stamp);
* [PATCH 3/6] netpoll: add netpoll point to net_device
2005-02-18 5:25 ` [PATCH 2/6] netpoll: filter inlines Matt Mackall
@ 2005-02-18 5:25 ` Matt Mackall
2005-02-18 5:25 ` [PATCH 4/6] netpoll: fix ->poll() locking Matt Mackall
0 siblings, 1 reply; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
Add struct netpoll pointer to struct net_device
Move netpoll rx flags to netpoll struct
Stop traversing rx_list and get np pointer from skb->dev->np
Remove now unneeded rx_list
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rc4/include/linux/netdevice.h
===================================================================
--- rc4.orig/include/linux/netdevice.h 2005-02-17 22:32:12.000000000 -0600
+++ rc4/include/linux/netdevice.h 2005-02-17 22:32:20.000000000 -0600
@@ -41,7 +41,7 @@
struct divert_blk;
struct vlan_group;
struct ethtool_ops;
-
+struct netpoll;
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
@@ -471,7 +471,7 @@
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
int (*accept_fastpath)(struct net_device *, struct dst_entry*);
#ifdef CONFIG_NETPOLL
- int netpoll_rx;
+ struct netpoll *np;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*poll_controller)(struct net_device *dev);
Index: rc4/net/core/netpoll.c
===================================================================
--- rc4.orig/net/core/netpoll.c 2005-02-17 22:32:19.000000000 -0600
+++ rc4/net/core/netpoll.c 2005-02-17 22:39:59.000000000 -0600
@@ -35,9 +35,6 @@
static int nr_skbs;
static struct sk_buff *skbs;
-static DEFINE_SPINLOCK(rx_list_lock);
-static LIST_HEAD(rx_list);
-
static atomic_t trapped;
static DEFINE_SPINLOCK(netpoll_poll_lock);
@@ -84,13 +81,13 @@
queue = &__get_cpu_var(softnet_data);
if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
!list_empty(&queue->poll_list)) {
- np->dev->netpoll_rx |= NETPOLL_RX_DROP;
+ np->rx_flags |= NETPOLL_RX_DROP;
atomic_inc(&trapped);
np->dev->poll(np->dev, &budget);
atomic_dec(&trapped);
- np->dev->netpoll_rx &= ~NETPOLL_RX_DROP;
+ np->rx_flags &= ~NETPOLL_RX_DROP;
}
spin_unlock_irqrestore(&netpoll_poll_lock, flags);
}
@@ -279,18 +276,7 @@
int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
u32 sip, tip;
struct sk_buff *send_skb;
- unsigned long flags;
- struct list_head *p;
- struct netpoll *np = NULL;
-
- spin_lock_irqsave(&rx_list_lock, flags);
- list_for_each(p, &rx_list) {
- np = list_entry(p, struct netpoll, rx_list);
- if ( np->dev == skb->dev )
- break;
- np = NULL;
- }
- spin_unlock_irqrestore(&rx_list_lock, flags);
+ struct netpoll *np = skb->dev->np;
if (!np) return;
@@ -373,10 +359,10 @@
int proto, len, ulen;
struct iphdr *iph;
struct udphdr *uh;
- struct netpoll *np;
- struct list_head *p;
- unsigned long flags;
+ struct netpoll *np = skb->dev->np;
+ if (!np->rx_hook)
+ goto out;
if (skb->dev->type != ARPHRD_ETHER)
goto out;
@@ -420,30 +406,19 @@
goto out;
if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0)
goto out;
+ if (np->local_ip && np->local_ip != ntohl(iph->daddr))
+ goto out;
+ if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
+ goto out;
+ if (np->local_port && np->local_port != ntohs(uh->dest))
+ goto out;
- spin_lock_irqsave(&rx_list_lock, flags);
- list_for_each(p, &rx_list) {
- np = list_entry(p, struct netpoll, rx_list);
- if (np->dev && np->dev != skb->dev)
- continue;
- if (np->local_ip && np->local_ip != ntohl(iph->daddr))
- continue;
- if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
- continue;
- if (np->local_port && np->local_port != ntohs(uh->dest))
- continue;
-
- spin_unlock_irqrestore(&rx_list_lock, flags);
-
- if (np->rx_hook)
- np->rx_hook(np, ntohs(uh->source),
- (char *)(uh+1),
- ulen - sizeof(struct udphdr));
+ np->rx_hook(np, ntohs(uh->source),
+ (char *)(uh+1),
+ ulen - sizeof(struct udphdr));
- kfree_skb(skb);
- return 1;
- }
- spin_unlock_irqrestore(&rx_list_lock, flags);
+ kfree_skb(skb);
+ return 1;
out:
if (atomic_read(&trapped)) {
@@ -574,6 +549,10 @@
np->name, np->dev_name);
return -1;
}
+
+ np->dev = ndev;
+ ndev->np = np;
+
if (!ndev->poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
@@ -639,36 +618,22 @@
np->name, HIPQUAD(np->local_ip));
}
- np->dev = ndev;
-
- if(np->rx_hook) {
- unsigned long flags;
-
- np->dev->netpoll_rx = NETPOLL_RX_ENABLED;
-
- spin_lock_irqsave(&rx_list_lock, flags);
- list_add(&np->rx_list, &rx_list);
- spin_unlock_irqrestore(&rx_list_lock, flags);
- }
+ if(np->rx_hook)
+ np->rx_flags = NETPOLL_RX_ENABLED;
return 0;
+
release:
+ ndev->np = NULL;
+ np->dev = NULL;
dev_put(ndev);
return -1;
}
void netpoll_cleanup(struct netpoll *np)
{
- if (np->rx_hook) {
- unsigned long flags;
-
- spin_lock_irqsave(&rx_list_lock, flags);
- list_del(&np->rx_list);
- spin_unlock_irqrestore(&rx_list_lock, flags);
- }
-
if (np->dev)
- np->dev->netpoll_rx = 0;
+ np->dev->np = NULL;
dev_put(np->dev);
np->dev = NULL;
}
Index: rc4/include/linux/netpoll.h
===================================================================
--- rc4.orig/include/linux/netpoll.h 2005-02-17 22:32:19.000000000 -0600
+++ rc4/include/linux/netpoll.h 2005-02-17 22:39:59.000000000 -0600
@@ -16,11 +16,11 @@
struct netpoll {
struct net_device *dev;
char dev_name[16], *name;
+ int rx_flags;
void (*rx_hook)(struct netpoll *, int, char *, int);
u32 local_ip, remote_ip;
u16 local_port, remote_port;
unsigned char local_mac[6], remote_mac[6];
- struct list_head rx_list;
};
void netpoll_poll(struct netpoll *np);
@@ -35,7 +35,7 @@
#ifdef CONFIG_NETPOLL
static inline int netpoll_rx(struct sk_buff *skb)
{
- return skb->dev->netpoll_rx && __netpoll_rx(skb);
+ return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
}
#else
#define netpoll_rx(a) 0
* [PATCH 6/6] netpoll: handle xmit_lock recursion similarly
2005-02-18 5:25 ` [PATCH 5/6] netpoll: add optional dropping and queueing support Matt Mackall
@ 2005-02-18 5:25 ` Matt Mackall
0 siblings, 0 replies; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
Handle possible recursion on xmit_lock while we're at it.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rc4/net/core/netpoll.c
===================================================================
--- rc4.orig/net/core/netpoll.c 2005-02-17 22:40:05.000000000 -0600
+++ rc4/net/core/netpoll.c 2005-02-17 22:40:07.000000000 -0600
@@ -247,8 +247,9 @@
return;
}
- /* avoid ->poll recursion */
- if(np->poll_owner == __smp_processor_id()) {
+ /* avoid recursion */
+ if(np->poll_owner == __smp_processor_id() ||
+ np->dev->xmit_lock_owner == __smp_processor_id()) {
if (np->drop)
np->drop(skb);
else
* [PATCH 5/6] netpoll: add optional dropping and queueing support
2005-02-18 5:25 ` [PATCH 4/6] netpoll: fix ->poll() locking Matt Mackall
@ 2005-02-18 5:25 ` Matt Mackall
2005-02-18 5:25 ` [PATCH 6/6] netpoll: handle xmit_lock recursion similarly Matt Mackall
0 siblings, 1 reply; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
This adds a callback for packets we can't deliver immediately and a
helper function clients can use to queue such packets for transmission
to the device later, from process context.
Netconsole is modified to use the queueing function for best-effort
delivery.
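For reference, a client opts in by pointing the new ->drop hook at
netpoll_queue; a minimal sketch, where every field except .drop is an
illustrative placeholder rather than something taken from this patch:

	static struct netpoll example_np = {
		.name     = "example",		/* placeholder */
		.dev_name = "eth0",		/* placeholder */
		.drop     = netpoll_queue,	/* queue undeliverable skbs instead of freeing them */
	};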
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rc4/drivers/net/netconsole.c
===================================================================
--- rc4.orig/drivers/net/netconsole.c 2005-02-17 22:39:29.000000000 -0600
+++ rc4/drivers/net/netconsole.c 2005-02-17 22:40:05.000000000 -0600
@@ -60,6 +60,7 @@
.local_port = 6665,
.remote_port = 6666,
.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .drop = netpoll_queue,
};
static int configured = 0;
Index: rc4/net/core/netpoll.c
===================================================================
--- rc4.orig/net/core/netpoll.c 2005-02-17 22:40:02.000000000 -0600
+++ rc4/net/core/netpoll.c 2005-02-17 22:40:05.000000000 -0600
@@ -19,6 +19,7 @@
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
@@ -28,13 +29,18 @@
* message gets out even in extreme OOM situations.
*/
-#define MAX_SKBS 32
#define MAX_UDP_CHUNK 1460
+#define MAX_SKBS 32
+#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;
+static DEFINE_SPINLOCK(queue_lock);
+static int queue_depth;
+static struct sk_buff *queue_head, *queue_tail;
+
static atomic_t trapped;
#define NETPOLL_RX_ENABLED 1
@@ -46,6 +52,50 @@
static void zap_completion_queue(void);
+static void queue_process(void *p)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ while (queue_head) {
+ spin_lock_irqsave(&queue_lock, flags);
+
+ skb = queue_head;
+ queue_head = skb->next;
+ if (skb == queue_tail)
+ queue_head = NULL;
+
+ queue_depth--;
+
+ spin_unlock_irqrestore(&queue_lock, flags);
+
+ dev_queue_xmit(skb);
+ }
+}
+
+static DECLARE_WORK(send_queue, queue_process, NULL);
+
+void netpoll_queue(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ if (queue_depth == MAX_QUEUE_DEPTH) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&queue_lock, flags);
+ if (!queue_head)
+ queue_head = skb;
+ else
+ queue_tail->next = skb;
+ queue_tail = skb;
+ queue_depth++;
+ spin_unlock_irqrestore(&queue_lock, flags);
+
+ schedule_work(&send_queue);
+}
+
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
unsigned short ulen, u32 saddr, u32 daddr)
{
@@ -199,7 +249,10 @@
/* avoid ->poll recursion */
if(np->poll_owner == __smp_processor_id()) {
- __kfree_skb(skb);
+ if (np->drop)
+ np->drop(skb);
+ else
+ __kfree_skb(skb);
return;
}
@@ -275,6 +328,8 @@
memcpy(eth->h_source, np->local_mac, 6);
memcpy(eth->h_dest, np->remote_mac, 6);
+ skb->dev = np->dev;
+
netpoll_send_skb(np, skb);
}
Index: rc4/include/linux/netpoll.h
===================================================================
--- rc4.orig/include/linux/netpoll.h 2005-02-17 22:40:02.000000000 -0600
+++ rc4/include/linux/netpoll.h 2005-02-17 22:40:05.000000000 -0600
@@ -18,6 +18,7 @@
char dev_name[16], *name;
int rx_flags;
void (*rx_hook)(struct netpoll *, int, char *, int);
+ void (*drop)(struct sk_buff *skb);
u32 local_ip, remote_ip;
u16 local_port, remote_port;
unsigned char local_mac[6], remote_mac[6];
@@ -33,6 +34,7 @@
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
+void netpoll_queue(struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline int netpoll_rx(struct sk_buff *skb)
* [PATCH 4/6] netpoll: fix ->poll() locking
2005-02-18 5:25 ` [PATCH 3/6] netpoll: add netpoll point to net_device Matt Mackall
@ 2005-02-18 5:25 ` Matt Mackall
2005-02-18 5:25 ` [PATCH 5/6] netpoll: add optional dropping and queueing support Matt Mackall
0 siblings, 1 reply; 8+ messages in thread
From: Matt Mackall @ 2005-02-18 5:25 UTC
To: David S. Miller; +Cc: netdev, Jeff Moyer
Introduce a per-client poll lock and flag. The lock ensures we never
have more than one caller in dev->poll(). The flag provides recursion
avoidance on UP where the lock disappears.
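Condensed, the resulting poll_napi() check looks roughly like the sketch
below (simplified from the hunk that follows; the real code also sets
NETPOLL_RX_DROP and bumps the trapped counter around ->poll()). The trylock
keeps a second CPU out of ->poll(), while the poll_owner test catches
recursion on the current CPU -- the only guard left on UP, where the
spinlock compiles away:

	static void poll_napi_sketch(struct netpoll *np)
	{
		int budget = 16;

		if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
		    np->poll_owner != __smp_processor_id() &&	/* don't recurse into our own ->poll() */
		    spin_trylock(&np->poll_lock)) {		/* don't race another CPU's ->poll() */
			np->dev->poll(np->dev, &budget);
			spin_unlock(&np->poll_lock);
		}
	}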
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rc4/net/core/netpoll.c
===================================================================
--- rc4.orig/net/core/netpoll.c 2005-02-17 22:39:59.000000000 -0600
+++ rc4/net/core/netpoll.c 2005-02-17 22:40:02.000000000 -0600
@@ -36,7 +36,6 @@
static struct sk_buff *skbs;
static atomic_t trapped;
-static DEFINE_SPINLOCK(netpoll_poll_lock);
#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP 2
@@ -63,8 +62,15 @@
}
/*
- * Check whether delayed processing was scheduled for our current CPU,
- * and then manually invoke NAPI polling to pump data off the card.
+ * Check whether delayed processing was scheduled for our NIC. If so,
+ * we attempt to grab the poll lock and use ->poll() to pump the card.
+ * If this fails, either we've recursed in ->poll() or it's already
+ * running on another CPU.
+ *
+ * Note: we don't mask interrupts with this lock because we're using
+ * trylock here and interrupts are already disabled in the softirq
+ * case. Further, we test the poll_owner to avoid recursion on UP
+ * systems where the lock doesn't exist.
*
* In cases where there is bi-directional communications, reading only
* one message at a time can lead to packets being dropped by the
@@ -74,13 +80,10 @@
static void poll_napi(struct netpoll *np)
{
int budget = 16;
- unsigned long flags;
- struct softnet_data *queue;
- spin_lock_irqsave(&netpoll_poll_lock, flags);
- queue = &__get_cpu_var(softnet_data);
if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
- !list_empty(&queue->poll_list)) {
+ np->poll_owner != __smp_processor_id() &&
+ spin_trylock(&np->poll_lock)) {
np->rx_flags |= NETPOLL_RX_DROP;
atomic_inc(&trapped);
@@ -88,8 +91,8 @@
atomic_dec(&trapped);
np->rx_flags &= ~NETPOLL_RX_DROP;
+ spin_unlock(&np->poll_lock);
}
- spin_unlock_irqrestore(&netpoll_poll_lock, flags);
}
void netpoll_poll(struct netpoll *np)
@@ -194,6 +197,12 @@
return;
}
+ /* avoid ->poll recursion */
+ if(np->poll_owner == __smp_processor_id()) {
+ __kfree_skb(skb);
+ return;
+ }
+
spin_lock(&np->dev->xmit_lock);
np->dev->xmit_lock_owner = smp_processor_id();
@@ -542,6 +551,9 @@
struct net_device *ndev = NULL;
struct in_device *in_dev;
+ np->poll_lock = SPIN_LOCK_UNLOCKED;
+ np->poll_owner = -1;
+
if (np->dev_name)
ndev = dev_get_by_name(np->dev_name);
if (!ndev) {
Index: rc4/include/linux/netpoll.h
===================================================================
--- rc4.orig/include/linux/netpoll.h 2005-02-17 22:39:59.000000000 -0600
+++ rc4/include/linux/netpoll.h 2005-02-17 22:40:02.000000000 -0600
@@ -21,6 +21,8 @@
u32 local_ip, remote_ip;
u16 local_port, remote_port;
unsigned char local_mac[6], remote_mac[6];
+ spinlock_t poll_lock;
+ int poll_owner;
};
void netpoll_poll(struct netpoll *np);
@@ -37,8 +39,27 @@
{
return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
}
+
+static inline void netpoll_poll_lock(struct net_device *dev)
+{
+ if (dev->np) {
+ spin_lock(&dev->np->poll_lock);
+ dev->np->poll_owner = __smp_processor_id();
+ }
+}
+
+static inline void netpoll_poll_unlock(struct net_device *dev)
+{
+ if (dev->np) {
+ spin_unlock(&dev->np->poll_lock);
+ dev->np->poll_owner = -1;
+ }
+}
+
#else
#define netpoll_rx(a) 0
+#define netpoll_poll_lock(a)
+#define netpoll_poll_unlock(a)
#endif
#endif
Index: rc4/net/core/dev.c
===================================================================
--- rc4.orig/net/core/dev.c 2005-02-17 22:39:59.000000000 -0600
+++ rc4/net/core/dev.c 2005-02-17 22:40:02.000000000 -0600
@@ -1775,8 +1775,10 @@
dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
+ netpoll_poll_lock(dev);
if (dev->quota <= 0 || dev->poll(dev, &budget)) {
+ netpoll_poll_unlock(dev);
local_irq_disable();
list_del(&dev->poll_list);
list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1785,6 +1787,7 @@
else
dev->quota = dev->weight;
} else {
+ netpoll_poll_unlock(dev);
dev_put(dev);
local_irq_disable();
}
* Re: [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups
2005-02-18 5:25 [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups Matt Mackall
2005-02-18 5:25 ` [PATCH 1/6] netpoll: shorten carrier detect timeout Matt Mackall
@ 2005-03-23 2:33 ` David S. Miller
1 sibling, 0 replies; 8+ messages in thread
From: David S. Miller @ 2005-03-23 2:33 UTC
To: Matt Mackall; +Cc: netdev, jmoyer
On Thu, 17 Feb 2005 23:25:18 -0600
Matt Mackall <mpm@selenic.com> wrote:
> This patch series against -rc4 fixes up some recursion deadlocks in
> netpoll and adds support for fallback to queueing. Various cleanups
> along the way.
>
> Holds up under load testing via ipt_LOG on a dual Opteron with tg3.
I've applied all 6 patches to my tree. Sorry for taking so long,
Matt.
Thread overview: 8+ messages
2005-02-18 5:25 [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups Matt Mackall
2005-02-18 5:25 ` [PATCH 1/6] netpoll: shorten carrier detect timeout Matt Mackall
2005-02-18 5:25 ` [PATCH 2/6] netpoll: filter inlines Matt Mackall
2005-02-18 5:25 ` [PATCH 3/6] netpoll: add netpoll point to net_device Matt Mackall
2005-02-18 5:25 ` [PATCH 4/6] netpoll: fix ->poll() locking Matt Mackall
2005-02-18 5:25 ` [PATCH 5/6] netpoll: add optional dropping and queueing support Matt Mackall
2005-02-18 5:25 ` [PATCH 6/6] netpoll: handle xmit_lock recursion similarly Matt Mackall
2005-03-23 2:33 ` [PATCH 0/6] netpoll: recursion fixes, queueing, and cleanups David S. Miller