netdev.vger.kernel.org archive mirror
* [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled
@ 2009-08-21 13:33 DDD
  2009-08-21 13:34 ` [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit() DDD
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: DDD @ 2009-08-21 13:33 UTC (permalink / raw)
  To: Matt Mackall, David Miller, Nicolas Pitre; +Cc: lkml, netdev, Jason Wessel

Add a WARN_ONCE() in netpoll_send_skb() that fires when an ndo_start_xmit()
callback returns with interrupts enabled, because the NETPOLL API requires
that interrupts remain disabled in netpoll_send_skb().
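
For reference, here is a simplified sketch (not the exact mainline code; the
function name, MAX_RETRIES and the surrounding queue locking are placeholders)
of where the check sits: interrupts are saved and disabled around the whole
retry loop, so a callback that re-enables them breaks the invariant.

static void netpoll_xmit_sketch(struct netpoll *np, struct sk_buff *skb)
{
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned long flags;
	int status = NETDEV_TX_BUSY;
	int tries;

	local_irq_save(flags);
	for (tries = MAX_RETRIES; tries > 0 && status != NETDEV_TX_OK; --tries) {
		/* the driver callback must run with IRQs disabled */
		status = ops->ndo_start_xmit(skb, dev);
		if (status != NETDEV_TX_OK)
			udelay(USEC_PER_POLL);
	}

	/* catch drivers that re-enabled IRQs behind netpoll's back */
	WARN_ONCE(!irqs_disabled(),
		"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
		dev->name, ops->ndo_start_xmit);

	local_irq_restore(flags);
}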

Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
---
 net/core/netpoll.c |    5 +++++
 1 files changed, 5 insertions(+), 0 deletions(-)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index df30feb..1b76eb1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -319,6 +319,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 			udelay(USEC_PER_POLL);
 		}
+
+		WARN_ONCE(!irqs_disabled(),
+			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+			dev->name, ops->ndo_start_xmit);
+
 		local_irq_restore(flags);
 	}
 
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit()
  2009-08-21 13:33 [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled DDD
@ 2009-08-21 13:34 ` DDD
  2009-08-21 15:26   ` Matt Mackall
  2009-08-24  2:50   ` David Miller
  2009-08-21 13:40 ` [RFC][PATCH] smc91x: let smc91x work well with netpoll DDD
  2009-08-24  2:50 ` [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled David Miller
  2 siblings, 2 replies; 9+ messages in thread
From: DDD @ 2009-08-21 13:34 UTC (permalink / raw)
  To: Matt Mackall, David Miller, Nicolas Pitre; +Cc: lkml, netdev, Jason Wessel

The NETPOLL API requires that interrupts remain disabled in
netpoll_send_skb(). Using the "A" function set below in the NETPOLL API
callbacks re-enables interrupts unconditionally and can lead to kernel
instability.

The solution is to use the "B" function set instead, which restores the
previously saved IRQ state and therefore keeps interrupts disabled while
in netpoll_send_skb().

The "A" function set:
local_irq_disable()/local_irq_enable()
spin_lock_irq()/spin_unlock_irq()
spin_trylock_irq()/spin_unlock_irq()

The "B" function set:
local_irq_save()/local_irq_restore()
spin_lock_irqsave()/spin_unlock_irqrestore()
spin_trylock_irqsave()/spin_unlock_irqrestore()
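
As an illustration of the "B" set, a driver's ndo_start_xmit() would look
roughly like this (a sketch only: struct foo_priv and the foo_*() helpers are
made-up names, not code from any of the drivers touched here):

struct foo_priv {			/* made-up private state */
	spinlock_t lock;
	/* ... TX ring bookkeeping ... */
};

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* save the caller's IRQ state instead of spin_lock_irq(), so when
	 * netpoll calls us with IRQs already disabled they stay disabled */
	spin_lock_irqsave(&priv->lock, flags);

	foo_queue_skb(priv, skb);		/* made-up hardware hand-off */
	if (foo_tx_ring_full(priv))		/* made-up ring-full check */
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}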

Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
---
 drivers/net/fec_mpc52xx.c    |    5 +++--
 drivers/net/ixp2000/ixpdev.c |    5 +++--
 drivers/net/macb.c           |    7 ++++---
 drivers/net/mlx4/en_tx.c     |    5 +++--
 4 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc78633..c40113f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
+	unsigned long flags;
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 	dev->trans_start = jiffies;
 
 	bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 2a0174b..92fb823 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ixpdev_priv *ip = netdev_priv(dev);
 	struct ixpdev_tx_desc *desc;
 	int entry;
+	unsigned long flags;
 
 	if (unlikely(skb->len > PAGE_SIZE)) {
 		/* @@@ Count drops.  */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	ip->tx_queue_entries++;
 	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
 		netif_stop_queue(dev);
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	return 0;
 }
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 5b5c253..e3601cf 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t mapping;
 	unsigned int len, entry;
 	u32 ctrl;
+	unsigned long flags;
 
 #ifdef DEBUG
 	int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(bp) < 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		dev_err(&bp->pdev->dev,
 			"BUG! Tx Ring full when queue awake!\n");
 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5a88b3f..0077d37 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -435,6 +435,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 
 static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 {
+	unsigned long flags;
 	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
 
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irq(&ring->comp_lock)) {
+		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irq(&ring->comp_lock);
+			spin_unlock_irqrestore(&ring->comp_lock, flags);
 		}
 }
 
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [RFC][PATCH] smc91x: let smc91x work well with netpoll
  2009-08-21 13:33 [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled DDD
  2009-08-21 13:34 ` [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit() DDD
@ 2009-08-21 13:40 ` DDD
  2009-08-21 17:13   ` Nicolas Pitre
  2009-08-24  2:50 ` [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled David Miller
  2 siblings, 1 reply; 9+ messages in thread
From: DDD @ 2009-08-21 13:40 UTC (permalink / raw)
  To: Nicolas Pitre, Matt Mackall, David Miller; +Cc: lkml, netdev, Jason Wessel

This patch changes quite a lot of code and I did not have an environment to
test it, so it is an unverified patch. That's why I separated it from my
previous patches and am sending it on its own as an RFC.


Hi Nicolas,
Since you have been the maintainer of the smc91x driver since 2004, could
you comment on this patch? Your input is greatly appreciated. :-)


Thank you very much,
Dongdong


Patch Note:
@@ -520,21 +522,21
-       local_irq_disable();                                        \
+       local_irq_save(flags);                                      \
        __ret = spin_trylock(lock);                                 \
        if (!__ret)                                                 \
-               local_irq_enable();                                 \
+               local_irq_restore(flags);                           \

Here, for "__ret = spin_trylock(lock)", I didn't use
spin_trylock_irqsave() to replace spin_trylock(), because
the current irq state have got by local_irq_save(flags).
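
For comparison, spin_trylock_irqsave() is defined in the kernel as roughly
the same open-coded sequence (simplified here, ignoring the lockdep/debug
wrappers), so either spelling keeps the saved IRQ state correct:

#define spin_trylock_irqsave(lock, flags)			\
({								\
	local_irq_save(flags);					\
	spin_trylock(lock) ?					\
	1 : ({ local_irq_restore(flags); 0; });			\
})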



---
 drivers/net/smc91x.c |   40 ++++++++++++++++++++++------------------
 1 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1c70e99..7cabea1 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_enable_flags;					\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);			\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask |= (x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);		\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_disable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask &= ~(x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);		\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void  smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)					\
+#define smc_special_trylock(lock, flags)				\
 ({									\
 	int __ret;							\
-	local_irq_disable();						\
+	local_irq_save(flags);						\
 	__ret = spin_trylock(lock);					\
 	if (!__ret)							\
-		local_irq_enable();					\
+		local_irq_restore(flags);				\
 	__ret;								\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)		spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags) 	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)   	do { } while (0)
+#define smc_special_unlock(lock, flags)	do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
    	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
    	if (!poll_count) {
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit()
  2009-08-21 13:34 ` [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit() DDD
@ 2009-08-21 15:26   ` Matt Mackall
  2009-08-24  2:50   ` David Miller
  1 sibling, 0 replies; 9+ messages in thread
From: Matt Mackall @ 2009-08-21 15:26 UTC (permalink / raw)
  To: DDD; +Cc: David Miller, Nicolas Pitre, lkml, netdev, Jason Wessel

On Fri, 2009-08-21 at 21:34 +0800, DDD wrote:
> The NETPOLL API requires that interrupts remain disabled in
> netpoll_send_skb(). Using the "A" function set below in the NETPOLL API
> callbacks re-enables interrupts unconditionally and can lead to kernel
> instability.
> 
> The solution is to use the "B" function set instead, which restores the
> previously saved IRQ state and therefore keeps interrupts disabled while
> in netpoll_send_skb().
> 
> The "A" function set:
> local_irq_disable()/local_irq_enable()
> spin_lock_irq()/spin_unlock_irq()
> spin_trylock_irq()/spin_unlock_irq()
> 
> The "B" function set:
> local_irq_save()/local_irq_restore()
> spin_lock_irqsave()/spin_unlock_irqrestore()
> spin_trylock_irqsave()/spin_unlock_irqrestore()
> 
> Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>

Both of these look good to me, thanks.

Acked-by: Matt Mackall <mpm@selenic.com>

-- 
http://selenic.com : development and support for Mercurial and Linux

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [RFC][PATCH] smc91x: let smc91x work well with netpoll
  2009-08-21 13:40 ` [RFC][PATCH] smc91x: let smc91x work well with netpoll DDD
@ 2009-08-21 17:13   ` Nicolas Pitre
  2009-08-24  3:42     ` [PATCH] [net] smc91x: let smc91x work well under netpoll DDD
  0 siblings, 1 reply; 9+ messages in thread
From: Nicolas Pitre @ 2009-08-21 17:13 UTC (permalink / raw)
  To: DDD; +Cc: Matt Mackall, David Miller, lkml, netdev, Jason Wessel

On Fri, 21 Aug 2009, DDD wrote:

> This patch changes quite a lot of code and I did not have an environment to
> test it, so it is an unverified patch. That's why I separated it from my
> previous patches and am sending it on its own as an RFC.
> 
> 
> Hi Nicolas,
> Since you have been the maintainer of the smc91x driver since 2004, could
> you comment on this patch? Your input is greatly appreciated. :-)

Looks fine to me.  The most significant changes affect usage of this
driver on an SMP system.  I'm afraid such systems might be hard to find now
(this network chip is already a strange piece of hardware, and I know of
only one SMP platform using it), so the best I can do is review the patch
itself.

I have now done so, and you can add my:

Acked-by: Nicolas Pitre <nico@cam.org>


> Thank you very much,
> Dongdong
> 
> 
> Patch Note:
> @@ -520,21 +522,21
> -       local_irq_disable();                                        \
> +       local_irq_save(flags);                                      \
>         __ret = spin_trylock(lock);                                 \
>         if (!__ret)                                                 \
> -               local_irq_enable();                                 \
> +               local_irq_restore(flags);                           \
> 
> Here, for "__ret = spin_trylock(lock)", I did not replace spin_trylock()
> with spin_trylock_irqsave(), because the current IRQ state has already
> been saved by local_irq_save(flags) just above.
> 
> 
> 
> ---
>  drivers/net/smc91x.c |   40 ++++++++++++++++++++++------------------
>  1 files changed, 22 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
> index 1c70e99..7cabea1 100644
> --- a/drivers/net/smc91x.c
> +++ b/drivers/net/smc91x.c
> @@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
>  /* this enables an interrupt in the interrupt mask register */
>  #define SMC_ENABLE_INT(lp, x) do {					\
>  	unsigned char mask;						\
> -	spin_lock_irq(&lp->lock);					\
> +	unsigned long smc_enable_flags;					\
> +	spin_lock_irqsave(&lp->lock, smc_enable_flags);			\
>  	mask = SMC_GET_INT_MASK(lp);					\
>  	mask |= (x);							\
>  	SMC_SET_INT_MASK(lp, mask);					\
> -	spin_unlock_irq(&lp->lock);					\
> +	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);		\
>  } while (0)
>  
>  /* this disables an interrupt from the interrupt mask register */
>  #define SMC_DISABLE_INT(lp, x) do {					\
>  	unsigned char mask;						\
> -	spin_lock_irq(&lp->lock);					\
> +	unsigned long smc_disable_flags;				\
> +	spin_lock_irqsave(&lp->lock, smc_disable_flags);		\
>  	mask = SMC_GET_INT_MASK(lp);					\
>  	mask &= ~(x);							\
>  	SMC_SET_INT_MASK(lp, mask);					\
> -	spin_unlock_irq(&lp->lock);					\
> +	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);		\
>  } while (0)
>  
>  /*
> @@ -520,21 +522,21 @@ static inline void  smc_rcv(struct net_device *dev)
>   * any other concurrent access and C would always interrupt B. But life
>   * isn't that easy in a SMP world...
>   */
> -#define smc_special_trylock(lock)					\
> +#define smc_special_trylock(lock, flags)				\
>  ({									\
>  	int __ret;							\
> -	local_irq_disable();						\
> +	local_irq_save(flags);						\
>  	__ret = spin_trylock(lock);					\
>  	if (!__ret)							\
> -		local_irq_enable();					\
> +		local_irq_restore(flags);				\
>  	__ret;								\
>  })
> -#define smc_special_lock(lock)		spin_lock_irq(lock)
> -#define smc_special_unlock(lock)	spin_unlock_irq(lock)
> +#define smc_special_lock(lock, flags)		spin_lock_irqsave(lock, flags)
> +#define smc_special_unlock(lock, flags) 	spin_unlock_irqrestore(lock, flags)
>  #else
> -#define smc_special_trylock(lock)	(1)
> -#define smc_special_lock(lock)		do { } while (0)
> -#define smc_special_unlock(lock)	do { } while (0)
> +#define smc_special_trylock(lock, flags)	(1)
> +#define smc_special_lock(lock, flags)   	do { } while (0)
> +#define smc_special_unlock(lock, flags)	do { } while (0)
>  #endif
>  
>  /*
> @@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
>  	struct sk_buff *skb;
>  	unsigned int packet_no, len;
>  	unsigned char *buf;
> +	unsigned long flags;
>  
>  	DBG(3, "%s: %s\n", dev->name, __func__);
>  
> -	if (!smc_special_trylock(&lp->lock)) {
> +	if (!smc_special_trylock(&lp->lock, flags)) {
>  		netif_stop_queue(dev);
>  		tasklet_schedule(&lp->tx_task);
>  		return;
> @@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
>  
>  	skb = lp->pending_tx_skb;
>  	if (unlikely(!skb)) {
> -		smc_special_unlock(&lp->lock);
> +		smc_special_unlock(&lp->lock, flags);
>  		return;
>  	}
>  	lp->pending_tx_skb = NULL;
> @@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
>  		printk("%s: Memory allocation failed.\n", dev->name);
>  		dev->stats.tx_errors++;
>  		dev->stats.tx_fifo_errors++;
> -		smc_special_unlock(&lp->lock);
> +		smc_special_unlock(&lp->lock, flags);
>  		goto done;
>  	}
>  
> @@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
>  
>  	/* queue the packet for TX */
>  	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
> -	smc_special_unlock(&lp->lock);
> +	smc_special_unlock(&lp->lock, flags);
>  
>  	dev->trans_start = jiffies;
>  	dev->stats.tx_packets++;
> @@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
>  	struct smc_local *lp = netdev_priv(dev);
>  	void __iomem *ioaddr = lp->base;
>  	unsigned int numPages, poll_count, status;
> +	unsigned long flags;
>  
>  	DBG(3, "%s: %s\n", dev->name, __func__);
>  
> @@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
>  		return 0;
>  	}
>  
> -	smc_special_lock(&lp->lock);
> +	smc_special_lock(&lp->lock, flags);
>  
>  	/* now, try to allocate the memory */
>  	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
> @@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
>  		}
>     	} while (--poll_count);
>  
> -	smc_special_unlock(&lp->lock);
> +	smc_special_unlock(&lp->lock, flags);
>  
>  	lp->pending_tx_skb = skb;
>     	if (!poll_count) {
> -- 
> 1.6.0.4
> 
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled
  2009-08-21 13:33 [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled DDD
  2009-08-21 13:34 ` [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit() DDD
  2009-08-21 13:40 ` [RFC][PATCH] smc91x: let smc91x work well with netpoll DDD
@ 2009-08-24  2:50 ` David Miller
  2 siblings, 0 replies; 9+ messages in thread
From: David Miller @ 2009-08-24  2:50 UTC (permalink / raw)
  To: Dongdong.deng; +Cc: mpm, nico, linux-kernel, netdev, jason.wessel

From: DDD <Dongdong.deng@windriver.com>
Date: Fri, 21 Aug 2009 21:33:36 +0800

> Add a WARN_ONCE() in netpoll_send_skb() that fires when an ndo_start_xmit()
> callback returns with interrupts enabled, because the NETPOLL API requires
> that interrupts remain disabled in netpoll_send_skb().
> 
> Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>

Applied.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit()
  2009-08-21 13:34 ` [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit() DDD
  2009-08-21 15:26   ` Matt Mackall
@ 2009-08-24  2:50   ` David Miller
  1 sibling, 0 replies; 9+ messages in thread
From: David Miller @ 2009-08-24  2:50 UTC (permalink / raw)
  To: Dongdong.deng; +Cc: mpm, nico, linux-kernel, netdev, jason.wessel

From: DDD <Dongdong.deng@windriver.com>
Date: Fri, 21 Aug 2009 21:34:53 +0800

> The NETPOLL API requires that interrupts remain disabled in
> netpoll_send_skb(). Using the "A" function set below in the NETPOLL API
> callbacks re-enables interrupts unconditionally and can lead to kernel
> instability.
> 
> The solution is to use the "B" function set instead, which restores the
> previously saved IRQ state and therefore keeps interrupts disabled while
> in netpoll_send_skb().
> 
> The "A" function set:
> local_irq_disable()/local_irq_enable()
> spin_lock_irq()/spin_unlock_irq()
> spin_trylock_irq()/spin_unlock_irq()
> 
> The "B" function set:
> local_irq_save()/local_irq_restore()
> spin_lock_irqsave()/spin_unlock_irqrestore()
> spin_trylock_irqsave()/spin_unlock_irqrestore()
> 
> Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>

Applied.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH] [net] smc91x: let smc91x work well under netpoll
  2009-08-21 17:13   ` Nicolas Pitre
@ 2009-08-24  3:42     ` DDD
  2009-08-24  5:59       ` David Miller
  0 siblings, 1 reply; 9+ messages in thread
From: DDD @ 2009-08-24  3:42 UTC (permalink / raw)
  To: Nicolas Pitre, David Miller; +Cc: Matt Mackall, lkml, netdev, Jason Wessel

The NETPOLL API requires that interrupts remain disabled in its callbacks.

Use the *_irq_save()/*_irq_restore() variants instead of
*_irq_disable()/*_irq_enable() in the NETPOLL callbacks of smc91x, so that
the driver does not re-enable interrupts that are already disabled, and
kgdboe/netconsole work properly over smc91x.
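
The reason the save/restore variants are safe here is that they nest: the
inner restore puts the IRQ flag back to whatever it was at the inner save,
instead of unconditionally enabling interrupts.  A minimal illustration
(irq_nesting_demo() is just a made-up name for this sketch):

static void irq_nesting_demo(void)
{
	unsigned long outer_flags, inner_flags;

	local_irq_save(outer_flags);	/* e.g. netpoll: IRQs now off */
	local_irq_save(inner_flags);	/* e.g. driver: IRQs stay off */
	local_irq_restore(inner_flags);	/* restores the saved "off" state */
	local_irq_restore(outer_flags);	/* back to whatever the caller had */
}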

Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
Acked-by: Nicolas Pitre <nico@cam.org>
---
 drivers/net/smc91x.c |   40 ++++++++++++++++++++++------------------
 1 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1c70e99..9da1fa1 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_enable_flags;					\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);			\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask |= (x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);		\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_disable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask &= ~(x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);		\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void  smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)					\
+#define smc_special_trylock(lock, flags)				\
 ({									\
 	int __ret;							\
-	local_irq_disable();						\
+	local_irq_save(flags);						\
 	__ret = spin_trylock(lock);					\
 	if (!__ret)							\
-		local_irq_enable();					\
+		local_irq_restore(flags);				\
 	__ret;								\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)		spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags) 	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)   	do { } while (0)
+#define smc_special_unlock(lock, flags)	do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
    	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
    	if (!poll_count) {
-- 
1.6.0.4



^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH] [net] smc91x: let smc91x work well under netpoll
  2009-08-24  3:42     ` [PATCH] [net] smc91x: let smc91x work well under netpoll DDD
@ 2009-08-24  5:59       ` David Miller
  0 siblings, 0 replies; 9+ messages in thread
From: David Miller @ 2009-08-24  5:59 UTC (permalink / raw)
  To: Dongdong.deng; +Cc: nico, mpm, linux-kernel, netdev, jason.wessel

From: DDD <Dongdong.deng@windriver.com>
Date: Mon, 24 Aug 2009 11:42:31 +0800

> The NETPOLL API requires that interrupts remain disabled in its callbacks.
> 
> Use the *_irq_save()/*_irq_restore() variants instead of
> *_irq_disable()/*_irq_enable() in the NETPOLL callbacks of smc91x, so that
> the driver does not re-enable interrupts that are already disabled, and
> kgdboe/netconsole work properly over smc91x.
> 
> Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
> Acked-by: Nicolas Pitre <nico@cam.org>

Applied, thanks.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2009-08-24  5:59 UTC | newest]

Thread overview: 9+ messages
2009-08-21 13:33 [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled DDD
2009-08-21 13:34 ` [PATCH 2/2] drivers/net: fixed drivers that support netpoll use ndo_start_xmit() DDD
2009-08-21 15:26   ` Matt Mackall
2009-08-24  2:50   ` David Miller
2009-08-21 13:40 ` [RFC][PATCH] smc91x: let smc91x work well with netpoll DDD
2009-08-21 17:13   ` Nicolas Pitre
2009-08-24  3:42     ` [PATCH] [net] smc91x: let smc91x work well under netpoll DDD
2009-08-24  5:59       ` David Miller
2009-08-24  2:50 ` [PATCH 1/2] netpoll: warning for ndo_start_xmit returns with interrupts enabled David Miller
