* [PATCH] net: ethernet: mediatek: enhance the locking using the lightweight ones
@ 2016-08-11 9:51 sean.wang
2016-08-11 14:28 ` John Crispin
[not found] ` <1470909060-27976-1-git-send-email-sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
0 siblings, 2 replies; 3+ messages in thread
From: sean.wang @ 2016-08-11 9:51 UTC (permalink / raw)
To: john; +Cc: davem, nbd, netdev, linux-mediatek, keyhaede, Sean Wang
From: Sean Wang <sean.wang@mediatek.com>
Since these critical sections protected by page_lock are all entered
from the user context or bottom half context, they can be replaced
with spin_lock() or spin_lock_bh() instead of spin_lock_irqsave().
Signed-off-by: Sean Wang <sean.wang@mediatek.com>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b57ae3a..3a4726e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -353,18 +353,17 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
const char *macaddr = dev->dev_addr;
- unsigned long flags;
if (ret)
return ret;
- spin_lock_irqsave(&mac->hw->page_lock, flags);
+ spin_lock_bh(&mac->hw->page_lock);
mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
MTK_GDMA_MAC_ADRH(mac->id));
mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
(macaddr[4] << 8) | macaddr[5],
MTK_GDMA_MAC_ADRL(mac->id));
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
+ spin_unlock_bh(&mac->hw->page_lock);
return 0;
}
@@ -748,7 +747,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mtk_eth *eth = mac->hw;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
- unsigned long flags;
bool gso = false;
int tx_num;
@@ -756,14 +754,14 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
* however we have 2 queues running on the same ring so we need to lock
* the ring access
*/
- spin_lock_irqsave(&eth->page_lock, flags);
+ spin_lock(&eth->page_lock);
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
return NETDEV_TX_BUSY;
}
@@ -788,12 +786,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
return NETDEV_TX_OK;
drop:
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1347,16 +1345,15 @@ static int mtk_open(struct net_device *dev)
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
- unsigned long flags;
u32 val;
int i;
/* stop the dma engine */
- spin_lock_irqsave(&eth->page_lock, flags);
+ spin_lock_bh(&eth->page_lock);
val = mtk_r32(eth, glo_cfg);
mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
glo_cfg);
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock_bh(&eth->page_lock);
/* wait for dma stop */
for (i = 0; i < 10; i++) {
--
1.7.9.5
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] net: ethernet: mediatek: enhance the locking using the lightweight ones
2016-08-11 9:51 [PATCH] net: ethernet: mediatek: enhance the locking using the lightweight ones sean.wang
@ 2016-08-11 14:28 ` John Crispin
[not found] ` <1470909060-27976-1-git-send-email-sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
1 sibling, 0 replies; 3+ messages in thread
From: John Crispin @ 2016-08-11 14:28 UTC (permalink / raw)
To: sean.wang; +Cc: keyhaede, netdev, linux-mediatek, davem, Felix Fietkau
On 11/08/2016 11:51, sean.wang@mediatek.com wrote:
> From: Sean Wang <sean.wang@mediatek.com>
>
> Since these critical sections protected by page_lock are all entered
> from the user context or bottom half context, they can be replaced
> with spin_lock() or spin_lock_bh() instead of spin_lock_irqsave().
>
> Signed-off-by: Sean Wang <sean.wang@mediatek.com>
I gave this a quick spin on my board and it worked fine
Acked-by: John Crispin <john@phrozen.org>
> ---
> drivers/net/ethernet/mediatek/mtk_eth_soc.c | 19 ++++++++-----------
> 1 file changed, 8 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> index b57ae3a..3a4726e 100644
> --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> @@ -353,18 +353,17 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
> int ret = eth_mac_addr(dev, p);
> struct mtk_mac *mac = netdev_priv(dev);
> const char *macaddr = dev->dev_addr;
> - unsigned long flags;
>
> if (ret)
> return ret;
>
> - spin_lock_irqsave(&mac->hw->page_lock, flags);
> + spin_lock_bh(&mac->hw->page_lock);
> mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
> MTK_GDMA_MAC_ADRH(mac->id));
> mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
> (macaddr[4] << 8) | macaddr[5],
> MTK_GDMA_MAC_ADRL(mac->id));
> - spin_unlock_irqrestore(&mac->hw->page_lock, flags);
> + spin_unlock_bh(&mac->hw->page_lock);
>
> return 0;
> }
> @@ -748,7 +747,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
> struct mtk_eth *eth = mac->hw;
> struct mtk_tx_ring *ring = &eth->tx_ring;
> struct net_device_stats *stats = &dev->stats;
> - unsigned long flags;
> bool gso = false;
> int tx_num;
>
> @@ -756,14 +754,14 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
> * however we have 2 queues running on the same ring so we need to lock
> * the ring access
> */
> - spin_lock_irqsave(&eth->page_lock, flags);
> + spin_lock(&eth->page_lock);
>
> tx_num = mtk_cal_txd_req(skb);
> if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
> mtk_stop_queue(eth);
> netif_err(eth, tx_queued, dev,
> "Tx Ring full when queue awake!\n");
> - spin_unlock_irqrestore(&eth->page_lock, flags);
> + spin_unlock(&eth->page_lock);
> return NETDEV_TX_BUSY;
> }
>
> @@ -788,12 +786,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
> if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
> mtk_stop_queue(eth);
>
> - spin_unlock_irqrestore(&eth->page_lock, flags);
> + spin_unlock(&eth->page_lock);
>
> return NETDEV_TX_OK;
>
> drop:
> - spin_unlock_irqrestore(&eth->page_lock, flags);
> + spin_unlock(&eth->page_lock);
> stats->tx_dropped++;
> dev_kfree_skb(skb);
> return NETDEV_TX_OK;
> @@ -1347,16 +1345,15 @@ static int mtk_open(struct net_device *dev)
>
> static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
> {
> - unsigned long flags;
> u32 val;
> int i;
>
> /* stop the dma engine */
> - spin_lock_irqsave(&eth->page_lock, flags);
> + spin_lock_bh(&eth->page_lock);
> val = mtk_r32(eth, glo_cfg);
> mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
> glo_cfg);
> - spin_unlock_irqrestore(&eth->page_lock, flags);
> + spin_unlock_bh(&eth->page_lock);
>
> /* wait for dma stop */
> for (i = 0; i < 10; i++) {
>
^ permalink raw reply [flat|nested] 3+ messages in thread
[parent not found: <1470909060-27976-1-git-send-email-sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>]
* Re: [PATCH] net: ethernet: mediatek: enhance the locking using the lightweight ones
[not found] ` <1470909060-27976-1-git-send-email-sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
@ 2016-08-13 3:35 ` David Miller
0 siblings, 0 replies; 3+ messages in thread
From: David Miller @ 2016-08-13 3:35 UTC (permalink / raw)
To: sean.wang-NuS5LvNUpcJWk0Htik3J/w
Cc: netdev-u79uwXL29TY76Z2rM5mHXA, nbd-p3rKhJxN3npAfugRpC6u6w,
linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
keyhaede-Re5JQEeQqe8AvxtiuMwx3w, john-Pj+rj9U5foFAfugRpC6u6w
From: <sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
Date: Thu, 11 Aug 2016 17:51:00 +0800
> From: Sean Wang <sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
>
> Since these critical sections protected by page_lock are all entered
> from the user context or bottom half context, they can be replaced
> with spin_lock() or spin_lock_bh() instead of spin_lock_irqsave().
>
> Signed-off-by: Sean Wang <sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
Applied to net-next, thanks.
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2016-08-13 3:35 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-11 9:51 [PATCH] net: ethernet: mediatek: enhance the locking using the lightweight ones sean.wang
2016-08-11 14:28 ` John Crispin
[not found] ` <1470909060-27976-1-git-send-email-sean.wang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
2016-08-13 3:35 ` David Miller
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).