From: Tino Reichardt <milky-kernel@mcmilk.de>
To: netdev@vger.kernel.org,
Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
Subject: [PATCH 06/07] 3c59x: Support for byte queue limits
Date: Mon, 14 Oct 2013 20:26:22 +0200
Message-ID: <1381775183-24866-7-git-send-email-milky-kernel@mcmilk.de>
In-Reply-To: <1381775183-24866-1-git-send-email-milky-kernel@mcmilk.de>
Changes to 3c59x to use byte queue limits.
The checkpatch.pl script will raise this warning:
"WARNING: line over 80 characters" - but I don't want to reformat the
whole driver ;)
This patch has not been tested on real hardware yet, but it compiles
cleanly and should work.
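
For reference, the generic BQL pattern the patch follows is roughly the
sketch below (the my_* names are made up, this is not 3c59x code): bytes
are accounted with netdev_sent_queue() when a packet is handed to the
hardware, given back with netdev_completed_queue() from the tx-completion
path, and the counters are cleared with netdev_reset_queue() whenever the
tx queue is (re)started or torn down.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of the BQL call sites; my_* are placeholder driver hooks. */

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the skb to the hardware here ... */

	netdev_sent_queue(dev, len);	/* account bytes queued to the NIC */
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... walk the finished tx descriptors, freeing each skb and
	 * counting it: pkts_compl++; bytes_compl += skb->len; ...
	 */

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void my_up(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* drop stale BQL state before restarting tx */
	netif_start_queue(dev);
}

In 3c59x those three points are vortex_up()/vortex_down(), the two
start_xmit routines and the two interrupt handlers.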
Signed-off-by: Tino Reichardt <milky-kernel@mcmilk.de>
---
drivers/net/ethernet/3com/3c59x.c | 37 +++++++++++++++++++++++++++++++------
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ad5272b..fd03165 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1726,6 +1726,8 @@ vortex_up(struct net_device *dev)
iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
+
+ netdev_reset_queue(dev);
netif_start_queue (dev);
err_out:
return err;
@@ -2080,10 +2082,13 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&vp->window_lock);
vp->tx_skb = skb;
iowrite16(StartDMADown, ioaddr + EL3_CMD);
+ netdev_sent_queue(dev, len);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
- iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ int len = skb->len;
+ iowrite32_rep(ioaddr + TX_FIFO, skb->data, (len + 3) >> 2);
+ netdev_sent_queue(dev, len);
dev_kfree_skb (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
@@ -2094,7 +2099,6 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
-
/* Clear the Tx status stack. */
{
int tx_status;
@@ -2164,12 +2168,14 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
+ netdev_sent_queue(dev, skb->len);
} else {
- int i;
+ int i, len;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
+ len = skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2180,16 +2186,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
(void *)skb_frag_address(frag),
skb_frag_size(frag), PCI_DMA_TODEVICE));
- if (i == skb_shinfo(skb)->nr_frags-1)
+ if (i == skb_shinfo(skb)->nr_frags - 1) {
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
- else
+ len += skb_frag_size(frag);
+ } else {
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
+ len += skb_frag_size(frag);
+ }
}
+ netdev_sent_queue(dev, len);
}
#else
vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+ netdev_sent_queue(dev, skb->len);
#endif
spin_lock_irqsave(&vp->lock, flags);
@@ -2234,6 +2245,7 @@ vortex_interrupt(int irq, void *dev_id)
int status;
int work_done = max_interrupt_work;
int handled = 0;
+ unsigned bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
@@ -2279,8 +2291,12 @@ vortex_interrupt(int irq, void *dev_id)
if (status & DMADone) {
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ int len;
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
- pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ len = (vp->tx_skb->len + 3) & ~3;
+ pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, len, PCI_DMA_TODEVICE);
+ bytes_compl += len;
+ pkts_compl++;
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
@@ -2327,6 +2343,8 @@ vortex_interrupt(int irq, void *dev_id)
spin_unlock(&vp->window_lock);
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
if (vortex_debug > 4)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
@@ -2348,6 +2366,7 @@ boomerang_interrupt(int irq, void *dev_id)
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
+ unsigned bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
@@ -2420,6 +2439,8 @@ boomerang_interrupt(int irq, void *dev_id)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
+ bytes_compl += skb->len;
+ pkts_compl++;
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
@@ -2467,6 +2488,9 @@ boomerang_interrupt(int irq, void *dev_id)
handler_exit:
vp->handling_irq = 0;
spin_unlock(&vp->lock);
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
return IRQ_HANDLED;
}
@@ -2660,6 +2684,7 @@ vortex_down(struct net_device *dev, int final_down)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
+ netdev_reset_queue(dev);
netif_stop_queue (dev);
del_timer_sync(&vp->rx_oom_timer);
--
1.8.4