From: Neil Horman <nhorman@tuxdriver.com>
To: netdev@vger.kernel.org
Cc: Neil Horman <nhorman@tuxdriver.com>,
Guo-Fu Tseng <cooldavid@cooldavid.org>,
"David S. Miller" <davem@davemloft.net>
Subject: [PATCH] jme: Fix DMA unmap warning
Date: Mon, 5 May 2014 14:51:47 -0400
Message-ID: <1399315907-27148-1-git-send-email-nhorman@tuxdriver.com>
The jme driver forgot to check the return status from pci_map_page in its tx
path, causing a DMA API warning on unmap. The fix is straightforward: check
the mapping result, unwind any mappings already made for the frame on
failure, and tell the stack the driver is busy so the frame gets re-queued.
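The core of the change, sketched roughly below (illustrative only, not the
exact driver code), is to test each mapping with pci_dma_mapping_error() and
propagate a failure up to the xmit path:

	dmaaddr = pci_map_page(pdev, page, offset, len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
		return -EINVAL;	/* caller unwinds the mappings made so far */

	...

	/* in the xmit path */
	if (jme_fill_tx_desc(jme, skb, idx))
		return NETDEV_TX_BUSY;	/* ask the stack to re-queue the frame */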
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Guo-Fu Tseng <cooldavid@cooldavid.org>
CC: "David S. Miller" <davem@davemloft.net>
---
drivers/net/ethernet/jme.c | 53 ++++++++++++++++++++++++++++++++++++++++------
1 file changed, 47 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050..6e664d9 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
return idx;
}
-static void
+static int
jme_fill_tx_map(struct pci_dev *pdev,
struct txdesc *txdesc,
struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+ return -EINVAL;
+
pci_dma_sync_single_for_device(pdev,
dmaaddr,
len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
txbi->mapping = dmaaddr;
txbi->len = len;
+ return 0;
}
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ int mask = jme->tx_ring_mask;
+ int j;
+
+ for (j = startidx ; j < endidx ; ++j) {
+ ctxbi = txbi + ((j + 2) & (mask));
+ pci_unmap_page(jme->pdev,
+ ctxbi->mapping,
+ ctxbi->len,
+ PCI_DMA_TODEVICE);
+
+ ctxbi->mapping = 0;
+ ctxbi->len = 0;
+ }
+
+}
+
+static int
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
u32 len;
+ int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
frag = &skb_shinfo(skb)->frags[i];
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
skb_frag_page(frag),
frag->page_offset, skb_frag_size(frag), hidma);
+ if (ret) {
+ jme_drop_tx_map(jme, idx, idx + i);
+ goto out;
+ }
+
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
ctxdesc = txdesc + ((idx + 1) & (mask));
ctxbi = txbi + ((idx + 1) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
offset_in_page(skb->data), len, hidma);
+ if (ret)
+ jme_drop_tx_map(jme, idx, idx + i);
+
+out:
+ return ret;
}
+
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct txdesc *txdesc;
struct jme_buffer_info *txbi;
u8 flags;
+ int ret = 0;
txdesc = (struct txdesc *)txring->desc + idx;
txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
jme_tx_csum(jme, skb, &flags);
jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
- jme_map_tx_skb(jme, skb, idx);
+ ret = jme_map_tx_skb(jme, skb, idx);
+ if (ret)
+ return ret;
+
txdesc->desc1.flags = flags;
/*
* Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- jme_fill_tx_desc(jme, skb, idx);
+ if (jme_fill_tx_desc(jme, skb, idx))
+ return NETDEV_TX_BUSY;
jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
--
1.8.3.1