* [PATCH] net: gianfar: fix dma check map error when DMA_API_DEBUG is enabled
@ 2014-10-30 10:25 Kevin Hao
2014-10-30 16:28 ` Claudiu Manoil
0 siblings, 1 reply; 6+ messages in thread
From: Kevin Hao @ 2014-10-30 10:25 UTC (permalink / raw)
To: netdev; +Cc: David Miller, Claudiu Manoil
We need to use dma_mapping_error() to check the dma address returned
by dma_map_single/page(). Otherwise we would get warning like this:
WARNING: at lib/dma-debug.c:1140
Modules linked in:
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.18.0-rc2-next-20141029 #196
task: c0834300 ti: effe6000 task.ti: c0874000
NIP: c02b2c98 LR: c02b2c98 CTR: c030abc4
REGS: effe7d70 TRAP: 0700 Not tainted (3.18.0-rc2-next-20141029)
MSR: 00021000 <CE,ME> CR: 22044022 XER: 20000000
GPR00: c02b2c98 effe7e20 c0834300 00000098 00021000 00000000 c030b898 00000003
GPR08: 00000001 00000000 00000001 749eec9d 22044022 1001abe0 00000020 ef278678
GPR16: ef278670 ef278668 ef278660 070a8040 c087f99c c08cdc60 00029000 c0840d44
GPR24: c08be6e8 c0840000 effe7e78 ef041340 00000600 ef114e10 00000000 c08be6e0
NIP [c02b2c98] check_unmap+0x51c/0x9e4
LR [c02b2c98] check_unmap+0x51c/0x9e4
Call Trace:
[effe7e20] [c02b2c98] check_unmap+0x51c/0x9e4 (unreliable)
[effe7e70] [c02b31d8] debug_dma_unmap_page+0x78/0x8c
[effe7ed0] [c03d1640] gfar_clean_rx_ring+0x208/0x488
[effe7f40] [c03d1a9c] gfar_poll_rx_sq+0x3c/0xa8
[effe7f60] [c04f8714] net_rx_action+0xc0/0x178
[effe7f90] [c00435a0] __do_softirq+0x100/0x1fc
[effe7fe0] [c0043958] irq_exit+0xa4/0xc8
[effe7ff0] [c000d14c] call_do_irq+0x24/0x3c
[c0875e90] [c00048a0] do_IRQ+0x8c/0xf8
[c0875eb0] [c000ed10] ret_from_except+0x0/0x18
For TX, we need to unmap the pages which have already been mapped and
free the skb before returning. For RX, just leave the rxbdp marked as not
empty. We can retry to initialize it to empty in the next round.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
---
drivers/net/ethernet/freescale/gianfar.c | 58 ++++++++++++++++++++++++++------
1 file changed, 47 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4fdf0aa16978..04b647c4cef6 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -117,8 +117,8 @@ static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+static int gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -219,9 +219,13 @@ static int gfar_init_bds(struct net_device *ndev)
netdev_err(ndev, "Can't allocate RX buffers\n");
return -ENOMEM;
}
- rx_queue->rx_skbuff[j] = skb;
- gfar_new_rxbdp(rx_queue, rxbdp, skb);
+ if (gfar_new_rxbdp(rx_queue, rxbdp, skb)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ rx_queue->rx_skbuff[j] = skb;
}
rxbdp++;
@@ -2290,6 +2294,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
0,
frag_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
/* set the TxBD length and buffer pointer */
txbdp->bufPtr = bufaddr;
@@ -2339,8 +2345,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb->ptp = 1;
}
- txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ txbdp_start->bufPtr = bufaddr;
/* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
@@ -2406,6 +2416,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tx_queue->txlock, flags);
return NETDEV_TX_OK;
+
+dma_map_err:
+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+ if (do_tstamp)
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ for (i = 0; i < nr_frags; i++) {
+ lstatus = txbdp->lstatus;
+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
+ break;
+
+ txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
+ bufaddr = txbdp->bufPtr;
+ dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+ DMA_TO_DEVICE);
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ }
+ gfar_wmb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
@@ -2606,8 +2635,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+static int gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2615,7 +2644,11 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, buf))
+ return -1;
+
gfar_init_rxbdp(rx_queue, bdp, buf);
+ return 0;
}
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
@@ -2851,10 +2884,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
}
- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
-
/* Setup the new bdp */
- gfar_new_rxbdp(rx_queue, bdp, newskb);
+ if (gfar_new_rxbdp(rx_queue, bdp, newskb)) {
+ dev_kfree_skb_any(newskb);
+ newskb = NULL;
+ }
+
+ rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
/* Update to the next pointer */
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
--
1.9.3
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] net: gianfar: fix dma check map error when DMA_API_DEBUG is enabled
2014-10-30 10:25 [PATCH] net: gianfar: fix dma check map error when DMA_API_DEBUG is enabled Kevin Hao
@ 2014-10-30 16:28 ` Claudiu Manoil
2014-10-31 3:09 ` Kevin Hao
0 siblings, 1 reply; 6+ messages in thread
From: Claudiu Manoil @ 2014-10-30 16:28 UTC (permalink / raw)
To: Kevin Hao, netdev, David Miller
On 10/30/2014 12:25 PM, Kevin Hao wrote:
[...]
> @@ -2406,6 +2416,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
> spin_unlock_irqrestore(&tx_queue->txlock, flags);
>
> return NETDEV_TX_OK;
> +
> +dma_map_err:
> + txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
> + if (do_tstamp)
> + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
> + for (i = 0; i < nr_frags; i++) {
> + lstatus = txbdp->lstatus;
> + if (!(lstatus & BD_LFLAG(TXBD_READY)))
> + break;
> +
> + txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
> + bufaddr = txbdp->bufPtr;
> + dma_unmap_page(priv->dev, bufaddr, txbdp->length,
> + DMA_TO_DEVICE);
> + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
> + }
> + gfar_wmb();
Why use the wmb() memory barrier here?
> + dev_kfree_skb_any(skb);
> + return NETDEV_TX_OK;
> }
>
[...]
Hi Dave,
The patch seems ok at first glance (except a minor comment) but I'd like
to have it tested first because it modifies sensitive code.
I can re-send it to netdev later, after we're done testing it.
Maybe it would be better to stack up a few more gianfar fixes in the
meantime and send them all to netdev as a pull request, later on.
Thanks,
Claudiu
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] net: gianfar: fix dma check map error when DMA_API_DEBUG is enabled
2014-10-30 16:28 ` Claudiu Manoil
@ 2014-10-31 3:09 ` Kevin Hao
0 siblings, 0 replies; 6+ messages in thread
From: Kevin Hao @ 2014-10-31 3:09 UTC (permalink / raw)
To: Claudiu Manoil; +Cc: netdev, David Miller
[-- Attachment #1: Type: text/plain, Size: 1200 bytes --]
On Thu, Oct 30, 2014 at 06:28:01PM +0200, Claudiu Manoil wrote:
> On 10/30/2014 12:25 PM, Kevin Hao wrote:
>
> [...]
>
> >@@ -2406,6 +2416,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > spin_unlock_irqrestore(&tx_queue->txlock, flags);
> >
> > return NETDEV_TX_OK;
> >+
> >+dma_map_err:
> >+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
> >+ if (do_tstamp)
> >+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
> >+ for (i = 0; i < nr_frags; i++) {
> >+ lstatus = txbdp->lstatus;
> >+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
> >+ break;
> >+
> >+ txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
> >+ bufaddr = txbdp->bufPtr;
> >+ dma_unmap_page(priv->dev, bufaddr, txbdp->length,
> >+ DMA_TO_DEVICE);
> >+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
> >+ }
> >+ gfar_wmb();
>
> Why use the wmb() memory barrier here?
Just want to make sure that the update to the txbdp->lstatus is performed before
any possible update to the BD fields later. Maybe this seems a bit redundant.
But this is not in the hot path, so we can tolerate this overhead for more safety.
Thanks,
Kevin
[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 0/2] DMA API usage fixes in gianfar
@ 2014-12-05 10:37 Arseny Solokha
2014-12-09 14:24 ` [PATCH net] gianfar: Fix dma check map error when DMA_API_DEBUG is enabled Claudiu Manoil
0 siblings, 1 reply; 6+ messages in thread
From: Arseny Solokha @ 2014-12-05 10:37 UTC (permalink / raw)
To: Claudiu Manoil; +Cc: netdev, linux-kernel, Arseny Solokha
Hello.
This patch set fixes DMA API usage issues in gianfar ethernet driver
reported by the kernel w/ DMA API debug enabled.
There were even reports that the kernel sometimes oopsed in the past
because of kernel paging request handling failures, though this was likely
observed on some ancient versions. And while I personally don't have
any strong evidence of this, there's no reason to let these possible
failures live any longer.
Arseny Solokha (2):
gianfar: handle map error in gfar_new_rxbdp()
gianfar: handle map error in gfar_start_xmit()
drivers/net/ethernet/freescale/gianfar.c | 49 ++++++++++++++++++++++++++------
1 file changed, 41 insertions(+), 8 deletions(-)
--
Regards,
Arseny Solokha.
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH net] gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
2014-12-05 10:37 [PATCH 0/2] DMA API usage fixes in gianfar Arseny Solokha
@ 2014-12-09 14:24 ` Claudiu Manoil
2014-12-10 18:13 ` David Miller
0 siblings, 1 reply; 6+ messages in thread
From: Claudiu Manoil @ 2014-12-09 14:24 UTC (permalink / raw)
To: netdev; +Cc: David S. Miller, Kevin Hao
From: Kevin Hao <haokexin@gmail.com>
We need to use dma_mapping_error() to check the dma address returned
by dma_map_single/page(). Otherwise we would get warning like this:
WARNING: at lib/dma-debug.c:1140
Modules linked in:
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.18.0-rc2-next-20141029 #196
task: c0834300 ti: effe6000 task.ti: c0874000
NIP: c02b2c98 LR: c02b2c98 CTR: c030abc4
REGS: effe7d70 TRAP: 0700 Not tainted (3.18.0-rc2-next-20141029)
MSR: 00021000 <CE,ME> CR: 22044022 XER: 20000000
GPR00: c02b2c98 effe7e20 c0834300 00000098 00021000 00000000 c030b898 00000003
GPR08: 00000001 00000000 00000001 749eec9d 22044022 1001abe0 00000020 ef278678
GPR16: ef278670 ef278668 ef278660 070a8040 c087f99c c08cdc60 00029000 c0840d44
GPR24: c08be6e8 c0840000 effe7e78 ef041340 00000600 ef114e10 00000000 c08be6e0
NIP [c02b2c98] check_unmap+0x51c/0x9e4
LR [c02b2c98] check_unmap+0x51c/0x9e4
Call Trace:
[effe7e20] [c02b2c98] check_unmap+0x51c/0x9e4 (unreliable)
[effe7e70] [c02b31d8] debug_dma_unmap_page+0x78/0x8c
[effe7ed0] [c03d1640] gfar_clean_rx_ring+0x208/0x488
[effe7f40] [c03d1a9c] gfar_poll_rx_sq+0x3c/0xa8
[effe7f60] [c04f8714] net_rx_action+0xc0/0x178
[effe7f90] [c00435a0] __do_softirq+0x100/0x1fc
[effe7fe0] [c0043958] irq_exit+0xa4/0xc8
[effe7ff0] [c000d14c] call_do_irq+0x24/0x3c
[c0875e90] [c00048a0] do_IRQ+0x8c/0xf8
[c0875eb0] [c000ed10] ret_from_except+0x0/0x18
For TX, we need to unmap the pages which have already been mapped and
free the skb before returning. For RX, just leave the rxbdp marked as not
empty. We can retry to initialize it to empty in the next round.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
---
drivers/net/ethernet/freescale/gianfar.c | 58 ++++++++++++++++++++++++++------
1 file changed, 47 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4fdf0aa..0253402 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -117,8 +117,8 @@ static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+static int gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -219,9 +219,13 @@ static int gfar_init_bds(struct net_device *ndev)
netdev_err(ndev, "Can't allocate RX buffers\n");
return -ENOMEM;
}
- rx_queue->rx_skbuff[j] = skb;
- gfar_new_rxbdp(rx_queue, rxbdp, skb);
+ if (gfar_new_rxbdp(rx_queue, rxbdp, skb)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ rx_queue->rx_skbuff[j] = skb;
}
rxbdp++;
@@ -2290,6 +2294,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
0,
frag_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
/* set the TxBD length and buffer pointer */
txbdp->bufPtr = bufaddr;
@@ -2339,8 +2345,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb->ptp = 1;
}
- txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ txbdp_start->bufPtr = bufaddr;
/* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
@@ -2406,6 +2416,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tx_queue->txlock, flags);
return NETDEV_TX_OK;
+
+dma_map_err:
+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+ if (do_tstamp)
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ for (i = 0; i < nr_frags; i++) {
+ lstatus = txbdp->lstatus;
+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
+ break;
+
+ txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
+ bufaddr = txbdp->bufPtr;
+ dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+ DMA_TO_DEVICE);
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ }
+ gfar_wmb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
@@ -2606,8 +2635,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+static int gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2615,7 +2644,11 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, buf)))
+ return -1;
+
gfar_init_rxbdp(rx_queue, bdp, buf);
+ return 0;
}
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
@@ -2851,10 +2884,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
}
- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
-
/* Setup the new bdp */
- gfar_new_rxbdp(rx_queue, bdp, newskb);
+ if (unlikely(gfar_new_rxbdp(rx_queue, bdp, newskb))) {
+ dev_kfree_skb_any(newskb);
+ newskb = NULL;
+ }
+
+ rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
/* Update to the next pointer */
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
--
1.7.11.7
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH net] gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
2014-12-09 14:24 ` [PATCH net] gianfar: Fix dma check map error when DMA_API_DEBUG is enabled Claudiu Manoil
@ 2014-12-10 18:13 ` David Miller
2014-12-11 2:06 ` Kevin Hao
0 siblings, 1 reply; 6+ messages in thread
From: David Miller @ 2014-12-10 18:13 UTC (permalink / raw)
To: claudiu.manoil; +Cc: netdev, haokexin
From: Claudiu Manoil <claudiu.manoil@freescale.com>
Date: Tue, 9 Dec 2014 16:24:35 +0200
> From: Kevin Hao <haokexin@gmail.com>
>
> We need to use dma_mapping_error() to check the dma address returned
> by dma_map_single/page(). Otherwise we would get warning like this:
...
> For TX, we need to unmap the pages which has already been mapped and
> free the skb before return. For RX, just let the rxbdp as unempty.
> We can retry to initialize it to empty in next round.
>
> Signed-off-by: Kevin Hao <haokexin@gmail.com>
> Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
The RX behavior needs to be adjusted.
You should never leave holes in the RX ring, ever.
Instead, try allocating the new RX skb first, and only if
you are successful should you pass up the original SKB. If
it fails, then reuse the original SKB in the RX ring.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH net] gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
2014-12-10 18:13 ` David Miller
@ 2014-12-11 2:06 ` Kevin Hao
0 siblings, 0 replies; 6+ messages in thread
From: Kevin Hao @ 2014-12-11 2:06 UTC (permalink / raw)
To: David Miller; +Cc: claudiu.manoil, netdev
[-- Attachment #1: Type: text/plain, Size: 1015 bytes --]
On Wed, Dec 10, 2014 at 01:13:47PM -0500, David Miller wrote:
> From: Claudiu Manoil <claudiu.manoil@freescale.com>
> Date: Tue, 9 Dec 2014 16:24:35 +0200
>
> > From: Kevin Hao <haokexin@gmail.com>
> >
> > We need to use dma_mapping_error() to check the dma address returned
> > by dma_map_single/page(). Otherwise we would get warning like this:
> ...
> > For TX, we need to unmap the pages which has already been mapped and
> > free the skb before return. For RX, just let the rxbdp as unempty.
> > We can retry to initialize it to empty in next round.
> >
> > Signed-off-by: Kevin Hao <haokexin@gmail.com>
> > Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
>
> The RX behavior needs to be adjusted.
>
> You should never leave holes in the RX ring, ever.
>
> Instead, try allocating the new RX skb first, and only if
> you are successful should you pass up the original SKB. If
> it fails, then reuse the original SKB in the RX ring.
OK, will do.
Thanks,
Kevin
[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2014-12-11 2:06 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-10-30 10:25 [PATCH] net: gianfar: fix dma check map error when DMA_API_DEBUG is enabled Kevin Hao
2014-10-30 16:28 ` Claudiu Manoil
2014-10-31 3:09 ` Kevin Hao
-- strict thread matches above, loose matches on Subject: below --
2014-12-05 10:37 [PATCH 0/2] DMA API usage fixes in gianfar Arseny Solokha
2014-12-09 14:24 ` [PATCH net] gianfar: Fix dma check map error when DMA_API_DEBUG is enabled Claudiu Manoil
2014-12-10 18:13 ` David Miller
2014-12-11 2:06 ` Kevin Hao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).