* [PATCH -next 0/2] bnx2: allow sleep during allocation
@ 2010-07-15 14:25 Stanislaw Gruszka
2010-07-15 14:25 ` [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init Stanislaw Gruszka
2010-07-15 14:25 ` [PATCH 2/2] bnx2: use device model DMA API Stanislaw Gruszka
0 siblings, 2 replies; 14+ messages in thread
From: Stanislaw Gruszka @ 2010-07-15 14:25 UTC
To: netdev; +Cc: Michael Chan
We have a Fedora bug report about a memory allocation failure in bnx2_open
(https://bugzilla.redhat.com/show_bug.cgi?id=612861). To prevent the
failure we can allow the allocator to sleep. Both patches add the
GFP_KERNEL flag where possible: the first patch in the alloc API, the
second in the DMA API (after converting from pci_dma_*).
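A rough sketch of why the flag matters (illustrative only, not code from
bnx2.c): GFP_ATOMIC has to fail immediately when free memory is low,
while GFP_KERNEL may block and let the VM reclaim, so setup paths running
in process context become far less likely to hit -ENOMEM:

	/* Sketch: ring setup runs in process context (bnx2_open), so it
	 * can afford a sleeping allocation.  GFP_KERNEL may block and
	 * trigger reclaim instead of failing outright as GFP_ATOMIC
	 * would. */
	struct page *page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;		/* now a much rarer event */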
Stanislaw
* [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-15 14:25 [PATCH -next 0/2] bnx2: allow sleep during allocation Stanislaw Gruszka
@ 2010-07-15 14:25 ` Stanislaw Gruszka
2010-07-15 14:48 ` Michael Chan
2010-07-15 18:57 ` [PATCH " Mitchell Erblich
2010-07-15 14:25 ` [PATCH 2/2] bnx2: use device model DMA API Stanislaw Gruszka
1 sibling, 2 replies; 14+ messages in thread
From: Stanislaw Gruszka @ 2010-07-15 14:25 UTC
To: netdev; +Cc: Michael Chan
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
drivers/net/bnx2.c | 17 +++++++++--------
1 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a203f39..6de4cb7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2664,13 +2664,13 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
}
static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
dma_addr_t mapping;
struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct rx_bd *rxbd =
&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_page(gfp);
if (!page)
return -ENOMEM;
@@ -2705,7 +2705,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
struct sk_buff *skb;
struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
@@ -2713,7 +2713,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
if (skb == NULL) {
return -ENOMEM;
}
@@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
int err;
u16 prod = ring_idx & 0xffff;
- err = bnx2_alloc_rx_skb(bp, rxr, prod);
+ err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_KERNEL);
if (unlikely(err)) {
bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
if (hdr_len) {
@@ -3039,7 +3039,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rx_pg->page = NULL;
err = bnx2_alloc_rx_page(bp, rxr,
- RX_PG_RING_IDX(pg_prod));
+ RX_PG_RING_IDX(pg_prod),
+ GFP_ATOMIC);
if (unlikely(err)) {
rxr->rx_pg_cons = pg_cons;
rxr->rx_pg_prod = pg_prod;
@@ -5179,7 +5180,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
- if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+ if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
ring_num, i, bp->rx_pg_ring_size);
break;
@@ -5191,7 +5192,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+ if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_num, i, bp->rx_ring_size);
break;
--
1.7.1
* [PATCH 2/2] bnx2: use device model DMA API
2010-07-15 14:25 [PATCH -next 0/2] bnx2: allow sleep during allocation Stanislaw Gruszka
2010-07-15 14:25 ` [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init Stanislaw Gruszka
@ 2010-07-15 14:25 ` Stanislaw Gruszka
2010-07-16 21:29 ` Michael Chan
1 sibling, 1 reply; 14+ messages in thread
From: Stanislaw Gruszka @ 2010-07-15 14:25 UTC
To: netdev; +Cc: Michael Chan
Use the DMA API, as the PCI equivalents will be deprecated. This change
also allows allocating with GFP_KERNEL in some places.
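The pci_* wrappers are thin inlines around the generic DMA API that
hard-code GFP_ATOMIC; dma_alloc_coherent() takes an explicit gfp_t, which
is what lets the process-context callers here pass GFP_KERNEL. A minimal
before/after sketch (illustrative only, assuming a struct pci_dev *pdev):

	/* before: the gfp mask is implicitly GFP_ATOMIC */
	buf = pci_alloc_consistent(pdev, size, &mapping);

	/* after: same semantics, but the caller chooses the gfp mask */
	buf = dma_alloc_coherent(&pdev->dev, size, &mapping, GFP_KERNEL);
	...
	dma_free_coherent(&pdev->dev, size, buf, mapping);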
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
drivers/net/bnx2.c | 111 +++++++++++++++++++++++++++-------------------------
1 files changed, 58 insertions(+), 53 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 6de4cb7..98aed05 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -692,9 +692,9 @@ bnx2_free_tx_mem(struct bnx2 *bp)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
if (txr->tx_desc_ring) {
- pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
- txr->tx_desc_ring,
- txr->tx_desc_mapping);
+ dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+ txr->tx_desc_ring,
+ txr->tx_desc_mapping);
txr->tx_desc_ring = NULL;
}
kfree(txr->tx_buf_ring);
@@ -714,9 +714,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_ring; j++) {
if (rxr->rx_desc_ring[j])
- pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- rxr->rx_desc_ring[j],
- rxr->rx_desc_mapping[j]);
+ dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+ rxr->rx_desc_ring[j],
+ rxr->rx_desc_mapping[j]);
rxr->rx_desc_ring[j] = NULL;
}
vfree(rxr->rx_buf_ring);
@@ -724,9 +724,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_pg_ring; j++) {
if (rxr->rx_pg_desc_ring[j])
- pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- rxr->rx_pg_desc_ring[j],
- rxr->rx_pg_desc_mapping[j]);
+ dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
rxr->rx_pg_desc_ring[j] = NULL;
}
vfree(rxr->rx_pg_ring);
@@ -748,8 +748,8 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
return -ENOMEM;
txr->tx_desc_ring =
- pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
- &txr->tx_desc_mapping);
+ dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+ &txr->tx_desc_mapping, GFP_KERNEL);
if (txr->tx_desc_ring == NULL)
return -ENOMEM;
}
@@ -776,8 +776,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_ring; j++) {
rxr->rx_desc_ring[j] =
- pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
- &rxr->rx_desc_mapping[j]);
+ dma_alloc_coherent(&bp->pdev->dev,
+ RXBD_RING_SIZE,
+ &rxr->rx_desc_mapping[j],
+ GFP_KERNEL);
if (rxr->rx_desc_ring[j] == NULL)
return -ENOMEM;
@@ -795,8 +797,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_pg_ring; j++) {
rxr->rx_pg_desc_ring[j] =
- pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
- &rxr->rx_pg_desc_mapping[j]);
+ dma_alloc_coherent(&bp->pdev->dev,
+ RXBD_RING_SIZE,
+ &rxr->rx_pg_desc_mapping[j],
+ GFP_KERNEL);
if (rxr->rx_pg_desc_ring[j] == NULL)
return -ENOMEM;
@@ -816,16 +820,16 @@ bnx2_free_mem(struct bnx2 *bp)
for (i = 0; i < bp->ctx_pages; i++) {
if (bp->ctx_blk[i]) {
- pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
- bp->ctx_blk[i],
- bp->ctx_blk_mapping[i]);
+ dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+ bp->ctx_blk[i],
+ bp->ctx_blk_mapping[i]);
bp->ctx_blk[i] = NULL;
}
}
if (bnapi->status_blk.msi) {
- pci_free_consistent(bp->pdev, bp->status_stats_size,
- bnapi->status_blk.msi,
- bp->status_blk_mapping);
+ dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+ bnapi->status_blk.msi,
+ bp->status_blk_mapping);
bnapi->status_blk.msi = NULL;
bp->stats_blk = NULL;
}
@@ -846,8 +850,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
- &bp->status_blk_mapping);
+ status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (status_blk == NULL)
goto alloc_mem_err;
@@ -885,9 +889,10 @@ bnx2_alloc_mem(struct bnx2 *bp)
if (bp->ctx_pages == 0)
bp->ctx_pages = 1;
for (i = 0; i < bp->ctx_pages; i++) {
- bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
+ bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
BCM_PAGE_SIZE,
- &bp->ctx_blk_mapping[i]);
+ &bp->ctx_blk_mapping[i],
+ GFP_KERNEL);
if (bp->ctx_blk[i] == NULL)
goto alloc_mem_err;
}
@@ -2674,9 +2679,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf
if (!page)
return -ENOMEM;
- mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+ mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(bp->pdev, mapping)) {
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
__free_page(page);
return -EIO;
}
@@ -2697,8 +2702,8 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
if (!page)
return;
- pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
__free_page(page);
rx_pg->page = NULL;
@@ -2721,9 +2726,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp
if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
skb_reserve(skb, BNX2_RX_ALIGN - align);
- mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(bp->pdev, mapping)) {
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
dev_kfree_skb(skb);
return -EIO;
}
@@ -2829,7 +2834,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
}
- pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
tx_buf->skb = NULL;
@@ -2838,7 +2843,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
for (i = 0; i < last; i++) {
sw_cons = NEXT_TX_BD(sw_cons);
- pci_unmap_page(bp->pdev,
+ dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
@@ -2945,7 +2950,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
- pci_dma_sync_single_for_device(bp->pdev,
+ dma_sync_single_for_device(&bp->pdev->dev,
dma_unmap_addr(cons_rx_buf, mapping),
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
@@ -2987,7 +2992,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
}
skb_reserve(skb, BNX2_RX_OFFSET);
- pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+ dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (hdr_len == 0) {
@@ -3049,7 +3054,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
return err;
}
- pci_unmap_page(bp->pdev, mapping_old,
+ dma_unmap_page(&bp->pdev->dev, mapping_old,
PAGE_SIZE, PCI_DMA_FROMDEVICE);
frag_size -= frag_len;
@@ -3120,7 +3125,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_addr = dma_unmap_addr(rx_buf, mapping);
- pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
@@ -5338,7 +5343,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
continue;
}
- pci_unmap_single(bp->pdev,
+ dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -5349,7 +5354,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
j++;
for (k = 0; k < last; k++, j++) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
- pci_unmap_page(bp->pdev,
+ dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
@@ -5379,7 +5384,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
if (skb == NULL)
continue;
- pci_unmap_single(bp->pdev,
+ dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -5732,9 +5737,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
for (i = 14; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
- map = pci_map_single(bp->pdev, skb->data, pkt_size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(bp->pdev, map)) {
+ map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, map)) {
dev_kfree_skb(skb);
return -EIO;
}
@@ -5772,7 +5777,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
udelay(5);
- pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
+ dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -5789,7 +5794,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
rx_hdr = rx_buf->desc;
skb_reserve(rx_skb, BNX2_RX_OFFSET);
- pci_dma_sync_single_for_cpu(bp->pdev,
+ dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, PCI_DMA_FROMDEVICE);
@@ -6457,8 +6462,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else
mss = 0;
- mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(bp->pdev, mapping)) {
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -6486,9 +6491,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd = &txr->tx_desc_ring[ring_prod];
len = frag->size;
- mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
- len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(bp->pdev, mapping))
+ mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
+ len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping))
goto dma_error;
dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
mapping);
@@ -6527,7 +6532,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = NULL;
- pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
/* unmap remaining mapped pages */
@@ -6535,7 +6540,7 @@ dma_error:
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
- pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
--
1.7.1
* Re: [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-15 14:25 ` [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init Stanislaw Gruszka
@ 2010-07-15 14:48 ` Michael Chan
2010-07-16 3:25 ` David Miller
2010-07-16 8:55 ` [PATCH v2 " Stanislaw Gruszka
2010-07-15 18:57 ` [PATCH " Mitchell Erblich
1 sibling, 2 replies; 14+ messages in thread
From: Michael Chan @ 2010-07-15 14:48 UTC
To: 'Stanislaw Gruszka', netdev@vger.kernel.org
Stanislaw Gruszka wrote:
> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
> ---
> @@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct
> bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> int err;
> u16 prod = ring_idx & 0xffff;
>
> - err = bnx2_alloc_rx_skb(bp, rxr, prod);
> + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_KERNEL);
This should be GFP_ATOMIC since it is called from NAPI softirq
context.
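[A sketch of the rule Michael is pointing at (hypothetical helper, not
from bnx2.c): the gfp argument has to match the calling context.
bnx2_init_rx_ring() runs in process context and may pass GFP_KERNEL;
bnx2_rx_skb() is reached from the NAPI poll loop in softirq context,
where sleeping is forbidden, so it must pass GFP_ATOMIC.]

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/gfp.h>

	/* Hypothetical helper: allocate an RX skb with a mask matching
	 * the calling context.  GFP_ATOMIC never sleeps but can fail
	 * under pressure; the RX path tolerates that by reusing the old
	 * buffer. */
	static struct sk_buff *rx_refill_skb(struct net_device *dev,
					     unsigned int size, bool in_napi)
	{
		return __netdev_alloc_skb(dev, size,
					  in_napi ? GFP_ATOMIC : GFP_KERNEL);
	}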
* Re: [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-15 14:25 ` [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init Stanislaw Gruszka
2010-07-15 14:48 ` Michael Chan
@ 2010-07-15 18:57 ` Mitchell Erblich
2010-07-16 7:30 ` Stanislaw Gruszka
1 sibling, 1 reply; 14+ messages in thread
From: Mitchell Erblich @ 2010-07-15 18:57 UTC
To: Stanislaw Gruszka; +Cc: netdev, Michael Chan
On Jul 15, 2010, at 7:25 AM, Stanislaw Gruszka wrote:
> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
> ---
> drivers/net/bnx2.c | 17 +++++++++--------
> 1 files changed, 9 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
> index a203f39..6de4cb7 100644
> --- a/drivers/net/bnx2.c
> +++ b/drivers/net/bnx2.c
> @@ -2664,13 +2664,13 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
> }
>
> static inline int
> -bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> +bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
> {
> dma_addr_t mapping;
> struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
> struct rx_bd *rxbd =
> &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
> - struct page *page = alloc_page(GFP_ATOMIC);
> + struct page *page = alloc_page(gfp);
>
> if (!page)
> return -ENOMEM;
> @@ -2705,7 +2705,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> }
>
> static inline int
> -bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> +bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
> {
> struct sk_buff *skb;
> struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
> @@ -2713,7 +2713,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
> unsigned long align;
>
> - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
> + skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
> if (skb == NULL) {
> return -ENOMEM;
> }
> @@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> int err;
> u16 prod = ring_idx & 0xffff;
>
> - err = bnx2_alloc_rx_skb(bp, rxr, prod);
> + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_KERNEL);
> if (unlikely(err)) {
> bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
> if (hdr_len) {
> @@ -3039,7 +3039,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> rx_pg->page = NULL;
>
> err = bnx2_alloc_rx_page(bp, rxr,
> - RX_PG_RING_IDX(pg_prod));
> + RX_PG_RING_IDX(pg_prod),
> + GFP_ATOMIC);
Why not GFP_NOWAIT here?
This would then not use the last reserved pages of memory.
This would still remove the possible sleep associated with GFP_KERNEL.
Mitchell Erblich
> if (unlikely(err)) {
> rxr->rx_pg_cons = pg_cons;
> rxr->rx_pg_prod = pg_prod;
> @@ -5179,7 +5180,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
>
> ring_prod = prod = rxr->rx_pg_prod;
> for (i = 0; i < bp->rx_pg_ring_size; i++) {
> - if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
> + if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
> netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
> ring_num, i, bp->rx_pg_ring_size);
> break;
> @@ -5191,7 +5192,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
>
> ring_prod = prod = rxr->rx_prod;
> for (i = 0; i < bp->rx_ring_size; i++) {
> - if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
> + if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
> netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
> ring_num, i, bp->rx_ring_size);
> break;
> --
> 1.7.1
* Re: [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-15 14:48 ` Michael Chan
@ 2010-07-16 3:25 ` David Miller
2010-07-16 5:57 ` Mitchell Erblich
2010-07-16 7:13 ` Stanislaw Gruszka
2010-07-16 8:55 ` [PATCH v2 " Stanislaw Gruszka
1 sibling, 2 replies; 14+ messages in thread
From: David Miller @ 2010-07-16 3:25 UTC
To: mchan; +Cc: sgruszka, netdev
From: "Michael Chan" <mchan@broadcom.com>
Date: Thu, 15 Jul 2010 07:48:40 -0700
> Stanislaw Gruszka wrote:
>
>> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
>> ---
>> @@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct
>> bnx2_rx_ring_info *rxr, struct sk_buff *skb,
>> int err;
>> u16 prod = ring_idx & 0xffff;
>>
>> - err = bnx2_alloc_rx_skb(bp, rxr, prod);
>> + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_KERNEL);
>
> This should be GFP_ATOMIC since it is called from NAPI softirq
> context.
This fatal issue gives me doubts about whether this patch was even
tested at all.
Immediately the kernel memory allocator should have issued a warning
due to this GFP_KERNEL allocation in a non-sleep'able context.
Stanislaw, how did you test this patch?
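[The warning David refers to comes from the allocator's sleep-in-atomic
debug check, roughly this (paraphrased from the mm code of this era):]

	/* A __GFP_WAIT allocation from atomic context trips might_sleep()
	 * and prints "BUG: sleeping function called from invalid context"
	 * when sleep-in-atomic debugging is enabled. */
	might_sleep_if(gfp_mask & __GFP_WAIT);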
* Re: [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-16 3:25 ` David Miller
@ 2010-07-16 5:57 ` Mitchell Erblich
2010-07-16 7:13 ` Stanislaw Gruszka
1 sibling, 0 replies; 14+ messages in thread
From: Mitchell Erblich @ 2010-07-16 5:57 UTC
To: David Miller; +Cc: mchan, sgruszka, netdev
On Jul 15, 2010, at 8:25 PM, David Miller wrote:
> From: "Michael Chan" <mchan@broadcom.com>
> Date: Thu, 15 Jul 2010 07:48:40 -0700
>
>> Stanislaw Gruszka wrote:
>>
>>> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
>>> ---
>>> @@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct
>>> bnx2_rx_ring_info *rxr, struct sk_buff *skb,
>>> int err;
>>> u16 prod = ring_idx & 0xffff;
>>>
>>> - err = bnx2_alloc_rx_skb(bp, rxr, prod);
>>> + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_KERNEL);
>>
>> This should be GFP_ATOMIC since it is called from NAPI softirq
>> context.
>
> This fatal issue gives me doubts about whether this patch was even
> tested at all.
>
> Immediately the kernel memory allocator should have issued a warning
> due to this GFP_KERNEL allocation in a non-sleep'able context.
>
> Stanislaw, how did you test this patch?
Group,
Why NOT GFP_NOWAIT? This won't use the last reserve pages of memory,
versus GFP_ATOMIC.
GFP_ATOMIC, IMO, SHOULD be used in the paths that clean
and free pages.
Mitchell Erblich
* Re: [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-16 3:25 ` David Miller
2010-07-16 5:57 ` Mitchell Erblich
@ 2010-07-16 7:13 ` Stanislaw Gruszka
1 sibling, 0 replies; 14+ messages in thread
From: Stanislaw Gruszka @ 2010-07-16 7:13 UTC
To: David Miller; +Cc: mchan, netdev
On Thu, 15 Jul 2010 20:25:37 -0700 (PDT)
David Miller <davem@davemloft.net> wrote:
> > This should be GFP_ATOMIC since it is called from NAPI softirq
> > context.
>
> This fatal issue gives me doubts about whether this patch was even
> tested at all.
>
> Immediately the kernel memory allocator should have issued a warning
> due to this GFP_KERNEL allocation in a non-sleep'able context.
>
> Stanislaw, how did you test this patch?
I ran a net-next-2.6 kernel with the patches on a machine with a bnx2
device, but I compiled the kernel with CONFIG_DEBUG_KOBJECT and dmesg was
filled with messages like:
kobject: 'block' (ffff8801663122c0): kobject_add_internal: parent: '2:2:1:0', set: '(null)'
kobject: 'sdc' (ffff8801642ca070): kobject_add_internal: parent: 'block', set: 'devices'
kobject: 'sdc' (ffff8801642ca070): kobject_uevent_env
so I missed the warning, grr...
Stanislaw
* Re: [PATCH 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-15 18:57 ` [PATCH " Mitchell Erblich
@ 2010-07-16 7:30 ` Stanislaw Gruszka
0 siblings, 0 replies; 14+ messages in thread
From: Stanislaw Gruszka @ 2010-07-16 7:30 UTC
To: Mitchell Erblich; +Cc: netdev, Michael Chan
On Thu, 15 Jul 2010 11:57:59 -0700
Mitchell Erblich <erblichs@earthlink.net> wrote:
> > @@ -3039,7 +3039,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> > rx_pg->page = NULL;
> >
> > err = bnx2_alloc_rx_page(bp, rxr,
> > - RX_PG_RING_IDX(pg_prod));
> > + RX_PG_RING_IDX(pg_prod),
> > +
>
> > GFP_ATOMIC);
>
> Why not GFP_NOWAIT here?
> This would then not use the last reserved pages of memory.
> This still would remove the possibe sleep asociated with GFP_KERNEL.
There is no GFP_NOWAIT usage in any network driver. I'm not sure this
flag is intended for driver use. Anyway, I cannot judge whether a
GFP_ATOMIC -> GFP_NOWAIT conversion is a good or a bad idea; I think you
should ask the mm guys about that.
Stanislaw
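[For reference, the GFP_ATOMIC/GFP_NOWAIT distinction under discussion,
as defined in include/linux/gfp.h of roughly this era (quoted from
memory; verify against your tree):]

	#define GFP_ATOMIC	(__GFP_HIGH)
	#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)

	/* Neither mask allows sleeping; the difference is that __GFP_HIGH
	 * lets GFP_ATOMIC dip into the emergency reserves, which is what
	 * GFP_NOWAIT avoids. */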
* [PATCH v2 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-15 14:48 ` Michael Chan
2010-07-16 3:25 ` David Miller
@ 2010-07-16 8:55 ` Stanislaw Gruszka
2010-07-16 21:24 ` Michael Chan
1 sibling, 1 reply; 14+ messages in thread
From: Stanislaw Gruszka @ 2010-07-16 8:55 UTC
To: netdev; +Cc: Michael Chan
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
v1->v2: use GFP_ATOMIC in bnx2_rx_skb
drivers/net/bnx2.c | 17 +++++++++--------
1 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a203f39..a7df539 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2664,13 +2664,13 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
}
static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
dma_addr_t mapping;
struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct rx_bd *rxbd =
&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_page(gfp);
if (!page)
return -ENOMEM;
@@ -2705,7 +2705,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
struct sk_buff *skb;
struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
@@ -2713,7 +2713,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
if (skb == NULL) {
return -ENOMEM;
}
@@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
int err;
u16 prod = ring_idx & 0xffff;
- err = bnx2_alloc_rx_skb(bp, rxr, prod);
+ err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
if (hdr_len) {
@@ -3039,7 +3039,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rx_pg->page = NULL;
err = bnx2_alloc_rx_page(bp, rxr,
- RX_PG_RING_IDX(pg_prod));
+ RX_PG_RING_IDX(pg_prod),
+ GFP_ATOMIC);
if (unlikely(err)) {
rxr->rx_pg_cons = pg_cons;
rxr->rx_pg_prod = pg_prod;
@@ -5179,7 +5180,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
- if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+ if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
ring_num, i, bp->rx_pg_ring_size);
break;
@@ -5191,7 +5192,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+ if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_num, i, bp->rx_ring_size);
break;
--
1.7.1
* Re: [PATCH v2 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-16 8:55 ` [PATCH v2 " Stanislaw Gruszka
@ 2010-07-16 21:24 ` Michael Chan
2010-07-18 21:43 ` David Miller
0 siblings, 1 reply; 14+ messages in thread
From: Michael Chan @ 2010-07-16 21:24 UTC
To: Stanislaw Gruszka; +Cc: netdev@vger.kernel.org
On Fri, 2010-07-16 at 01:55 -0700, Stanislaw Gruszka wrote:
> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Acked-by: Michael Chan <mchan@broadcom.com>
> ---
> v1->v2: use GFP_ATOMIC in bnx2_rx_skb
>
> drivers/net/bnx2.c | 17 +++++++++--------
> 1 files changed, 9 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
> index a203f39..a7df539 100644
> --- a/drivers/net/bnx2.c
> +++ b/drivers/net/bnx2.c
> @@ -2664,13 +2664,13 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
> }
>
> static inline int
> -bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> +bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
> {
> dma_addr_t mapping;
> struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
> struct rx_bd *rxbd =
> &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
> - struct page *page = alloc_page(GFP_ATOMIC);
> + struct page *page = alloc_page(gfp);
>
> if (!page)
> return -ENOMEM;
> @@ -2705,7 +2705,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> }
>
> static inline int
> -bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> +bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
> {
> struct sk_buff *skb;
> struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
> @@ -2713,7 +2713,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
> unsigned long align;
>
> - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
> + skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
> if (skb == NULL) {
> return -ENOMEM;
> }
> @@ -2974,7 +2974,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> int err;
> u16 prod = ring_idx & 0xffff;
>
> - err = bnx2_alloc_rx_skb(bp, rxr, prod);
> + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
> if (unlikely(err)) {
> bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
> if (hdr_len) {
> @@ -3039,7 +3039,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> rx_pg->page = NULL;
>
> err = bnx2_alloc_rx_page(bp, rxr,
> - RX_PG_RING_IDX(pg_prod));
> + RX_PG_RING_IDX(pg_prod),
> + GFP_ATOMIC);
> if (unlikely(err)) {
> rxr->rx_pg_cons = pg_cons;
> rxr->rx_pg_prod = pg_prod;
> @@ -5179,7 +5180,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
>
> ring_prod = prod = rxr->rx_pg_prod;
> for (i = 0; i < bp->rx_pg_ring_size; i++) {
> - if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
> + if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
> netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
> ring_num, i, bp->rx_pg_ring_size);
> break;
> @@ -5191,7 +5192,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
>
> ring_prod = prod = rxr->rx_prod;
> for (i = 0; i < bp->rx_ring_size; i++) {
> - if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
> + if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
> netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
> ring_num, i, bp->rx_ring_size);
> break;
* Re: [PATCH 2/2] bnx2: use device model DMA API
2010-07-15 14:25 ` [PATCH 2/2] bnx2: use device model DMA API Stanislaw Gruszka
@ 2010-07-16 21:29 ` Michael Chan
2010-07-18 21:43 ` David Miller
0 siblings, 1 reply; 14+ messages in thread
From: Michael Chan @ 2010-07-16 21:29 UTC
To: Stanislaw Gruszka; +Cc: netdev@vger.kernel.org
On Thu, 2010-07-15 at 07:25 -0700, Stanislaw Gruszka wrote:
> Use the DMA API, as the PCI equivalents will be deprecated. This change
> also allows allocating with GFP_KERNEL in some places.
>
> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Acked-by: Michael Chan <mchan@broadcom.com>
> ---
> drivers/net/bnx2.c | 111 +++++++++++++++++++++++++++-------------------------
> 1 files changed, 58 insertions(+), 53 deletions(-)
>
> diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
> index 6de4cb7..98aed05 100644
> --- a/drivers/net/bnx2.c
> +++ b/drivers/net/bnx2.c
> @@ -692,9 +692,9 @@ bnx2_free_tx_mem(struct bnx2 *bp)
> struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
>
> if (txr->tx_desc_ring) {
> - pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
> - txr->tx_desc_ring,
> - txr->tx_desc_mapping);
> + dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
> + txr->tx_desc_ring,
> + txr->tx_desc_mapping);
> txr->tx_desc_ring = NULL;
> }
> kfree(txr->tx_buf_ring);
> @@ -714,9 +714,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
>
> for (j = 0; j < bp->rx_max_ring; j++) {
> if (rxr->rx_desc_ring[j])
> - pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
> - rxr->rx_desc_ring[j],
> - rxr->rx_desc_mapping[j]);
> + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
> + rxr->rx_desc_ring[j],
> + rxr->rx_desc_mapping[j]);
> rxr->rx_desc_ring[j] = NULL;
> }
> vfree(rxr->rx_buf_ring);
> @@ -724,9 +724,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
>
> for (j = 0; j < bp->rx_max_pg_ring; j++) {
> if (rxr->rx_pg_desc_ring[j])
> - pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
> - rxr->rx_pg_desc_ring[j],
> - rxr->rx_pg_desc_mapping[j]);
> + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
> + rxr->rx_pg_desc_ring[j],
> + rxr->rx_pg_desc_mapping[j]);
> rxr->rx_pg_desc_ring[j] = NULL;
> }
> vfree(rxr->rx_pg_ring);
> @@ -748,8 +748,8 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
> return -ENOMEM;
>
> txr->tx_desc_ring =
> - pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
> - &txr->tx_desc_mapping);
> + dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
> + &txr->tx_desc_mapping, GFP_KERNEL);
> if (txr->tx_desc_ring == NULL)
> return -ENOMEM;
> }
> @@ -776,8 +776,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
>
> for (j = 0; j < bp->rx_max_ring; j++) {
> rxr->rx_desc_ring[j] =
> - pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
> - &rxr->rx_desc_mapping[j]);
> + dma_alloc_coherent(&bp->pdev->dev,
> + RXBD_RING_SIZE,
> + &rxr->rx_desc_mapping[j],
> + GFP_KERNEL);
> if (rxr->rx_desc_ring[j] == NULL)
> return -ENOMEM;
>
> @@ -795,8 +797,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
>
> for (j = 0; j < bp->rx_max_pg_ring; j++) {
> rxr->rx_pg_desc_ring[j] =
> - pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
> - &rxr->rx_pg_desc_mapping[j]);
> + dma_alloc_coherent(&bp->pdev->dev,
> + RXBD_RING_SIZE,
> + &rxr->rx_pg_desc_mapping[j],
> + GFP_KERNEL);
> if (rxr->rx_pg_desc_ring[j] == NULL)
> return -ENOMEM;
>
> @@ -816,16 +820,16 @@ bnx2_free_mem(struct bnx2 *bp)
>
> for (i = 0; i < bp->ctx_pages; i++) {
> if (bp->ctx_blk[i]) {
> - pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
> - bp->ctx_blk[i],
> - bp->ctx_blk_mapping[i]);
> + dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
> + bp->ctx_blk[i],
> + bp->ctx_blk_mapping[i]);
> bp->ctx_blk[i] = NULL;
> }
> }
> if (bnapi->status_blk.msi) {
> - pci_free_consistent(bp->pdev, bp->status_stats_size,
> - bnapi->status_blk.msi,
> - bp->status_blk_mapping);
> + dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
> + bnapi->status_blk.msi,
> + bp->status_blk_mapping);
> bnapi->status_blk.msi = NULL;
> bp->stats_blk = NULL;
> }
> @@ -846,8 +850,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
> bp->status_stats_size = status_blk_size +
> sizeof(struct statistics_block);
>
> - status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
> - &bp->status_blk_mapping);
> + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
> + &bp->status_blk_mapping, GFP_KERNEL);
> if (status_blk == NULL)
> goto alloc_mem_err;
>
> @@ -885,9 +889,10 @@ bnx2_alloc_mem(struct bnx2 *bp)
> if (bp->ctx_pages == 0)
> bp->ctx_pages = 1;
> for (i = 0; i < bp->ctx_pages; i++) {
> - bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
> + bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
> BCM_PAGE_SIZE,
> - &bp->ctx_blk_mapping[i]);
> + &bp->ctx_blk_mapping[i],
> + GFP_KERNEL);
> if (bp->ctx_blk[i] == NULL)
> goto alloc_mem_err;
> }
> @@ -2674,9 +2679,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf
>
> if (!page)
> return -ENOMEM;
> - mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
> + mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
> PCI_DMA_FROMDEVICE);
> - if (pci_dma_mapping_error(bp->pdev, mapping)) {
> + if (dma_mapping_error(&bp->pdev->dev, mapping)) {
> __free_page(page);
> return -EIO;
> }
> @@ -2697,8 +2702,8 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
> if (!page)
> return;
>
> - pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
> - PCI_DMA_FROMDEVICE);
> + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
> + PAGE_SIZE, PCI_DMA_FROMDEVICE);
>
> __free_page(page);
> rx_pg->page = NULL;
> @@ -2721,9 +2726,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp
> if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
> skb_reserve(skb, BNX2_RX_ALIGN - align);
>
> - mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
> - PCI_DMA_FROMDEVICE);
> - if (pci_dma_mapping_error(bp->pdev, mapping)) {
> + mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
> + PCI_DMA_FROMDEVICE);
> + if (dma_mapping_error(&bp->pdev->dev, mapping)) {
> dev_kfree_skb(skb);
> return -EIO;
> }
> @@ -2829,7 +2834,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
> }
> }
>
> - pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
> + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
> skb_headlen(skb), PCI_DMA_TODEVICE);
>
> tx_buf->skb = NULL;
> @@ -2838,7 +2843,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
> for (i = 0; i < last; i++) {
> sw_cons = NEXT_TX_BD(sw_cons);
>
> - pci_unmap_page(bp->pdev,
> + dma_unmap_page(&bp->pdev->dev,
> dma_unmap_addr(
> &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
> mapping),
> @@ -2945,7 +2950,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
> cons_rx_buf = &rxr->rx_buf_ring[cons];
> prod_rx_buf = &rxr->rx_buf_ring[prod];
>
> - pci_dma_sync_single_for_device(bp->pdev,
> + dma_sync_single_for_device(&bp->pdev->dev,
> dma_unmap_addr(cons_rx_buf, mapping),
> BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
>
> @@ -2987,7 +2992,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> }
>
> skb_reserve(skb, BNX2_RX_OFFSET);
> - pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
> + dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
> PCI_DMA_FROMDEVICE);
>
> if (hdr_len == 0) {
> @@ -3049,7 +3054,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
> return err;
> }
>
> - pci_unmap_page(bp->pdev, mapping_old,
> + dma_unmap_page(&bp->pdev->dev, mapping_old,
> PAGE_SIZE, PCI_DMA_FROMDEVICE);
>
> frag_size -= frag_len;
> @@ -3120,7 +3125,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
>
> dma_addr = dma_unmap_addr(rx_buf, mapping);
>
> - pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
> + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
> BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
> PCI_DMA_FROMDEVICE);
>
> @@ -5338,7 +5343,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
> continue;
> }
>
> - pci_unmap_single(bp->pdev,
> + dma_unmap_single(&bp->pdev->dev,
> dma_unmap_addr(tx_buf, mapping),
> skb_headlen(skb),
> PCI_DMA_TODEVICE);
> @@ -5349,7 +5354,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
> j++;
> for (k = 0; k < last; k++, j++) {
> tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
> - pci_unmap_page(bp->pdev,
> + dma_unmap_page(&bp->pdev->dev,
> dma_unmap_addr(tx_buf, mapping),
> skb_shinfo(skb)->frags[k].size,
> PCI_DMA_TODEVICE);
> @@ -5379,7 +5384,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
> if (skb == NULL)
> continue;
>
> - pci_unmap_single(bp->pdev,
> + dma_unmap_single(&bp->pdev->dev,
> dma_unmap_addr(rx_buf, mapping),
> bp->rx_buf_use_size,
> PCI_DMA_FROMDEVICE);
> @@ -5732,9 +5737,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
> for (i = 14; i < pkt_size; i++)
> packet[i] = (unsigned char) (i & 0xff);
>
> - map = pci_map_single(bp->pdev, skb->data, pkt_size,
> - PCI_DMA_TODEVICE);
> - if (pci_dma_mapping_error(bp->pdev, map)) {
> + map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
> + PCI_DMA_TODEVICE);
> + if (dma_mapping_error(&bp->pdev->dev, map)) {
> dev_kfree_skb(skb);
> return -EIO;
> }
> @@ -5772,7 +5777,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
>
> udelay(5);
>
> - pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
> + dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
> dev_kfree_skb(skb);
>
> if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
> @@ -5789,7 +5794,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
> rx_hdr = rx_buf->desc;
> skb_reserve(rx_skb, BNX2_RX_OFFSET);
>
> - pci_dma_sync_single_for_cpu(bp->pdev,
> + dma_sync_single_for_cpu(&bp->pdev->dev,
> dma_unmap_addr(rx_buf, mapping),
> bp->rx_buf_size, PCI_DMA_FROMDEVICE);
>
> @@ -6457,8 +6462,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
> } else
> mss = 0;
>
> - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
> - if (pci_dma_mapping_error(bp->pdev, mapping)) {
> + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
> + if (dma_mapping_error(&bp->pdev->dev, mapping)) {
> dev_kfree_skb(skb);
> return NETDEV_TX_OK;
> }
> @@ -6486,9 +6491,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
> txbd = &txr->tx_desc_ring[ring_prod];
>
> len = frag->size;
> - mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
> - len, PCI_DMA_TODEVICE);
> - if (pci_dma_mapping_error(bp->pdev, mapping))
> + mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
> + len, PCI_DMA_TODEVICE);
> + if (dma_mapping_error(&bp->pdev->dev, mapping))
> goto dma_error;
> dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
> mapping);
> @@ -6527,7 +6532,7 @@ dma_error:
> ring_prod = TX_RING_IDX(prod);
> tx_buf = &txr->tx_buf_ring[ring_prod];
> tx_buf->skb = NULL;
> - pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
> + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
> skb_headlen(skb), PCI_DMA_TODEVICE);
>
> /* unmap remaining mapped pages */
> @@ -6535,7 +6540,7 @@ dma_error:
> prod = NEXT_TX_BD(prod);
> ring_prod = TX_RING_IDX(prod);
> tx_buf = &txr->tx_buf_ring[ring_prod];
> - pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
> + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
> skb_shinfo(skb)->frags[i].size,
> PCI_DMA_TODEVICE);
> }
* Re: [PATCH v2 1/2] bnx2: allocate with GFP_KERNEL flag on RX path init
2010-07-16 21:24 ` Michael Chan
@ 2010-07-18 21:43 ` David Miller
0 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2010-07-18 21:43 UTC
To: mchan; +Cc: sgruszka, netdev
From: "Michael Chan" <mchan@broadcom.com>
Date: Fri, 16 Jul 2010 14:24:38 -0700
>
> On Fri, 2010-07-16 at 01:55 -0700, Stanislaw Gruszka wrote:
>> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
>
> Acked-by: Michael Chan <mchan@broadcom.com>
Applied.
* Re: [PATCH 2/2] bnx2: use device model DMA API
2010-07-16 21:29 ` Michael Chan
@ 2010-07-18 21:43 ` David Miller
0 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2010-07-18 21:43 UTC
To: mchan; +Cc: sgruszka, netdev
From: "Michael Chan" <mchan@broadcom.com>
Date: Fri, 16 Jul 2010 14:29:19 -0700
>
> On Thu, 2010-07-15 at 07:25 -0700, Stanislaw Gruszka wrote:
>> Use the DMA API, as the PCI equivalents will be deprecated. This change
>> also allows allocating with GFP_KERNEL in some places.
>>
>> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
>
> Acked-by: Michael Chan <mchan@broadcom.com>
Applied.