* [net-next-2.6 PATCH 2/7] e1000e: use DMA API instead of PCI DMA functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
@ 2010-04-27 23:09 ` Jeff Kirsher
2010-04-27 23:30 ` David Miller
2010-04-27 23:09 ` [net-next-2.6 PATCH 3/7] igb: convert igb from using PCI DMA functions to using DMA API functions Jeff Kirsher
` (5 subsequent siblings)
6 siblings, 1 reply; 14+ messages in thread
From: Jeff Kirsher @ 2010-04-27 23:09 UTC
To: davem; +Cc: netdev, gospo, Nicholas Nunley, Jeff Kirsher
From: Nick Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/e1000e/ethtool.c | 30 +++++++------
drivers/net/e1000e/netdev.c | 99 +++++++++++++++++++++---------------------
2 files changed, 66 insertions(+), 63 deletions(-)
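For reference before the diff: a minimal sketch of the substitution this patch applies throughout e1000e. This is illustrative only and not part of the patch (the sketch_map_tx helper is hypothetical); each PCI DMA wrapper becomes the equivalent generic DMA API call, taking &pdev->dev in place of pdev and a DMA_* direction in place of PCI_DMA_*.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch, not from the patch: map a Tx buffer the way the
 * converted driver does. Only the device pointer and the direction
 * constant change relative to the old pci_map_single()/PCI_DMA_TODEVICE
 * code; a zero return mirrors the drivers' buffer_info->dma = 0 idiom.
 */
static dma_addr_t sketch_map_tx(struct pci_dev *pdev, void *data, size_t len)
{
	dma_addr_t dma;

	/* was: pci_map_single(pdev, data, len, PCI_DMA_TODEVICE) */
	dma = dma_map_single(&pdev->dev, data, len, DMA_TO_DEVICE);

	/* was: pci_dma_mapping_error(pdev, dma) */
	if (dma_mapping_error(&pdev->dev, dma))
		return 0;

	return dma;
}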
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f..7f9915c 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1069,10 +1069,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
if (tx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tx_ring->buffer_info[i].skb)
dev_kfree_skb(tx_ring->buffer_info[i].skb);
}
@@ -1081,9 +1081,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
if (rx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
rx_ring->buffer_info[i].dma,
- 2048, PCI_DMA_FROMDEVICE);
+ 2048, DMA_FROM_DEVICE);
if (rx_ring->buffer_info[i].skb)
dev_kfree_skb(rx_ring->buffer_info[i].skb);
}
@@ -1163,9 +1163,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1226,9 +1227,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, 2048,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
@@ -1556,10 +1558,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1569,9 +1571,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->buffer_info[l].dma, 2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rx_ring->buffer_info[l].skb, 1024);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2476f8c..3a71215 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -181,10 +181,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
map_skb:
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -250,11 +250,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -279,10 +280,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -369,10 +370,10 @@ check_page:
}
if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -446,10 +447,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -550,12 +551,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -756,9 +756,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other rx routine */
@@ -814,13 +814,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
* kmap_atomic, so we can't hold the mapping
* very long
*/
- pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- pci_dma_sync_single_for_device(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -837,8 +837,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
break;
ps_page = &buffer_info->ps_pages[j];
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
@@ -956,8 +956,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1093,17 +1093,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- pci_unmap_page(pdev, buffer_info->dma,
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -1121,8 +1121,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
@@ -3917,10 +3917,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3952,11 +3953,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -5050,16 +5051,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
* [net-next-2.6 PATCH 3/7] igb: convert igb from using PCI DMA functions to using DMA API functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
2010-04-27 23:09 ` [net-next-2.6 PATCH 2/7] e1000e: " Jeff Kirsher
@ 2010-04-27 23:09 ` Jeff Kirsher
2010-04-27 23:30 ` David Miller
2010-04-27 23:09 ` [net-next-2.6 PATCH 4/7] igbvf: use DMA API instead of PCI DMA functions Jeff Kirsher
` (4 subsequent siblings)
6 siblings, 1 reply; 14+ messages in thread
From: Jeff Kirsher @ 2010-04-27 23:09 UTC
To: davem; +Cc: netdev, gospo, Alexander Duyck, Jeff Kirsher
From: Alexander Duyck <alexander.h.duyck@intel.com>
This patch makes it so that igb now uses the DMA API functions instead of
the PCI API functions. To do this, the pci_dev pointer that was in the
rings has been replaced with a device pointer, and as a result all
references to [tr]x_ring->pdev have been replaced with [tr]x_ring->dev.
This patch is based on work originally done by Nicholas Nunley.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/igb/igb.h | 2 -
drivers/net/igb/igb_ethtool.c | 8 ++-
drivers/net/igb/igb_main.c | 109 +++++++++++++++++++++--------------------
3 files changed, 61 insertions(+), 58 deletions(-)
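The key structural change described above is that the ring carries a bare struct device * (set once from &adapter->pdev->dev), so mapping and allocation code no longer touches the pci_dev at all. A stripped-down sketch of the idea, with hypothetical names and not part of the patch:

#include <linux/dma-mapping.h>

/* Illustrative sketch with hypothetical names, not from the patch:
 * the ring stores a struct device *, so every DMA call works directly
 * from the ring without dereferencing a pci_dev.
 */
struct sketch_ring {
	struct device *dev;	/* set once: ring->dev = &pdev->dev */
	void *desc;		/* descriptor ring memory */
	dma_addr_t dma;		/* bus address of the ring */
	unsigned int size;	/* length of desc. ring in bytes */
};

static int sketch_alloc_ring(struct sketch_ring *ring)
{
	ring->desc = dma_alloc_coherent(ring->dev, ring->size,
					&ring->dma, GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}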
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7d288cc..096a526 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -186,7 +186,7 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
- struct pci_dev *pdev; /* pci device for dma mapping */
+ struct device *dev; /* device pointer for dma mapping */
dma_addr_t dma; /* phys address of the ring */
void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 1b8fd7f..f2ebf92 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1394,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->pdev = adapter->pdev;
+ tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1408,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->pdev = adapter->pdev;
+ rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1604,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
buffer_info = &rx_ring->buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* verify contents of skb */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 919e363..9d042fe 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -350,7 +350,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
@@ -364,7 +364,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -1398,15 +1398,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -2080,7 +2080,7 @@ static int igb_close(struct net_device *netdev)
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2093,9 +2093,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev,
- tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2106,7 +2107,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->buffer_info);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -2230,7 +2231,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2245,8 +2246,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -2259,8 +2262,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- dev_err(&pdev->dev, "Unable to allocate memory for "
- "the receive descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
return -ENOMEM;
}
@@ -2636,8 +2639,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
if (!tx_ring->desc)
return;
- pci_free_consistent(tx_ring->pdev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2661,15 +2664,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(tx_ring->pdev,
+ dma_unmap_page(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(tx_ring->pdev,
+ dma_unmap_single(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -2740,8 +2743,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->desc)
return;
- pci_free_consistent(rx_ring->pdev, rx_ring->size,
- rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2777,10 +2780,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -2789,10 +2792,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
- pci_unmap_page(rx_ring->pdev,
+ dma_unmap_page(rx_ring->dev,
buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
}
if (buffer_info->page) {
@@ -3480,7 +3483,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
@@ -3531,7 +3534,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
break;
default:
if (unlikely(net_ratelimit()))
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
@@ -3565,7 +3568,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
struct igb_buffer *buffer_info;
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
unsigned int len = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
@@ -3578,9 +3581,9 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
@@ -3600,12 +3603,12 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
}
@@ -3617,7 +3620,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
return ++count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed buffer_info mapping */
buffer_info->dma = 0;
@@ -5059,7 +5062,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(&tx_ring->pdev->dev,
+ dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH <%x>\n"
@@ -5138,7 +5141,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+ dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
@@ -5193,7 +5196,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
{
struct igb_ring *rx_ring = q_vector->rx_ring;
struct net_device *netdev = rx_ring->netdev;
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
@@ -5233,9 +5236,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
cleaned_count++;
if (buffer_info->dma) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
@@ -5245,8 +5248,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -5354,12 +5357,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(rx_ring->pdev, buffer_info->page,
+ dma_map_page(rx_ring->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->page_dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->page_dma)) {
buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5377,12 +5380,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->skb = skb;
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_single(rx_ring->pdev,
+ buffer_info->dma = dma_map_single(rx_ring->dev,
skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->dma)) {
buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
* Re: [net-next-2.6 PATCH 3/7] igb: convert igb from using PCI DMA functions to using DMA API functions
2010-04-27 23:09 ` [net-next-2.6 PATCH 3/7] igb: convert igb from using PCI DMA functions to using DMA API functions Jeff Kirsher
@ 2010-04-27 23:30 ` David Miller
0 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2010-04-27 23:30 UTC
To: jeffrey.t.kirsher; +Cc: netdev, gospo, alexander.h.duyck
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Tue, 27 Apr 2010 16:09:25 -0700
> From: Alexander Duyck <alexander.h.duyck@intel.com>
>
> This patch makes it so that igb now uses the DMA API functions instead of
> the PCI API functions. To do this, the pci_dev pointer that was in the
> rings has been replaced with a device pointer, and as a result all
> references to [tr]x_ring->pdev have been replaced with [tr]x_ring->dev.
>
> This patch is based on work originally done by Nicholas Nunley.
> Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Applied.
* [net-next-2.6 PATCH 4/7] igbvf: use DMA API instead of PCI DMA functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
2010-04-27 23:09 ` [net-next-2.6 PATCH 2/7] e1000e: " Jeff Kirsher
2010-04-27 23:09 ` [net-next-2.6 PATCH 3/7] igb: convert igb from using PCI DMA functions to using DMA API functions Jeff Kirsher
@ 2010-04-27 23:09 ` Jeff Kirsher
2010-04-27 23:30 ` David Miller
2010-04-27 23:10 ` [net-next-2.6 PATCH 5/7] ixgb: " Jeff Kirsher
` (3 subsequent siblings)
6 siblings, 1 reply; 14+ messages in thread
From: Jeff Kirsher @ 2010-04-27 23:09 UTC
To: davem; +Cc: netdev, gospo, Nicholas Nunley, Jeff Kirsher
From: Nick Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/igbvf/netdev.c | 74 +++++++++++++++++++++++---------------------
1 files changed, 38 insertions(+), 36 deletions(-)
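One behavioral note on the coherent allocations converted below: pci_alloc_consistent() hard-coded GFP_ATOMIC, while dma_alloc_coherent() takes the gfp flag explicitly, so these process-context setup paths can pass GFP_KERNEL. A minimal sketch of the alloc/free pairing (illustrative only, hypothetical helper names):

#include <linux/dma-mapping.h>

/* Illustrative sketch, not from the patch: descriptor ring setup and
 * teardown via the generic API. The explicit GFP_KERNEL is the one
 * functional difference from the old pci_alloc_consistent() wrapper.
 */
static void *sketch_ring_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void sketch_ring_free(struct device *dev, size_t size,
			     void *desc, dma_addr_t dma)
{
	dma_free_coherent(dev, size, desc, dma);
}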
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 9ac3f94..7012e3d 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -164,10 +164,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(pdev, buffer_info->page,
+ dma_map_page(&pdev->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!buffer_info->skb) {
@@ -178,9 +178,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -268,25 +268,25 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
if (!skb_shinfo(skb)->nr_frags) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb, hlen);
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -369,15 +369,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -438,8 +438,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -480,8 +480,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -549,7 +549,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -574,13 +575,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->rx_ps_hdr_size){
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
}
@@ -592,9 +593,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
if (buffer_info->page) {
if (buffer_info->page_dma)
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(buffer_info->page);
buffer_info->page = NULL;
buffer_info->page_dma = 0;
@@ -2104,9 +2106,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -2127,12 +2129,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
}
@@ -2644,16 +2646,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
* [net-next-2.6 PATCH 5/7] ixgb: use DMA API instead of PCI DMA functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
` (2 preceding siblings ...)
2010-04-27 23:09 ` [net-next-2.6 PATCH 4/7] igbvf: use DMA API instead of PCI DMA functions Jeff Kirsher
@ 2010-04-27 23:10 ` Jeff Kirsher
2010-04-27 23:30 ` David Miller
2010-04-27 23:10 ` [net-next-2.6 PATCH 6/7] ixgbe: " Jeff Kirsher
` (2 subsequent siblings)
6 siblings, 1 reply; 14+ messages in thread
From: Jeff Kirsher @ 2010-04-27 23:10 UTC
To: davem; +Cc: netdev, gospo, Nicholas Nunley, Jeff Kirsher
From: Nick Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgb/ixgb_main.c | 70 +++++++++++++++++++++++-------------------
1 files changed, 39 insertions(+), 31 deletions(-)
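The probe change below is the same 64-bit-then-32-bit mask negotiation used across the series. A condensed sketch that mirrors the patch's control flow (illustrative only; sketch_set_dma_masks is a hypothetical helper):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch mirroring the probe logic below: prefer 64-bit
 * streaming and coherent masks (enabling DAC); otherwise fall back to
 * 32-bit, and fail probe if no usable mask can be set.
 */
static int sketch_set_dma_masks(struct pci_dev *pdev, int *pci_using_dac)
{
	int err;

	*pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			*pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				return err; /* no usable DMA configuration */
		}
	}
	return 0;
}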
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 3cf7951..d58ca6b 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -368,16 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
+ }
}
- pci_using_dac = 0;
}
err = pci_request_regions(pdev, ixgb_driver_name);
@@ -673,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
@@ -762,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
@@ -883,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+ dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
adapter->tx_ring.desc = NULL;
}
@@ -895,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -966,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -990,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
buffer_info->length = 0;
}
@@ -1300,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1341,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- pci_map_page(pdev, frag->page,
- offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ dma_map_page(&pdev->dev, frag->page,
+ offset, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1962,10 +1970,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -2088,10 +2096,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
* [net-next-2.6 PATCH 6/7] ixgbe: use DMA API instead of PCI DMA functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
` (3 preceding siblings ...)
2010-04-27 23:10 ` [net-next-2.6 PATCH 5/7] ixgb: " Jeff Kirsher
@ 2010-04-27 23:10 ` Jeff Kirsher
2010-04-27 23:30 ` David Miller
2010-04-27 23:10 ` [net-next-2.6 PATCH 7/7] ixgbevf: " Jeff Kirsher
2010-04-27 23:30 ` [net-next-2.6 PATCH 1/7] e1000: " David Miller
6 siblings, 1 reply; 14+ messages in thread
From: Jeff Kirsher @ 2010-04-27 23:10 UTC
To: davem; +Cc: netdev, gospo, Nicholas Nunley, Jeff Kirsher
From: Nick Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbe/ixgbe_ethtool.c | 42 +++++++++++---------
drivers/net/ixgbe/ixgbe_main.c | 77 ++++++++++++++++++++-----------------
2 files changed, 64 insertions(+), 55 deletions(-)
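The ethtool loopback test converted below exercises the streaming-sync half of the API. A minimal sketch of the ownership handoff it performs, illustrative only and not part of the patch:

#include <linux/dma-mapping.h>

/* Illustrative sketch, not from the patch: ownership handoff around a
 * looped-back frame. The device must own the Tx buffer before transmit,
 * and the CPU must reclaim the Rx buffer before inspecting its contents.
 */
static void sketch_loopback_sync(struct device *dev,
				 dma_addr_t tx_dma, size_t tx_len,
				 dma_addr_t rx_dma, size_t rx_len)
{
	/* hand the Tx buffer to the device
	 * (was pci_dma_sync_single_for_device) */
	dma_sync_single_for_device(dev, tx_dma, tx_len, DMA_TO_DEVICE);

	/* ... hardware loops the frame back ... */

	/* take the Rx buffer back for the CPU
	 * (was pci_dma_sync_single_for_cpu) */
	dma_sync_single_for_cpu(dev, rx_dma, rx_len, DMA_FROM_DEVICE);
}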
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dfbfe35..dc7fd5b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_tx_buffer *buf =
&(tx_ring->tx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, buf->dma,
+ buf->length, DMA_TO_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_rx_buffer *buf =
&(rx_ring->rx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
+ dma_unmap_single(&pdev->dev, buf->dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
}
if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1520,8 +1520,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma))) {
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!(tx_ring->desc)) {
ret_val = 2;
goto err_nomem;
}
@@ -1563,8 +1564,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
desc->read.buffer_addr =
cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1594,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma))) {
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!(rx_ring->desc)) {
ret_val = 5;
goto err_nomem;
}
@@ -1661,8 +1663,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1777,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
ixgbe_create_lbtest_frame(
tx_ring->tx_buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->tx_buffer_info[k].dma,
tx_ring->tx_buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (unlikely(++k == tx_ring->count))
k = 0;
}
@@ -1789,10 +1791,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
good_cnt = 0;
do {
/* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->rx_buffer_info[l].dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = ixgbe_check_lbtest_frame(
rx_ring->rx_buffer_info[l].skb, 1024);
if (!ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5258b3d..7724fb2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -266,15 +266,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -721,10 +721,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!bi->skb) {
@@ -743,9 +743,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- skb->data));
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -886,16 +886,17 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
else
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev,
+ rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -937,9 +938,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
@@ -3154,9 +3156,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -3165,9 +3167,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
}
skb = skb->prev;
@@ -3177,8 +3180,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
put_page(rx_buffer_info->page);
@@ -4403,8 +4406,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4474,7 +4477,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
@@ -4535,7 +4539,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -4572,7 +4577,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5442,10 +5448,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(pdev,
+ tx_buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5478,12 +5484,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6061,13 +6067,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
* [net-next-2.6 PATCH 7/7] ixgbevf: use DMA API instead of PCI DMA functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
` (4 preceding siblings ...)
2010-04-27 23:10 ` [net-next-2.6 PATCH 6/7] ixgbe: " Jeff Kirsher
@ 2010-04-27 23:10 ` Jeff Kirsher
2010-04-27 23:30 ` David Miller
2010-04-27 23:30 ` [net-next-2.6 PATCH 1/7] e1000: " David Miller
6 siblings, 1 reply; 14+ messages in thread
From: Jeff Kirsher @ 2010-04-27 23:10 UTC
To: davem; +Cc: netdev, gospo, Nicholas Nunley, Jeff Kirsher
From: Nick Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
---
drivers/net/ixgbevf/ixgbevf_main.c | 68 +++++++++++++++++++-----------------
1 files changed, 35 insertions(+), 33 deletions(-)
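A recurring detail in the Tx cleanup paths below: a buffer must be unmapped with the call that matches how it was mapped, which the drivers track via mapped_as_page. A short sketch (illustrative only, hypothetical helper name):

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Illustrative sketch, not from the patch: dma_unmap_page() pairs with
 * dma_map_page() for skb fragments, dma_unmap_single() pairs with
 * dma_map_single() for linear skb data.
 */
static void sketch_unmap_tx(struct device *dev, dma_addr_t dma,
			    size_t len, bool mapped_as_page)
{
	if (mapped_as_page)
		dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}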
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index f484161..0870740 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -1721,9 +1721,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -1737,8 +1737,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
@@ -2445,7 +2445,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2490,8 +2491,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2561,8 +2562,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
hw_dbg(&adapter->hw,
@@ -2623,7 +2624,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2935,10 +2937,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -2964,13 +2966,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset,
size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -3311,14 +3313,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
* Re: [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions
2010-04-27 23:08 [net-next-2.6 PATCH 1/7] e1000: use DMA API instead of PCI DMA functions Jeff Kirsher
` (5 preceding siblings ...)
2010-04-27 23:10 ` [net-next-2.6 PATCH 7/7] ixgbevf: " Jeff Kirsher
@ 2010-04-27 23:30 ` David Miller
6 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2010-04-27 23:30 UTC
To: jeffrey.t.kirsher; +Cc: netdev, gospo, nicholasx.d.nunley
From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Date: Tue, 27 Apr 2010 16:08:45 -0700
> From: Nick Nunley <nicholasx.d.nunley@intel.com>
>
> Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Applied.