netdev.vger.kernel.org archive mirror
* [PATCH] bnx2x: use the dma state API instead of the pci equivalents
@ 2010-04-02  2:56 FUJITA Tomonori
  2010-04-04  8:19 ` Vladislav Zolotarov
  0 siblings, 1 reply; 10+ messages in thread
From: FUJITA Tomonori @ 2010-04-02  2:56 UTC (permalink / raw)
  To: netdev; +Cc: eilong

The DMA API is preferred.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
---
 drivers/net/bnx2x.h      |    4 ++--
 drivers/net/bnx2x_main.c |   28 ++++++++++++++--------------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a..ae9c89e 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -163,7 +163,7 @@ do {								 \
 
 struct sw_rx_bd {
 	struct sk_buff	*skb;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 struct sw_tx_bd {
@@ -176,7 +176,7 @@ struct sw_tx_bd {
 
 struct sw_rx_page {
 	struct page	*page;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 union db_prod {
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a7..2a77611 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1086,7 +1086,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+	pci_unmap_page(bp->pdev, dma_unmap_addr(sw_buf, mapping),
 		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1123,7 +1123,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	}
 
 	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1151,7 +1151,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1174,12 +1174,12 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
 	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
+				       dma_unmap_addr(cons_rx_buf, mapping),
 				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
 	*prod_bd = *cons_bd;
 }
 
@@ -1285,7 +1285,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
 				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
 	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1361,7 +1361,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
+		pci_unmap_page(bp->pdev, dma_unmap_addr(&old_rx_pg, mapping),
 			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
@@ -1389,7 +1389,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
+	pci_unmap_single(bp->pdev, dma_unmap_addr(rx_buf, mapping),
 			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 	if (likely(new_skb)) {
@@ -1621,7 +1621,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			}
 
 			pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+					dma_unmap_addr(rx_buf, mapping),
 						       pad + RX_COPY_THRESH,
 						       PCI_DMA_FROMDEVICE);
 			prefetch(skb);
@@ -1666,7 +1666,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			} else
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				pci_unmap_single(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+					dma_unmap_addr(rx_buf, mapping),
 						 bp->rx_buf_size,
 						 PCI_DMA_FROMDEVICE);
 				skb_reserve(skb, pad);
@@ -4941,7 +4941,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
+					 dma_unmap_addr(rx_buf, mapping),
 					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 		dev_kfree_skb(skb);
@@ -4978,7 +4978,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 					fp->disable_tpa = 1;
 					break;
 				}
-				pci_unmap_addr_set((struct sw_rx_bd *)
+				dma_unmap_addr_set((struct sw_rx_bd *)
 							&bp->fp->tpa_pool[i],
 						   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -6907,7 +6907,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 				continue;
 
 			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
+					 dma_unmap_addr(rx_buf, mapping),
 					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 			rx_buf->skb = NULL;
-- 
1.7.0



* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-02  2:56 [PATCH] bnx2x: use the dma state API instead of the pci equivalents FUJITA Tomonori
@ 2010-04-04  8:19 ` Vladislav Zolotarov
  2010-04-04  8:39   ` David Miller
  0 siblings, 1 reply; 10+ messages in thread
From: Vladislav Zolotarov @ 2010-04-04  8:19 UTC (permalink / raw)
  To: FUJITA Tomonori, netdev@vger.kernel.org; +Cc: Eilon Greenstein

Why is it preferable? As far as I can see, the current patch does not introduce any functional change.

Is there a plan to remove the pci_map_X()/pci_alloc_consistent() family of functions in the future and completely replace them with the dma_X() functions? Is it appropriate to use dma_X() in all the places where pci_X() is used? For instance, we do use DAC mode, and as far as I understand we should use the pci_X() interface in that case. Is this rule no longer relevant?

So, if we don't need to use the pci_X() interface anymore, let's replace pci_X() properly all over bnx2x with the dma_X() functions. And if not, this patch mixes macros from one API (dma_X()) with functions from another (pci_X()), which can hardly be called "preferable"...

Thanks,
vlad

> -----Original Message-----
> From: netdev-owner@vger.kernel.org 
> [mailto:netdev-owner@vger.kernel.org] On Behalf Of FUJITA Tomonori
> Sent: Friday, April 02, 2010 5:57 AM
> To: netdev@vger.kernel.org
> Cc: Eilon Greenstein
> Subject: [PATCH] bnx2x: use the dma state API instead of the 
> pci equivalents
> 
> The DMA API is preferred.
> 
> Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
> ---
> [full patch snipped]


* Re: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-04  8:19 ` Vladislav Zolotarov
@ 2010-04-04  8:39   ` David Miller
  2010-04-04  9:15     ` Vladislav Zolotarov
  0 siblings, 1 reply; 10+ messages in thread
From: David Miller @ 2010-04-04  8:39 UTC (permalink / raw)
  To: vladz; +Cc: fujita.tomonori, netdev, eilong

From: "Vladislav Zolotarov" <vladz@broadcom.com>
Date: Sun, 4 Apr 2010 01:19:07 -0700

> So, if we don't need to use the pci_X() interface anymore, let's replace
> pci_X() properly all over bnx2x with the dma_X() functions.

I think Fujita's plan of gradual and partial transformations is
legitimate, and his changes shouldn't be rejected because he simply
isn't modifying all of the interfaces used by this driver but rather
just a specific subset he is trying to transform across the tree.

Please rescind your objections.

Thanks.


* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-04  8:39   ` David Miller
@ 2010-04-04  9:15     ` Vladislav Zolotarov
  2010-04-04 10:03       ` FUJITA Tomonori
  0 siblings, 1 reply; 10+ messages in thread
From: Vladislav Zolotarov @ 2010-04-04  9:15 UTC (permalink / raw)
  To: David Miller
  Cc: fujita.tomonori@lab.ntt.co.jp, netdev@vger.kernel.org,
	Eilon Greenstein

According to the changes in PCI-DMA-mapping.txt, it sounds like the trend is to stop using the pci_dma_* API and start using the dma_* API instead. Does this mean that using the pci_dma_* API is deprecated?

Thanks,
vlad

> -----Original Message-----
> From: David Miller [mailto:davem@davemloft.net] 
> Sent: Sunday, April 04, 2010 11:39 AM
> To: Vladislav Zolotarov
> Cc: fujita.tomonori@lab.ntt.co.jp; netdev@vger.kernel.org; 
> Eilon Greenstein
> Subject: Re: [PATCH] bnx2x: use the dma state API instead of 
> the pci equivalents
> 
> From: "Vladislav Zolotarov" <vladz@broadcom.com>
> Date: Sun, 4 Apr 2010 01:19:07 -0700
> 
> > So, if we don't need to use the pci_X() interface anymore, let's replace
> > pci_X() properly all over bnx2x with the dma_X() functions.
> 
> I think Fujita's plan of gradual and partial transformations is
> legitimate, and his changes shouldn't be rejected because he simply
> isn't modifying all of the interfaces used by this driver but rather
> just a specific subset he is trying to transform across the tree.
> 
> Please rescind your objections.
> 
> Thanks.
> 
> 


* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-04  9:15     ` Vladislav Zolotarov
@ 2010-04-04 10:03       ` FUJITA Tomonori
  2010-04-04 10:24         ` Vladislav Zolotarov
  0 siblings, 1 reply; 10+ messages in thread
From: FUJITA Tomonori @ 2010-04-04 10:03 UTC (permalink / raw)
  To: vladz; +Cc: davem, fujita.tomonori, netdev, eilong

On Sun, 4 Apr 2010 02:15:52 -0700
"Vladislav Zolotarov" <vladz@broadcom.com> wrote:

> According to the changes in PCI-DMA-mapping.txt, it sounds like the
> trend is to stop using the pci_dma_* API and start using the dma_*
> API instead. Does this mean that using the pci_dma_* API is
> deprecated?

Sorry that I didn't put enough information in the patch
description.

In the long term, I want to remove the pci_dma_* API.

We used to have various bus-specific DMA APIs (pci, sbus, etc.). They
were a headache for driver writers handling devices on multiple buses,
so we invented the generic DMA API long ago. Now only two bus-specific
APIs remain (pci and ssb), and I want to remove them and make sure that
driver writers are always able to use the generic DMA API with any
bus.

http://lwn.net/Articles/374137/
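
As a minimal illustration (a hypothetical helper, not code from this
patch): with the generic API, a driver that only holds a struct device
can map a buffer the same way regardless of which bus the device sits
on, e.g.

	/* dma_map_single() comes from <linux/dma-mapping.h>; dev is the
	 * embedded struct device, e.g. &pdev->dev for a PCI device. */
	static dma_addr_t map_tx_buffer(struct device *dev, void *buf,
					size_t len)
	{
		return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	}

whereas the bus-specific APIs would force a pci_map_single() path for
PCI devices and a different one for every other bus.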


I don't plan to convert the whole tree from the PCI DMA API to the
DMA API all at once; I'm converting the drivers gradually. I have
already removed the PCI DMA API from the docs under Documentation/.
It would be nice if driver maintainers (or others) converted their
own drivers.

This patchset converts only the pci state API (such as
DECLARE_PCI_UNMAP_ADDR) because akpm complained about that API and I
promised him I would clean it up:

http://lkml.org/lkml/2010/2/12/415
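
For reference, the dma state macros are roughly defined like this (a
sketch from memory of include/linux/dma-mapping.h; the field only
exists when the architecture actually needs the DMA address at unmap
time, and compiles away to nothing otherwise):

	#ifdef CONFIG_NEED_DMA_MAP_STATE
	#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
	#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
	#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
	#else
	#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
	#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
	#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
	#endif

So the conversion is mechanical, and the state costs nothing on
architectures that don't set CONFIG_NEED_DMA_MAP_STATE.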


I'll send a patch if you'd like me to convert bnx2x to use the DMA
API completely.



* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-04 10:03       ` FUJITA Tomonori
@ 2010-04-04 10:24         ` Vladislav Zolotarov
  2010-04-04 11:51           ` FUJITA Tomonori
  0 siblings, 1 reply; 10+ messages in thread
From: Vladislav Zolotarov @ 2010-04-04 10:24 UTC (permalink / raw)
  To: FUJITA Tomonori
  Cc: davem@davemloft.net, netdev@vger.kernel.org, Eilon Greenstein

OK, got it now. Thanks, Fujita. I think we should patch bnx2x to use the generic model (not just the mapping macros).

One last question: as of which kernel version may the generic DMA layer be used instead of the PCI DMA layer?

Thanks,
vlad

> -----Original Message-----
> From: netdev-owner@vger.kernel.org 
> [mailto:netdev-owner@vger.kernel.org] On Behalf Of FUJITA Tomonori
> Sent: Sunday, April 04, 2010 1:03 PM
> To: Vladislav Zolotarov
> Cc: davem@davemloft.net; fujita.tomonori@lab.ntt.co.jp; 
> netdev@vger.kernel.org; Eilon Greenstein
> Subject: RE: [PATCH] bnx2x: use the dma state API instead of 
> the pci equivalents
> 
> On Sun, 4 Apr 2010 02:15:52 -0700
> "Vladislav Zolotarov" <vladz@broadcom.com> wrote:
> 
> > According to the changes in PCI-DMA-mapping.txt, it sounds like the
> > trend is to stop using the pci_dma_* API and start using the dma_*
> > API instead. Does this mean that using the pci_dma_* API is
> > deprecated?
> 
> Sorry that I didn't put enough information in the patch
> description.
> 
> In the long term, I want to remove the pci_dma_* API.
> 
> We used to have various bus-specific DMA APIs (pci, sbus, etc.). They
> were a headache for driver writers handling devices on multiple buses,
> so we invented the generic DMA API long ago. Now only two bus-specific
> APIs remain (pci and ssb), and I want to remove them and make sure that
> driver writers are always able to use the generic DMA API with any
> bus.
> 
> http://lwn.net/Articles/374137/
> 
> 
> I don't plan to convert the whole tree from the PCI DMA API to the
> DMA API all at once; I'm converting the drivers gradually. I have
> already removed the PCI DMA API from the docs under Documentation/.
> It would be nice if driver maintainers (or others) converted their
> own drivers.
> 
> This patchset converts only the pci state API (such as
> DECLARE_PCI_UNMAP_ADDR) because akpm complained about that API and I
> promised him I would clean it up:
> 
> http://lkml.org/lkml/2010/2/12/415
> 
> 
> I'll send a patch if you'd like me to convert bnx2x to use the DMA
> API completely.
> 


* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-04 10:24         ` Vladislav Zolotarov
@ 2010-04-04 11:51           ` FUJITA Tomonori
  2010-04-06  7:39             ` Vladislav Zolotarov
  0 siblings, 1 reply; 10+ messages in thread
From: FUJITA Tomonori @ 2010-04-04 11:51 UTC (permalink / raw)
  To: vladz; +Cc: fujita.tomonori, davem, netdev, eilong

On Sun, 4 Apr 2010 03:24:46 -0700
"Vladislav Zolotarov" <vladz@broadcom.com> wrote:

> OK, got it now. Thanks, Fujita. I think we should patch bnx2x to
> use the generic model (not just the mapping macros).

I've attached the patch.

There is one functional change: pci_alloc_consistent ->
dma_alloc_coherent

pci_alloc_consistent is a wrapper around dma_alloc_coherent with the
GFP_ATOMIC flag (see include/asm-generic/pci-dma-compat.h).

pci_alloc_consistent uses the GFP_ATOMIC flag for compatibility with
some broken drivers that call the function in interrupt context, but
GFP_ATOMIC should be avoided where possible. It looks like bnx2x
doesn't call pci_alloc_consistent in interrupt context, so I replaced
those calls with dma_alloc_coherent and GFP_KERNEL.

Please check that this change works for bnx2x.
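
The compat wrapper looks roughly like this (a sketch from memory of
include/asm-generic/pci-dma-compat.h):

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
					  size, dma_handle, GFP_ATOMIC);
	}

Calling dma_alloc_coherent directly lets the driver pass GFP_KERNEL
instead of being stuck with the hard-coded GFP_ATOMIC.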

> One last question: as of which kernel version may the generic DMA
> layer be used instead of the PCI DMA layer?

After 2.6.34-rc2.

Well, on the majority of architectures you have long been able to use
the generic DMA API in place of the PCI DMA API, since the PCI DMA API
is just a wrapper around the generic DMA API. But on some architectures
the two APIs behaved slightly differently; since 2.6.34-rc2 the two
APIs work in exactly the same way on all architectures.
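
For example, the streaming-mapping wrapper is roughly (again a sketch
from memory of include/asm-generic/pci-dma-compat.h):

	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
		       int direction)
	{
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
				      ptr, size,
				      (enum dma_data_direction)direction);
	}

which is why converting a call is mostly a matter of swapping the
function name, passing &pdev->dev, and using the DMA_* direction
constants.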


=
From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Subject: [PATCH] bnx2x: use the DMA API instead of the pci equivalents

The DMA API is preferred.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
---
 drivers/net/bnx2x.h      |    4 +-
 drivers/net/bnx2x_main.c |  110 +++++++++++++++++++++++----------------------
 2 files changed, 58 insertions(+), 56 deletions(-)

diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a..ae9c89e 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -163,7 +163,7 @@ do {								 \
 
 struct sw_rx_bd {
 	struct sk_buff	*skb;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 struct sw_tx_bd {
@@ -176,7 +176,7 @@ struct sw_tx_bd {
 
 struct sw_rx_page {
 	struct page	*page;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 union db_prod {
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fa9275c..63a17d6 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -842,7 +842,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +872,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 		if (--nbd)
 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
@@ -1086,7 +1086,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
 		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1115,15 +1115,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		return -ENOMEM;
 	}
 
 	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1143,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1173,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
-				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
 	*prod_bd = *cons_bd;
 }
 
@@ -1283,9 +1283,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
 	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1361,8 +1361,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1390,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1620,10 +1621,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				}
 			}
 
-			pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
-						       pad + RX_COPY_THRESH,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+						   pad + RX_COPY_THRESH,
+						   DMA_FROM_DEVICE);
 			prefetch(skb);
 			prefetch(((char *)(skb)) + 128);
 
@@ -1665,10 +1666,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 			} else
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-				pci_unmap_single(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+				dma_unmap_single(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
 						 bp->rx_buf_size,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
 
@@ -4940,9 +4941,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		}
 
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
@@ -4978,7 +4979,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 					fp->disable_tpa = 1;
 					break;
 				}
-				pci_unmap_addr_set((struct sw_rx_bd *)
+				dma_unmap_addr_set((struct sw_rx_bd *)
 							&bp->fp->tpa_pool[i],
 						   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5658,8 +5659,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 
 static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
-	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
-					      &bp->gunzip_mapping);
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
 	if (bp->gunzip_buf  == NULL)
 		goto gunzip_nomem1;
 
@@ -5679,8 +5680,8 @@ gunzip_nomem3:
 	bp->strm = NULL;
 
 gunzip_nomem2:
-	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-			    bp->gunzip_mapping);
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
 	bp->gunzip_buf = NULL;
 
 gunzip_nomem1:
@@ -5696,8 +5697,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
 	bp->strm = NULL;
 
 	if (bp->gunzip_buf) {
-		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-				    bp->gunzip_mapping);
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
 		bp->gunzip_buf = NULL;
 	}
 }
@@ -6692,7 +6693,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 #define BNX2X_PCI_FREE(x, y, size) \
 	do { \
 		if (x) { \
-			pci_free_consistent(bp->pdev, size, x, y); \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
 			x = NULL; \
 			y = 0; \
 		} \
@@ -6773,7 +6774,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 
 #define BNX2X_PCI_ALLOC(x, y, size) \
 	do { \
-		x = pci_alloc_consistent(bp->pdev, size, y); \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 		if (x == NULL) \
 			goto alloc_mem_err; \
 		memset(x, 0, size); \
@@ -6906,9 +6907,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 			if (skb == NULL)
 				continue;
 
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -10269,8 +10270,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	bd_prod = TX_BD(fp_tx->tx_bd_prod);
 	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -11316,8 +11317,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11374,8 +11375,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (total_pkt_bd == NULL)
 			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 
-		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-				       frag->size, PCI_DMA_TODEVICE);
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
 
 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11832,15 +11834,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 		goto err_out_release;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
 		bp->flags |= USING_DAC_FLAG;
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+			pr_err("dma_set_coherent_mask failed, aborting\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 		pr_err("System does not support DMA, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
-- 
1.7.0



* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-04 11:51           ` FUJITA Tomonori
@ 2010-04-06  7:39             ` Vladislav Zolotarov
  2010-04-06 13:41               ` Eilon Greenstein
  0 siblings, 1 reply; 10+ messages in thread
From: Vladislav Zolotarov @ 2010-04-06  7:39 UTC (permalink / raw)
  To: FUJITA Tomonori
  Cc: davem@davemloft.net, netdev@vger.kernel.org, Eilon Greenstein

Thanks, Fujita.

The patch looks fine. I'll run some regression tests on the patched driver to check that things still work, and if everything is OK we will ack it shortly.

vlad



> -----Original Message-----
> From: netdev-owner@vger.kernel.org
> [mailto:netdev-owner@vger.kernel.org] On Behalf Of FUJITA Tomonori
> Sent: Sunday, April 04, 2010 2:51 PM
> To: Vladislav Zolotarov
> Cc: fujita.tomonori@lab.ntt.co.jp; davem@davemloft.net;
> netdev@vger.kernel.org; Eilon Greenstein
> Subject: RE: [PATCH] bnx2x: use the dma state API instead of
> the pci equivalents
>
> On Sun, 4 Apr 2010 03:24:46 -0700
> "Vladislav Zolotarov" <vladz@broadcom.com> wrote:
>
> > OK, got it now. Thanks, Fujita. I think we should patch bnx2x to
> > use the generic model (not just the mapping macros).
>
> I've attached the patch.
>
> There is one functional change: pci_alloc_consistent ->
> dma_alloc_coherent
>
> pci_alloc_consistent is a wrapper around dma_alloc_coherent with the
> GFP_ATOMIC flag (see include/asm-generic/pci-dma-compat.h).
>
> pci_alloc_consistent uses the GFP_ATOMIC flag for compatibility with
> some broken drivers that call the function in interrupt context, but
> GFP_ATOMIC should be avoided where possible. It looks like bnx2x
> doesn't call pci_alloc_consistent in interrupt context, so I replaced
> those calls with dma_alloc_coherent and GFP_KERNEL.
>
> Please check that this change works for bnx2x.
>
> > One last question: as of which kernel version may the generic DMA
> > layer be used instead of the PCI DMA layer?
>
> After 2.6.34-rc2.
>
> Well, on the majority of architectures you have long been able to use
> the generic DMA API in place of the PCI DMA API, since the PCI DMA API
> is just a wrapper around the generic DMA API. But on some architectures
> the two APIs behaved slightly differently; since 2.6.34-rc2 the two
> APIs work in exactly the same way on all architectures.
>
>
> =
> From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
> Subject: [PATCH] bnx2x: use the DMA API instead of the pci equivalents
>
> The DMA API is preferred.
>
> Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
> ---
> [full patch snipped]



* RE: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-06  7:39             ` Vladislav Zolotarov
@ 2010-04-06 13:41               ` Eilon Greenstein
  2010-04-08  4:06                 ` David Miller
  0 siblings, 1 reply; 10+ messages in thread
From: Eilon Greenstein @ 2010-04-06 13:41 UTC (permalink / raw)
  To: FUJITA Tomonori, davem@davemloft.net
  Cc: netdev@vger.kernel.org, Vladislav Zolotarov

On Tue, 2010-04-06 at 00:39 -0700, Vladislav Zolotarov wrote:
> Thanks, Fujita.
> 
> The patch looks fine. I'll run some regression tests on the patched driver to check that things still work, and if everything is OK we will ack it shortly.
> 
> vlad
> 
> 

> > =
> > From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
> > Subject: [PATCH] bnx2x: use the DMA API instead of the pci equivalents
> >
> > The DMA API is preferred.
> >
> > Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Vlad's testing with this patch finished successfully.

Thanks Fujita!
Acked-by: Vladislav Zolotarov <vladz@broadcom.com>
Acked-by: Eilon Greenstein <eilong@broadcom.com>




* Re: [PATCH] bnx2x: use the dma state API instead of the pci equivalents
  2010-04-06 13:41               ` Eilon Greenstein
@ 2010-04-08  4:06                 ` David Miller
  0 siblings, 0 replies; 10+ messages in thread
From: David Miller @ 2010-04-08  4:06 UTC (permalink / raw)
  To: eilong; +Cc: fujita.tomonori, netdev, vladz

From: "Eilon Greenstein" <eilong@broadcom.com>
Date: Tue, 6 Apr 2010 16:41:18 +0300

> On Tue, 2010-04-06 at 00:39 -0700, Vladislav Zolotarov wrote:
>> Thanks, Fujita.
>> 
>> The patch looks fine. I'll run some regression tests on the patched driver to check that things still work, and if everything is OK we will ack it shortly.
>> 
>> vlad
>> 
>> 
> 
>> > =
>> > From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
>> > Subject: [PATCH] bnx2x: use the DMA API instead of the pci equivalents
>> >
>> > The DMA API is preferred.
>> >
>> > Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
> Vlad's testing with this patch finished successfully.
> 
> Thanks Fujita!
> Acked-by: Vladislav Zolotarov <vladz@broadcom.com>
> Acked-by: Eilon Greenstein <eilong@broadcom.com>

Applied, thanks.

