linux-arm-kernel.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
From: jszhang@marvell.com (Jisheng Zhang)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH net-next v3 3/4] net: mvneta: avoid reading from tx_desc as much as possible
Date: Mon, 20 Feb 2017 20:53:43 +0800	[thread overview]
Message-ID: <20170220125344.3555-4-jszhang@marvell.com> (raw)
In-Reply-To: <20170220125344.3555-1-jszhang@marvell.com>

In hot code path such as mvneta_tx(), mvneta_txq_bufs_free() etc. we
access tx_desc several times. The tx_desc is allocated by
dma_alloc_coherent, it's uncacheable if the device isn't cache-coherent,
reading from uncached memory is fairly slow. So use a local variable to
store what we need, to avoid extra reads from uncached memory.

We get the following performance data on Marvell BG4CT Platforms
(tested with iperf):

before the patch:
sending 1GB in mvneta_tx() (TSO disabled) costs 793553760ns

after the patch:
sending 1GB in mvneta_tx() (TSO disabled) costs 719953800ns

we saved 9.2% time.

Signed-off-by: Jisheng Zhang <jszhang@marvell.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 50 ++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a25042801eec..b6cda4131c78 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1770,6 +1770,7 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 		struct mvneta_tx_desc *tx_desc = txq->descs +
 			txq->txq_get_index;
 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+		u32 dma_addr = tx_desc->buf_phys_addr;
 
 		if (skb) {
 			bytes_compl += skb->len;
@@ -1778,9 +1779,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 
 		mvneta_txq_inc_get(txq);
 
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
-			dma_unmap_single(pp->dev->dev.parent,
-					 tx_desc->buf_phys_addr,
+		if (!IS_TSO_HEADER(txq, dma_addr))
+			dma_unmap_single(pp->dev->dev.parent, dma_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
 		if (!skb)
 			continue;
@@ -2191,17 +2191,18 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 		    bool last_tcp, bool is_last)
 {
 	struct mvneta_tx_desc *tx_desc;
+	dma_addr_t dma_addr;
 
 	tx_desc = mvneta_txq_next_desc_get(txq);
 	tx_desc->data_size = size;
-	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
-						size, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev.parent,
-		     tx_desc->buf_phys_addr))) {
+
+	dma_addr = dma_map_single(dev->dev.parent, data, size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
 		mvneta_txq_desc_put(txq);
 		return -ENOMEM;
 	}
 
+	tx_desc->buf_phys_addr = dma_addr;
 	tx_desc->command = 0;
 	txq->tx_skb[txq->txq_put_index] = NULL;
 
@@ -2278,9 +2279,10 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 	 */
 	for (i = desc_count - 1; i >= 0; i--) {
 		struct mvneta_tx_desc *tx_desc = txq->descs + i;
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+		u32 dma_addr = tx_desc->buf_phys_addr;
+		if (!IS_TSO_HEADER(txq, dma_addr))
 			dma_unmap_single(pp->dev->dev.parent,
-					 tx_desc->buf_phys_addr,
+					 dma_addr,
 					 tx_desc->data_size,
 					 DMA_TO_DEVICE);
 		mvneta_txq_desc_put(txq);
@@ -2296,21 +2298,20 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
 	for (i = 0; i < nr_frags; i++) {
+		dma_addr_t dma_addr;
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		void *addr = page_address(frag->page.p) + frag->page_offset;
 
 		tx_desc = mvneta_txq_next_desc_get(txq);
 		tx_desc->data_size = frag->size;
 
-		tx_desc->buf_phys_addr =
-			dma_map_single(pp->dev->dev.parent, addr,
-				       tx_desc->data_size, DMA_TO_DEVICE);
-
-		if (dma_mapping_error(pp->dev->dev.parent,
-				      tx_desc->buf_phys_addr)) {
+		dma_addr = dma_map_single(pp->dev->dev.parent, addr,
+					  frag->size, DMA_TO_DEVICE);
+		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
 			mvneta_txq_desc_put(txq);
 			goto error;
 		}
+		tx_desc->buf_phys_addr = dma_addr;
 
 		if (i == nr_frags - 1) {
 			/* Last descriptor */
@@ -2351,7 +2352,8 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 	struct mvneta_tx_desc *tx_desc;
 	int len = skb->len;
 	int frags = 0;
-	u32 tx_cmd;
+	u32 tx_cmd, size;
+	dma_addr_t dma_addr;
 
 	if (!netif_running(dev))
 		goto out;
@@ -2368,17 +2370,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 
 	tx_cmd = mvneta_skb_tx_csum(pp, skb);
 
-	tx_desc->data_size = skb_headlen(skb);
+	size = skb_headlen(skb);
+	tx_desc->data_size = size;
 
-	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
-						tx_desc->data_size,
-						DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev.parent,
-				       tx_desc->buf_phys_addr))) {
+	dma_addr = dma_map_single(dev->dev.parent, skb->data,
+				  size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
 		mvneta_txq_desc_put(txq);
 		frags = 0;
 		goto out;
 	}
+	tx_desc->buf_phys_addr = dma_addr;
 
 	if (frags == 1) {
 		/* First and Last descriptor */
@@ -2395,8 +2397,8 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		/* Continue with other skb fragments */
 		if (mvneta_tx_frag_process(pp, skb, txq)) {
 			dma_unmap_single(dev->dev.parent,
-					 tx_desc->buf_phys_addr,
-					 tx_desc->data_size,
+					 dma_addr,
+					 size,
 					 DMA_TO_DEVICE);
 			mvneta_txq_desc_put(txq);
 			frags = 0;
-- 
2.11.0

  parent reply	other threads:[~2017-02-20 12:53 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-02-20 12:53 [PATCH net-next v3 0/4] net: mvneta: improve rx/tx performance Jisheng Zhang
2017-02-20 12:53 ` [PATCH net-next v3 1/4] net: mvneta: avoid getting status from rx_desc as much as possible Jisheng Zhang
2017-02-20 12:53 ` [PATCH net-next v3 2/4] net: mvneta: avoid getting buf_phys_addr from rx_desc again Jisheng Zhang
2017-02-20 12:53 ` Jisheng Zhang [this message]
2017-02-20 12:53 ` [PATCH net-next v3 4/4] net: mvneta: Use cacheable memory to store the rx buffer DMA address Jisheng Zhang
2017-02-20 14:21 ` [PATCH net-next v3 0/4] net: mvneta: improve rx/tx performance Gregory CLEMENT
2017-02-21  4:37   ` Jisheng Zhang
2017-02-21 16:16     ` David Miller
2017-02-21 16:35       ` Marcin Wojtas
2017-02-24 11:56       ` Jisheng Zhang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170220125344.3555-4-jszhang@marvell.com \
    --to=jszhang@marvell.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).