From: Anton Blanchard <anton@samba.org>
To: leitao@linux.vnet.ibm.com
Cc: netdev@vger.kernel.org, michael@ellerman.id.au
Subject: [PATCH 8/14] ehea: Simplify ehea_xmit2 and ehea_xmit3
Date: Tue, 5 Apr 2011 21:39:09 +1000
Message-ID: <20110405213909.31602f45@kryten>
In-Reply-To: <20110405212825.6eb85677@kryten>


Based on a patch from Michael Ellerman, clean up a significant
portion of the transmit path: ehea_xmit2 and ehea_xmit3 duplicated
most of their checksum setup. Even worse, we were always checksumming
TX packets, ignoring the skb->ip_summed field.

Also remove NETIF_F_FRAGLIST from dev->features; I'm not sure why
it was enabled.

Signed-off-by: Anton Blanchard <anton@samba.org>
---
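Note (not part of the patch itself): the behavioural change is that the
checksum offload bits in the swqe are now only set when the stack actually
asked for hardware checksumming. An illustrative sketch of the rule, using
the field and flag names from the driver:

	/* The stack requests hardware checksumming by handing us a
	 * partially-checksummed skb (CHECKSUM_PARTIAL). Anything else is
	 * already complete, so the checksum bits must stay clear.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM |
				    EHEA_SWQE_TCP_CHECKSUM;

The full logic (including the per-protocol offsets) is in xmit_common()
below.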

Index: linux-2.6/drivers/net/ehea/ehea_main.c
===================================================================
--- linux-2.6.orig/drivers/net/ehea/ehea_main.c	2011-04-05 20:36:34.437226735 +1000
+++ linux-2.6/drivers/net/ehea/ehea_main.c	2011-04-05 20:36:38.726736977 +1000
@@ -1681,37 +1681,6 @@ static int ehea_clean_portres(struct ehe
 	return ret;
 }
 
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
-				      const struct sk_buff *skb)
-{
-	swqe->ip_start = skb_network_offset(skb);
-	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
-					const struct sk_buff *skb)
-{
-	swqe->tcp_offset =
-		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
-	swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
-					const struct sk_buff *skb)
-{
-	swqe->tcp_offset =
-		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
-	swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
 static void write_swqe2_TSO(struct sk_buff *skb,
 			    struct ehea_swqe *swqe, u32 lkey)
 {
@@ -2113,41 +2082,46 @@ static int ehea_change_mtu(struct net_de
 	return 0;
 }
 
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
-		       struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
 {
-	if (skb->protocol == htons(ETH_P_IP)) {
-		const struct iphdr *iph = ip_hdr(skb);
+	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
 
-		/* IPv4 */
-		swqe->tx_control |= EHEA_SWQE_CRC
-				 | EHEA_SWQE_IP_CHECKSUM
-				 | EHEA_SWQE_TCP_CHECKSUM
-				 | EHEA_SWQE_IMM_DATA_PRESENT
-				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
-
-		write_ip_start_end(swqe, skb);
-
-		if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF) ||
-			    (iph->frag_off & IP_OFFSET))
-				/* IP fragment, so don't change cs */
-				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
-			else
-				write_udp_offset_end(swqe, skb);
-		} else if (iph->protocol == IPPROTO_TCP) {
-			write_tcp_offset_end(swqe, skb);
-		}
+	if (skb->protocol != htons(ETH_P_IP))
+		return;
 
-		/* icmp (big data) and ip segmentation packets (all other ip
-		   packets) do not require any special handling */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
 
-	} else {
-		/* Other Ethernet Protocol */
-		swqe->tx_control |= EHEA_SWQE_CRC
-				 | EHEA_SWQE_IMM_DATA_PRESENT
-				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
+	swqe->ip_start = skb_network_offset(skb);
+	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
+
+	switch (ip_hdr(skb)->protocol) {
+	case IPPROTO_UDP:
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+		swqe->tcp_offset = swqe->ip_end + 1 +
+				   offsetof(struct udphdr, check);
+		swqe->tcp_end = skb->len - 1;
+		break;
+
+	case IPPROTO_TCP:
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+		swqe->tcp_offset = swqe->ip_end + 1 +
+				   offsetof(struct tcphdr, check);
+		swqe->tcp_end = skb->len - 1;
+		break;
 	}
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+		       struct ehea_swqe *swqe, u32 lkey)
+{
+	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+	xmit_common(skb, swqe);
 
 	write_swqe2_data(skb, dev, swqe, lkey);
 }
@@ -2160,51 +2134,11 @@ static void ehea_xmit3(struct sk_buff *s
 	skb_frag_t *frag;
 	int i;
 
-	if (skb->protocol == htons(ETH_P_IP)) {
-		const struct iphdr *iph = ip_hdr(skb);
-
-		/* IPv4 */
-		write_ip_start_end(swqe, skb);
+	xmit_common(skb, swqe);
 
-		if (iph->protocol == IPPROTO_TCP) {
-			swqe->tx_control |= EHEA_SWQE_CRC
-					 | EHEA_SWQE_IP_CHECKSUM
-					 | EHEA_SWQE_TCP_CHECKSUM
-					 | EHEA_SWQE_IMM_DATA_PRESENT;
-
-			write_tcp_offset_end(swqe, skb);
-
-		} else if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF) ||
-			    (iph->frag_off & IP_OFFSET))
-				/* IP fragment, so don't change cs */
-				swqe->tx_control |= EHEA_SWQE_CRC
-						 | EHEA_SWQE_IMM_DATA_PRESENT;
-			else {
-				swqe->tx_control |= EHEA_SWQE_CRC
-						 | EHEA_SWQE_IP_CHECKSUM
-						 | EHEA_SWQE_TCP_CHECKSUM
-						 | EHEA_SWQE_IMM_DATA_PRESENT;
-
-				write_udp_offset_end(swqe, skb);
-			}
-		} else {
-			/* icmp (big data) and
-			   ip segmentation packets (all other ip packets) */
-			swqe->tx_control |= EHEA_SWQE_CRC
-					 | EHEA_SWQE_IP_CHECKSUM
-					 | EHEA_SWQE_IMM_DATA_PRESENT;
-		}
-	} else {
-		/* Other Ethernet Protocol */
-		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
-	}
-	/* copy (immediate) data */
 	if (nfrags == 0) {
-		/* data is in a single piece */
 		skb_copy_from_linear_data(skb, imm_data, skb->len);
 	} else {
-		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
 					  skb_headlen(skb));
 		imm_data += skb_headlen(skb);
@@ -2218,6 +2152,7 @@ static void ehea_xmit3(struct sk_buff *s
 			imm_data += frag->size;
 		}
 	}
+
 	swqe->immediate_data_length = skb->len;
 	dev_kfree_skb(skb);
 }
@@ -3221,7 +3156,7 @@ struct ehea_port *ehea_setup_single_port
 	dev->netdev_ops = &ehea_netdev_ops;
 	ehea_set_ethtool_ops(dev);
 
-	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+	dev->features = NETIF_F_SG | NETIF_F_TSO
 		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
 		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
