netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [RFC PATCH 0/2] ixgbe/ixgbevf: Add support for UDP segmentation offload
@ 2018-05-04  0:39 Alexander Duyck
  2018-05-04  0:39 ` [RFC PATCH 1/2] ixgbe: " Alexander Duyck
  2018-05-04  0:39 ` [RFC PATCH 2/2] ixgbevf: " Alexander Duyck
  0 siblings, 2 replies; 3+ messages in thread
From: Alexander Duyck @ 2018-05-04  0:39 UTC (permalink / raw)
  To: netdev, willemb, intel-wired-lan

These patches are meant to be a follow-up to the following series:
https://patchwork.ozlabs.org/project/netdev/list/?series=42476&archive=both&state=*

These patches enable driver support for the new UDP segmentation offload
feature. For now I am pushing them as an RFC as they haven't been
officially productized or validated and I was using these mostly as a test
vehicle to verify the offload could be supported generically by drivers.

---

Alexander Duyck (2):
      ixgbe: Add support for UDP segmentation offload
      ixgbevf: Add support for UDP segmentation offload


 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c     |   24 +++++++++++++++-----
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |   25 ++++++++++++++++-----
 2 files changed, 37 insertions(+), 12 deletions(-)

^ permalink raw reply	[flat|nested] 3+ messages in thread

* [RFC PATCH 1/2] ixgbe: Add support for UDP segmentation offload
  2018-05-04  0:39 [RFC PATCH 0/2] ixgbe/ixgbevf: Add support for UDP segmentation offload Alexander Duyck
@ 2018-05-04  0:39 ` Alexander Duyck
  2018-05-04  0:39 ` [RFC PATCH 2/2] ixgbevf: " Alexander Duyck
  1 sibling, 0 replies; 3+ messages in thread
From: Alexander Duyck @ 2018-05-04  0:39 UTC (permalink / raw)
  To: netdev, willemb, intel-wired-lan

From: Alexander Duyck <alexander.h.duyck@intel.com>

This patch adds support for UDP segmentation offload. Relatively few
changes were needed to add this support as it functions much like the TCP
segmentation offload.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |   24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a52d92e182ee..0bed3350a795 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7693,6 +7693,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -7716,7 +7717,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = (skb->csum_offset == offsetof(struct tcphdr, check)) ?
+		     IXGBE_ADVTXD_TUCMD_L4T_TCP : IXGBE_ADVTXD_TUCMD_L4T_UDP;
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
@@ -7746,12 +7748,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	/* determine offset of inner transport header */
 	l4_offset = l4.hdr - skb->data;
 
-	/* compute length of segmentation header */
-	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+	} else {
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+	}
 
 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -9931,6 +9941,7 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -9939,6 +9950,7 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	if (unlikely(network_hdr_len >  IXGBE_MAX_NETWORK_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
 
@@ -10480,7 +10492,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			    IXGBE_GSO_PARTIAL_FEATURES;
 
 	if (hw->mac.type >= ixgbe_mac_82599EB)
-		netdev->features |= NETIF_F_SCTP_CRC;
+		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features |

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* [RFC PATCH 2/2] ixgbevf: Add support for UDP segmentation offload
  2018-05-04  0:39 [RFC PATCH 0/2] ixgbe/ixgbevf: Add support for UDP segmentation offload Alexander Duyck
  2018-05-04  0:39 ` [RFC PATCH 1/2] ixgbe: " Alexander Duyck
@ 2018-05-04  0:39 ` Alexander Duyck
  1 sibling, 0 replies; 3+ messages in thread
From: Alexander Duyck @ 2018-05-04  0:39 UTC (permalink / raw)
  To: netdev, willemb, intel-wired-lan

From: Alexander Duyck <alexander.h.duyck@intel.com>

This patch adds support for UDP segmentation offload. Relatively few
changes were needed to add this support as it functions much like the TCP
segmentation offload.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |   25 ++++++++++++++++-----
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9a939dcaf727..c2986142c98a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3709,6 +3709,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -3731,7 +3732,8 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = (skb->csum_offset == offsetof(struct tcphdr, check)) ?
+		     IXGBE_ADVTXD_TUCMD_L4T_TCP : IXGBE_ADVTXD_TUCMD_L4T_UDP;
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
@@ -3759,12 +3761,20 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	/* determine offset of inner transport header */
 	l4_offset = l4.hdr - skb->data;
 
-	/* compute length of segmentation header */
-	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+	} else {
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+	}
 
 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -4368,6 +4378,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
 	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -4376,6 +4387,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
 	if (unlikely(network_hdr_len >  IXGBEVF_MAX_NETWORK_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
 
@@ -4571,7 +4583,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			      NETIF_F_TSO6 |
 			      NETIF_F_RXCSUM |
 			      NETIF_F_HW_CSUM |
-			      NETIF_F_SCTP_CRC;
+			      NETIF_F_SCTP_CRC |
+			      NETIF_F_GSO_UDP_L4;
 
 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
 				      NETIF_F_GSO_GRE_CSUM | \

^ permalink raw reply related	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2018-05-04  0:39 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-04  0:39 [RFC PATCH 0/2] ixgbe/ixgbevf: Add support for UDP segmentation offload Alexander Duyck
2018-05-04  0:39 ` [RFC PATCH 1/2] ixgbe: " Alexander Duyck
2018-05-04  0:39 ` [RFC PATCH 2/2] ixgbevf: " Alexander Duyck

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).