* [PATCH v2 net-next v2 1/5] net: move skb_gro_receive_list from udp to core
2024-04-25 15:04 [PATCH v2 net-next 0/5] Add TCP fraglist GRO support Felix Fietkau
@ 2024-04-25 15:04 ` Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 2/5] net: add support for segmenting TCP fraglist GSO packets Felix Fietkau
` (4 subsequent siblings)
5 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:04 UTC (permalink / raw)
To: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
Paolo Abeni, David Ahern
Cc: willemdebruijn.kernel, linux-kernel
This helper function will be used for TCP fraglist GRO support
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/net/gro.h | 1 +
net/core/gro.c | 27 +++++++++++++++++++++++++++
net/ipv4/udp_offload.c | 27 ---------------------------
3 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/include/net/gro.h b/include/net/gro.h
index 50f1e403dbbb..ca8e4b3de044 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -429,6 +429,7 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
}
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
diff --git a/net/core/gro.c b/net/core/gro.c
index 2459ab697f7f..268c6c826d09 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -231,6 +231,33 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
return 0;
}
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+ if (unlikely(p->len + skb->len >= 65536))
+ return -E2BIG;
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+
+ skb_pull(skb, skb_gro_offset(skb));
+
+ NAPI_GRO_CB(p)->last = skb;
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+ NAPI_GRO_CB(skb)->same_flow = 1;
+
+ return 0;
+}
+
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 3498dd1d0694..a3cd546a1aea 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -433,33 +433,6 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
return segs;
}
-static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
- if (unlikely(p->len + skb->len >= 65536))
- return -E2BIG;
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
-
- skb_pull(skb, skb_gro_offset(skb));
-
- NAPI_GRO_CB(p)->last = skb;
- NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
-
- /* sk ownership - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- skb->sk = NULL;
- p->truesize += skb->truesize;
- p->len += skb->len;
-
- NAPI_GRO_CB(skb)->same_flow = 1;
-
- return 0;
-}
-
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH v2 net-next v2 2/5] net: add support for segmenting TCP fraglist GSO packets
2024-04-25 15:04 [PATCH v2 net-next 0/5] Add TCP fraglist GRO support Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 1/5] net: move skb_gro_receive_list from udp to core Felix Fietkau
@ 2024-04-25 15:04 ` Felix Fietkau
2024-04-25 15:17 ` Willem de Bruijn
2024-04-25 15:04 ` [PATCH v2 net-next v2 3/5] net: add code for TCP fraglist GRO Felix Fietkau
` (3 subsequent siblings)
5 siblings, 1 reply; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
Preparation for adding TCP fraglist GRO support. It expects packets to be
combined in a similar way as UDP fraglist GSO packets.
For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/ipv4/tcp_offload.c | 69 ++++++++++++++++++++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 3 ++
2 files changed, 72 insertions(+)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index fab0973f995b..e455f884190c 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -28,6 +28,72 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
}
}
+static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
+ __be32 *oldip, __be32 *newip,
+ __be16 *oldport, __be16 *newport)
+{
+ struct tcphdr *th;
+ struct iphdr *iph;
+
+ if (*oldip == *newip && *oldport == *newport)
+ return;
+
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ inet_proto_csum_replace4(&th->check, seg, *oldip, *newip, true);
+ inet_proto_csum_replace2(&th->check, seg, *oldport, *newport, false);
+ *oldport = *newport;
+
+ csum_replace4(&iph->check, *oldip, *newip);
+ *oldip = *newip;
+}
+
+static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+ struct sk_buff *seg;
+ struct tcphdr *th, *th2;
+ struct iphdr *iph, *iph2;
+ __be32 flags, flags2;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+ flags = tcp_flag_word(th);
+ flags2 = tcp_flag_word(tcp_hdr(seg->next));
+
+ if ((tcp_hdr(seg)->dest == tcp_hdr(seg->next)->dest) &&
+ (tcp_hdr(seg)->source == tcp_hdr(seg->next)->source) &&
+ (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
+ (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr) &&
+ (flags == flags2))
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+ iph2 = ip_hdr(seg);
+
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->saddr, &iph->saddr,
+ &th2->source, &th->source);
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->daddr, &iph->daddr,
+ &th2->dest, &th->dest);
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv4_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -37,6 +103,9 @@ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+ return __tcp4_gso_segment_list(skb, features);
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 4b07d1e6c952..b3b8e1f6b92a 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -51,6 +51,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+ return skb_segment_list(skb, features, skb_mac_header_len(skb));
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [PATCH v2 net-next v2 2/5] net: add support for segmenting TCP fraglist GSO packets
2024-04-25 15:04 ` [PATCH v2 net-next v2 2/5] net: add support for segmenting TCP fraglist GSO packets Felix Fietkau
@ 2024-04-25 15:17 ` Willem de Bruijn
2024-04-25 15:22 ` Felix Fietkau
0 siblings, 1 reply; 13+ messages in thread
From: Willem de Bruijn @ 2024-04-25 15:17 UTC (permalink / raw)
To: Felix Fietkau, netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
Felix Fietkau wrote:
> Preparation for adding TCP fraglist GRO support. It expects packets to be
> combined in a similar way as UDP fraglist GSO packets.
> For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> net/ipv4/tcp_offload.c | 69 ++++++++++++++++++++++++++++++++++++++++
> net/ipv6/tcpv6_offload.c | 3 ++
> 2 files changed, 72 insertions(+)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index fab0973f995b..e455f884190c 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -28,6 +28,72 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
> }
> }
>
> +static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
> + __be32 *oldip, __be32 *newip,
> + __be16 *oldport, __be16 *newport)
> +{
> + struct tcphdr *th;
> + struct iphdr *iph;
> +
> + if (*oldip == *newip && *oldport == *newport)
> + return;
> +
> + th = tcp_hdr(seg);
> + iph = ip_hdr(seg);
> +
> + inet_proto_csum_replace4(&th->check, seg, *oldip, *newip, true);
> + inet_proto_csum_replace2(&th->check, seg, *oldport, *newport, false);
> + *oldport = *newport;
> +
> + csum_replace4(&iph->check, *oldip, *newip);
> + *oldip = *newip;
> +}
> +
> +static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
> +{
> + struct sk_buff *seg;
> + struct tcphdr *th, *th2;
> + struct iphdr *iph, *iph2;
> + __be32 flags, flags2;
> +
> + seg = segs;
> + th = tcp_hdr(seg);
> + iph = ip_hdr(seg);
> + flags = tcp_flag_word(th);
> + flags2 = tcp_flag_word(tcp_hdr(seg->next));
Vestigial, now that flag overwrite is removed in v2?
All this code is very similar to __udpv4_gso_segment_list_csum. But
the zero checksum handling in __udpv4_gso_segment_csum makes it just
different enough that I also do not immediately see a straightforward
way to avoid duplicating.
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH v2 net-next v2 2/5] net: add support for segmenting TCP fraglist GSO packets
2024-04-25 15:17 ` Willem de Bruijn
@ 2024-04-25 15:22 ` Felix Fietkau
0 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:22 UTC (permalink / raw)
To: Willem de Bruijn, netdev, Eric Dumazet, David S. Miller,
David Ahern, Jakub Kicinski, Paolo Abeni
Cc: linux-kernel
On 25.04.24 17:17, Willem de Bruijn wrote:
> Felix Fietkau wrote:
>> Preparation for adding TCP fraglist GRO support. It expects packets to be
>> combined in a similar way as UDP fraglist GSO packets.
>> For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
>>
>> Signed-off-by: Felix Fietkau <nbd@nbd.name>
>> ---
>> net/ipv4/tcp_offload.c | 69 ++++++++++++++++++++++++++++++++++++++++
>> net/ipv6/tcpv6_offload.c | 3 ++
>> 2 files changed, 72 insertions(+)
>>
>> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
>> index fab0973f995b..e455f884190c 100644
>> --- a/net/ipv4/tcp_offload.c
>> +++ b/net/ipv4/tcp_offload.c
>> @@ -28,6 +28,72 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
>> }
>> }
>>
>> +static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
>> + __be32 *oldip, __be32 *newip,
>> + __be16 *oldport, __be16 *newport)
>> +{
>> + struct tcphdr *th;
>> + struct iphdr *iph;
>> +
>> + if (*oldip == *newip && *oldport == *newport)
>> + return;
>> +
>> + th = tcp_hdr(seg);
>> + iph = ip_hdr(seg);
>> +
>> + inet_proto_csum_replace4(&th->check, seg, *oldip, *newip, true);
>> + inet_proto_csum_replace2(&th->check, seg, *oldport, *newport, false);
>> + *oldport = *newport;
>> +
>> + csum_replace4(&iph->check, *oldip, *newip);
>> + *oldip = *newip;
>> +}
>> +
>> +static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
>> +{
>> + struct sk_buff *seg;
>> + struct tcphdr *th, *th2;
>> + struct iphdr *iph, *iph2;
>> + __be32 flags, flags2;
>> +
>> + seg = segs;
>> + th = tcp_hdr(seg);
>> + iph = ip_hdr(seg);
>> + flags = tcp_flag_word(th);
>> + flags2 = tcp_flag_word(tcp_hdr(seg->next));
>
> Vestigial, now that flag overwrite is removed in v2?
Will fix, thanks.
> All this code is very similar to __udpv4_gso_segment_list_csum. But
> the zero checksum handling in __udpv4_gso_segment_csum makes it just
> different enough that I also do not immediately see a straightforward
> way to avoid duplicating.
Also, the checksum field is in a different location in the udp header. I
don't think avoiding duplication makes sense here.
- Felix
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v2 net-next v2 3/5] net: add code for TCP fraglist GRO
2024-04-25 15:04 [PATCH v2 net-next 0/5] Add TCP fraglist GRO support Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 1/5] net: move skb_gro_receive_list from udp to core Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 2/5] net: add support for segmenting TCP fraglist GSO packets Felix Fietkau
@ 2024-04-25 15:04 ` Felix Fietkau
2024-04-25 15:18 ` Willem de Bruijn
2024-04-25 15:04 ` [PATCH v2 net-next v2 4/4] net: add heuristic for enabling " Felix Fietkau
` (2 subsequent siblings)
5 siblings, 1 reply; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
This implements fraglist GRO similar to how it's handled in UDP, however
no functional changes are added yet. The next change adds a heuristic for
using fraglist GRO instead of regular GRO.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/ipv4/tcp_offload.c | 22 ++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 9 +++++++++
2 files changed, 31 insertions(+)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index e455f884190c..68157130c264 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -336,6 +336,19 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
flush |= skb_cmp_decrypted(p, skb);
+ if (NAPI_GRO_CB(p)->is_flist) {
+ flush |= (__force int)(flags ^ tcp_flag_word(th2));
+ flush |= skb->ip_summed != p->ip_summed;
+ flush |= skb->csum_level != p->csum_level;
+ flush |= !pskb_may_pull(skb, skb_gro_offset(skb));
+ flush |= NAPI_GRO_CB(p)->count >= 64;
+
+ if (flush || skb_gro_receive_list(p, skb))
+ mss = 1;
+
+ goto out_check_final;
+ }
+
if (flush || skb_gro_receive(p, skb)) {
mss = 1;
goto out_check_final;
@@ -402,6 +415,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
+ if (NAPI_GRO_CB(skb)->is_flist) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index b3b8e1f6b92a..c97d55cf036f 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -32,6 +32,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
+ if (NAPI_GRO_CB(skb)->is_flist) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
&iph->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [PATCH v2 net-next v2 3/5] net: add code for TCP fraglist GRO
2024-04-25 15:04 ` [PATCH v2 net-next v2 3/5] net: add code for TCP fraglist GRO Felix Fietkau
@ 2024-04-25 15:18 ` Willem de Bruijn
2024-04-25 15:26 ` Felix Fietkau
0 siblings, 1 reply; 13+ messages in thread
From: Willem de Bruijn @ 2024-04-25 15:18 UTC (permalink / raw)
To: Felix Fietkau, netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
Felix Fietkau wrote:
> This implements fraglist GRO similar to how it's handled in UDP, however
> no functional changes are added yet. The next change adds a heuristic for
> using fraglist GRO instead of regular GRO.
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> net/ipv4/tcp_offload.c | 22 ++++++++++++++++++++++
> net/ipv6/tcpv6_offload.c | 9 +++++++++
> 2 files changed, 31 insertions(+)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index e455f884190c..68157130c264 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -336,6 +336,19 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
> flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
> flush |= skb_cmp_decrypted(p, skb);
>
> + if (NAPI_GRO_CB(p)->is_flist) {
> + flush |= (__force int)(flags ^ tcp_flag_word(th2));
What is the purpose of this check, given the existing check above
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
> + flush |= skb->ip_summed != p->ip_summed;
> + flush |= skb->csum_level != p->csum_level;
> + flush |= !pskb_may_pull(skb, skb_gro_offset(skb));
> + flush |= NAPI_GRO_CB(p)->count >= 64;
> +
> + if (flush || skb_gro_receive_list(p, skb))
> + mss = 1;
> +
> + goto out_check_final;
> + }
> +
> if (flush || skb_gro_receive(p, skb)) {
> mss = 1;
> goto out_check_final;
> @@ -402,6 +415,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
> const struct iphdr *iph = ip_hdr(skb);
> struct tcphdr *th = tcp_hdr(skb);
>
> + if (NAPI_GRO_CB(skb)->is_flist) {
> + skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
> + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
> +
> + __skb_incr_checksum_unnecessary(skb);
> +
> + return 0;
> + }
> +
> th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
> iph->daddr, 0);
>
> diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
> index b3b8e1f6b92a..c97d55cf036f 100644
> --- a/net/ipv6/tcpv6_offload.c
> +++ b/net/ipv6/tcpv6_offload.c
> @@ -32,6 +32,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
> const struct ipv6hdr *iph = ipv6_hdr(skb);
> struct tcphdr *th = tcp_hdr(skb);
>
> + if (NAPI_GRO_CB(skb)->is_flist) {
> + skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
> + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
> +
> + __skb_incr_checksum_unnecessary(skb);
> +
> + return 0;
> + }
> +
> th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
> &iph->daddr, 0);
> skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
> --
> 2.44.0
>
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH v2 net-next v2 3/5] net: add code for TCP fraglist GRO
2024-04-25 15:18 ` Willem de Bruijn
@ 2024-04-25 15:26 ` Felix Fietkau
0 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:26 UTC (permalink / raw)
To: Willem de Bruijn, netdev, Eric Dumazet, David S. Miller,
David Ahern, Jakub Kicinski, Paolo Abeni
Cc: linux-kernel
On 25.04.24 17:18, Willem de Bruijn wrote:
> Felix Fietkau wrote:
>> This implements fraglist GRO similar to how it's handled in UDP, however
>> no functional changes are added yet. The next change adds a heuristic for
>> using fraglist GRO instead of regular GRO.
>>
>> Signed-off-by: Felix Fietkau <nbd@nbd.name>
>> ---
>> net/ipv4/tcp_offload.c | 22 ++++++++++++++++++++++
>> net/ipv6/tcpv6_offload.c | 9 +++++++++
>> 2 files changed, 31 insertions(+)
>>
>> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
>> index e455f884190c..68157130c264 100644
>> --- a/net/ipv4/tcp_offload.c
>> +++ b/net/ipv4/tcp_offload.c
>> @@ -336,6 +336,19 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
>> flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
>> flush |= skb_cmp_decrypted(p, skb);
>>
>> + if (NAPI_GRO_CB(p)->is_flist) {
>> + flush |= (__force int)(flags ^ tcp_flag_word(th2));
>
> What is the purpose of this check, given the existing check above
>
> flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
> ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
The fraglist codepath is not updating the TCP flags of the first packet,
so the extra check ensures it doesn't have to.
- Felix
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v2 net-next v2 4/4] net: add heuristic for enabling TCP fraglist GRO
2024-04-25 15:04 [PATCH v2 net-next 0/5] Add TCP fraglist GRO support Felix Fietkau
` (2 preceding siblings ...)
2024-04-25 15:04 ` [PATCH v2 net-next v2 3/5] net: add code for TCP fraglist GRO Felix Fietkau
@ 2024-04-25 15:04 ` Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 4/5] net: create tcp_gro_lookup helper function Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 5/5] net: add heuristic for enabling TCP fraglist GRO Felix Fietkau
5 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.
One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.
When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
socket in the same netns as the receiving device. While this may not
cover all relevant use cases in multi-netns configurations, it should be
good enough for most configurations that need this.
Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps.
rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on: 770 Mbit/s, CPU 40% idle
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/ipv4/tcp_offload.c | 56 ++++++++++++++++++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 59 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 115 insertions(+)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 68157130c264..fc836ca084dd 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -396,6 +396,60 @@ void tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+ struct net *net = dev_net(skb->dev);
+ unsigned int off, hlen, thlen;
+ struct tcphdr *th, *th2;
+ struct sk_buff *p;
+ struct sock *sk;
+ int iif, sdif;
+
+ if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+ return;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ return;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+ }
+
+ list_for_each_entry(p, head, list) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+ if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet_get_iif_sdif(skb, &iif, &sdif);
+ sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -407,6 +461,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
+ tcp4_check_fraglist_gro(head, skb);
+
return tcp_gro_receive(head, skb);
}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c97d55cf036f..dffeef080889 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,69 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *hdr = skb_gro_network_header(skb);
+ struct net *net = dev_net(skb->dev);
+ unsigned int off, hlen, thlen;
+ struct tcphdr *th, *th2;
+ struct sk_buff *p;
+ struct sock *sk;
+ int iif, sdif;
+
+ if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+ return;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ return;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+ }
+
+ list_for_each_entry(p, head, list) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+ if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet6_get_iif_sdif(skb, &iif, &sdif);
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -24,6 +81,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
+ tcp6_check_fraglist_gro(head, skb);
+
return tcp_gro_receive(head, skb);
}
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH v2 net-next v2 4/5] net: create tcp_gro_lookup helper function
2024-04-25 15:04 [PATCH v2 net-next 0/5] Add TCP fraglist GRO support Felix Fietkau
` (3 preceding siblings ...)
2024-04-25 15:04 ` [PATCH v2 net-next v2 4/4] net: add heuristic for enabling " Felix Fietkau
@ 2024-04-25 15:04 ` Felix Fietkau
2024-04-25 15:04 ` [PATCH v2 net-next v2 5/5] net: add heuristic for enabling TCP fraglist GRO Felix Fietkau
5 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, Jakub Kicinski,
Paolo Abeni, David Ahern
Cc: willemdebruijn.kernel, linux-kernel
This pulls the flow port matching out of tcp_gro_receive, so that it can be
reused for the next change, which adds the TCP fraglist GRO heuristic.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/net/tcp.h | 1 +
net/ipv4/tcp_offload.c | 41 +++++++++++++++++++++++++----------------
2 files changed, 26 insertions(+), 16 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ffc9371fe9de..a3f09aa44487 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2198,6 +2198,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 68157130c264..520fd425ab19 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -247,6 +247,27 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
return segs;
}
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
+{
+ struct tcphdr *th2;
+ struct sk_buff *p;
+
+ list_for_each_entry(p, head, list) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+ if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ return p;
+ }
+
+ return NULL;
+}
+
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
struct sk_buff *pp = NULL;
@@ -284,24 +305,12 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
- list_for_each_entry(p, head, list) {
- if (!NAPI_GRO_CB(p)->same_flow)
- continue;
-
- th2 = tcp_hdr(p);
-
- if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
- NAPI_GRO_CB(p)->same_flow = 0;
- continue;
- }
-
- goto found;
- }
- p = NULL;
- goto out_check_final;
+ p = tcp_gro_lookup(head, th);
+ if (!p)
+ goto out_check_final;
-found:
/* Include the IP ID check below from the inner most IP hdr */
+ th2 = tcp_hdr(p);
flush = NAPI_GRO_CB(p)->flush;
flush |= (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH v2 net-next v2 5/5] net: add heuristic for enabling TCP fraglist GRO
2024-04-25 15:04 [PATCH v2 net-next 0/5] Add TCP fraglist GRO support Felix Fietkau
` (4 preceding siblings ...)
2024-04-25 15:04 ` [PATCH v2 net-next v2 4/5] net: create tcp_gro_lookup helper function Felix Fietkau
@ 2024-04-25 15:04 ` Felix Fietkau
2024-04-25 15:26 ` Eric Dumazet
5 siblings, 1 reply; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.
One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.
When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
socket in the same netns as the receiving device. While this may not
cover all relevant use cases in multi-netns configurations, it should be
good enough for most configurations that need this.
Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps.
rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on: 770 Mbit/s, CPU 40% idle
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/ipv4/tcp_offload.c | 48 +++++++++++++++++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 99 insertions(+)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 520fd425ab19..3bb96a110402 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+ struct net *net = dev_net(skb->dev);
+ unsigned int off, hlen, thlen;
+ struct sk_buff *p;
+ struct tcphdr *th;
+ struct sock *sk;
+ int iif, sdif;
+
+ if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+ return;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ return;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+ }
+
+ p = tcp_gro_lookup(head, th);
+ if (p) {
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet_get_iif_sdif(skb, &iif, &sdif);
+ sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
+ tcp4_check_fraglist_gro(head, skb);
+
return tcp_gro_receive(head, skb);
}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c97d55cf036f..7948420dcad0 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,61 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *hdr = skb_gro_network_header(skb);
+ struct net *net = dev_net(skb->dev);
+ unsigned int off, hlen, thlen;
+ struct sk_buff *p;
+ struct tcphdr *th;
+ struct sock *sk;
+ int iif, sdif;
+
+ if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+ return;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ return;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return;
+ }
+
+ p = tcp_gro_lookup(head, th);
+ if (p) {
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet6_get_iif_sdif(skb, &iif, &sdif);
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -24,6 +73,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
+ tcp6_check_fraglist_gro(head, skb);
+
return tcp_gro_receive(head, skb);
}
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH v2 net-next v2 5/5] net: add heuristic for enabling TCP fraglist GRO
2024-04-25 15:04 ` [PATCH v2 net-next v2 5/5] net: add heuristic for enabling TCP fraglist GRO Felix Fietkau
@ 2024-04-25 15:26 ` Eric Dumazet
2024-04-25 15:29 ` Felix Fietkau
0 siblings, 1 reply; 13+ messages in thread
From: Eric Dumazet @ 2024-04-25 15:26 UTC (permalink / raw)
To: Felix Fietkau
Cc: netdev, David S. Miller, David Ahern, Jakub Kicinski, Paolo Abeni,
willemdebruijn.kernel, linux-kernel
On Thu, Apr 25, 2024 at 5:04 PM Felix Fietkau <nbd@nbd.name> wrote:
>
> When forwarding TCP after GRO, software segmentation is very expensive,
> especially when the checksum needs to be recalculated.
> One case where that's currently unavoidable is when routing packets over
> PPPoE. Performance improves significantly when using fraglist GRO
> implemented in the same way as for UDP.
>
> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
> socket in the same netns as the receiving device. While this may not
> cover all relevant use cases in multi-netns configurations, it should be
> good enough for most configurations that need this.
>
> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
> 1Gbps.
>
> rx-gro-list off: 630 Mbit/s, CPU 35% idle
> rx-gro-list on: 770 Mbit/s, CPU 40% idle
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> net/ipv4/tcp_offload.c | 48 +++++++++++++++++++++++++++++++++++++
> net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 99 insertions(+)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index 520fd425ab19..3bb96a110402 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
> }
> EXPORT_SYMBOL(tcp_gro_complete);
>
> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
> +{
> + const struct iphdr *iph = skb_gro_network_header(skb);
I do not think loading iph before all skb_gro_header() and
skb_gro_header_slow() calls is wise.
pskb_may_pull() can re-allocate skb->head
> + struct net *net = dev_net(skb->dev);
> + unsigned int off, hlen, thlen;
> + struct sk_buff *p;
> + struct tcphdr *th;
> + struct sock *sk;
> + int iif, sdif;
> +
> + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
> + return;
> +
> + off = skb_gro_offset(skb);
> + hlen = off + sizeof(*th);
> + th = skb_gro_header(skb, hlen, off);
> + if (unlikely(!th))
> + return;
> +
> + thlen = th->doff * 4;
> + if (thlen < sizeof(*th))
> + return;
> +
> + hlen = off + thlen;
> + if (!skb_gro_may_pull(skb, hlen)) {
> + th = skb_gro_header_slow(skb, hlen, off);
> + if (unlikely(!th))
> + return;
> + }
> +
> + p = tcp_gro_lookup(head, th);
> + if (p) {
> + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
> + return;
> + }
> +
> + inet_get_iif_sdif(skb, &iif, &sdif);
> + sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
> + iph->saddr, th->source,
> + iph->daddr, ntohs(th->dest),
> + iif, sdif);
> + NAPI_GRO_CB(skb)->is_flist = !sk;
> + if (sk)
> + sock_put(sk);
> +}
> +
> INDIRECT_CALLABLE_SCOPE
> struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
> {
> @@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
> return NULL;
> }
>
I would probably pull the whole TCP header here, before calling
tcp4_check_fraglist_gro(head, skb)
and no longer do this twice from tcp4_check_fraglist_gro() and tcp_gro_receive()
Perhaps define a new inline helper, that will be called from
tcp4_gro_receive() and tcp6_gro_receive(),
and not anymore from tcp_gro_receive()
static inline struct tcphdr *tcp_gro_pull_header(...)
{
....
off = skb_gro_offset(skb);
hlen = off + sizeof(*th);
th = skb_gro_header(skb, hlen, off);
if (unlikely(!th))
return NULL;
thlen = th->doff * 4;
if (thlen < sizeof(*th))
return NULL;
hlen = off + thlen;
if (!skb_gro_may_pull(skb, hlen))
th = skb_gro_header_slow(skb, hlen, off);
return th;
}
> + tcp4_check_fraglist_gro(head, skb);
> +
> return tcp_gro_receive(head, skb);
> }
>
> diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
> index c97d55cf036f..7948420dcad0 100644
> --- a/net/ipv6/tcpv6_offload.c
> +++ b/net/ipv6/tcpv6_offload.c
> @@ -7,12 +7,61 @@
> */
> #include <linux/indirect_call_wrapper.h>
> #include <linux/skbuff.h>
> +#include <net/inet6_hashtables.h>
> #include <net/gro.h>
> #include <net/protocol.h>
> #include <net/tcp.h>
> #include <net/ip6_checksum.h>
> #include "ip6_offload.h"
>
> +static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
> +{
> +#if IS_ENABLED(CONFIG_IPV6)
> + const struct ipv6hdr *hdr = skb_gro_network_header(skb);
> + struct net *net = dev_net(skb->dev);
> + unsigned int off, hlen, thlen;
> + struct sk_buff *p;
> + struct tcphdr *th;
> + struct sock *sk;
> + int iif, sdif;
> +
> + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
> + return;
> +
> + off = skb_gro_offset(skb);
> + hlen = off + sizeof(*th);
> + th = skb_gro_header(skb, hlen, off);
> + if (unlikely(!th))
> + return;
> +
> + thlen = th->doff * 4;
> + if (thlen < sizeof(*th))
> + return;
> +
> + hlen = off + thlen;
> + if (!skb_gro_may_pull(skb, hlen)) {
> + th = skb_gro_header_slow(skb, hlen, off);
> + if (unlikely(!th))
> + return;
> + }
> +
> + p = tcp_gro_lookup(head, th);
> + if (p) {
> + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
> + return;
> + }
> +
> + inet6_get_iif_sdif(skb, &iif, &sdif);
> + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
> + &hdr->saddr, th->source,
> + &hdr->daddr, ntohs(th->dest),
> + iif, sdif);
> + NAPI_GRO_CB(skb)->is_flist = !sk;
> + if (sk)
> + sock_put(sk);
> +#endif /* IS_ENABLED(CONFIG_IPV6) */
> +}
> +
> INDIRECT_CALLABLE_SCOPE
> struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
> {
> @@ -24,6 +73,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
> return NULL;
> }
>
> + tcp6_check_fraglist_gro(head, skb);
> +
> return tcp_gro_receive(head, skb);
> }
>
> --
> 2.44.0
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v2 net-next v2 5/5] net: add heuristic for enabling TCP fraglist GRO
2024-04-25 15:26 ` Eric Dumazet
@ 2024-04-25 15:29 ` Felix Fietkau
0 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 15:29 UTC (permalink / raw)
To: Eric Dumazet
Cc: netdev, David S. Miller, David Ahern, Jakub Kicinski, Paolo Abeni,
willemdebruijn.kernel, linux-kernel
On 25.04.24 17:26, Eric Dumazet wrote:
> On Thu, Apr 25, 2024 at 5:04 PM Felix Fietkau <nbd@nbd.name> wrote:
>>
>> When forwarding TCP after GRO, software segmentation is very expensive,
>> especially when the checksum needs to be recalculated.
>> One case where that's currently unavoidable is when routing packets over
>> PPPoE. Performance improves significantly when using fraglist GRO
>> implemented in the same way as for UDP.
>>
>> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
>> socket in the same netns as the receiving device. While this may not
>> cover all relevant use cases in multi-netns configurations, it should be
>> good enough for most configurations that need this.
>>
>> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
>> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
>> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
>> 1Gbps.
>>
>> rx-gro-list off: 630 Mbit/s, CPU 35% idle
>> rx-gro-list on: 770 Mbit/s, CPU 40% idle
>>
>> Signed-off-by: Felix Fietkau <nbd@nbd.name>
>> ---
>> net/ipv4/tcp_offload.c | 48 +++++++++++++++++++++++++++++++++++++
>> net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
>> 2 files changed, 99 insertions(+)
>>
>> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
>> index 520fd425ab19..3bb96a110402 100644
>> --- a/net/ipv4/tcp_offload.c
>> +++ b/net/ipv4/tcp_offload.c
>> @@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
>> }
>> EXPORT_SYMBOL(tcp_gro_complete);
>>
>> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
>> +{
>> + const struct iphdr *iph = skb_gro_network_header(skb);
>
> I do not think loading iph before all skb_gro_header() and
> skb_gro_header_slow() calls is wise.
>
> pskb_may_pull() can re-allocate skb->head
Will fix.
>> + struct net *net = dev_net(skb->dev);
>> + unsigned int off, hlen, thlen;
>> + struct sk_buff *p;
>> + struct tcphdr *th;
>> + struct sock *sk;
>> + int iif, sdif;
>> +
>> + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
>> + return;
>> +
>> + off = skb_gro_offset(skb);
>> + hlen = off + sizeof(*th);
>> + th = skb_gro_header(skb, hlen, off);
>> + if (unlikely(!th))
>> + return;
>> +
>> + thlen = th->doff * 4;
>> + if (thlen < sizeof(*th))
>> + return;
>> +
>> + hlen = off + thlen;
>> + if (!skb_gro_may_pull(skb, hlen)) {
>> + th = skb_gro_header_slow(skb, hlen, off);
>> + if (unlikely(!th))
>> + return;
>> + }
>> +
>> + p = tcp_gro_lookup(head, th);
>> + if (p) {
>> + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
>> + return;
>> + }
>> +
>> + inet_get_iif_sdif(skb, &iif, &sdif);
>> + sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
>> + iph->saddr, th->source,
>> + iph->daddr, ntohs(th->dest),
>> + iif, sdif);
>> + NAPI_GRO_CB(skb)->is_flist = !sk;
>> + if (sk)
>> + sock_put(sk);
>> +}
>> +
>> INDIRECT_CALLABLE_SCOPE
>> struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>> {
>> @@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>> return NULL;
>> }
>>
>
> I would probably pull the whole TCP header here, before calling
> tcp4_check_fraglist_gro(head, skb)
> and no longer do this twice from tcp4_check_fraglist_gro() and tcp_gro_receive()
>
> Perhaps define a new inline helper, that will be called from
> tcp4_gro_receive() and tcp6_gro_receive(),
> and not anymore from tcp_gro_receive()
>
> static inline struct tcphdr *tcp_gro_pull_header(...)
> {
> ....
> off = skb_gro_offset(skb);
> hlen = off + sizeof(*th);
> th = skb_gro_header(skb, hlen, off);
> if (unlikely(!th))
> return NULL;
>
> thlen = th->doff * 4;
> if (thlen < sizeof(*th))
> return NULL;
>
> hlen = off + thlen;
> if (!skb_gro_may_pull(skb, hlen))
> th = skb_gro_header_slow(skb, hlen, off);
>
> return th;
> }
Makes sense
Thanks,
- Felix
^ permalink raw reply [flat|nested] 13+ messages in thread