* [PATCH net-next 1/4] net: move skb_gro_receive_list from udp to core
2024-04-24 18:04 [PATCH net-next 0/4] Add TCP fraglist GRO support Felix Fietkau
@ 2024-04-24 18:04 ` Felix Fietkau
2024-04-24 18:04 ` [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets Felix Fietkau
` (2 subsequent siblings)
3 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-24 18:04 UTC (permalink / raw)
To: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
Paolo Abeni, David Ahern
Cc: willemdebruijn.kernel, linux-kernel
This helper function will be used for TCP fraglist GRO support.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/net/gro.h | 1 +
net/core/gro.c | 27 +++++++++++++++++++++++++++
net/ipv4/udp_offload.c | 27 ---------------------------
3 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/include/net/gro.h b/include/net/gro.h
index 50f1e403dbbb..ca8e4b3de044 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -429,6 +429,7 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
}
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
diff --git a/net/core/gro.c b/net/core/gro.c
index 2459ab697f7f..268c6c826d09 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -231,6 +231,33 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
return 0;
}
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+ if (unlikely(p->len + skb->len >= 65536))
+ return -E2BIG;
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+
+ skb_pull(skb, skb_gro_offset(skb));
+
+ NAPI_GRO_CB(p)->last = skb;
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+ NAPI_GRO_CB(skb)->same_flow = 1;
+
+ return 0;
+}
+
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 3498dd1d0694..a3cd546a1aea 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -433,33 +433,6 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
return segs;
}
-static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
- if (unlikely(p->len + skb->len >= 65536))
- return -E2BIG;
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
-
- skb_pull(skb, skb_gro_offset(skb));
-
- NAPI_GRO_CB(p)->last = skb;
- NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
-
- /* sk ownership - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- skb->sk = NULL;
- p->truesize += skb->truesize;
- p->len += skb->len;
-
- NAPI_GRO_CB(skb)->same_flow = 1;
-
- return 0;
-}
-
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets
2024-04-24 18:04 [PATCH net-next 0/4] Add TCP fraglist GRO support Felix Fietkau
2024-04-24 18:04 ` [PATCH net-next 1/4] net: move skb_gro_receive_list from udp to core Felix Fietkau
@ 2024-04-24 18:04 ` Felix Fietkau
2024-04-25 3:03 ` Willem de Bruijn
2024-04-24 18:04 ` [PATCH net-next 3/4] net: add code for TCP fraglist GRO Felix Fietkau
2024-04-24 18:04 ` [PATCH net-next 4/4] net: add heuristic for enabling " Felix Fietkau
3 siblings, 1 reply; 13+ messages in thread
From: Felix Fietkau @ 2024-04-24 18:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
Preparation for adding TCP fraglist GRO support. It expects packets to be
combined in a similar way as UDP fraglist GSO packets.
One difference is the fact that this code assumes that the TCP flags of
all packets have the same value. This allows simple handling of flags
mutations. For IPv4 packets, NAT is handled in the same way as UDP
fraglist GSO.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/ipv4/tcp_offload.c | 74 ++++++++++++++++++++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 37 ++++++++++++++++++++
2 files changed, 111 insertions(+)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index fab0973f995b..06dbb2e2b2f3 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -28,6 +28,77 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
}
}
+static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
+ __be32 *oldip, __be32 *newip,
+ __be16 *oldport, __be16 *newport)
+{
+ struct tcphdr *th;
+ struct iphdr *iph;
+
+ if (*oldip == *newip && *oldport == *newport)
+ return;
+
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ inet_proto_csum_replace4(&th->check, seg, *oldip, *newip, true);
+ inet_proto_csum_replace2(&th->check, seg, *oldport, *newport, false);
+ *oldport = *newport;
+
+ csum_replace4(&iph->check, *oldip, *newip);
+ *oldip = *newip;
+}
+
+static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+ struct sk_buff *seg;
+ struct tcphdr *th, *th2;
+ struct iphdr *iph, *iph2;
+ __be32 flags, flags2;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+ flags = tcp_flag_word(th);
+ flags2 = tcp_flag_word(tcp_hdr(seg->next));
+
+ if ((tcp_hdr(seg)->dest == tcp_hdr(seg->next)->dest) &&
+ (tcp_hdr(seg)->source == tcp_hdr(seg->next)->source) &&
+ (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
+ (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr) &&
+ (flags == flags2))
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+ iph2 = ip_hdr(seg);
+
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->saddr, &iph->saddr,
+ &th2->source, &th->source);
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->daddr, &iph->daddr,
+ &th2->dest, &th->dest);
+ if (flags == flags2)
+ continue;
+
+ inet_proto_csum_replace4(&th2->check, seg, flags2, flags, false);
+ tcp_flag_word(th2) = flags;
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv4_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -37,6 +108,9 @@ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+ return __tcp_gso_segment_list(skb, features);
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 4b07d1e6c952..12fe79cb2c10 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -40,6 +40,40 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
return 0;
}
+static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+ struct tcphdr *th, *th2;
+ __be32 flags, flags2;
+ struct sk_buff *seg;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ flags = tcp_flag_word(th);
+ flags2 = tcp_flag_word(tcp_hdr(seg->next));
+
+ if (flags == flags2)
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+
+ inet_proto_csum_replace4(&th2->check, seg, flags2, flags, false);
+ tcp_flag_word(th2) = flags;
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv6_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -51,6 +85,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+ return __tcp_gso_segment_list(skb, features);
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets
2024-04-24 18:04 ` [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets Felix Fietkau
@ 2024-04-25 3:03 ` Willem de Bruijn
2024-04-25 7:51 ` Felix Fietkau
0 siblings, 1 reply; 13+ messages in thread
From: Willem de Bruijn @ 2024-04-25 3:03 UTC (permalink / raw)
To: Felix Fietkau, netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
Felix Fietkau wrote:
> Preparation for adding TCP fraglist GRO support. It expects packets to be
> combined in a similar way as UDP fraglist GSO packets.
> One difference is the fact that this code assumes that the TCP flags of
> all packets have the same value. This allows simple handling of flags
> mutations.
Can you clarify this some more? We expect potentially different flags
on first and last packet in a TSO train. With fraglist, the segments
keep their original flags, as the headers are only pulled. When do
segment flags need to be replaced with those of the first segment?
> For IPv4 packets, NAT is handled in the same way as UDP
> fraglist GSO.
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> net/ipv4/tcp_offload.c | 74 ++++++++++++++++++++++++++++++++++++++++
> net/ipv6/tcpv6_offload.c | 37 ++++++++++++++++++++
> 2 files changed, 111 insertions(+)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index fab0973f995b..06dbb2e2b2f3 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -28,6 +28,77 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
> }
> }
>
> +static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
> + __be32 *oldip, __be32 *newip,
> + __be16 *oldport, __be16 *newport)
> +{
> + struct tcphdr *th;
> + struct iphdr *iph;
> +
> + if (*oldip == *newip && *oldport == *newport)
> + return;
> +
> + th = tcp_hdr(seg);
> + iph = ip_hdr(seg);
> +
> + inet_proto_csum_replace4(&th->check, seg, *oldip, *newip, true);
> + inet_proto_csum_replace2(&th->check, seg, *oldport, *newport, false);
> + *oldport = *newport;
> +
> + csum_replace4(&iph->check, *oldip, *newip);
> + *oldip = *newip;
> +}
> +
> +static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
> +{
> + struct sk_buff *seg;
> + struct tcphdr *th, *th2;
> + struct iphdr *iph, *iph2;
> + __be32 flags, flags2;
> +
> + seg = segs;
> + th = tcp_hdr(seg);
> + iph = ip_hdr(seg);
> + flags = tcp_flag_word(th);
> + flags2 = tcp_flag_word(tcp_hdr(seg->next));
> +
> + if ((tcp_hdr(seg)->dest == tcp_hdr(seg->next)->dest) &&
> + (tcp_hdr(seg)->source == tcp_hdr(seg->next)->source) &&
> + (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
> + (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr) &&
> + (flags == flags2))
> + return segs;
> +
> + while ((seg = seg->next)) {
> + th2 = tcp_hdr(seg);
> + iph2 = ip_hdr(seg);
> +
> + __tcpv4_gso_segment_csum(seg,
> + &iph2->saddr, &iph->saddr,
> + &th2->source, &th->source);
> + __tcpv4_gso_segment_csum(seg,
> + &iph2->daddr, &iph->daddr,
> + &th2->dest, &th->dest);
> + if (flags == flags2)
> + continue;
> +
> + inet_proto_csum_replace4(&th2->check, seg, flags2, flags, false);
> + tcp_flag_word(th2) = flags;
> + }
> +
> + return segs;
> +}
> +
> +static struct sk_buff *__tcp_gso_segment_list(struct sk_buff *skb,
> + netdev_features_t features)
For consistency and to avoid having the same name in ipv6,
add the 4/6 suffix here too.
> +{
> + skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
> + if (IS_ERR(skb))
> + return skb;
> +
> + return __tcpv4_gso_segment_list_csum(skb);
> +}
> +
> static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
> netdev_features_t features)
> {
> @@ -37,6 +108,9 @@ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
> if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
> return ERR_PTR(-EINVAL);
>
> + if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
> + return __tcp_gso_segment_list(skb, features);
> +
> if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
> const struct iphdr *iph = ip_hdr(skb);
> struct tcphdr *th = tcp_hdr(skb);
> diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
> index 4b07d1e6c952..12fe79cb2c10 100644
> --- a/net/ipv6/tcpv6_offload.c
> +++ b/net/ipv6/tcpv6_offload.c
> @@ -40,6 +40,40 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
> return 0;
> }
>
> +static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
> +{
> + struct tcphdr *th, *th2;
> + __be32 flags, flags2;
> + struct sk_buff *seg;
> +
> + seg = segs;
> + th = tcp_hdr(seg);
> + flags = tcp_flag_word(th);
> + flags2 = tcp_flag_word(tcp_hdr(seg->next));
> +
> + if (flags == flags2)
> + return segs;
> +
> + while ((seg = seg->next)) {
> + th2 = tcp_hdr(seg);
> +
> + inet_proto_csum_replace4(&th2->check, seg, flags2, flags, false);
> + tcp_flag_word(th2) = flags;
> + }
> +
> + return segs;
> +}
> +
> +static struct sk_buff *__tcp_gso_segment_list(struct sk_buff *skb,
> + netdev_features_t features)
> +{
> + skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
> + if (IS_ERR(skb))
> + return skb;
> +
> + return __tcpv6_gso_segment_list_csum(skb);
> +}
> +
> static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
> netdev_features_t features)
> {
> @@ -51,6 +85,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
> if (!pskb_may_pull(skb, sizeof(*th)))
> return ERR_PTR(-EINVAL);
>
> + if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
> + return __tcp_gso_segment_list(skb, features);
> +
> if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
> const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
> struct tcphdr *th = tcp_hdr(skb);
> --
> 2.44.0
>
x
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets
2024-04-25 3:03 ` Willem de Bruijn
@ 2024-04-25 7:51 ` Felix Fietkau
2024-04-26 8:00 ` Paolo Abeni
0 siblings, 1 reply; 13+ messages in thread
From: Felix Fietkau @ 2024-04-25 7:51 UTC (permalink / raw)
To: Willem de Bruijn, netdev, Eric Dumazet, David S. Miller,
David Ahern, Jakub Kicinski, Paolo Abeni
Cc: linux-kernel
On 25.04.24 05:03, Willem de Bruijn wrote:
> Felix Fietkau wrote:
>> Preparation for adding TCP fraglist GRO support. It expects packets to be
>> combined in a similar way as UDP fraglist GSO packets.
>> One difference is the fact that this code assumes that the TCP flags of
>> all packets have the same value. This allows simple handling of flags
>> mutations.
>
> Can you clarify this some more? We expect potentially different flags
> on first and last packet in a TSO train. With fraglist, the segments
> keep their original flags, as the headers are only pulled. When do
> segment flags need to be replaced with those of the first segment?
Maybe I just misunderstood a comment that Paolo made earlier regarding
TCP header mutations. Will review this again and compare with regular TSO.
- Felix
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets
2024-04-25 7:51 ` Felix Fietkau
@ 2024-04-26 8:00 ` Paolo Abeni
0 siblings, 0 replies; 13+ messages in thread
From: Paolo Abeni @ 2024-04-26 8:00 UTC (permalink / raw)
To: Felix Fietkau, Willem de Bruijn, netdev, Eric Dumazet,
David S. Miller, David Ahern, Jakub Kicinski
Cc: linux-kernel
On Thu, 2024-04-25 at 09:51 +0200, Felix Fietkau wrote:
> On 25.04.24 05:03, Willem de Bruijn wrote:
> > Felix Fietkau wrote:
> > > Preparation for adding TCP fraglist GRO support. It expects packets to be
> > > combined in a similar way as UDP fraglist GSO packets.
> > > One difference is the fact that this code assumes that the TCP flags of
> > > all packets have the same value. This allows simple handling of flags
> > > mutations.
> >
> > Can you clarify this some more? We expect potentially different flags
> > on first and last packet in a TSO train. With fraglist, the segments
> > keep their original flags, as the headers are only pulled. When do
> > segment flags need to be replaced with those of the first segment?
>
> Maybe I just misunderstood a comment that Paolo made earlier regarding
> TCP header mutations. Will review this again and compare with regular TSO.
I likely was not clear, I'm sorry.
Let me try to rephrase. After the GRO stage, and before segmentation,
the stack could change other fields inside the TCP header (beyond
src/dst port). e.g. nftables can clear the ECN bit, or strip all the
TCP options.
The frag_list segmentation should catch such changes and update the
individual segments csum accordingly.
Note that even IPv6 could snat/dnat a packet!
The GRO stage allows aggregating with different flags. Later on, at
segmentation stage, all the individual packets except the last one will
retain the same flags of the first segment, except for the PUSH and FIN
bit, that will be cleared. The last segment will have such bit value
preserved.
Cheers,
Paolo
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH net-next 3/4] net: add code for TCP fraglist GRO
2024-04-24 18:04 [PATCH net-next 0/4] Add TCP fraglist GRO support Felix Fietkau
2024-04-24 18:04 ` [PATCH net-next 1/4] net: move skb_gro_receive_list from udp to core Felix Fietkau
2024-04-24 18:04 ` [PATCH net-next 2/4] net: add support for segmenting TCP fraglist GSO packets Felix Fietkau
@ 2024-04-24 18:04 ` Felix Fietkau
2024-04-26 8:14 ` Paolo Abeni
2024-04-24 18:04 ` [PATCH net-next 4/4] net: add heuristic for enabling " Felix Fietkau
3 siblings, 1 reply; 13+ messages in thread
From: Felix Fietkau @ 2024-04-24 18:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, Jakub Kicinski,
Paolo Abeni, David Ahern
Cc: willemdebruijn.kernel, linux-kernel
This implements fraglist GRO similar to how it's handled in UDP, however
no functional changes are added yet. The next change adds a heuristic for
using fraglist GRO instead of regular GRO.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/net/tcp.h | 3 ++-
net/ipv4/tcp_offload.c | 29 +++++++++++++++++++++++++++--
net/ipv6/tcpv6_offload.c | 11 ++++++++++-
3 files changed, 39 insertions(+), 4 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b935e1ae4caf..875cda53a7c9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2194,7 +2194,8 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ bool fraglist);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 06dbb2e2b2f3..6294e7a5c099 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -252,7 +252,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
return segs;
}
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ bool fraglist)
{
struct sk_buff *pp = NULL;
struct sk_buff *p;
@@ -289,6 +290,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
+ NAPI_GRO_CB(skb)->is_flist = fraglist;
list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -308,6 +310,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
found:
/* Include the IP ID check below from the inner most IP hdr */
flush = NAPI_GRO_CB(p)->flush;
+ flush |= fraglist != NAPI_GRO_CB(p)->is_flist;
flush |= (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -341,6 +344,19 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
flush |= skb_cmp_decrypted(p, skb);
+ if (fraglist) {
+ flush |= (__force int)(flags ^ tcp_flag_word(th2));
+ flush |= skb->ip_summed != p->ip_summed;
+ flush |= skb->csum_level != p->csum_level;
+ flush |= !pskb_may_pull(skb, skb_gro_offset(skb));
+ flush |= NAPI_GRO_CB(p)->count >= 64;
+
+ if (flush || skb_gro_receive_list(p, skb))
+ mss = 1;
+
+ goto out_check_final;
+ }
+
if (flush || skb_gro_receive(p, skb)) {
mss = 1;
goto out_check_final;
@@ -399,7 +415,7 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
- return tcp_gro_receive(head, skb);
+ return tcp_gro_receive(head, skb, false);
}
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
@@ -407,6 +423,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
+ if (NAPI_GRO_CB(skb)->is_flist) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 12fe79cb2c10..239588557dc4 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -24,7 +24,7 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
- return tcp_gro_receive(head, skb);
+ return tcp_gro_receive(head, skb, false);
}
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
@@ -32,6 +32,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
+ if (NAPI_GRO_CB(skb)->is_flist) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
&iph->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [PATCH net-next 3/4] net: add code for TCP fraglist GRO
2024-04-24 18:04 ` [PATCH net-next 3/4] net: add code for TCP fraglist GRO Felix Fietkau
@ 2024-04-26 8:14 ` Paolo Abeni
0 siblings, 0 replies; 13+ messages in thread
From: Paolo Abeni @ 2024-04-26 8:14 UTC (permalink / raw)
To: Felix Fietkau, netdev, Eric Dumazet, David S. Miller,
Jakub Kicinski, David Ahern
Cc: willemdebruijn.kernel, linux-kernel
On Wed, 2024-04-24 at 20:04 +0200, Felix Fietkau wrote:
> This implements fraglist GRO similar to how it's handled in UDP, however
> no functional changes are added yet. The next change adds a heuristic for
> using fraglist GRO instead of regular GRO.
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> include/net/tcp.h | 3 ++-
> net/ipv4/tcp_offload.c | 29 +++++++++++++++++++++++++++--
> net/ipv6/tcpv6_offload.c | 11 ++++++++++-
> 3 files changed, 39 insertions(+), 4 deletions(-)
>
> diff --git a/include/net/tcp.h b/include/net/tcp.h
> index b935e1ae4caf..875cda53a7c9 100644
> --- a/include/net/tcp.h
> +++ b/include/net/tcp.h
> @@ -2194,7 +2194,8 @@ void tcp_v4_destroy_sock(struct sock *sk);
>
> struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
> netdev_features_t features);
> -struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
> +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
> + bool fraglist);
> INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
> INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
> INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index 06dbb2e2b2f3..6294e7a5c099 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -252,7 +252,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
> return segs;
> }
>
> -struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
> +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
> + bool fraglist)
> {
> struct sk_buff *pp = NULL;
> struct sk_buff *p;
> @@ -289,6 +290,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
> len = skb_gro_len(skb);
> flags = tcp_flag_word(th);
>
> + NAPI_GRO_CB(skb)->is_flist = fraglist;
> list_for_each_entry(p, head, list) {
> if (!NAPI_GRO_CB(p)->same_flow)
> continue;
> @@ -308,6 +310,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
> found:
> /* Include the IP ID check below from the inner most IP hdr */
> flush = NAPI_GRO_CB(p)->flush;
> + flush |= fraglist != NAPI_GRO_CB(p)->is_flist;
> flush |= (__force int)(flags & TCP_FLAG_CWR);
> flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
> ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
> @@ -341,6 +344,19 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
> flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
> flush |= skb_cmp_decrypted(p, skb);
>
> + if (fraglist) {
> + flush |= (__force int)(flags ^ tcp_flag_word(th2));
Don't we have this check already a few lines above?
> + flush |= skb->ip_summed != p->ip_summed;
> + flush |= skb->csum_level != p->csum_level;
> + flush |= !pskb_may_pull(skb, skb_gro_offset(skb));
Why we need this check? The earlier skb_gro_may_pull() should ensure
that, right?
> + flush |= NAPI_GRO_CB(p)->count >= 64;
> +
> + if (flush || skb_gro_receive_list(p, skb))
> + mss = 1;
> +
> + goto out_check_final;
TCP flags processing needs some care. You need to propagate the current
packets flag to the old one, and update the older packet csum
accordingly.
Cheers,
Paolo
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
2024-04-24 18:04 [PATCH net-next 0/4] Add TCP fraglist GRO support Felix Fietkau
` (2 preceding siblings ...)
2024-04-24 18:04 ` [PATCH net-next 3/4] net: add code for TCP fraglist GRO Felix Fietkau
@ 2024-04-24 18:04 ` Felix Fietkau
2024-04-24 18:23 ` Eric Dumazet
` (2 more replies)
3 siblings, 3 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-24 18:04 UTC (permalink / raw)
To: netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: willemdebruijn.kernel, linux-kernel
When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.
One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.
When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
socket in the same netns as the receiving device. While this may not
cover all relevant use cases in multi-netns configurations, it should be
good enough for most configurations that need this.
Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps.
rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on: 770 Mbit/s, CPU 40% idle
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/ipv4/tcp_offload.c | 45 ++++++++++++++++++++++++++++++++++++++-
net/ipv6/tcpv6_offload.c | 46 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 89 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 6294e7a5c099..f987e2d8423a 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -404,6 +404,49 @@ void tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
+static bool tcp4_check_fraglist_gro(struct sk_buff *skb)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+ struct net *net = dev_net(skb->dev);
+ unsigned int off, hlen, thlen;
+ struct tcphdr *th;
+ struct sock *sk;
+ int iif, sdif;
+
+ if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+ return false;
+
+ inet_get_iif_sdif(skb, &iif, &sdif);
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return false;
+
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ return false;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return false;
+ }
+
+ sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ iif, sdif);
+ if (!sk)
+ return true;
+
+ sock_put(sk);
+
+ return false;
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -415,7 +458,7 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
- return tcp_gro_receive(head, skb, false);
+ return tcp_gro_receive(head, skb, tcp4_check_fraglist_gro(skb));
}
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 239588557dc4..c214f5cfe595 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,56 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+static bool tcp6_check_fraglist_gro(struct sk_buff *skb)
+{
+ const struct ipv6hdr *hdr = skb_gro_network_header(skb);
+ struct net *net = dev_net(skb->dev);
+ unsigned int off, hlen, thlen;
+ struct tcphdr *th;
+ struct sock *sk;
+ int iif, sdif;
+
+ if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+ return false;
+
+ inet6_get_iif_sdif(skb, &iif, &sdif);
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return false;
+
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ return false;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return false;
+ }
+
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ iif, sdif);
+ if (!sk)
+ return true;
+
+ sock_put(sk);
+
+ return false;
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -24,7 +68,7 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
return NULL;
}
- return tcp_gro_receive(head, skb, false);
+ return tcp_gro_receive(head, skb, tcp6_check_fraglist_gro(skb));
}
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
--
2.44.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
2024-04-24 18:04 ` [PATCH net-next 4/4] net: add heuristic for enabling " Felix Fietkau
@ 2024-04-24 18:23 ` Eric Dumazet
2024-04-24 18:48 ` Felix Fietkau
2024-04-25 7:44 ` kernel test robot
2024-04-25 9:30 ` kernel test robot
2 siblings, 1 reply; 13+ messages in thread
From: Eric Dumazet @ 2024-04-24 18:23 UTC (permalink / raw)
To: Felix Fietkau
Cc: netdev, David S. Miller, David Ahern, Jakub Kicinski, Paolo Abeni,
willemdebruijn.kernel, linux-kernel
On Wed, Apr 24, 2024 at 8:05 PM Felix Fietkau <nbd@nbd.name> wrote:
>
> When forwarding TCP after GRO, software segmentation is very expensive,
> especially when the checksum needs to be recalculated.
> One case where that's currently unavoidable is when routing packets over
> PPPoE. Performance improves significantly when using fraglist GRO
> implemented in the same way as for UDP.
>
> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
> socket in the same netns as the receiving device. While this may not
> cover all relevant use cases in multi-netns configurations, it should be
> good enough for most configurations that need this.
>
> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
> 1Gbps.
>
> rx-gro-list off: 630 Mbit/s, CPU 35% idle
> rx-gro-list on: 770 Mbit/s, CPU 40% idle
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> net/ipv4/tcp_offload.c | 45 ++++++++++++++++++++++++++++++++++++++-
> net/ipv6/tcpv6_offload.c | 46 +++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 89 insertions(+), 2 deletions(-)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index 6294e7a5c099..f987e2d8423a 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -404,6 +404,49 @@ void tcp_gro_complete(struct sk_buff *skb)
> }
> EXPORT_SYMBOL(tcp_gro_complete);
>
> +static bool tcp4_check_fraglist_gro(struct sk_buff *skb)
> +{
> + const struct iphdr *iph = skb_gro_network_header(skb);
> + struct net *net = dev_net(skb->dev);
> + unsigned int off, hlen, thlen;
> + struct tcphdr *th;
> + struct sock *sk;
> + int iif, sdif;
> +
> + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
> + return false;
> +
> + inet_get_iif_sdif(skb, &iif, &sdif);
> +
> + off = skb_gro_offset(skb);
> + hlen = off + sizeof(*th);
> + th = skb_gro_header(skb, hlen, off);
> + if (unlikely(!th))
> + return false;
> +
> + thlen = th->doff * 4;
> + if (thlen < sizeof(*th))
> + return false;
> +
> + hlen = off + thlen;
> + if (!skb_gro_may_pull(skb, hlen)) {
> + th = skb_gro_header_slow(skb, hlen, off);
> + if (unlikely(!th))
> + return false;
> + }
> +
> + sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
> + iph->saddr, th->source,
> + iph->daddr, ntohs(th->dest),
> + iif, sdif);
Presumably all this could be done only for the first skb/segment of a GRO train.
We could store the fraglist in a single bit in NAPI_GRO_CB(skb) ?
GRO does a full tuple evaluation, we can trust it.
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
2024-04-24 18:23 ` Eric Dumazet
@ 2024-04-24 18:48 ` Felix Fietkau
0 siblings, 0 replies; 13+ messages in thread
From: Felix Fietkau @ 2024-04-24 18:48 UTC (permalink / raw)
To: Eric Dumazet
Cc: netdev, David S. Miller, David Ahern, Jakub Kicinski, Paolo Abeni,
willemdebruijn.kernel, linux-kernel
On 24.04.24 20:23, Eric Dumazet wrote:
> On Wed, Apr 24, 2024 at 8:05 PM Felix Fietkau <nbd@nbd.name> wrote:
>>
>> When forwarding TCP after GRO, software segmentation is very expensive,
>> especially when the checksum needs to be recalculated.
>> One case where that's currently unavoidable is when routing packets over
>> PPPoE. Performance improves significantly when using fraglist GRO
>> implemented in the same way as for UDP.
>>
>> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
>> socket in the same netns as the receiving device. While this may not
>> cover all relevant use cases in multi-netns configurations, it should be
>> good enough for most configurations that need this.
>>
>> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
>> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
>> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
>> 1Gbps.
>>
>> rx-gro-list off: 630 Mbit/s, CPU 35% idle
>> rx-gro-list on: 770 Mbit/s, CPU 40% idle
>>
>> Signed-off-by: Felix Fietkau <nbd@nbd.name>
>> ---
>> net/ipv4/tcp_offload.c | 45 ++++++++++++++++++++++++++++++++++++++-
>> net/ipv6/tcpv6_offload.c | 46 +++++++++++++++++++++++++++++++++++++++-
>> 2 files changed, 89 insertions(+), 2 deletions(-)
>>
>> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
>> index 6294e7a5c099..f987e2d8423a 100644
>> --- a/net/ipv4/tcp_offload.c
>> +++ b/net/ipv4/tcp_offload.c
>> @@ -404,6 +404,49 @@ void tcp_gro_complete(struct sk_buff *skb)
>> }
>> EXPORT_SYMBOL(tcp_gro_complete);
>>
>> +static bool tcp4_check_fraglist_gro(struct sk_buff *skb)
>> +{
>> + const struct iphdr *iph = skb_gro_network_header(skb);
>> + struct net *net = dev_net(skb->dev);
>> + unsigned int off, hlen, thlen;
>> + struct tcphdr *th;
>> + struct sock *sk;
>> + int iif, sdif;
>> +
>> + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
>> + return false;
>> +
>> + inet_get_iif_sdif(skb, &iif, &sdif);
>> +
>> + off = skb_gro_offset(skb);
>> + hlen = off + sizeof(*th);
>> + th = skb_gro_header(skb, hlen, off);
>> + if (unlikely(!th))
>> + return false;
>> +
>> + thlen = th->doff * 4;
>> + if (thlen < sizeof(*th))
>> + return false;
>> +
>> + hlen = off + thlen;
>> + if (!skb_gro_may_pull(skb, hlen)) {
>> + th = skb_gro_header_slow(skb, hlen, off);
>> + if (unlikely(!th))
>> + return false;
>> + }
>> +
>> + sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
>> + iph->saddr, th->source,
>> + iph->daddr, ntohs(th->dest),
>> + iif, sdif);
>
> Presumably all this could be done only for the first skb/segment of a GRO train.
>
> We could store the fraglist in a single bit in NAPI_GRO_CB(skb) ?
>
> GRO does a full tuple evaluation, we can trust it.
I will look into that, thanks.
- Felix
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
2024-04-24 18:04 ` [PATCH net-next 4/4] net: add heuristic for enabling " Felix Fietkau
2024-04-24 18:23 ` Eric Dumazet
@ 2024-04-25 7:44 ` kernel test robot
2024-04-25 9:30 ` kernel test robot
2 siblings, 0 replies; 13+ messages in thread
From: kernel test robot @ 2024-04-25 7:44 UTC (permalink / raw)
To: Felix Fietkau, netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: oe-kbuild-all, willemdebruijn.kernel, linux-kernel
Hi Felix,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/Felix-Fietkau/net-move-skb_gro_receive_list-from-udp-to-core/20240425-060838
base: net-next/main
patch link: https://lore.kernel.org/r/20240424180458.56211-5-nbd%40nbd.name
patch subject: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
config: openrisc-defconfig (https://download.01.org/0day-ci/archive/20240425/202404251517.ZUALo8e5-lkp@intel.com/config)
compiler: or1k-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240425/202404251517.ZUALo8e5-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404251517.ZUALo8e5-lkp@intel.com/
All warnings (new ones prefixed by >>):
net/ipv6/tcpv6_offload.c: In function 'tcp6_check_fraglist_gro':
net/ipv6/tcpv6_offload.c:48:14: error: implicit declaration of function '__inet6_lookup_established'; did you mean '__inet_lookup_established'? [-Werror=implicit-function-declaration]
48 | sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
| __inet_lookup_established
>> net/ipv6/tcpv6_offload.c:48:12: warning: assignment to 'struct sock *' from 'int' makes pointer from integer without a cast [-Wint-conversion]
48 | sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
| ^
cc1: some warnings being treated as errors
vim +48 net/ipv6/tcpv6_offload.c
16
17 static bool tcp6_check_fraglist_gro(struct sk_buff *skb)
18 {
19 const struct ipv6hdr *hdr = skb_gro_network_header(skb);
20 struct net *net = dev_net(skb->dev);
21 unsigned int off, hlen, thlen;
22 struct tcphdr *th;
23 struct sock *sk;
24 int iif, sdif;
25
26 if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
27 return false;
28
29 inet6_get_iif_sdif(skb, &iif, &sdif);
30
31 off = skb_gro_offset(skb);
32 hlen = off + sizeof(*th);
33 th = skb_gro_header(skb, hlen, off);
34 if (unlikely(!th))
35 return false;
36
37 thlen = th->doff * 4;
38 if (thlen < sizeof(*th))
39 return false;
40
41 hlen = off + thlen;
42 if (!skb_gro_may_pull(skb, hlen)) {
43 th = skb_gro_header_slow(skb, hlen, off);
44 if (unlikely(!th))
45 return false;
46 }
47
> 48 sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
49 &hdr->saddr, th->source,
50 &hdr->daddr, ntohs(th->dest),
51 iif, sdif);
52 if (!sk)
53 return true;
54
55 sock_put(sk);
56
57 return false;
58 }
59
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
2024-04-24 18:04 ` [PATCH net-next 4/4] net: add heuristic for enabling " Felix Fietkau
2024-04-24 18:23 ` Eric Dumazet
2024-04-25 7:44 ` kernel test robot
@ 2024-04-25 9:30 ` kernel test robot
2 siblings, 0 replies; 13+ messages in thread
From: kernel test robot @ 2024-04-25 9:30 UTC (permalink / raw)
To: Felix Fietkau, netdev, Eric Dumazet, David S. Miller, David Ahern,
Jakub Kicinski, Paolo Abeni
Cc: llvm, oe-kbuild-all, willemdebruijn.kernel, linux-kernel
Hi Felix,
kernel test robot noticed the following build errors:
[auto build test ERROR on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/Felix-Fietkau/net-move-skb_gro_receive_list-from-udp-to-core/20240425-060838
base: net-next/main
patch link: https://lore.kernel.org/r/20240424180458.56211-5-nbd%40nbd.name
patch subject: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO
config: um-x86_64_defconfig (https://download.01.org/0day-ci/archive/20240425/202404251744.Tq24y05K-lkp@intel.com/config)
compiler: clang version 15.0.7 (https://github.com/llvm/llvm-project.git 8dfdcc7b7bf66834a761bd8de445840ef68e4d1a)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240425/202404251744.Tq24y05K-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404251744.Tq24y05K-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from net/ipv6/tcpv6_offload.c:9:
In file included from include/linux/skbuff.h:17:
In file included from include/linux/bvec.h:10:
In file included from include/linux/highmem.h:12:
In file included from include/linux/hardirq.h:11:
In file included from arch/um/include/asm/hardirq.h:5:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/um/include/asm/io.h:24:
include/asm-generic/io.h:547:31: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
val = __raw_readb(PCI_IOBASE + addr);
~~~~~~~~~~ ^
include/asm-generic/io.h:560:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
~~~~~~~~~~ ^
include/uapi/linux/byteorder/little_endian.h:37:51: note: expanded from macro '__le16_to_cpu'
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
^
In file included from net/ipv6/tcpv6_offload.c:9:
In file included from include/linux/skbuff.h:17:
In file included from include/linux/bvec.h:10:
In file included from include/linux/highmem.h:12:
In file included from include/linux/hardirq.h:11:
In file included from arch/um/include/asm/hardirq.h:5:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/um/include/asm/io.h:24:
include/asm-generic/io.h:573:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
~~~~~~~~~~ ^
include/uapi/linux/byteorder/little_endian.h:35:51: note: expanded from macro '__le32_to_cpu'
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
^
In file included from net/ipv6/tcpv6_offload.c:9:
In file included from include/linux/skbuff.h:17:
In file included from include/linux/bvec.h:10:
In file included from include/linux/highmem.h:12:
In file included from include/linux/hardirq.h:11:
In file included from arch/um/include/asm/hardirq.h:5:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/um/include/asm/io.h:24:
include/asm-generic/io.h:584:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
__raw_writeb(value, PCI_IOBASE + addr);
~~~~~~~~~~ ^
include/asm-generic/io.h:594:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
~~~~~~~~~~ ^
include/asm-generic/io.h:604:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
~~~~~~~~~~ ^
include/asm-generic/io.h:692:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
readsb(PCI_IOBASE + addr, buffer, count);
~~~~~~~~~~ ^
include/asm-generic/io.h:700:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
readsw(PCI_IOBASE + addr, buffer, count);
~~~~~~~~~~ ^
include/asm-generic/io.h:708:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
readsl(PCI_IOBASE + addr, buffer, count);
~~~~~~~~~~ ^
include/asm-generic/io.h:717:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
writesb(PCI_IOBASE + addr, buffer, count);
~~~~~~~~~~ ^
include/asm-generic/io.h:726:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
writesw(PCI_IOBASE + addr, buffer, count);
~~~~~~~~~~ ^
include/asm-generic/io.h:735:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
writesl(PCI_IOBASE + addr, buffer, count);
~~~~~~~~~~ ^
>> net/ipv6/tcpv6_offload.c:48:7: error: call to undeclared function '__inet6_lookup_established'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
^
net/ipv6/tcpv6_offload.c:48:7: note: did you mean '__inet_lookup_established'?
include/net/inet_hashtables.h:371:14: note: '__inet_lookup_established' declared here
struct sock *__inet_lookup_established(struct net *net,
^
net/ipv6/tcpv6_offload.c:48:5: error: incompatible integer to pointer conversion assigning to 'struct sock *' from 'int' [-Wint-conversion]
sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 warnings and 2 errors generated.
vim +/__inet6_lookup_established +48 net/ipv6/tcpv6_offload.c
16
17 static bool tcp6_check_fraglist_gro(struct sk_buff *skb)
18 {
19 const struct ipv6hdr *hdr = skb_gro_network_header(skb);
20 struct net *net = dev_net(skb->dev);
21 unsigned int off, hlen, thlen;
22 struct tcphdr *th;
23 struct sock *sk;
24 int iif, sdif;
25
26 if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
27 return false;
28
29 inet6_get_iif_sdif(skb, &iif, &sdif);
30
31 off = skb_gro_offset(skb);
32 hlen = off + sizeof(*th);
33 th = skb_gro_header(skb, hlen, off);
34 if (unlikely(!th))
35 return false;
36
37 thlen = th->doff * 4;
38 if (thlen < sizeof(*th))
39 return false;
40
41 hlen = off + thlen;
42 if (!skb_gro_may_pull(skb, hlen)) {
43 th = skb_gro_header_slow(skb, hlen, off);
44 if (unlikely(!th))
45 return false;
46 }
47
> 48 sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
49 &hdr->saddr, th->source,
50 &hdr->daddr, ntohs(th->dest),
51 iif, sdif);
52 if (!sk)
53 return true;
54
55 sock_put(sk);
56
57 return false;
58 }
59
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 13+ messages in thread