From: Florian Fainelli <f.fainelli@gmail.com>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, gregkh@linuxfoundation.org,
stable@vger.kernel.org, edumazet@google.com,
sthemmin@microsoft.com
Subject: [PATCH stable 4.9 24/29] net: add rb_to_skb() and other rb tree helpers
Date: Tue, 9 Oct 2018 15:49:19 -0700
Message-ID: <20181009224924.30151-25-f.fainelli@gmail.com>
In-Reply-To: <20181009224924.30151-1-f.fainelli@gmail.com>

From: Eric Dumazet <edumazet@google.com>

Generalize private netem_rb_to_skb()

TCP rtx queue will soon be converted to rb-tree,
so we will need skb_rbtree_walk() helpers.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 18a4c0eab2623cc95be98a1e6af1ad18e7695977)
---
include/linux/skbuff.h | 18 ++++++++++++++++++
net/ipv4/tcp_input.c | 33 ++++++++++++---------------------
2 files changed, 30 insertions(+), 21 deletions(-)
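
Illustrative note, not part of the patch: the sketch below shows how the new
helpers read once this change is applied. rb_to_skb() wraps rb_entry_safe(),
so skb_rb_first()/skb_rb_next() return NULL for an empty tree or past the
last node, and skb_rbtree_walk() can be used much like skb_queue_walk().
The function name tcp_ofo_count_before() is hypothetical and assumes it
lives in net/ipv4/tcp_input.c after this patch.

	/* Hypothetical helper: count out-of-order segments whose data
	 * ends before @seq, using the new skb_rbtree_walk() iterator.
	 */
	static unsigned int tcp_ofo_count_before(struct sock *sk, u32 seq)
	{
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *skb;
		unsigned int count = 0;

		/* Walk starts at skb_rb_first() and stops when
		 * skb_rb_next() returns NULL.
		 */
		skb_rbtree_walk(skb, &tp->out_of_order_queue) {
			if (!before(TCP_SKB_CB(skb)->end_seq, seq))
				break;
			count++;
		}
		return count;
	}
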
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 11974e63a41d..7e7e12aeaf82 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2988,6 +2988,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root) rb_to_skb(rb_last(root))
+#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3002,6 +3008,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
+#define skb_rbtree_walk(skb, root) \
+ for (skb = skb_rb_first(root); skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb) \
+ for (; skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp) \
+ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
+ skb = tmp)
+
#define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9d0b73aa649f..c169a2b261be 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4406,7 +4406,7 @@ static void tcp_ofo_queue(struct sock *sk)
p = rb_first(&tp->out_of_order_queue);
while (p) {
- skb = rb_entry(p, struct sk_buff, rbnode);
+ skb = rb_to_skb(p);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
@@ -4470,7 +4470,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct rb_node **p, *q, *parent;
+ struct rb_node **p, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
bool fragstolen;
@@ -4529,7 +4529,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
parent = NULL;
while (*p) {
parent = *p;
- skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ skb1 = rb_to_skb(parent);
if (before(seq, TCP_SKB_CB(skb1)->seq)) {
p = &parent->rb_left;
continue;
@@ -4574,9 +4574,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
merge_right:
/* Remove other segments covered by skb. */
- while ((q = rb_next(&skb->rbnode)) != NULL) {
- skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+ while ((skb1 = skb_rb_next(skb)) != NULL) {
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4591,7 +4589,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
tcp_drop(sk, skb1);
}
/* If there is no skb after us, we are the last_skb ! */
- if (!q)
+ if (!skb1)
tp->ooo_last_skb = skb;
add_sack:
@@ -4792,7 +4790,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
if (list)
return !skb_queue_is_last(list, skb) ? skb->next : NULL;
- return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+ return skb_rb_next(skb);
}
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4821,7 +4819,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
while (*p) {
parent = *p;
- skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ skb1 = rb_to_skb(parent);
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
p = &parent->rb_left;
else
@@ -4941,19 +4939,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
u32 range_truesize, sum_tiny = 0;
struct sk_buff *skb, *head;
- struct rb_node *p;
u32 start, end;
- p = rb_first(&tp->out_of_order_queue);
- skb = rb_entry_safe(p, struct sk_buff, rbnode);
+ skb = skb_rb_first(&tp->out_of_order_queue);
new_range:
if (!skb) {
- p = rb_last(&tp->out_of_order_queue);
- /* Note: This is possible p is NULL here. We do not
- * use rb_entry_safe(), as ooo_last_skb is valid only
- * if rbtree is not empty.
- */
- tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+ tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
return;
}
start = TCP_SKB_CB(skb)->seq;
@@ -4961,7 +4952,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
range_truesize = skb->truesize;
for (head = skb;;) {
- skb = tcp_skb_next(skb, NULL);
+ skb = skb_rb_next(skb);
/* Range is terminated when we see a gap or when
* we are at the queue end.
@@ -5017,7 +5008,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
goal -= rb_to_skb(node)->truesize;
- tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+ tcp_drop(sk, rb_to_skb(node));
if (!prev || goal <= 0) {
sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -5027,7 +5018,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
}
node = prev;
} while (node);
- tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+ tp->ooo_last_skb = rb_to_skb(prev);
/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
--
2.17.1
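
A further hedged sketch, again not part of this series: skb_rbtree_walk_from_safe()
loads the next node into @tmp before the loop body runs, so the current skb may be
unlinked and freed during the walk. The function name tcp_ofo_drop_range() is
hypothetical; tcp_drop() and rb_erase() are used as in the tcp_input.c context above.

	/* Hypothetical helper: starting from @skb, drop every queued segment
	 * that begins at or before @end_seq. Safe because @tmp already holds
	 * skb_rb_next(skb) when @skb is erased and freed.
	 */
	static void tcp_ofo_drop_range(struct sock *sk, struct sk_buff *skb,
				       u32 end_seq)
	{
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *tmp;

		skb_rbtree_walk_from_safe(skb, tmp) {
			if (after(TCP_SKB_CB(skb)->seq, end_seq))
				break;
			rb_erase(&skb->rbnode, &tp->out_of_order_queue);
			tcp_drop(sk, skb);
		}
	}
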
Thread overview: 34+ messages
2018-10-09 22:48 [PATCH stable 4.9 00/29] backport of IP fragmentation fixes Florian Fainelli
2018-10-09 22:48 ` [PATCH stable 4.9 01/29] inet: frags: change inet_frags_init_net() return value Florian Fainelli
2018-10-09 22:48 ` [PATCH stable 4.9 02/29] inet: frags: add a pointer to struct netns_frags Florian Fainelli
2018-10-09 22:48 ` [PATCH stable 4.9 03/29] inet: frags: refactor ipfrag_init() Florian Fainelli
2018-10-09 22:48 ` [PATCH stable 4.9 04/29] inet: frags: refactor ipv6_frag_init() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 05/29] inet: frags: refactor lowpan_net_frag_init() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 06/29] ipv6: export ip6 fragments sysctl to unprivileged users Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 07/29] rhashtable: add schedule points Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 08/29] inet: frags: use rhashtables for reassembly units Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 09/29] inet: frags: remove some helpers Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 10/29] inet: frags: get rif of inet_frag_evicting() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 11/29] inet: frags: remove inet_frag_maybe_warn_overflow() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 12/29] inet: frags: break the 2GB limit for frags storage Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 13/29] inet: frags: do not clone skb in ip_expire() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 14/29] ipv6: frags: rewrite ip6_expire_frag_queue() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 15/29] rhashtable: reorganize struct rhashtable layout Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 16/29] inet: frags: reorganize struct netns_frags Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 17/29] inet: frags: get rid of ipfrag_skb_cb/FRAG_CB Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 18/29] inet: frags: fix ip6frag_low_thresh boundary Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 19/29] ip: discard IPv4 datagrams with overlapping segments Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 20/29] net: speed up skb_rbtree_purge() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 21/29] net: modify skb_rbtree_purge to return the truesize of all purged skbs Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 22/29] ipv6: defrag: drop non-last frags smaller than min mtu Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 23/29] net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends Florian Fainelli
2018-10-09 22:49 ` Florian Fainelli [this message]
2018-10-09 22:49 ` [PATCH stable 4.9 25/29] net: sk_buff rbnode reorg Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 26/29] ipv4: frags: precedence bug in ip_expire() Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 27/29] ip: add helpers to process in-order fragments faster Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 28/29] ip: process in-order fragments efficiently Florian Fainelli
2018-10-09 22:49 ` [PATCH stable 4.9 29/29] ip: frags: fix crash in ip_do_fragment() Florian Fainelli
2018-10-10 0:46 ` [PATCH stable 4.9 00/29] backport of IP fragmentation fixes Eric Dumazet
2018-10-10 4:15 ` Florian Fainelli
2018-10-10 23:18 ` Stephen Hemminger
2018-10-10 23:23 ` Florian Fainelli