[PATCH net-next] net: ipv4: fix drop handling in ip_list_rcv() and ip_list_rcv_finish()
From: Edward Cree @ 2018-07-04 18:23 UTC (permalink / raw)
  To: davem; +Cc: netdev

Since the callees (ip_rcv_core() and ip_rcv_finish_core()) might free or
steal the skb, we can't use the list_cut_before() approach; we can't even
do a list_del(&skb->list) in the drop case, because the skb might already
have been freed and reused.
So instead, take each skb off the source list before processing it, and
add it to the sublist afterwards only if it was neither freed nor stolen.
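
For reference, the pattern being switched to can be sketched roughly as
below (illustrative only; consume() and dispatch_sublist() are hypothetical
stand-ins for the real callees and sublist dispatch, and the per-dst/per-dev
sublist splitting is omitted):

	/* Sketch of the unlink-then-relink batching pattern, not the patch
	 * itself: each skb is detached before the call that may free it,
	 * and only survivors are re-linked onto the per-batch sublist.
	 */
	static void process_batch(struct net *net, struct sock *sk,
				  struct list_head *head)
	{
		struct sk_buff *skb, *next;
		LIST_HEAD(sublist);

		list_for_each_entry_safe(skb, next, head, list) {
			/* detach first: the callee may free or steal skb */
			list_del(&skb->list);
			if (consume(net, sk, skb) == NET_RX_DROP)
				continue;	/* skb is gone; don't touch skb->list */
			/* survivor: queue it for batched dispatch */
			list_add_tail(&skb->list, &sublist);
		}
		dispatch_sublist(&sublist);	/* hypothetical batch handler */
	}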

Fixes: 5fa12739a53d ("net: ipv4: listify ip_rcv_finish")
Fixes: 17266ee93984 ("net: ipv4: listified version of ip_rcv")
Signed-off-by: Edward Cree <ecree@solarflare.com>
---
 net/ipv4/ip_input.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 24b9b0210aeb..14ba628b2761 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -540,24 +540,27 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
+		list_del(&skb->list);
 		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
 			continue;
 
 		dst = skb_dst(skb);
 		if (curr_dst != dst) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
 				ip_sublist_rcv_finish(&sublist);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dst = dst;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv_finish(head);
+	ip_sublist_rcv_finish(&sublist);
 }
 
 static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
@@ -577,24 +580,27 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
+		list_del(&skb->list);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;
 
 		if (curr_dev != dev || curr_net != net) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
-				ip_sublist_rcv(&sublist, dev, net);
+				ip_sublist_rcv(&sublist, curr_dev, curr_net);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dev = dev;
 			curr_net = net;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv(head, curr_dev, curr_net);
+	ip_sublist_rcv(&sublist, curr_dev, curr_net);
 }

