diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 9c05a50d6013..270c3d0233ad 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -577,6 +577,10 @@ static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id,
 		return -1;
 	}
 
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
+	skb_set_inner_protocol(skb, skb->protocol);
+	skb->encapsulation = 1;
+
 	__skb_push(skb, PPPOE_SES_HLEN);
 	skb_reset_network_header(skb);
 
@@ -771,7 +775,6 @@ struct nf_flow_xmit {
 	const void *source;
 	struct net_device *outdev;
 	struct flow_offload_tuple *tuple;
-	bool needs_gso_segment;
 };
 
 static void __nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
@@ -792,41 +795,10 @@ static void __nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
 	dev_queue_xmit(skb);
 }
 
-static unsigned int nf_flow_encap_gso_xmit(struct net *net, struct sk_buff *skb,
-					   struct nf_flow_xmit *xmit)
-{
-	struct sk_buff *segs, *nskb;
-
-	segs = skb_gso_segment(skb, 0);
-	if (IS_ERR(segs))
-		return NF_DROP;
-
-	if (segs)
-		consume_skb(skb);
-	else
-		segs = skb;
-
-	skb_list_walk_safe(segs, segs, nskb) {
-		skb_mark_not_on_list(segs);
-
-		if (nf_flow_encap_push(segs, xmit->tuple, xmit->outdev) < 0) {
-			kfree_skb(segs);
-			kfree_skb_list(nskb);
-			return NF_STOLEN;
-		}
-		__nf_flow_queue_xmit(net, segs, xmit);
-	}
-
-	return NF_STOLEN;
-}
-
 static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
 				       struct nf_flow_xmit *xmit)
 {
 	if (xmit->tuple->encap_num) {
-		if (skb_is_gso(skb) && xmit->needs_gso_segment)
-			return nf_flow_encap_gso_xmit(net, skb, xmit);
-
 		if (nf_flow_encap_push(skb, xmit->tuple, xmit->outdev) < 0)
 			return NF_DROP;
 	}
@@ -910,7 +882,6 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 		return NF_DROP;
 	}
 	xmit.tuple = other_tuple;
-	xmit.needs_gso_segment = tuplehash->tuple.needs_gso_segment;
 	return nf_flow_queue_xmit(state->net, skb, &xmit);
 }
 
@@ -1231,7 +1202,6 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 		return NF_DROP;
 	}
 	xmit.tuple = other_tuple;
-	xmit.needs_gso_segment = tuplehash->tuple.needs_gso_segment;
 	return nf_flow_queue_xmit(state->net, skb, &xmit);
 }