* [PATCH v2 net-next 0/2] add multi-buff support for xdp running in generic mode
@ 2023-11-30 9:11 Lorenzo Bianconi
2023-11-30 9:11 ` [PATCH v2 net-next 1/2] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
2023-11-30 9:11 ` [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
From: Lorenzo Bianconi @ 2023-11-30 9:11 UTC (permalink / raw)
To: netdev
Cc: davem, edumazet, kuba, pabeni, lorenzo.bianconi, bpf, hawk, toke,
willemdebruijn.kernel, jasowang
Introduce multi-buffer support for xdp running in generic mode by no longer
unconditionally linearizing the skb in the netif_receive_generic_xdp routine.
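For reference, a frags-aware program to exercise this path can be as simple as
the untested sketch below (SEC("xdp.frags") is the libbpf section name that
sets BPF_F_XDP_HAS_FRAGS; attach it in generic/skb mode):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_mb_pass(struct xdp_md *ctx)
{
	/* bpf_xdp_get_buff_len() accounts for the linear area plus all
	 * the fragments of a non-linear xdp_buff
	 */
	if (bpf_xdp_get_buff_len(ctx) < ETH_HLEN)
		return XDP_DROP;

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";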
Changes since v1:
- explicitly keep the skb segmented in netif_skb_check_for_generic_xdp() and
do not rely on pskb_expand_head()
Lorenzo Bianconi (2):
xdp: rely on skb pointer reference in do_xdp_generic and
netif_receive_generic_xdp
xdp: add multi-buff support for xdp running in generic mode
drivers/net/tun.c | 4 +-
include/linux/netdevice.h | 2 +-
net/core/dev.c | 158 ++++++++++++++++++++++++++++++--------
3 files changed, 130 insertions(+), 34 deletions(-)
--
2.43.0
* [PATCH v2 net-next 1/2] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp
2023-11-30 9:11 [PATCH v2 net-next 0/2] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
@ 2023-11-30 9:11 ` Lorenzo Bianconi
2023-11-30 9:52 ` Jesper Dangaard Brouer
2023-11-30 9:11 ` [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
From: Lorenzo Bianconi @ 2023-11-30 9:11 UTC (permalink / raw)
To: netdev
Cc: davem, edumazet, kuba, pabeni, lorenzo.bianconi, bpf, hawk, toke,
willemdebruijn.kernel, jasowang
Rely on a reference to the skb pointer (struct sk_buff **) instead of the
skb pointer itself in the do_xdp_generic and netif_receive_generic_xdp
routine signatures, so the callee can replace the skb. This is a
preliminary patch to add multi-buff support for xdp running in generic mode.
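E.g. on the caller side this becomes (sketch, error handling elided,
mirroring the tun.c hunks below):

	ret = do_xdp_generic(xdp_prog, &skb);
	if (ret != XDP_PASS)
		return ret;
	/* the skb may have been replaced behind &skb (a later patch in the
	 * series reallocates it), so only dereference the updated pointer
	 * from here on
	 */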
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/tun.c | 4 ++--
include/linux/netdevice.h | 2 +-
net/core/dev.c | 16 +++++++++-------
3 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afa5497f7c35..206adddff699 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1921,7 +1921,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
@@ -2511,7 +2511,7 @@ static int tun_xdp_one(struct tun_struct *tun,
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
ret = 0;
goto out;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 998c7aaa98b8..c3fafe0ead44 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3925,7 +3925,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 3950ced396b5..4df68d7f04a2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4915,10 +4915,11 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
+ struct sk_buff *skb = *pskb;
u32 act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
@@ -4999,24 +5000,24 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
- act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
- err = xdp_do_generic_redirect(skb->dev, skb,
+ err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
- generic_xdp_tx(skb, xdp_prog);
+ generic_xdp_tx(*pskb, xdp_prog);
break;
}
return XDP_DROP;
@@ -5024,7 +5025,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
return XDP_PASS;
out_redir:
- kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
@@ -5347,7 +5348,8 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret2;
migrate_disable();
- ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
+ &skb);
migrate_enable();
if (ret2 != XDP_PASS) {
--
2.43.0
* [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode
2023-11-30 9:11 [PATCH v2 net-next 0/2] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2023-11-30 9:11 ` [PATCH v2 net-next 1/2] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
@ 2023-11-30 9:11 ` Lorenzo Bianconi
2023-11-30 10:36 ` Jesper Dangaard Brouer
From: Lorenzo Bianconi @ 2023-11-30 9:11 UTC (permalink / raw)
To: netdev
Cc: davem, edumazet, kuba, pabeni, lorenzo.bianconi, bpf, hawk, toke,
willemdebruijn.kernel, jasowang
Similar to native xdp, do not always linearize the skb in the
netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
processed by the eBPF program. This allows adding multi-buffer support
for xdp running in generic mode.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 119 insertions(+), 25 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4df68d7f04a2..0d08e755bb7f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
orig_data_end = xdp->data_end;
orig_data = xdp->data;
@@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
skb->len += off; /* positive on grow, negative on shrink */
}
+ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
/* check if XDP changed eth hdr such SKB needs update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
@@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
- struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
+ struct bpf_prog *prog)
{
struct sk_buff *skb = *pskb;
- u32 act = XDP_DROP;
-
- /* Reinjected packets coming from act_mirred or similar should
- * not get XDP generic processing.
- */
- if (skb_is_redirected(skb))
- return XDP_PASS;
+ int err;
- /* XDP packets must be linear and must have sufficient headroom
- * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
- * native XDP provides, thus we need to do it here as well.
+ /* XDP does not support fraglist so we need to linearize
+ * the skb.
*/
- if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
- skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+ if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
int troom = skb->tail + skb->data_len - skb->end;
/* In case we have to go down the path and also linearize,
* then lets do the pskb_expand_head() work just once here.
*/
- if (pskb_expand_head(skb,
- hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
- troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
- goto do_drop;
- if (skb_linearize(skb))
- goto do_drop;
+ err = pskb_expand_head(skb,
+ hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+ troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+ if (err)
+ return err;
+
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+ * bytes. This is the guarantee that also native XDP provides,
+ * thus we need to do it here as well.
+ */
+ if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
+ skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+ u32 mac_len = skb->data - skb_mac_header(skb);
+ u32 size, len, max_head_size, off;
+ struct sk_buff *nskb;
+ int i, head_off;
+
+ __skb_push(skb, mac_len);
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+ XDP_PACKET_HEADROOM);
+ if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+ return -ENOMEM;
+
+ size = min_t(u32, skb->len, max_head_size);
+ nskb = netdev_alloc_skb(skb->dev, size + XDP_PACKET_HEADROOM);
+ if (!nskb)
+ return -ENOMEM;
+
+ skb_reserve(nskb, XDP_PACKET_HEADROOM);
+ skb_copy_header(nskb, skb);
+
+ err = skb_copy_bits(skb, 0, nskb->data, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+ skb_put(nskb, size);
+
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+
+ off = size;
+ len = skb->len - off;
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ struct page *page;
+ void *frag;
+
+ size = min_t(u32, len, PAGE_SIZE);
+ frag = netdev_alloc_frag(size);
+ if (!frag) {
+ consume_skb(nskb);
+ return -ENOMEM;
+ }
+
+ page = virt_to_head_page(frag);
+ skb_add_rx_frag(nskb, i, page,
+ frag - page_address(page), size, size);
+ err = skb_copy_bits(skb, off, frag, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ len -= size;
+ off += size;
+ }
+
+ consume_skb(skb);
+ *pskb = nskb;
+ __skb_pull(nskb, mac_len);
}
- act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+ return 0;
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
+ struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ u32 act = XDP_DROP;
+
+ /* Reinjected packets coming from act_mirred or similar should
+ * not get XDP generic processing.
+ */
+ if (skb_is_redirected(*pskb))
+ return XDP_PASS;
+
+ if (netif_skb_check_for_generic_xdp(pskb, xdp_prog))
+ goto do_drop;
+
+ act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+ bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(skb->dev, xdp_prog, act);
+ trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
- kfree_skb(skb);
+ kfree_skb(*pskb);
break;
}
--
2.43.0
* Re: [PATCH v2 net-next 1/2] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp
2023-11-30 9:11 ` [PATCH v2 net-next 1/2] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
@ 2023-11-30 9:52 ` Jesper Dangaard Brouer
From: Jesper Dangaard Brouer @ 2023-11-30 9:52 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: davem, edumazet, kuba, pabeni, lorenzo.bianconi, bpf, toke,
willemdebruijn.kernel, jasowang
On 11/30/23 10:11, Lorenzo Bianconi wrote:
> Rely on a reference to the skb pointer (struct sk_buff **) instead of the
> skb pointer itself in the do_xdp_generic and netif_receive_generic_xdp
> routine signatures, so the callee can replace the skb. This is a
> preliminary patch to add multi-buff support for xdp running in generic mode.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> drivers/net/tun.c | 4 ++--
> include/linux/netdevice.h | 2 +-
> net/core/dev.c | 16 +++++++++-------
> 3 files changed, 12 insertions(+), 10 deletions(-)
LGTM
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
* Re: [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode
2023-11-30 9:11 ` [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
@ 2023-11-30 10:36 ` Jesper Dangaard Brouer
2023-11-30 10:51 ` Lorenzo Bianconi
From: Jesper Dangaard Brouer @ 2023-11-30 10:36 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: davem, edumazet, kuba, pabeni, lorenzo.bianconi, bpf, toke,
willemdebruijn.kernel, jasowang, kernel-team, Yan Zhai
On 11/30/23 10:11, Lorenzo Bianconi wrote:
> Similar to native xdp, do not always linearize the skb in the
> netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> processed by the eBPF program. This allows adding multi-buffer support
> for xdp running in generic mode.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 119 insertions(+), 25 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 4df68d7f04a2..0d08e755bb7f 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> skb_headlen(skb) + mac_len, true);
> + if (skb_is_nonlinear(skb)) {
> + skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> + xdp_buff_set_frags_flag(xdp);
> + } else {
> + xdp_buff_clear_frags_flag(xdp);
> + }
>
> orig_data_end = xdp->data_end;
> orig_data = xdp->data;
> @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> skb->len += off; /* positive on grow, negative on shrink */
> }
>
> + /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> + * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> + */
> + if (xdp_buff_has_frags(xdp))
> + skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> + else
> + skb->data_len = 0;
> +
> /* check if XDP changed eth hdr such SKB needs update */
> eth = (struct ethhdr *)xdp->data;
> if ((orig_eth_type != eth->h_proto) ||
> @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> return act;
> }
>
> -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> - struct xdp_buff *xdp,
> - struct bpf_prog *xdp_prog)
> +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> + struct bpf_prog *prog)
I like that this is split out into a check function.
> {
> struct sk_buff *skb = *pskb;
> - u32 act = XDP_DROP;
> -
> - /* Reinjected packets coming from act_mirred or similar should
> - * not get XDP generic processing.
> - */
> - if (skb_is_redirected(skb))
> - return XDP_PASS;
(For other reviewers)
This reinjected check is moved further down.
> + int err;
>
> - /* XDP packets must be linear and must have sufficient headroom
> - * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> - * native XDP provides, thus we need to do it here as well.
> + /* XDP does not support fraglist so we need to linearize
> + * the skb.
> */
> - if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> - skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> + if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> int troom = skb->tail + skb->data_len - skb->end;
>
> /* In case we have to go down the path and also linearize,
> * then lets do the pskb_expand_head() work just once here.
> */
> - if (pskb_expand_head(skb,
> - hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> - troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> - goto do_drop;
> - if (skb_linearize(skb))
> - goto do_drop;
> + err = pskb_expand_head(skb,
> + hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> + troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> + if (err)
> + return err;
> +
> + err = skb_linearize(skb);
> + if (err)
> + return err;
> +
> + return 0;
> + }
> +
> + /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> + * bytes. This is the guarantee that also native XDP provides,
> + * thus we need to do it here as well.
> + */
> + if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (that isn't
cloned or shared) to be processed by generic XDP without any reallocation?
So the check would be: (skb_cloned(skb) || skb_shared(skb) || ...)
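i.e. something like (untested):

	if (skb_cloned(skb) || skb_shared(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		/* only fall back to the copy/reallocation path when the
		 * data cannot safely be written in place or the headroom
		 * is too small, independently of nr_frags
		 */
	}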
> + skb_headroom(skb) < XDP_PACKET_HEADROOM) {
[Headroom trick]
For layered devices the netstack could be the one that created the
SKB. If you noticed my veth patchset [4/4], when I detect an XDP-prog
attach, I'm increasing the net_device headroom (.ndo_set_rx_headroom)
such that the netstack will allocate enough headroom to satisfy
XDP_PACKET_HEADROOM.
[4/4]
https://lore.kernel.org/netdev/169272716651.1975370.10514711233878278884.stgit@firesoul/
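Roughly like this (untested sketch, made-up foo_* names; see the veth patch
above for the real thing):

#include <linux/netdevice.h>
#include <linux/filter.h>

/* .ndo_set_rx_headroom lets the stack/upper devices tell the driver how
 * much extra headroom to request; needed_headroom is then honoured (e.g.
 * via LL_RESERVED_SPACE()) when the netstack builds SKBs for this device
 */
static void foo_set_rx_headroom(struct net_device *dev, int new_hr)
{
	/* by convention a negative value resets to the default */
	if (new_hr < 0)
		new_hr = 0;

	dev->needed_headroom = new_hr;
}

/* and on XDP-prog attach the driver can simply request enough room to
 * avoid the reallocation done above
 */
static int foo_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	dev->needed_headroom = prog ? XDP_PACKET_HEADROOM : 0;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_set_rx_headroom	= foo_set_rx_headroom,
};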
> + u32 mac_len = skb->data - skb_mac_header(skb);
> + u32 size, len, max_head_size, off;
> + struct sk_buff *nskb;
> + int i, head_off;
> +
> + __skb_push(skb, mac_len);
> + max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
> + XDP_PACKET_HEADROOM);
> + if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
> + return -ENOMEM;
> +
> + size = min_t(u32, skb->len, max_head_size);
> + nskb = netdev_alloc_skb(skb->dev, size + XDP_PACKET_HEADROOM);
Would it be possible to use napi_alloc_skb() here?
napi_alloc_skb() is faster than netdev_alloc_skb(), but as the name
suggests it assumes this is called under NAPI protection/context. That
used to be the case for generic XDP, but the code got moved around to
support layered devices, so I'm not 100% sure this is always true (NAPI
context).
> + if (!nskb)
> + return -ENOMEM;
> +
> + skb_reserve(nskb, XDP_PACKET_HEADROOM);
> + skb_copy_header(nskb, skb);
> +
> + err = skb_copy_bits(skb, 0, nskb->data, size);
> + if (err) {
> + consume_skb(nskb);
> + return err;
> + }
> + skb_put(nskb, size);
> +
> + head_off = skb_headroom(nskb) - skb_headroom(skb);
> + skb_headers_offset_update(nskb, head_off);
> +
> + off = size;
> + len = skb->len - off;
> + for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
> + struct page *page;
> + void *frag;
> +
> + size = min_t(u32, len, PAGE_SIZE);
> + frag = netdev_alloc_frag(size);
Again the slower variant.
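The drop-in would be roughly (untested, and only safe if this path is
guaranteed to run in softirq context, same open question as above):

		frag = napi_alloc_frag(size);
		if (unlikely(!frag)) {
			consume_skb(nskb);
			return -ENOMEM;
		}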
> + if (!frag) {
> + consume_skb(nskb);
> + return -ENOMEM;
> + }
> +
> + page = virt_to_head_page(frag);
> + skb_add_rx_frag(nskb, i, page,
> + frag - page_address(page), size, size);
> + err = skb_copy_bits(skb, off, frag, size);
> + if (err) {
> + consume_skb(nskb);
> + return err;
> + }
> +
> + len -= size;
> + off += size;
> + }
> +
> + consume_skb(skb);
> + *pskb = nskb;
> + __skb_pull(nskb, mac_len);
> }
>
> - act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
> + return 0;
> +}
> +
> +static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> + struct xdp_buff *xdp,
> + struct bpf_prog *xdp_prog)
> +{
> + u32 act = XDP_DROP;
> +
> + /* Reinjected packets coming from act_mirred or similar should
> + * not get XDP generic processing.
> + */
> + if (skb_is_redirected(*pskb))
> + return XDP_PASS;
> +
> + if (netif_skb_check_for_generic_xdp(pskb, xdp_prog))
> + goto do_drop;
> +
> + act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
> switch (act) {
> case XDP_REDIRECT:
> case XDP_TX:
> case XDP_PASS:
> break;
> default:
> - bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
> + bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
> fallthrough;
> case XDP_ABORTED:
> - trace_xdp_exception(skb->dev, xdp_prog, act);
> + trace_xdp_exception((*pskb)->dev, xdp_prog, act);
> fallthrough;
> case XDP_DROP:
> do_drop:
> - kfree_skb(skb);
> + kfree_skb(*pskb);
> break;
> }
>
Overall I like the patch :-)
Are we missing more things to allow GRO packets getting processed by
generic XDP?
--Jesper
* Re: [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode
2023-11-30 10:36 ` Jesper Dangaard Brouer
@ 2023-11-30 10:51 ` Lorenzo Bianconi
2023-11-30 18:49 ` Stanislav Fomichev
From: Lorenzo Bianconi @ 2023-11-30 10:51 UTC (permalink / raw)
To: Jesper Dangaard Brouer
Cc: netdev, davem, edumazet, kuba, pabeni, lorenzo.bianconi, bpf,
toke, willemdebruijn.kernel, jasowang, kernel-team, Yan Zhai
>
>
> On 11/30/23 10:11, Lorenzo Bianconi wrote:
> > Similar to native xdp, do not always linearize the skb in the
> > netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> > processed by the eBPF program. This allows adding multi-buffer support
> > for xdp running in generic mode.
> >
> > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > ---
> > net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> > 1 file changed, 119 insertions(+), 25 deletions(-)
> >
> > diff --git a/net/core/dev.c b/net/core/dev.c
> > index 4df68d7f04a2..0d08e755bb7f 100644
> > --- a/net/core/dev.c
> > +++ b/net/core/dev.c
> > @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> > xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> > skb_headlen(skb) + mac_len, true);
> > + if (skb_is_nonlinear(skb)) {
> > + skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> > + xdp_buff_set_frags_flag(xdp);
> > + } else {
> > + xdp_buff_clear_frags_flag(xdp);
> > + }
> > orig_data_end = xdp->data_end;
> > orig_data = xdp->data;
> > @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > skb->len += off; /* positive on grow, negative on shrink */
> > }
> > + /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> > + * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> > + */
> > + if (xdp_buff_has_frags(xdp))
> > + skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> > + else
> > + skb->data_len = 0;
> > +
> > /* check if XDP changed eth hdr such SKB needs update */
> > eth = (struct ethhdr *)xdp->data;
> > if ((orig_eth_type != eth->h_proto) ||
> > @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > return act;
> > }
> > -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > - struct xdp_buff *xdp,
> > - struct bpf_prog *xdp_prog)
> > +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> > + struct bpf_prog *prog)
>
> I like that this is split out into a check function.
>
> > {
> > struct sk_buff *skb = *pskb;
> > - u32 act = XDP_DROP;
> > -
> > - /* Reinjected packets coming from act_mirred or similar should
> > - * not get XDP generic processing.
> > - */
> > - if (skb_is_redirected(skb))
> > - return XDP_PASS;
>
> (For other reviewers)
> This reinjected check is moved further down.
>
> > + int err;
> > - /* XDP packets must be linear and must have sufficient headroom
> > - * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> > - * native XDP provides, thus we need to do it here as well.
> > + /* XDP does not support fraglist so we need to linearize
> > + * the skb.
> > */
> > - if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> > - skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> > + if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> > int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> > int troom = skb->tail + skb->data_len - skb->end;
> > /* In case we have to go down the path and also linearize,
> > * then lets do the pskb_expand_head() work just once here.
> > */
> > - if (pskb_expand_head(skb,
> > - hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > - troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> > - goto do_drop;
> > - if (skb_linearize(skb))
> > - goto do_drop;
> > + err = pskb_expand_head(skb,
> > + hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > + troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> > + if (err)
> > + return err;
> > +
> > + err = skb_linearize(skb);
> > + if (err)
> > + return err;
> > +
> > + return 0;
> > + }
> > +
> > + /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> > + * bytes. This is the guarantee that also native XDP provides,
> > + * thus we need to do it here as well.
> > + */
> > + if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
>
> I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (that isn't
> cloned or shared) to be processed by generic XDP without any reallocation?
I do not think so; we discussed this with Jakub here [0]
[0] https://lore.kernel.org/netdev/20231128105145.7b39db7d@kernel.org/
>
> So the check would be: (skb_cloned(skb) || skb_shared(skb) || ...)
>
> > + skb_headroom(skb) < XDP_PACKET_HEADROOM) {
>
> [Headroom trick]
> For layered devices the netstack could be the one that created the
> SKB. If you noticed my veth patchset [4/4], when I detect an XDP-prog
> attach, I'm increasing the net_device headroom (.ndo_set_rx_headroom)
> such that the netstack will allocate enough headroom to satisfy
> XDP_PACKET_HEADROOM.
>
> [4/4] https://lore.kernel.org/netdev/169272716651.1975370.10514711233878278884.stgit@firesoul/
Ah nice, for some reason I missed this patch
>
>
>
> > + u32 mac_len = skb->data - skb_mac_header(skb);
> > + u32 size, len, max_head_size, off;
> > + struct sk_buff *nskb;
> > + int i, head_off;
> > +
> > + __skb_push(skb, mac_len);
> > + max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
> > + XDP_PACKET_HEADROOM);
> > + if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
> > + return -ENOMEM;
> > +
> > + size = min_t(u32, skb->len, max_head_size);
> > + nskb = netdev_alloc_skb(skb->dev, size + XDP_PACKET_HEADROOM);
>
>
> Would it be possible to use napi_alloc_skb() here?
>
> napi_alloc_skb() is faster than netdev_alloc_skb(), but as the name
> suggests it assumes this is called under NAPI protection/context. That
> used to be the case for generic XDP, but the code got moved around to
> support layered devices, so I'm not 100% sure this is always true (NAPI
> context).
Actually I was thinking about it and I was not 100% sure either (so I decided
to use the non-NAPI version). Any input on this?
Regards,
Lorenzo
>
>
> > + if (!nskb)
> > + return -ENOMEM;
> > +
> > + skb_reserve(nskb, XDP_PACKET_HEADROOM);
> > + skb_copy_header(nskb, skb);
> > +
> > + err = skb_copy_bits(skb, 0, nskb->data, size);
> > + if (err) {
> > + consume_skb(nskb);
> > + return err;
> > + }
> > + skb_put(nskb, size);
> > +
> > + head_off = skb_headroom(nskb) - skb_headroom(skb);
> > + skb_headers_offset_update(nskb, head_off);
> > +
> > + off = size;
> > + len = skb->len - off;
> > + for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
> > + struct page *page;
> > + void *frag;
> > +
> > + size = min_t(u32, len, PAGE_SIZE);
> > + frag = netdev_alloc_frag(size);
>
> Again the slower variant.
>
> > + if (!frag) {
> > + consume_skb(nskb);
> > + return -ENOMEM;
> > + }
> > +
> > + page = virt_to_head_page(frag);
> > + skb_add_rx_frag(nskb, i, page,
> > + frag - page_address(page), size, size);
> > + err = skb_copy_bits(skb, off, frag, size);
> > + if (err) {
> > + consume_skb(nskb);
> > + return err;
> > + }
> > +
> > + len -= size;
> > + off += size;
> > + }
> > +
> > + consume_skb(skb);
> > + *pskb = nskb;
> > + __skb_pull(nskb, mac_len);
> > }
> > - act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
> > + return 0;
> > +}
> > +
> > +static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > + struct xdp_buff *xdp,
> > + struct bpf_prog *xdp_prog)
> > +{
> > + u32 act = XDP_DROP;
> > +
> > + /* Reinjected packets coming from act_mirred or similar should
> > + * not get XDP generic processing.
> > + */
> > + if (skb_is_redirected(*pskb))
> > + return XDP_PASS;
> > +
> > + if (netif_skb_check_for_generic_xdp(pskb, xdp_prog))
> > + goto do_drop;
> > +
> > + act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
> > switch (act) {
> > case XDP_REDIRECT:
> > case XDP_TX:
> > case XDP_PASS:
> > break;
> > default:
> > - bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
> > + bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
> > fallthrough;
> > case XDP_ABORTED:
> > - trace_xdp_exception(skb->dev, xdp_prog, act);
> > + trace_xdp_exception((*pskb)->dev, xdp_prog, act);
> > fallthrough;
> > case XDP_DROP:
> > do_drop:
> > - kfree_skb(skb);
> > + kfree_skb(*pskb);
> > break;
> > }
>
> Overall I like the patch :-)
>
> Are we missing more things to allow GRO packets getting processed by generic
> XDP?
>
> --Jesper
* Re: [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode
2023-11-30 10:51 ` Lorenzo Bianconi
@ 2023-11-30 18:49 ` Stanislav Fomichev
2023-12-01 9:33 ` Lorenzo Bianconi
From: Stanislav Fomichev @ 2023-11-30 18:49 UTC (permalink / raw)
To: Lorenzo Bianconi
Cc: Jesper Dangaard Brouer, netdev, davem, edumazet, kuba, pabeni,
lorenzo.bianconi, bpf, toke, willemdebruijn.kernel, jasowang,
kernel-team, Yan Zhai
On 11/30, Lorenzo Bianconi wrote:
> >
> >
> > On 11/30/23 10:11, Lorenzo Bianconi wrote:
> > > Similar to native xdp, do not always linearize the skb in the
> > > netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> > > processed by the eBPF program. This allows adding multi-buffer support
> > > for xdp running in generic mode.
> > >
> > > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > > ---
> > > net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> > > 1 file changed, 119 insertions(+), 25 deletions(-)
> > >
> > > diff --git a/net/core/dev.c b/net/core/dev.c
> > > index 4df68d7f04a2..0d08e755bb7f 100644
> > > --- a/net/core/dev.c
> > > +++ b/net/core/dev.c
> > > @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> > > xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> > > skb_headlen(skb) + mac_len, true);
> > > + if (skb_is_nonlinear(skb)) {
> > > + skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> > > + xdp_buff_set_frags_flag(xdp);
> > > + } else {
> > > + xdp_buff_clear_frags_flag(xdp);
> > > + }
> > > orig_data_end = xdp->data_end;
> > > orig_data = xdp->data;
> > > @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > skb->len += off; /* positive on grow, negative on shrink */
> > > }
> > > + /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> > > + * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> > > + */
> > > + if (xdp_buff_has_frags(xdp))
> > > + skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> > > + else
> > > + skb->data_len = 0;
> > > +
> > > /* check if XDP changed eth hdr such SKB needs update */
> > > eth = (struct ethhdr *)xdp->data;
> > > if ((orig_eth_type != eth->h_proto) ||
> > > @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > return act;
> > > }
> > > -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > > - struct xdp_buff *xdp,
> > > - struct bpf_prog *xdp_prog)
> > > +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> > > + struct bpf_prog *prog)
> >
> > I like that this is split out into a check function.
> >
> > > {
> > > struct sk_buff *skb = *pskb;
> > > - u32 act = XDP_DROP;
> > > -
> > > - /* Reinjected packets coming from act_mirred or similar should
> > > - * not get XDP generic processing.
> > > - */
> > > - if (skb_is_redirected(skb))
> > > - return XDP_PASS;
> >
> > (For other reviewers)
> > This reinjected check is moved further down.
> >
> > > + int err;
> > > - /* XDP packets must be linear and must have sufficient headroom
> > > - * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> > > - * native XDP provides, thus we need to do it here as well.
> > > + /* XDP does not support fraglist so we need to linearize
> > > + * the skb.
> > > */
> > > - if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> > > - skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> > > + if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> > > int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> > > int troom = skb->tail + skb->data_len - skb->end;
> > > /* In case we have to go down the path and also linearize,
> > > * then lets do the pskb_expand_head() work just once here.
> > > */
> > > - if (pskb_expand_head(skb,
> > > - hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > - troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> > > - goto do_drop;
> > > - if (skb_linearize(skb))
> > > - goto do_drop;
> > > + err = pskb_expand_head(skb,
> > > + hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > + troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> > > + if (err)
> > > + return err;
> > > +
> > > + err = skb_linearize(skb);
> > > + if (err)
> > > + return err;
> > > +
> > > + return 0;
> > > + }
> > > +
> > > + /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> > > + * bytes. This is the guarantee that also native XDP provides,
> > > + * thus we need to do it here as well.
> > > + */
> > > + if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
> >
> > I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (that isn't
> > cloned or shared) to be processed by generic XDP without any reallocation?
>
> I do not think so; we discussed this with Jakub here [0]
>
> [0] https://lore.kernel.org/netdev/20231128105145.7b39db7d@kernel.org/
Can this be done as an optimization later on? If, from the bpf side,
the verifier can attest that the program is not calling
bpf_xdp_{load,store}_bytes on the frags for example.
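Something like this (untested) is what would have to be ruled out, since
bpf_xdp_load_bytes() may read past the linear area into the frags:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int reads_into_frags(struct xdp_md *ctx)
{
	__u8 buf[16];

	/* copies from the tail of the frame, possibly from a fragment */
	if (bpf_xdp_load_bytes(ctx, bpf_xdp_get_buff_len(ctx) - sizeof(buf),
			       buf, sizeof(buf)) < 0)
		return XDP_DROP;

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";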
* Re: [PATCH v2 net-next 2/2] xdp: add multi-buff support for xdp running in generic mode
2023-11-30 18:49 ` Stanislav Fomichev
@ 2023-12-01 9:33 ` Lorenzo Bianconi
From: Lorenzo Bianconi @ 2023-12-01 9:33 UTC (permalink / raw)
To: Stanislav Fomichev
Cc: Jesper Dangaard Brouer, netdev, davem, edumazet, kuba, pabeni,
lorenzo.bianconi, bpf, toke, willemdebruijn.kernel, jasowang,
kernel-team, Yan Zhai
> On 11/30, Lorenzo Bianconi wrote:
> > >
> > >
> > > On 11/30/23 10:11, Lorenzo Bianconi wrote:
> > > > Similar to native xdp, do not always linearize the skb in the
> > > > netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> > > > processed by the eBPF program. This allows adding multi-buffer support
> > > > for xdp running in generic mode.
> > > >
> > > > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > > > ---
> > > > net/core/dev.c | 144 ++++++++++++++++++++++++++++++++++++++++---------
> > > > 1 file changed, 119 insertions(+), 25 deletions(-)
> > > >
> > > > diff --git a/net/core/dev.c b/net/core/dev.c
> > > > index 4df68d7f04a2..0d08e755bb7f 100644
> > > > --- a/net/core/dev.c
> > > > +++ b/net/core/dev.c
> > > > @@ -4853,6 +4853,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > > xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
> > > > xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
> > > > skb_headlen(skb) + mac_len, true);
> > > > + if (skb_is_nonlinear(skb)) {
> > > > + skb_shinfo(skb)->xdp_frags_size = skb->data_len;
> > > > + xdp_buff_set_frags_flag(xdp);
> > > > + } else {
> > > > + xdp_buff_clear_frags_flag(xdp);
> > > > + }
> > > > orig_data_end = xdp->data_end;
> > > > orig_data = xdp->data;
> > > > @@ -4882,6 +4888,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > > skb->len += off; /* positive on grow, negative on shrink */
> > > > }
> > > > + /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
> > > > + * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
> > > > + */
> > > > + if (xdp_buff_has_frags(xdp))
> > > > + skb->data_len = skb_shinfo(skb)->xdp_frags_size;
> > > > + else
> > > > + skb->data_len = 0;
> > > > +
> > > > /* check if XDP changed eth hdr such SKB needs update */
> > > > eth = (struct ethhdr *)xdp->data;
> > > > if ((orig_eth_type != eth->h_proto) ||
> > > > @@ -4915,54 +4929,134 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
> > > > return act;
> > > > }
> > > > -static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
> > > > - struct xdp_buff *xdp,
> > > > - struct bpf_prog *xdp_prog)
> > > > +static int netif_skb_check_for_generic_xdp(struct sk_buff **pskb,
> > > > + struct bpf_prog *prog)
> > >
> > > I like that this is split out into a check function.
> > >
> > > > {
> > > > struct sk_buff *skb = *pskb;
> > > > - u32 act = XDP_DROP;
> > > > -
> > > > - /* Reinjected packets coming from act_mirred or similar should
> > > > - * not get XDP generic processing.
> > > > - */
> > > > - if (skb_is_redirected(skb))
> > > > - return XDP_PASS;
> > >
> > > (For other reviewers)
> > > This reinjected check is moved further down.
> > >
> > > > + int err;
> > > > - /* XDP packets must be linear and must have sufficient headroom
> > > > - * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
> > > > - * native XDP provides, thus we need to do it here as well.
> > > > + /* XDP does not support fraglist so we need to linearize
> > > > + * the skb.
> > > > */
> > > > - if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
> > > > - skb_headroom(skb) < XDP_PACKET_HEADROOM) {
> > > > + if (skb_has_frag_list(skb) || !prog->aux->xdp_has_frags) {
> > > > int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
> > > > int troom = skb->tail + skb->data_len - skb->end;
> > > > /* In case we have to go down the path and also linearize,
> > > > * then lets do the pskb_expand_head() work just once here.
> > > > */
> > > > - if (pskb_expand_head(skb,
> > > > - hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > > - troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
> > > > - goto do_drop;
> > > > - if (skb_linearize(skb))
> > > > - goto do_drop;
> > > > + err = pskb_expand_head(skb,
> > > > + hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
> > > > + troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
> > > > + if (err)
> > > > + return err;
> > > > +
> > > > + err = skb_linearize(skb);
> > > > + if (err)
> > > > + return err;
> > > > +
> > > > + return 0;
> > > > + }
> > > > +
> > > > + /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
> > > > + * bytes. This is the guarantee that also native XDP provides,
> > > > + * thus we need to do it here as well.
> > > > + */
> > > > + if (skb_cloned(skb) || skb_shinfo(skb)->nr_frags ||
> > >
> > > I thought we could allow an SKB with skb_shinfo(skb)->nr_frags (that isn't
> > > cloned or shared) to be processed by generic XDP without any reallocation?
> >
> > I do not think so; we discussed this with Jakub here [0]
> >
> > [0] https://lore.kernel.org/netdev/20231128105145.7b39db7d@kernel.org/
>
> Can this be done as an optimization later on? If, from the bpf side,
> the verifier can attest that the program is not calling
> bpf_xdp_{load,store}_bytes on the frags for example.
Yes, I think so. Moreover this would be useful for veth too.
Regards,
Lorenzo