* [PATCH v7 net-next 0/4] add multi-buff support for xdp running in generic mode
@ 2024-02-02 8:12 Lorenzo Bianconi
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
` (3 more replies)
0 siblings, 4 replies; 13+ messages in thread
From: Lorenzo Bianconi @ 2024-02-02 8:12 UTC (permalink / raw)
To: netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Introduce multi-buffer support for xdp running in generic mode, without always
linearizing the skb in the netif_receive_generic_xdp routine.
Introduce generic percpu page_pools allocator.
Changes since v6:
- remove patch 4/5 'net: page_pool: make stats available just for global pools'
- rename netif_skb_segment_for_xdp() to
skb_cow_data_for_xdp()/skb_pp_cow_data()
- rename net_page_pool_alloc() to net_page_pool_create()
- rename page_pool percpu pointer to system_page_pool
- set percpu page_pool memory size
Changes since v5:
- move percpu page_pool pointer out of softnet_data in a dedicated variable
- make page_pool stats available just for global pools
- rely on netif_skb_segment_for_xdp utility routine in veth driver
Changes since v4:
- fix compilation error if page_pools are not enabled
Changes since v3:
- introduce page_pool in softnet_data structure
- rely on page_pools for xdp_generic code
Changes since v2:
- rely on napi_alloc_frag() and napi_build_skb() to build the new skb
Changes since v1:
- explicitly keep the skb segmented in netif_skb_check_for_generic_xdp() and
do not rely on pskb_expand_head()
Lorenzo Bianconi (4):
net: add generic percpu page_pool allocator
xdp: rely on skb pointer reference in do_xdp_generic and
netif_receive_generic_xdp
xdp: add multi-buff support for xdp running in generic mode
veth: rely on skb_cow_data_for_xdp utility routine
drivers/net/tun.c | 4 +-
drivers/net/veth.c | 79 ++------------------
include/linux/netdevice.h | 2 +-
include/linux/skbuff.h | 2 +
include/net/page_pool/types.h | 3 +
net/core/dev.c | 131 +++++++++++++++++++++++++++-------
net/core/page_pool.c | 23 ++++--
net/core/skbuff.c | 96 ++++++++++++++++++++++++-
8 files changed, 231 insertions(+), 109 deletions(-)
--
2.43.0
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator
2024-02-02 8:12 [PATCH v7 net-next 0/4] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
@ 2024-02-02 8:12 ` Lorenzo Bianconi
2024-02-02 8:59 ` Jesper Dangaard Brouer
` (2 more replies)
2024-02-02 8:12 ` [PATCH v7 net-next 2/4] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
` (2 subsequent siblings)
3 siblings, 3 replies; 13+ messages in thread
From: Lorenzo Bianconi @ 2024-02-02 8:12 UTC (permalink / raw)
To: netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Introduce generic percpu page_pools allocator.
Moreover add page_pool_create_percpu() and cpuid field in page_pool struct
in order to recycle the page in the page_pool "hot" cache if
napi_pp_put_page() is running on the same cpu.
This is a preliminary patch to add xdp multi-buff support for xdp running
in generic mode.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
include/net/page_pool/types.h | 3 +++
net/core/dev.c | 45 +++++++++++++++++++++++++++++++++++
net/core/page_pool.c | 23 ++++++++++++++----
net/core/skbuff.c | 5 ++--
4 files changed, 70 insertions(+), 6 deletions(-)
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 76481c465375..3828396ae60c 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -128,6 +128,7 @@ struct page_pool_stats {
struct page_pool {
struct page_pool_params_fast p;
+ int cpuid;
bool has_init_callback;
long frag_users;
@@ -203,6 +204,8 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
+struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
+ int cpuid);
struct xdp_mem_info;
diff --git a/net/core/dev.c b/net/core/dev.c
index b53b9c94de40..5a100360389f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,6 +153,8 @@
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
+#include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
#include "dev.h"
#include "net-sysfs.h"
@@ -450,6 +452,12 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
+/* Page_pool has a lockless array/stack to alloc/recycle pages.
+ * PP consumers must pay attention to run APIs in the appropriate context
+ * (e.g. NAPI context).
+ */
+static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
+
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
@@ -11691,6 +11699,27 @@ static void __init net_dev_struct_check(void)
*
*/
+/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
+#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
+
+static int net_page_pool_create(int cpuid)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ struct page_pool_params page_pool_params = {
+ .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
+ .nid = NUMA_NO_NODE,
+ };
+ struct page_pool *pp_ptr;
+
+ pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
+ if (IS_ERR(pp_ptr))
+ return -ENOMEM;
+
+ per_cpu(system_page_pool, cpuid) = pp_ptr;
+#endif
+ return 0;
+}
+
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11743,6 +11772,9 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+
+ if (net_page_pool_create(i))
+ goto out;
}
dev_boot_phase = 0;
@@ -11770,6 +11802,19 @@ static int __init net_dev_init(void)
WARN_ON(rc < 0);
rc = 0;
out:
+ if (rc < 0) {
+ for_each_possible_cpu(i) {
+ struct page_pool *pp_ptr;
+
+ pp_ptr = per_cpu_ptr(system_page_pool, i);
+ if (!pp_ptr)
+ continue;
+
+ page_pool_destroy(pp_ptr);
+ per_cpu(system_page_pool, i) = NULL;
+ }
+ }
+
return rc;
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4933762e5a6b..89c835fcf094 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -171,13 +171,16 @@ static void page_pool_producer_unlock(struct page_pool *pool,
}
static int page_pool_init(struct page_pool *pool,
- const struct page_pool_params *params)
+ const struct page_pool_params *params,
+ int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
memcpy(&pool->p, ¶ms->fast, sizeof(pool->p));
memcpy(&pool->slow, ¶ms->slow, sizeof(pool->slow));
+ pool->cpuid = cpuid;
+
/* Validate only known flags were used */
if (pool->p.flags & ~(PP_FLAG_ALL))
return -EINVAL;
@@ -253,10 +256,12 @@ static void page_pool_uninit(struct page_pool *pool)
}
/**
- * page_pool_create() - create a page pool.
+ * page_pool_create_percpu() - create a page pool for a given cpu.
* @params: parameters, see struct page_pool_params
+ * @cpuid: cpu identifier
*/
-struct page_pool *page_pool_create(const struct page_pool_params *params)
+struct page_pool *
+page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
struct page_pool *pool;
int err;
@@ -265,7 +270,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
if (!pool)
return ERR_PTR(-ENOMEM);
- err = page_pool_init(pool, params);
+ err = page_pool_init(pool, params, cpuid);
if (err < 0)
goto err_free;
@@ -282,6 +287,16 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
kfree(pool);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(page_pool_create_percpu);
+
+/**
+ * page_pool_create() - create a page pool
+ * @params: parameters, see struct page_pool_params
+ */
+struct page_pool *page_pool_create(const struct page_pool_params *params)
+{
+ return page_pool_create_percpu(params, -1);
+}
EXPORT_SYMBOL(page_pool_create);
static void page_pool_return_page(struct page_pool *pool, struct page *page);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edbbef563d4d..9e5eb47b4025 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -923,9 +923,10 @@ bool napi_pp_put_page(struct page *page, bool napi_safe)
*/
if (napi_safe || in_softirq()) {
const struct napi_struct *napi = READ_ONCE(pp->p.napi);
+ unsigned int cpuid = smp_processor_id();
- allow_direct = napi &&
- READ_ONCE(napi->list_owner) == smp_processor_id();
+ allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid;
+ allow_direct |= (pp->cpuid == cpuid);
}
/* Driver set this to memory recycling info. Reset it on recycle.
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v7 net-next 2/4] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp
2024-02-02 8:12 [PATCH v7 net-next 0/4] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
@ 2024-02-02 8:12 ` Lorenzo Bianconi
2024-02-02 11:39 ` Toke Høiland-Jørgensen
2024-02-02 8:12 ` [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2024-02-02 8:12 ` [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine Lorenzo Bianconi
3 siblings, 1 reply; 13+ messages in thread
From: Lorenzo Bianconi @ 2024-02-02 8:12 UTC (permalink / raw)
To: netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Rely on skb pointer reference instead of the skb pointer in do_xdp_generic
and netif_receive_generic_xdp routine signatures.
This is a preliminary patch to add multi-buff support for xdp running in
generic mode where we will need to reallocate the skb to avoid
linearization and we will need to make it visible to do_xdp_generic()
caller.
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/tun.c | 4 ++--
include/linux/netdevice.h | 2 +-
net/core/dev.c | 16 +++++++++-------
3 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e335ece47dec..869df474c215 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1926,7 +1926,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
@@ -2516,7 +2516,7 @@ static int tun_xdp_one(struct tun_struct *tun,
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
ret = 0;
goto out;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 118c40258d07..7eee99a58200 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3958,7 +3958,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 5a100360389f..8076e3cc8df0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4936,10 +4936,11 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
+ struct sk_buff *skb = *pskb;
u32 act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
@@ -5020,24 +5021,24 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
- act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
- err = xdp_do_generic_redirect(skb->dev, skb,
+ err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
- generic_xdp_tx(skb, xdp_prog);
+ generic_xdp_tx(*pskb, xdp_prog);
break;
}
return XDP_DROP;
@@ -5045,7 +5046,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
return XDP_PASS;
out_redir:
- kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
@@ -5368,7 +5369,8 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret2;
migrate_disable();
- ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
+ &skb);
migrate_enable();
if (ret2 != XDP_PASS) {
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode
2024-02-02 8:12 [PATCH v7 net-next 0/4] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
2024-02-02 8:12 ` [PATCH v7 net-next 2/4] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
@ 2024-02-02 8:12 ` Lorenzo Bianconi
2024-02-02 11:42 ` Toke Høiland-Jørgensen
2024-02-02 17:42 ` Jesper Dangaard Brouer
2024-02-02 8:12 ` [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine Lorenzo Bianconi
3 siblings, 2 replies; 13+ messages in thread
From: Lorenzo Bianconi @ 2024-02-02 8:12 UTC (permalink / raw)
To: netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Similar to native xdp, do not always linearize the skb in
netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
processed by the eBPF program. This allows adding multi-buffer support
for xdp running in generic mode.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
include/linux/skbuff.h | 2 +
net/core/dev.c | 70 +++++++++++++++++++++++---------
net/core/skbuff.c | 91 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 144 insertions(+), 19 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2dde34c29203..def3d8689c3d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3446,6 +3446,8 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog);
bool napi_pp_put_page(struct page *page, bool napi_safe);
static inline void
diff --git a/net/core/dev.c b/net/core/dev.c
index 8076e3cc8df0..d64f747e3583 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4874,6 +4874,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
orig_data_end = xdp->data_end;
orig_data = xdp->data;
@@ -4903,6 +4909,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
skb->len += off; /* positive on grow, negative on shrink */
}
+ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
/* check if XDP changed eth hdr such SKB needs update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
@@ -4936,12 +4950,35 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
+static int
+netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
+{
+ struct sk_buff *skb = *pskb;
+ int err, hroom, troom;
+
+ if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
+ return 0;
+
+ /* In case we have to go down the path and also linearize,
+ * then lets do the pskb_expand_head() work just once here.
+ */
+ hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+ troom = skb->tail + skb->data_len - skb->end;
+ err = pskb_expand_head(skb,
+ hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+ troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+ if (err)
+ return err;
+
+ return skb_linearize(skb);
+}
+
static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct sk_buff *skb = *pskb;
- u32 act = XDP_DROP;
+ u32 mac_len, act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
@@ -4949,41 +4986,36 @@ static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
if (skb_is_redirected(skb))
return XDP_PASS;
- /* XDP packets must be linear and must have sufficient headroom
- * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
- * native XDP provides, thus we need to do it here as well.
+ /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+ * bytes. This is the guarantee that also native XDP provides,
+ * thus we need to do it here as well.
*/
+ mac_len = skb->data - skb_mac_header(skb);
+ __skb_push(skb, mac_len);
+
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
- int troom = skb->tail + skb->data_len - skb->end;
-
- /* In case we have to go down the path and also linearize,
- * then lets do the pskb_expand_head() work just once here.
- */
- if (pskb_expand_head(skb,
- hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
- troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
- goto do_drop;
- if (skb_linearize(skb))
+ if (netif_skb_check_for_xdp(pskb, xdp_prog))
goto do_drop;
}
- act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+ __skb_pull(*pskb, mac_len);
+
+ act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+ bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(skb->dev, xdp_prog, act);
+ trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
- kfree_skb(skb);
+ kfree_skb(*pskb);
break;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9e5eb47b4025..bdb94749f05d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -895,6 +895,97 @@ static bool is_pp_page(struct page *page)
return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
}
+static int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ u32 size, truesize, len, max_head_size, off;
+ struct sk_buff *skb = *pskb, *nskb;
+ int err, i, head_off;
+ void *data;
+
+ /* XDP does not support fraglist so we need to linearize
+ * the skb.
+ */
+ if (skb_has_frag_list(skb))
+ return -EOPNOTSUPP;
+
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
+ if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+ return -ENOMEM;
+
+ size = min_t(u32, skb->len, max_head_size);
+ truesize = SKB_HEAD_ALIGN(size) + headroom;
+ data = page_pool_dev_alloc_va(pool, &truesize);
+ if (!data)
+ return -ENOMEM;
+
+ nskb = napi_build_skb(data, truesize);
+ if (!nskb) {
+ page_pool_free_va(pool, data, true);
+ return -ENOMEM;
+ }
+
+ skb_reserve(nskb, headroom);
+ skb_copy_header(nskb, skb);
+ skb_mark_for_recycle(nskb);
+
+ err = skb_copy_bits(skb, 0, nskb->data, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+ skb_put(nskb, size);
+
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+
+ off = size;
+ len = skb->len - off;
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ struct page *page;
+ u32 page_off;
+
+ size = min_t(u32, len, PAGE_SIZE);
+ truesize = size;
+
+ page = page_pool_dev_alloc(pool, &page_off, &truesize);
+ if (!data) {
+ consume_skb(nskb);
+ return -ENOMEM;
+ }
+
+ skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
+ err = skb_copy_bits(skb, off, page_address(page) + page_off,
+ size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ len -= size;
+ off += size;
+ }
+
+ consume_skb(skb);
+ *pskb = nskb;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog)
+{
+ if (!prog->aux->xdp_has_frags)
+ return -EINVAL;
+
+ return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
+}
+EXPORT_SYMBOL(skb_cow_data_for_xdp);
+
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page, bool napi_safe)
{
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine
2024-02-02 8:12 [PATCH v7 net-next 0/4] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
` (2 preceding siblings ...)
2024-02-02 8:12 ` [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
@ 2024-02-02 8:12 ` Lorenzo Bianconi
2024-02-02 11:43 ` Toke Høiland-Jørgensen
2024-02-02 17:39 ` Jesper Dangaard Brouer
3 siblings, 2 replies; 13+ messages in thread
From: Lorenzo Bianconi @ 2024-02-02 8:12 UTC (permalink / raw)
To: netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Rely on skb_cow_data_for_xdp utility routine and remove duplicated
code.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/veth.c | 79 +++-------------------------------------------
1 file changed, 5 insertions(+), 74 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..a7a541c1a374 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -721,7 +721,8 @@ static void veth_xdp_get(struct xdp_buff *xdp)
static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
struct xdp_buff *xdp,
- struct sk_buff **pskb)
+ struct sk_buff **pskb,
+ struct bpf_prog *prog)
{
struct sk_buff *skb = *pskb;
u32 frame_sz;
@@ -729,80 +730,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off, truesize, page_offset;
- struct sk_buff *nskb;
- struct page *page;
- int i, head_off;
- void *va;
-
- /* We need a private copy of the skb and data buffers since
- * the ebpf program can modify it. We segment the original skb
- * into order-0 pages without linearize it.
- *
- * Make sure we have enough space for linear and paged area
- */
- max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
- VETH_XDP_HEADROOM);
- if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
- goto drop;
-
- size = min_t(u32, skb->len, max_head_size);
- truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
- /* Allocate skb head */
- va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
- if (!va)
- goto drop;
-
- nskb = napi_build_skb(va, truesize);
- if (!nskb) {
- page_pool_free_va(rq->page_pool, va, true);
+ if (skb_cow_data_for_xdp(rq->page_pool, pskb, prog))
goto drop;
- }
-
- skb_reserve(nskb, VETH_XDP_HEADROOM);
- skb_copy_header(nskb, skb);
- skb_mark_for_recycle(nskb);
-
- if (skb_copy_bits(skb, 0, nskb->data, size)) {
- consume_skb(nskb);
- goto drop;
- }
- skb_put(nskb, size);
- head_off = skb_headroom(nskb) - skb_headroom(skb);
- skb_headers_offset_update(nskb, head_off);
-
- /* Allocate paged area of new skb */
- off = size;
- len = skb->len - off;
-
- for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- size = min_t(u32, len, PAGE_SIZE);
- truesize = size;
-
- page = page_pool_dev_alloc(rq->page_pool, &page_offset,
- &truesize);
- if (!page) {
- consume_skb(nskb);
- goto drop;
- }
-
- skb_add_rx_frag(nskb, i, page, page_offset, size,
- truesize);
- if (skb_copy_bits(skb, off,
- page_address(page) + page_offset,
- size)) {
- consume_skb(nskb);
- goto drop;
- }
-
- len -= size;
- off += size;
- }
-
- consume_skb(skb);
- skb = nskb;
+ skb = *pskb;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -850,7 +781,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
}
__skb_push(skb, skb->data - skb_mac_header(skb));
- if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
+ if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb, xdp_prog))
goto drop;
vxbuf.skb = skb;
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
@ 2024-02-02 8:59 ` Jesper Dangaard Brouer
2024-02-02 11:38 ` Toke Høiland-Jørgensen
2024-02-03 14:52 ` kernel test robot
2 siblings, 0 replies; 13+ messages in thread
From: Jesper Dangaard Brouer @ 2024-02-02 8:59 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, ilias.apalodimas,
linyunsheng
On 02/02/2024 09.12, Lorenzo Bianconi wrote:
> Introduce generic percpu page_pools allocator.
> Moreover add page_pool_create_percpu() and cpuid field in page_pool struct
> in order to recycle the page in the page_pool "hot" cache if
> napi_pp_put_page() is running on the same cpu.
> This is a preliminary patch to add xdp multi-buff support for xdp running
> in generic mode.
>
> Signed-off-by: Lorenzo Bianconi<lorenzo@kernel.org>
> ---
> include/net/page_pool/types.h | 3 +++
> net/core/dev.c | 45 +++++++++++++++++++++++++++++++++++
> net/core/page_pool.c | 23 ++++++++++++++----
> net/core/skbuff.c | 5 ++--
> 4 files changed, 70 insertions(+), 6 deletions(-)
>
> diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
> index 76481c465375..3828396ae60c 100644
> --- a/include/net/page_pool/types.h
> +++ b/include/net/page_pool/types.h
> @@ -128,6 +128,7 @@ struct page_pool_stats {
> struct page_pool {
> struct page_pool_params_fast p;
>
> + int cpuid;
> bool has_init_callback;
>
> long frag_users;
> @@ -203,6 +204,8 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
> struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
> unsigned int size, gfp_t gfp);
> struct page_pool *page_pool_create(const struct page_pool_params *params);
> +struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
> + int cpuid);
>
> struct xdp_mem_info;
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index b53b9c94de40..5a100360389f 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -153,6 +153,8 @@
> #include <linux/prandom.h>
> #include <linux/once_lite.h>
> #include <net/netdev_rx_queue.h>
> +#include <net/page_pool/types.h>
> +#include <net/page_pool/helpers.h>
>
> #include "dev.h"
> #include "net-sysfs.h"
> @@ -450,6 +452,12 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
> DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
> EXPORT_PER_CPU_SYMBOL(softnet_data);
>
> +/* Page_pool has a lockless array/stack to alloc/recycle pages.
> + * PP consumers must pay attention to run APIs in the appropriate context
> + * (e.g. NAPI context).
> + */
> +static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
Thanks for adding comment.
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
2024-02-02 8:59 ` Jesper Dangaard Brouer
@ 2024-02-02 11:38 ` Toke Høiland-Jørgensen
2024-02-03 14:52 ` kernel test robot
2 siblings, 0 replies; 13+ messages in thread
From: Toke Høiland-Jørgensen @ 2024-02-02 11:38 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Lorenzo Bianconi <lorenzo@kernel.org> writes:
> Introduce generic percpu page_pools allocator.
> Moreover add page_pool_create_percpu() and cpuid field in page_pool struct
> in order to recycle the page in the page_pool "hot" cache if
> napi_pp_put_page() is running on the same cpu.
> This is a preliminary patch to add xdp multi-buff support for xdp running
> in generic mode.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 2/4] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp
2024-02-02 8:12 ` [PATCH v7 net-next 2/4] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
@ 2024-02-02 11:39 ` Toke Høiland-Jørgensen
0 siblings, 0 replies; 13+ messages in thread
From: Toke Høiland-Jørgensen @ 2024-02-02 11:39 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Lorenzo Bianconi <lorenzo@kernel.org> writes:
> Rely on skb pointer reference instead of the skb pointer in do_xdp_generic
> and netif_receive_generic_xdp routine signatures.
> This is a preliminary patch to add multi-buff support for xdp running in
> generic mode where we will need to reallocate the skb to avoid
> linearization and we will need to make it visible to do_xdp_generic()
> caller.
>
> Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode
2024-02-02 8:12 ` [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
@ 2024-02-02 11:42 ` Toke Høiland-Jørgensen
2024-02-02 17:42 ` Jesper Dangaard Brouer
1 sibling, 0 replies; 13+ messages in thread
From: Toke Høiland-Jørgensen @ 2024-02-02 11:42 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Lorenzo Bianconi <lorenzo@kernel.org> writes:
> Similar to native xdp, do not always linearize the skb in
> netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> processed by the eBPF program. This allows adding multi-buffer support
> for xdp running in generic mode.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine
2024-02-02 8:12 ` [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine Lorenzo Bianconi
@ 2024-02-02 11:43 ` Toke Høiland-Jørgensen
2024-02-02 17:39 ` Jesper Dangaard Brouer
1 sibling, 0 replies; 13+ messages in thread
From: Toke Høiland-Jørgensen @ 2024-02-02 11:43 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf,
willemdebruijn.kernel, jasowang, sdf, hawk, ilias.apalodimas,
linyunsheng
Lorenzo Bianconi <lorenzo@kernel.org> writes:
> Rely on skb_cow_data_for_xdp utility routine and remove duplicated
> code.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Neat that we can finally consolidate this duplication! :)
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine
2024-02-02 8:12 ` [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine Lorenzo Bianconi
2024-02-02 11:43 ` Toke Høiland-Jørgensen
@ 2024-02-02 17:39 ` Jesper Dangaard Brouer
1 sibling, 0 replies; 13+ messages in thread
From: Jesper Dangaard Brouer @ 2024-02-02 17:39 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, ilias.apalodimas,
linyunsheng
On 02/02/2024 09.12, Lorenzo Bianconi wrote:
> Rely on skb_cow_data_for_xdp utility routine and remove duplicated
> code.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> drivers/net/veth.c | 79 +++-------------------------------------------
> 1 file changed, 5 insertions(+), 74 deletions(-)
Nice to see removal of this duplicated code!
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode
2024-02-02 8:12 ` [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2024-02-02 11:42 ` Toke Høiland-Jørgensen
@ 2024-02-02 17:42 ` Jesper Dangaard Brouer
1 sibling, 0 replies; 13+ messages in thread
From: Jesper Dangaard Brouer @ 2024-02-02 17:42 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: lorenzo.bianconi, davem, kuba, edumazet, pabeni, bpf, toke,
willemdebruijn.kernel, jasowang, sdf, ilias.apalodimas,
linyunsheng
On 02/02/2024 09.12, Lorenzo Bianconi wrote:
> Similar to native xdp, do not always linearize the skb in
> netif_receive_generic_xdp routine but create a non-linear xdp_buff to be
> processed by the eBPF program. This allows adding multi-buffer support
> for xdp running in generic mode.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> include/linux/skbuff.h | 2 +
> net/core/dev.c | 70 +++++++++++++++++++++++---------
> net/core/skbuff.c | 91 ++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 144 insertions(+), 19 deletions(-)
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
2024-02-02 8:59 ` Jesper Dangaard Brouer
2024-02-02 11:38 ` Toke Høiland-Jørgensen
@ 2024-02-03 14:52 ` kernel test robot
2 siblings, 0 replies; 13+ messages in thread
From: kernel test robot @ 2024-02-03 14:52 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: oe-kbuild-all, lorenzo.bianconi, davem, kuba, edumazet, pabeni,
bpf, toke, willemdebruijn.kernel, jasowang, sdf, hawk,
ilias.apalodimas, linyunsheng
Hi Lorenzo,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/Lorenzo-Bianconi/net-add-generic-percpu-page_pool-allocator/20240202-162516
base: net-next/main
patch link: https://lore.kernel.org/r/1d34b717f8f842b9c3e9f70f0e8ffd245a5d2460.1706861261.git.lorenzo%40kernel.org
patch subject: [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator
config: x86_64-randconfig-121-20240203 (https://download.01.org/0day-ci/archive/20240203/202402032223.Imbb9JgJ-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240203/202402032223.Imbb9JgJ-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202402032223.Imbb9JgJ-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
net/core/dev.c:3364:23: sparse: sparse: incorrect type in argument 4 (different base types) @@ expected restricted __wsum [usertype] csum @@ got unsigned int @@
net/core/dev.c:3364:23: sparse: expected restricted __wsum [usertype] csum
net/core/dev.c:3364:23: sparse: got unsigned int
net/core/dev.c:3364:23: sparse: sparse: cast from restricted __wsum
>> net/core/dev.c:11809:34: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected void const [noderef] __percpu *__vpp_verify @@ got struct page_pool * @@
net/core/dev.c:11809:34: sparse: expected void const [noderef] __percpu *__vpp_verify
net/core/dev.c:11809:34: sparse: got struct page_pool *
net/core/dev.c: note: in included file (through include/linux/smp.h, include/linux/lockdep.h, include/linux/spinlock.h, ...):
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
net/core/dev.c:205:9: sparse: sparse: context imbalance in 'unlist_netdevice' - different lock contexts for basic block
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
net/core/dev.c:3804:17: sparse: sparse: context imbalance in '__dev_queue_xmit' - different lock contexts for basic block
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
net/core/dev.c:5184:17: sparse: sparse: context imbalance in 'net_tx_action' - different lock contexts for basic block
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
>> net/core/dev.c:11809:34: sparse: sparse: dereference of noderef expression
vim +11809 net/core/dev.c
11722
11723 /*
11724 * This is called single threaded during boot, so no need
11725 * to take the rtnl semaphore.
11726 */
11727 static int __init net_dev_init(void)
11728 {
11729 int i, rc = -ENOMEM;
11730
11731 BUG_ON(!dev_boot_phase);
11732
11733 net_dev_struct_check();
11734
11735 if (dev_proc_init())
11736 goto out;
11737
11738 if (netdev_kobject_init())
11739 goto out;
11740
11741 INIT_LIST_HEAD(&ptype_all);
11742 for (i = 0; i < PTYPE_HASH_SIZE; i++)
11743 INIT_LIST_HEAD(&ptype_base[i]);
11744
11745 if (register_pernet_subsys(&netdev_net_ops))
11746 goto out;
11747
11748 /*
11749 * Initialise the packet receive queues.
11750 */
11751
11752 for_each_possible_cpu(i) {
11753 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11754 struct softnet_data *sd = &per_cpu(softnet_data, i);
11755
11756 INIT_WORK(flush, flush_backlog);
11757
11758 skb_queue_head_init(&sd->input_pkt_queue);
11759 skb_queue_head_init(&sd->process_queue);
11760 #ifdef CONFIG_XFRM_OFFLOAD
11761 skb_queue_head_init(&sd->xfrm_backlog);
11762 #endif
11763 INIT_LIST_HEAD(&sd->poll_list);
11764 sd->output_queue_tailp = &sd->output_queue;
11765 #ifdef CONFIG_RPS
11766 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11767 sd->cpu = i;
11768 #endif
11769 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11770 spin_lock_init(&sd->defer_lock);
11771
11772 init_gro_hash(&sd->backlog);
11773 sd->backlog.poll = process_backlog;
11774 sd->backlog.weight = weight_p;
11775
11776 if (net_page_pool_create(i))
11777 goto out;
11778 }
11779
11780 dev_boot_phase = 0;
11781
11782 /* The loopback device is special if any other network devices
11783 * is present in a network namespace the loopback device must
11784 * be present. Since we now dynamically allocate and free the
11785 * loopback device ensure this invariant is maintained by
11786 * keeping the loopback device as the first device on the
11787 * list of network devices. Ensuring the loopback devices
11788 * is the first device that appears and the last network device
11789 * that disappears.
11790 */
11791 if (register_pernet_device(&loopback_net_ops))
11792 goto out;
11793
11794 if (register_pernet_device(&default_device_ops))
11795 goto out;
11796
11797 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11798 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11799
11800 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11801 NULL, dev_cpu_dead);
11802 WARN_ON(rc < 0);
11803 rc = 0;
11804 out:
11805 if (rc < 0) {
11806 for_each_possible_cpu(i) {
11807 struct page_pool *pp_ptr;
11808
11809 pp_ptr = per_cpu_ptr(system_page_pool, i);
11810 if (!pp_ptr)
11811 continue;
11812
11813 page_pool_destroy(pp_ptr);
11814 per_cpu(system_page_pool, i) = NULL;
11815 }
11816 }
11817
11818 return rc;
11819 }
11820
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2024-02-03 14:53 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-02 8:12 [PATCH v7 net-next 0/4] add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2024-02-02 8:12 ` [PATCH v7 net-next 1/4] net: add generic percpu page_pool allocator Lorenzo Bianconi
2024-02-02 8:59 ` Jesper Dangaard Brouer
2024-02-02 11:38 ` Toke Høiland-Jørgensen
2024-02-03 14:52 ` kernel test robot
2024-02-02 8:12 ` [PATCH v7 net-next 2/4] xdp: rely on skb pointer reference in do_xdp_generic and netif_receive_generic_xdp Lorenzo Bianconi
2024-02-02 11:39 ` Toke Høiland-Jørgensen
2024-02-02 8:12 ` [PATCH v7 net-next 3/4] xdp: add multi-buff support for xdp running in generic mode Lorenzo Bianconi
2024-02-02 11:42 ` Toke Høiland-Jørgensen
2024-02-02 17:42 ` Jesper Dangaard Brouer
2024-02-02 8:12 ` [PATCH v7 net-next 4/4] veth: rely on skb_cow_data_for_xdp utility routine Lorenzo Bianconi
2024-02-02 11:43 ` Toke Høiland-Jørgensen
2024-02-02 17:39 ` Jesper Dangaard Brouer
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).