public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next v2 1/2] net: page_pool: support dumping pps of a specific ifindex via Netlink
@ 2026-05-04 21:43 Jakub Kicinski
  2026-05-04 21:43 ` [PATCH net-next v2 2/2] selftests: net: add tests for filtered dumps of page pool Jakub Kicinski
  0 siblings, 1 reply; 2+ messages in thread
From: Jakub Kicinski @ 2026-05-04 21:43 UTC (permalink / raw)
  To: davem
  Cc: netdev, edumazet, pabeni, andrew+netdev, horms, donald.hunter,
	shuah, matttbe, hawk, linux-kselftest, maxime.chevallier,
	Jakub Kicinski

NIPA tries to make sure that HW tests don't modify system state.
It saves the state of page pools, too. Now that I write this commit
message I realize that this is impractical since page pool IDs and
state will get legitimately changed by the tests. But I already
spent a couple of hours implementing the filtering, so...

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
v2:
 - adjust ynltool to pass empty req pointer now
v1: https://lore.kernel.org/20260319035649.2396137-1-kuba@kernel.org
---
 Documentation/netlink/specs/netdev.yaml |  6 ++++
 net/core/netdev-genl-gen.c              | 30 ++++++++++++----
 net/core/page_pool_user.c               | 47 +++++++++++++++++++++++--
 tools/net/ynl/ynltool/page-pool.c       |  6 ++--
 4 files changed, 78 insertions(+), 11 deletions(-)

diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index b93beb247a11..a1f4c5a561e9 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -649,6 +649,9 @@ doc: >-
             - dmabuf
             - io-uring
       dump:
+        request:
+          attributes:
+            - ifindex
         reply: *pp-reply
       config-cond: page-pool
     -
@@ -692,6 +695,9 @@ doc: >-
             - recycle-ring-full
             - recycle-released-refcnt
       dump:
+        request:
+          attributes:
+            - info
         reply: *pp-stats-reply
       config-cond: page-pool-stats
     -
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index 81aecb5d3bc5..f605ed1e9872 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -54,6 +54,11 @@ static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1
 static const struct nla_policy netdev_page_pool_get_nl_policy[NETDEV_A_PAGE_POOL_ID + 1] = {
 	[NETDEV_A_PAGE_POOL_ID] = NLA_POLICY_FULL_RANGE(NLA_UINT, &netdev_a_page_pool_id_range),
 };
+
+/* NETDEV_CMD_PAGE_POOL_GET - dump */
+static const struct nla_policy netdev_page_pool_get_dump_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1] = {
+	[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
+};
 #endif /* CONFIG_PAGE_POOL */
 
 /* NETDEV_CMD_PAGE_POOL_STATS_GET - do */
@@ -61,6 +66,15 @@ static const struct nla_policy netdev_page_pool_get_nl_policy[NETDEV_A_PAGE_POOL
 static const struct nla_policy netdev_page_pool_stats_get_nl_policy[NETDEV_A_PAGE_POOL_STATS_INFO + 1] = {
 	[NETDEV_A_PAGE_POOL_STATS_INFO] = NLA_POLICY_NESTED(netdev_page_pool_info_nl_policy),
 };
+
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - dump */
+static const struct nla_policy netdev_page_pool_stats_get_dump_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1] = {
+	[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
+};
+
+static const struct nla_policy netdev_page_pool_stats_get_dump_nl_policy[NETDEV_A_PAGE_POOL_STATS_INFO + 1] = {
+	[NETDEV_A_PAGE_POOL_STATS_INFO] = NLA_POLICY_NESTED(netdev_page_pool_stats_get_dump_info_nl_policy),
+};
 #endif /* CONFIG_PAGE_POOL_STATS */
 
 /* NETDEV_CMD_QUEUE_GET - do */
@@ -143,9 +157,11 @@ static const struct genl_split_ops netdev_nl_ops[] = {
 		.flags		= GENL_CMD_CAP_DO,
 	},
 	{
-		.cmd	= NETDEV_CMD_PAGE_POOL_GET,
-		.dumpit	= netdev_nl_page_pool_get_dumpit,
-		.flags	= GENL_CMD_CAP_DUMP,
+		.cmd		= NETDEV_CMD_PAGE_POOL_GET,
+		.dumpit		= netdev_nl_page_pool_get_dumpit,
+		.policy		= netdev_page_pool_get_dump_nl_policy,
+		.maxattr	= NETDEV_A_PAGE_POOL_IFINDEX,
+		.flags		= GENL_CMD_CAP_DUMP,
 	},
 #endif /* CONFIG_PAGE_POOL */
 #ifdef CONFIG_PAGE_POOL_STATS
@@ -157,9 +173,11 @@ static const struct genl_split_ops netdev_nl_ops[] = {
 		.flags		= GENL_CMD_CAP_DO,
 	},
 	{
-		.cmd	= NETDEV_CMD_PAGE_POOL_STATS_GET,
-		.dumpit	= netdev_nl_page_pool_stats_get_dumpit,
-		.flags	= GENL_CMD_CAP_DUMP,
+		.cmd		= NETDEV_CMD_PAGE_POOL_STATS_GET,
+		.dumpit		= netdev_nl_page_pool_stats_get_dumpit,
+		.policy		= netdev_page_pool_stats_get_dump_nl_policy,
+		.maxattr	= NETDEV_A_PAGE_POOL_STATS_INFO,
+		.flags		= GENL_CMD_CAP_DUMP,
 	},
 #endif /* CONFIG_PAGE_POOL_STATS */
 	{
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index ee5060d8eec0..01509d1b3cba 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -79,7 +79,7 @@ struct page_pool_dump_cb {
 
 static int
 netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
-			     pp_nl_fill_cb fill)
+			     pp_nl_fill_cb fill, struct nlattr *ifindex_attr)
 {
 	struct page_pool_dump_cb *state = (void *)cb->ctx;
 	const struct genl_info *info = genl_info_dump(cb);
@@ -88,9 +88,17 @@ netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	struct page_pool *pool;
 	int err = 0;
 
+	if (ifindex_attr)
+		state->ifindex = nla_get_u32(ifindex_attr);
+
 	rtnl_lock();
 	mutex_lock(&page_pools_lock);
 	for_each_netdev_dump(net, netdev, state->ifindex) {
+		/* Either the provided ifindex doesn't exist or done dumping */
+		if (ifindex_attr &&
+		    netdev->ifindex != nla_get_u32(ifindex_attr))
+			break;
+
 		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
 			if (state->pp_id && state->pp_id < pool->user.id)
 				continue;
@@ -206,10 +214,40 @@ int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
 	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
 }
 
+static const struct netlink_range_validation page_pool_ifindex_range = {
+	.min	= 1ULL,
+	.max	= S32_MAX,
+};
+
+static const struct nla_policy
+page_pool_stat_info_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1] = {
+	[NETDEV_A_PAGE_POOL_IFINDEX] =
+		NLA_POLICY_FULL_RANGE(NLA_U32, &page_pool_ifindex_range),
+};
+
 int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
 					 struct netlink_callback *cb)
 {
-	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
+	struct nlattr *tb[ARRAY_SIZE(page_pool_stat_info_policy)];
+	const struct genl_info *info = genl_info_dump(cb);
+	struct nlattr *ifindex_attr = NULL;
+
+	if (info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO]) {
+		struct nlattr *nest;
+		int err;
+
+		nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
+		err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
+				       page_pool_stat_info_policy,
+				       info->extack);
+		if (err)
+			return err;
+
+		ifindex_attr = tb[NETDEV_A_PAGE_POOL_IFINDEX];
+	}
+
+	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill,
+					    ifindex_attr);
 }
 
 static int
@@ -305,7 +343,10 @@ int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
 int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
+	const struct genl_info *info = genl_info_dump(cb);
+
+	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill,
+					    info->attrs[NETDEV_A_PAGE_POOL_IFINDEX]);
 }
 
 int page_pool_list(struct page_pool *pool)
diff --git a/tools/net/ynl/ynltool/page-pool.c b/tools/net/ynl/ynltool/page-pool.c
index 4b24492abab7..9487eda6b3aa 100644
--- a/tools/net/ynl/ynltool/page-pool.c
+++ b/tools/net/ynl/ynltool/page-pool.c
@@ -327,7 +327,9 @@ static void aggregate_device_stats(struct pp_stats_array *a,
 
 static int do_stats(int argc, char **argv)
 {
+	struct netdev_page_pool_stats_get_req_dump pp_stat_req = {};
 	struct netdev_page_pool_stats_get_list *pp_stats;
+	struct netdev_page_pool_get_req_dump pp_req = {};
 	struct netdev_page_pool_get_list *pools;
 	enum {
 		GROUP_BY_DEVICE,
@@ -374,14 +376,14 @@ static int do_stats(int argc, char **argv)
 		return -1;
 	}
 
-	pools = netdev_page_pool_get_dump(ys);
+	pools = netdev_page_pool_get_dump(ys, &pp_req);
 	if (!pools) {
 		p_err("failed to get page pools: %s", ys->err.msg);
 		ret = -1;
 		goto exit_close;
 	}
 
-	pp_stats = netdev_page_pool_stats_get_dump(ys);
+	pp_stats = netdev_page_pool_stats_get_dump(ys, &pp_stat_req);
 	if (!pp_stats) {
 		p_err("failed to get page pool stats: %s", ys->err.msg);
 		ret = -1;
-- 
2.54.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* [PATCH net-next v2 2/2] selftests: net: add tests for filtered dumps of page pool
  2026-05-04 21:43 [PATCH net-next v2 1/2] net: page_pool: support dumping pps of a specific ifindex via Netlink Jakub Kicinski
@ 2026-05-04 21:43 ` Jakub Kicinski
  0 siblings, 0 replies; 2+ messages in thread
From: Jakub Kicinski @ 2026-05-04 21:43 UTC (permalink / raw)
  To: davem
  Cc: netdev, edumazet, pabeni, andrew+netdev, horms, donald.hunter,
	shuah, matttbe, hawk, linux-kselftest, maxime.chevallier,
	Jakub Kicinski

Add tests for page pool dumps of a specific ifindex.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
v2:
 - add CONFIG_PAGE_POOL_STATS=y to the config
v1: https://lore.kernel.org/20260319035649.2396137-2-kuba@kernel.org
---
 tools/testing/selftests/net/config       |   1 +
 tools/testing/selftests/net/nl_netdev.py | 119 ++++++++++++++++++++++-
 2 files changed, 118 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 94d722770420..d07c5ac5cab7 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -117,6 +117,7 @@ CONFIG_OPENVSWITCH=m
 CONFIG_OPENVSWITCH_GENEVE=m
 CONFIG_OPENVSWITCH_GRE=m
 CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_PAGE_POOL_STATS=y
 CONFIG_PROC_SYSCTL=y
 CONFIG_PSAMPLE=m
 CONFIG_RPS=y
diff --git a/tools/testing/selftests/net/nl_netdev.py b/tools/testing/selftests/net/nl_netdev.py
index eff55c64a012..ceb44c8e1fec 100755
--- a/tools/testing/selftests/net/nl_netdev.py
+++ b/tools/testing/selftests/net/nl_netdev.py
@@ -9,7 +9,7 @@ import errno
 from os import system
 from lib.py import ksft_run, ksft_exit
 from lib.py import ksft_eq, ksft_ge, ksft_ne, ksft_raises, ksft_busy_wait
-from lib.py import NetdevFamily, NetdevSimDev, NlError, ip
+from lib.py import NetdevFamily, NetdevSimDev, NlError, defer, ip
 
 
 def empty_check(nf) -> None:
@@ -255,6 +255,117 @@ from lib.py import NetdevFamily, NetdevSimDev, NlError, ip
         nsim.dfs_write("pp_hold", "y")
 
 
+def page_pool_dump_ifindex(nf) -> None:
+    """Test page pool dump filtering by ifindex."""
+    nsimdev1 = NetdevSimDev(queue_count=3)
+    rm_nsim1 = defer(nsimdev1.remove)
+    nsimdev2 = NetdevSimDev(queue_count=5)
+    defer(nsimdev2.remove)
+
+    nsim1 = nsimdev1.nsims[0]
+    nsim2 = nsimdev2.nsims[0]
+
+    ip(f"link set dev {nsim1.ifname} up")
+    ip(f"link set dev {nsim2.ifname} up")
+
+    # Unfiltered dump should have pools from both devices
+    all_pp = nf.page_pool_get({}, dump=True)
+    pp1_all = [pp for pp in all_pp
+               if pp.get("ifindex") == nsim1.ifindex]
+    pp2_all = [pp for pp in all_pp
+               if pp.get("ifindex") == nsim2.ifindex]
+    ksft_ge(len(pp1_all), 1)
+    ksft_ge(len(pp2_all), 1)
+
+    # Filtered dump should only return pools for that device
+    pp1_flt = nf.page_pool_get({'ifindex': nsim1.ifindex}, dump=True)
+    ksft_eq(pp1_flt, pp1_all)
+
+    pp2_flt = nf.page_pool_get({'ifindex': nsim2.ifindex}, dump=True)
+    ksft_eq(pp2_flt, pp2_all)
+
+    # Non-existent ifindex should return empty dump
+    pp_none = nf.page_pool_get({'ifindex': 12345678}, dump=True)
+    ksft_eq(len(pp_none), 0)
+
+    # Device down - no pools for that ifindex
+    ip(f"link set dev {nsim1.ifname} down")
+    pp1_down = nf.page_pool_get({'ifindex': nsim1.ifindex}, dump=True)
+    ksft_eq(len(pp1_down), 0)
+
+    # Remove device, dump by its old ifindex should return empty
+    old_ifindex = nsim1.ifindex
+    rm_nsim1.exec()
+    pp1_gone = nf.page_pool_get({'ifindex': old_ifindex}, dump=True)
+    ksft_eq(len(pp1_gone), 0)
+
+
+def page_pool_ifindex_leak_check(nf) -> None:
+    """Test that zombie page pools don't show up under the original ifindex."""
+    nsimdev = NetdevSimDev()
+    rm_nsim = defer(nsimdev.remove)
+    nsim = nsimdev.nsims[0]
+
+    ip(f"link set dev {nsim.ifname} up")
+    nsim.dfs_write("pp_hold", "y")
+
+    pp_up = nf.page_pool_get({'ifindex': nsim.ifindex}, dump=True)
+    ksft_ge(len(pp_up), 1)
+
+    # Remove device with leaked page - pool becomes zombie (orphaned to lo)
+    old_ifindex = nsim.ifindex
+    rm_nsim.exec()
+
+    # Zombie pool should NOT appear under the original device
+    pp_down = nf.page_pool_get({'ifindex': old_ifindex}, dump=True)
+    ksft_eq(len(pp_down), 0)
+
+    # But it should appear in an unfiltered dump (under loopback)
+    pp_all = nf.page_pool_get({}, dump=True)
+    orphans = [pp for pp in pp_all
+               if "detach-time" in pp and "ifindex" not in pp]
+    ksft_ge(len(orphans), 1)
+
+
+def page_pool_stats_ifindex_check(nf) -> None:
+    """Test page pool stats dump filtering by ifindex."""
+    nsimdev1 = NetdevSimDev(queue_count=3)
+    defer(nsimdev1.remove)
+    nsimdev2 = NetdevSimDev(queue_count=5)
+    defer(nsimdev2.remove)
+
+    nsim1 = nsimdev1.nsims[0]
+    nsim2 = nsimdev2.nsims[0]
+
+    ip(f"link set dev {nsim1.ifname} up")
+    ip(f"link set dev {nsim2.ifname} up")
+
+    # Unfiltered stats dump
+    all_stats = nf.page_pool_stats_get({}, dump=True)
+    s1_all = [s for s in all_stats
+              if s.get("info", {}).get("ifindex") == nsim1.ifindex]
+    s2_all = [s for s in all_stats
+              if s.get("info", {}).get("ifindex") == nsim2.ifindex]
+    ksft_ge(len(s1_all), 1)
+    ksft_ge(len(s2_all), 1)
+
+    # Filtered stats dump
+    s1_flt = nf.page_pool_stats_get({'info': {'ifindex': nsim1.ifindex}},
+                                    dump=True)
+    ksft_eq(s1_flt, s1_all)
+
+    # Non-existent ifindex should return empty
+    s_none = nf.page_pool_stats_get({'info': {'ifindex': 12345678}}, dump=True)
+    ksft_eq(len(s_none), 0)
+
+    # info.id should be rejected for stats dump
+    with ksft_raises(NlError) as cm:
+        nf.page_pool_stats_get({'info': {'id': s1_all[0]['info']['id']}},
+                               dump=True)
+    ksft_eq(cm.exception.nl_msg.error, -errno.EINVAL)
+    ksft_eq(cm.exception.nl_msg.extack['bad-attr'], '.info.id')
+
+
 def main() -> None:
     """ Ksft boiler plate main """
     nf = NetdevFamily()
@@ -265,7 +376,11 @@ from lib.py import NetdevFamily, NetdevSimDev, NlError, ip
               napi_set_threaded,
               dev_set_threaded,
               nsim_rxq_reset_down,
-              page_pool_check],
+              page_pool_check,
+              page_pool_dump_ifindex,
+              page_pool_ifindex_leak_check,
+              page_pool_stats_ifindex_check
+              ],
              args=(nf, ))
     ksft_exit()
 
-- 
2.54.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2026-05-04 21:43 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-04 21:43 [PATCH net-next v2 1/2] net: page_pool: support dumping pps of a specific ifindex via Netlink Jakub Kicinski
2026-05-04 21:43 ` [PATCH net-next v2 2/2] selftests: net: add tests for filtered dumps of page pool Jakub Kicinski

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox