From: Chiara Meiohas <cmeiohas@nvidia.com>
To: <leon@kernel.org>, <dsahern@gmail.com>, <stephen@networkplumber.org>
Cc: <michaelgur@nvidia.com>, <jgg@nvidia.com>,
	<linux-rdma@vger.kernel.org>, <netdev@vger.kernel.org>,
	Patrisious Haddad <phaddad@nvidia.com>
Subject: [PATCH iproute2-next 2/4] rdma: Add resource FRMR pools show command
Date: Mon, 2 Mar 2026 17:51:58 +0200
Message-ID: <20260302155200.2611098-3-cmeiohas@nvidia.com>
In-Reply-To: <20260302155200.2611098-1-cmeiohas@nvidia.com>

From: Michael Guralnik <michaelgur@nvidia.com>

Allow users to see the FRMR pools that were created on the devices,
their properties, and their usage statistics.
The set of properties of each pool is encoded into a hex
representation to simplify referencing a specific pool in 'set'
commands.
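
The key is the hex dump of the pool's packed property bytes with
leading zeros stripped, in field order as defined below in
res-frmr-pools.c:

  ats             uint8_t    1 byte
  access_flags    uint32_t   4 bytes
  vendor_key      uint64_t   8 bytes
  num_dma_blocks  uint64_t   8 bytes   (21 bytes total, packed)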

Sample output:

$ rdma resource show frmr_pools
dev rocep8s0f0 key 10000000000000 queue 0 in_use 0 max_in_use 200
dev rocep8s0f0 key 8000000000000 queue 0 in_use 0 max_in_use 200
dev rocep8s0f0 key 4000000000000 queue 0 in_use 0 max_in_use 200

$ rdma resource show frmr_pools -d
dev rocep8s0f0 key 10000000000000 ats 0 access_flags 0 vendor_key 0 num_dma_blocks 4096 queue 0 in_use 0 max_in_use 200
dev rocep8s0f0 key 8000000000000 ats 0 access_flags 0 vendor_key 0 num_dma_blocks 2048 queue 0 in_use 0 max_in_use 200
dev rocep8s0f0 key 4000000000000 ats 0 access_flags 0 vendor_key 0 num_dma_blocks 1024 queue 0 in_use 0 max_in_use 200

$ rdma resource show frmr_pools num_dma_blocks 2048
dev rocep8s0f0 key 8000000000000 queue 0 in_use 0 max_in_use 200
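
For reference, here is a minimal standalone sketch (not part of this
patch) that reproduces the key of the 2048-block pool above; it
assumes a little-endian host, matching the raw-byte encoding in
res-frmr-pools.c:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Same packed layout as union frmr_pool_key in res-frmr-pools.c */
struct key_fields {
	uint8_t ats;
	uint32_t access_flags;
	uint64_t vendor_key;
	uint64_t num_dma_blocks;
} __attribute__((packed));

int main(void)
{
	struct key_fields k = { .num_dma_blocks = 2048 };
	uint8_t raw[sizeof(k)];
	char hex[2 * sizeof(k) + 1];
	size_t i;

	memcpy(raw, &k, sizeof(k));
	for (i = 0; i < sizeof(k); i++)
		sprintf(hex + 2 * i, "%02x", raw[i]);
	/* Strip leading zeros, keeping at least one digit */
	for (i = 0; hex[i] == '0' && hex[i + 1]; i++)
		;
	printf("key %s\n", hex + i);	/* prints: key 8000000000000 */
	return 0;
}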

Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
---
 man/man8/rdma-resource.8 |   8 +-
 rdma/Makefile            |   2 +-
 rdma/res-frmr-pools.c    | 190 +++++++++++++++++++++++++++++++++++++++
 rdma/res.c               |   5 +-
 rdma/res.h               |  18 ++++
 5 files changed, 220 insertions(+), 3 deletions(-)
 create mode 100644 rdma/res-frmr-pools.c

diff --git a/man/man8/rdma-resource.8 b/man/man8/rdma-resource.8
index 61bec471..4e2ba39a 100644
--- a/man/man8/rdma-resource.8
+++ b/man/man8/rdma-resource.8
@@ -13,7 +13,8 @@ rdma-resource \- rdma resource configuration
 
 .ti -8
 .IR RESOURCE " := { "
-.BR cm_id " | " cq " | " mr " | " pd " | " qp " | " ctx " | " srq " }"
+.BR cm_id " | " cq " | " mr " | " pd " | " qp " | " ctx " | " srq " | "
+.BR frmr_pools " }"
 .sp
 
 .ti -8
@@ -113,6 +114,11 @@ rdma resource show srq lqpn 5-7
 Show SRQs that the QPs with lqpn 5-7 are associated with.
 .RE
 .PP
+rdma resource show frmr_pools ats 1
+.RS
+Show FRMR pools that have the ats attribute set.
+.RE
+.PP
 
 .SH SEE ALSO
 .BR rdma (8),
diff --git a/rdma/Makefile b/rdma/Makefile
index ed3c1c1c..66fe53f9 100644
--- a/rdma/Makefile
+++ b/rdma/Makefile
@@ -5,7 +5,7 @@ CFLAGS += -I./include/uapi/
 
 RDMA_OBJ = rdma.o utils.o dev.o link.o res.o res-pd.o res-mr.o res-cq.o \
 	   res-cmid.o res-qp.o sys.o stat.o stat-mr.o res-ctx.o res-srq.o \
-	   monitor.o
+	   monitor.o res-frmr-pools.o
 
 TARGETS += rdma
 
diff --git a/rdma/res-frmr-pools.c b/rdma/res-frmr-pools.c
new file mode 100644
index 00000000..97d59705
--- /dev/null
+++ b/rdma/res-frmr-pools.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * res-frmr-pools.c	RDMA tool
+ * Authors:    Michael Guralnik <michaelgur@nvidia.com>
+ */
+
+#include "res.h"
+#include <inttypes.h>
+
+#define FRMR_POOL_KEY_SIZE 21
+#define FRMR_POOL_KEY_HEX_SIZE (FRMR_POOL_KEY_SIZE * 2)
+union frmr_pool_key {
+	struct {
+		uint8_t ats;
+		uint32_t access_flags;
+		uint64_t vendor_key;
+		uint64_t num_dma_blocks;
+	} __attribute__((packed)) fields;
+	uint8_t raw[FRMR_POOL_KEY_SIZE];
+};
+
+/* Encode an FRMR pool key as a hex string, dropping leading zeros */
+static void encode_hex_pool_key(const union frmr_pool_key *key,
+				char *hex_string)
+{
+	char temp_hex[FRMR_POOL_KEY_HEX_SIZE + 1] = { 0 };
+	int i;
+
+	for (i = 0; i < FRMR_POOL_KEY_SIZE; i++)
+		sprintf(temp_hex + (i * 2), "%02x", key->raw[i]);
+
+	for (i = 0; i < FRMR_POOL_KEY_HEX_SIZE && temp_hex[i] == '0'; i++) {
+		/* Skip leading zeros */
+	}
+
+	if (i == FRMR_POOL_KEY_HEX_SIZE) {
+		strcpy(hex_string, "0");
+		return;
+	}
+
+	strcpy(hex_string, temp_hex + i);
+}
+
+static int res_frmr_pools_line(struct rd *rd, const char *name, int idx,
+			       struct nlattr **nla_line)
+{
+	uint64_t in_use = 0, max_in_use = 0, kernel_vendor_key = 0;
+	char hex_string[FRMR_POOL_KEY_HEX_SIZE + 1] = { 0 };
+	struct nlattr *key_tb[RDMA_NLDEV_ATTR_MAX] = {};
+	union frmr_pool_key key = { 0 };
+	uint32_t queue_handles = 0;
+
+	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY]) {
+		if (mnl_attr_parse_nested(
+			    nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY],
+			    rd_attr_cb, key_tb) != MNL_CB_OK)
+			return MNL_CB_ERROR;
+
+		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS])
+			key.fields.ats = mnl_attr_get_u8(
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS]);
+		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS])
+			key.fields.access_flags = mnl_attr_get_u32(
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS]);
+		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY])
+			key.fields.vendor_key = mnl_attr_get_u64(
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY]);
+		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS])
+			key.fields.num_dma_blocks = mnl_attr_get_u64(
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
+		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY])
+			kernel_vendor_key = mnl_attr_get_u64(
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY]);
+
+		if (rd_is_filtered_attr(
+			    rd, "ats", key.fields.ats,
+			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS]))
+			goto out;
+
+		if (rd_is_filtered_attr(
+			    rd, "access_flags", key.fields.access_flags,
+			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS]))
+			goto out;
+
+		if (rd_is_filtered_attr(
+			    rd, "vendor_key", key.fields.vendor_key,
+			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY]))
+			goto out;
+
+		if (rd_is_filtered_attr(
+			    rd, "num_dma_blocks", key.fields.num_dma_blocks,
+			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS]))
+			goto out;
+	}
+
+	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES])
+		queue_handles = mnl_attr_get_u32(
+			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES]);
+	if (rd_is_filtered_attr(
+		    rd, "queue", queue_handles,
+		    nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES]))
+		goto out;
+
+	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE])
+		in_use = mnl_attr_get_u64(
+			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]);
+	if (rd_is_filtered_attr(rd, "in_use", in_use,
+				nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]))
+		goto out;
+
+	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE])
+		max_in_use = mnl_attr_get_u64(
+			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]);
+	if (rd_is_filtered_attr(
+		    rd, "max_in_use", max_in_use,
+		    nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]))
+		goto out;
+
+	open_json_object(NULL);
+	print_dev(idx, name);
+
+	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY]) {
+		encode_hex_pool_key(&key, hex_string);
+		print_string(PRINT_ANY, "key", "key %s ", hex_string);
+
+		if (rd->show_details) {
+			res_print_u32(
+				"ats", key.fields.ats,
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS]);
+			res_print_u32(
+				"access_flags", key.fields.access_flags,
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS]);
+			res_print_u64(
+				"vendor_key", key.fields.vendor_key,
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY]);
+			res_print_u64(
+				"num_dma_blocks", key.fields.num_dma_blocks,
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
+			res_print_u64(
+				"kernel_vendor_key", kernel_vendor_key,
+				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY]);
+		}
+	}
+
+	res_print_u32("queue", queue_handles,
+		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES]);
+	res_print_u64("in_use", in_use,
+		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]);
+	res_print_u64("max_in_use", max_in_use,
+		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]);
+
+	print_driver_table(rd, nla_line[RDMA_NLDEV_ATTR_DRIVER]);
+	close_json_object();
+	newline();
+
+out:
+	return MNL_CB_OK;
+}
+
+int res_frmr_pools_parse_cb(const struct nlmsghdr *nlh, void *data)
+{
+	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX] = {};
+	struct nlattr *nla_table, *nla_entry;
+	struct rd *rd = data;
+	int ret = MNL_CB_OK;
+	const char *name;
+	uint32_t idx;
+
+	mnl_attr_parse(nlh, 0, rd_attr_cb, tb);
+	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
+	    !tb[RDMA_NLDEV_ATTR_RES_FRMR_POOLS])
+		return MNL_CB_ERROR;
+
+	name = mnl_attr_get_str(tb[RDMA_NLDEV_ATTR_DEV_NAME]);
+	idx = mnl_attr_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+	nla_table = tb[RDMA_NLDEV_ATTR_RES_FRMR_POOLS];
+
+	mnl_attr_for_each_nested(nla_entry, nla_table) {
+		struct nlattr *nla_line[RDMA_NLDEV_ATTR_MAX] = {};
+
+		ret = mnl_attr_parse_nested(nla_entry, rd_attr_cb, nla_line);
+		if (ret != MNL_CB_OK)
+			break;
+
+		ret = res_frmr_pools_line(rd, name, idx, nla_line);
+		if (ret != MNL_CB_OK)
+			break;
+	}
+	return ret;
+}
diff --git a/rdma/res.c b/rdma/res.c
index 7e7de042..f1f13d74 100644
--- a/rdma/res.c
+++ b/rdma/res.c
@@ -11,7 +11,7 @@ static int res_help(struct rd *rd)
 {
 	pr_out("Usage: %s resource\n", rd->filename);
 	pr_out("          resource show [DEV]\n");
-	pr_out("          resource show [qp|cm_id|pd|mr|cq|ctx|srq]\n");
+	pr_out("          resource show [qp|cm_id|pd|mr|cq|ctx|srq|frmr_pools]\n");
 	pr_out("          resource show qp link [DEV/PORT]\n");
 	pr_out("          resource show qp link [DEV/PORT] [FILTER-NAME FILTER-VALUE]\n");
 	pr_out("          resource show cm_id link [DEV/PORT]\n");
@@ -26,6 +26,8 @@ static int res_help(struct rd *rd)
 	pr_out("          resource show ctx dev [DEV] [FILTER-NAME FILTER-VALUE]\n");
 	pr_out("          resource show srq dev [DEV]\n");
 	pr_out("          resource show srq dev [DEV] [FILTER-NAME FILTER-VALUE]\n");
+	pr_out("          resource show frmr_pools dev [DEV]\n");
+	pr_out("          resource show frmr_pools dev [DEV] [FILTER-NAME FILTER-VALUE]\n");
 	return 0;
 }
 
@@ -237,6 +239,7 @@ static int res_show(struct rd *rd)
 		{ "pd",		res_pd		},
 		{ "ctx",	res_ctx		},
 		{ "srq",	res_srq		},
+		{ "frmr_pools",	res_frmr_pools	},
 		{ 0 }
 	};
 
diff --git a/rdma/res.h b/rdma/res.h
index fd09ce7d..30edb8f8 100644
--- a/rdma/res.h
+++ b/rdma/res.h
@@ -26,6 +26,8 @@ int res_ctx_parse_cb(const struct nlmsghdr *nlh, void *data);
 int res_ctx_idx_parse_cb(const struct nlmsghdr *nlh, void *data);
 int res_srq_parse_cb(const struct nlmsghdr *nlh, void *data);
 int res_srq_idx_parse_cb(const struct nlmsghdr *nlh, void *data);
+int res_frmr_pools_parse_cb(const struct nlmsghdr *nlh, void *data);
+int res_frmr_pools_idx_parse_cb(const struct nlmsghdr *nlh, void *data);
 
 static inline uint32_t res_get_command(uint32_t command, struct rd *rd)
 {
@@ -185,6 +187,22 @@ struct filters srq_valid_filters[MAX_NUMBER_OF_FILTERS] = {
 RES_FUNC(res_srq, RDMA_NLDEV_CMD_RES_SRQ_GET, srq_valid_filters, true,
 	 RDMA_NLDEV_ATTR_RES_SRQN);
 
+
+static const
+struct filters frmr_pools_valid_filters[MAX_NUMBER_OF_FILTERS] = {
+	{ .name = "dev", .is_number = false },
+	{ .name = "ats", .is_number = true },
+	{ .name = "access_flags", .is_number = true },
+	{ .name = "vendor_key", .is_number = true },
+	{ .name = "num_dma_blocks", .is_number = true },
+	{ .name = "queue", .is_number = true },
+	{ .name = "in_use", .is_number = true },
+	{ .name = "max_in_use", .is_number = true },
+};
+
+RES_FUNC(res_frmr_pools, RDMA_NLDEV_CMD_RES_FRMR_POOLS_GET,
+	 frmr_pools_valid_filters, true, 0);
+
 void print_dev(uint32_t idx, const char *name);
 void print_link(uint32_t idx, const char *name, uint32_t port, struct nlattr **nla_line);
 void print_key(const char *name, uint64_t val, struct nlattr *nlattr);
-- 
2.38.1

