Netdev List
 help / color / mirror / Atom feed
From: Stephen Hemminger <stephen@networkplumber.org>
To: netdev@vger.kernel.org
Cc: Stephen Hemminger <stephen@networkplumber.org>
Subject: [PATCH iproute2] rdma: sync kernel headers and fix build
Date: Tue,  5 May 2026 11:10:44 -0700	[thread overview]
Message-ID: <20260505181045.748088-1-stephen@networkplumber.org> (raw)

The upstream kernel removed the 'RES_' prefix from RDMA netlink
command and attribute names, but the rdma tool was still using
the old names, causing build failures.

Update all references to match the new kernel header names:
- RDMA_NLDEV_CMD_RES_FRMR_POOLS_* → RDMA_NLDEV_CMD_FRMR_POOLS_*
- RDMA_NLDEV_ATTR_RES_FRMR_POOL* → RDMA_NLDEV_ATTR_FRMR_POOL*
- RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED → RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES
- RDMA_NLDEV_ATTR_RES_FRMR_POOL_AGING_PERIOD → RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD

  Fixes build error:
    res.h:203:26: error: 'RDMA_NLDEV_CMD_RES_FRMR_POOLS_GET' undeclared

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 rdma/include/uapi/rdma/rdma_netlink.h | 30 ++++----
 rdma/res-frmr-pools.c                 | 98 +++++++++++++--------------
 rdma/res.h                            |  2 +-
 3 files changed, 65 insertions(+), 65 deletions(-)

diff --git a/rdma/include/uapi/rdma/rdma_netlink.h b/rdma/include/uapi/rdma/rdma_netlink.h
index 8709e558..4356ec4a 100644
--- a/rdma/include/uapi/rdma/rdma_netlink.h
+++ b/rdma/include/uapi/rdma/rdma_netlink.h
@@ -308,9 +308,9 @@ enum rdma_nldev_command {
 
 	RDMA_NLDEV_CMD_MONITOR,
 
-	RDMA_NLDEV_CMD_RES_FRMR_POOLS_GET, /* can dump */
+	RDMA_NLDEV_CMD_FRMR_POOLS_GET, /* can dump */
 
-	RDMA_NLDEV_CMD_RES_FRMR_POOLS_SET,
+	RDMA_NLDEV_CMD_FRMR_POOLS_SET,
 
 	RDMA_NLDEV_NUM_OPS
 };
@@ -590,19 +590,19 @@ enum rdma_nldev_attr {
 	/*
 	 * FRMR Pools attributes
 	 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOLS,			/* nested table */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_ENTRY,		/* nested table */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY,		/* nested table */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS,		/* u8 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS,	/* u32 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY,	/* u64 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS, /* u64 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES,	/* u32 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE,	/* u64 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE,		/* u64 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_AGING_PERIOD,	/* u32 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED,		/* u32 */
-	RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY, /* u64 */
+	RDMA_NLDEV_ATTR_FRMR_POOLS,		/* nested table */
+	RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY,	/* nested table */
+	RDMA_NLDEV_ATTR_FRMR_POOL_KEY,		/* nested table */
+	RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS,	/* u8 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS,	/* u32 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY,	/* u64 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS,	/* u64 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES,	/* u32 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE,	/* u64 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE,	/* u64 */
+	RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD,	/* u32 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES,	/* u32 */
+	RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY,	/* u64 */
 
 	/*
 	 * Always the end
diff --git a/rdma/res-frmr-pools.c b/rdma/res-frmr-pools.c
index abcd2188..d5faa5c1 100644
--- a/rdma/res-frmr-pools.c
+++ b/rdma/res-frmr-pools.c
@@ -80,83 +80,83 @@ static int res_frmr_pools_line(struct rd *rd, const char *name, int idx,
 	char key_str[FRMR_POOL_KEY_MAX_LEN];
 	struct frmr_pool_key key = { 0 };
 
-	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY]) {
+	if (nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_KEY]) {
 		if (mnl_attr_parse_nested(
-			    nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY],
+			    nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_KEY],
 			    rd_attr_cb, key_tb) != MNL_CB_OK)
 			return MNL_CB_ERROR;
 
-		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS])
+		if (key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS])
 			key.ats = mnl_attr_get_u8(
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS]);
-		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS])
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS]);
+		if (key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS])
 			key.access_flags = mnl_attr_get_u32(
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS]);
-		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY])
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS]);
+		if (key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY])
 			key.vendor_key = mnl_attr_get_u64(
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY]);
-		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS])
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY]);
+		if (key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS])
 			key.num_dma_blocks = mnl_attr_get_u64(
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
-		if (key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY])
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
+		if (key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY])
 			kernel_vendor_key = mnl_attr_get_u64(
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY]);
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY]);
 
 		if (rd_is_filtered_attr(
 			    rd, "ats", key.ats,
-			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS]))
+			    key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS]))
 			goto out;
 
 		if (rd_is_filtered_attr(
 			    rd, "access_flags", key.access_flags,
-			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS]))
+			    key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS]))
 			goto out;
 
 		if (rd_is_filtered_attr(
 			    rd, "vendor_key", key.vendor_key,
-			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY]))
+			    key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY]))
 			goto out;
 
 		if (rd_is_filtered_attr(
 			    rd, "num_dma_blocks", key.num_dma_blocks,
-			    key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS]))
+			    key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS]))
 			goto out;
 	}
 
-	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES])
+	if (nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES])
 		queue_handles = mnl_attr_get_u32(
-			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES]);
+			nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES]);
 	if (rd_is_filtered_attr(
 		    rd, "queue", queue_handles,
-		    nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES]))
+		    nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES]))
 		goto out;
 
-	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE])
+	if (nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE])
 		in_use = mnl_attr_get_u64(
-			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]);
+			nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE]);
 	if (rd_is_filtered_attr(rd, "in_use", in_use,
-				nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]))
+				nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE]))
 		goto out;
 
-	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE])
+	if (nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE])
 		max_in_use = mnl_attr_get_u64(
-			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]);
+			nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE]);
 	if (rd_is_filtered_attr(
 		    rd, "max_in_use", max_in_use,
-		    nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]))
+		    nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE]))
 		goto out;
 
-	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED])
+	if (nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES])
 		pinned_handles = mnl_attr_get_u32(
-			nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED]);
+			nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES]);
 	if (rd_is_filtered_attr(rd, "pinned", pinned_handles,
-				nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED]))
+				nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES]))
 		goto out;
 
 	open_json_object(NULL);
 	print_dev(idx, name);
 
-	if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY]) {
+	if (nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_KEY]) {
 		snprintf(key_str, sizeof(key_str),
 			 "%" PRIx64 ":%" PRIx64 ":%x:%s",
 			 key.vendor_key, key.num_dma_blocks,
@@ -166,30 +166,30 @@ static int res_frmr_pools_line(struct rd *rd, const char *name, int idx,
 		if (rd->show_details) {
 			res_print_u32(
 				"ats", key.ats,
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS]);
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS]);
 			res_print_u32(
 				"access_flags", key.access_flags,
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS]);
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS]);
 			res_print_u64(
 				"vendor_key", key.vendor_key,
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY]);
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY]);
 			res_print_u64(
 				"num_dma_blocks", key.num_dma_blocks,
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
 			res_print_u64(
 				"kernel_vendor_key", kernel_vendor_key,
-				key_tb[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_KERNEL_VENDOR_KEY]);
+				key_tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY]);
 		}
 	}
 
 	res_print_u32("queue", queue_handles,
-		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_QUEUE_HANDLES]);
+		      nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES]);
 	res_print_u64("in_use", in_use,
-		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]);
+		      nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE]);
 	res_print_u64("max_in_use", max_in_use,
-		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]);
+		      nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE]);
 	res_print_u32("pinned", pinned_handles,
-		      nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED]);
+		      nla_line[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES]);
 
 	print_driver_table(rd, nla_line[RDMA_NLDEV_ATTR_DRIVER]);
 	close_json_object();
@@ -215,12 +215,12 @@ int res_frmr_pools_parse_cb(const struct nlmsghdr *nlh, void *data)
 
 	mnl_attr_parse(nlh, 0, rd_attr_cb, tb);
 	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
-	    !tb[RDMA_NLDEV_ATTR_RES_FRMR_POOLS])
+	    !tb[RDMA_NLDEV_ATTR_FRMR_POOLS])
 		return MNL_CB_ERROR;
 
 	name = mnl_attr_get_str(tb[RDMA_NLDEV_ATTR_DEV_NAME]);
 	idx = mnl_attr_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-	nla_table = tb[RDMA_NLDEV_ATTR_RES_FRMR_POOLS];
+	nla_table = tb[RDMA_NLDEV_ATTR_FRMR_POOLS];
 
 	mnl_attr_for_each_nested(nla_entry, nla_table) {
 		struct nlattr *nla_line[RDMA_NLDEV_ATTR_MAX] = {};
@@ -256,10 +256,10 @@ static int res_frmr_pools_one_set_aging(struct rd *rd)
 		return -EINVAL;
 	}
 
-	rd_prepare_msg(rd, RDMA_NLDEV_CMD_RES_FRMR_POOLS_SET, &seq,
+	rd_prepare_msg(rd, RDMA_NLDEV_CMD_FRMR_POOLS_SET, &seq,
 		       (NLM_F_REQUEST | NLM_F_ACK));
 	mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_DEV_INDEX, rd->dev_idx);
-	mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_AGING_PERIOD,
+	mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD,
 			 aging_period);
 
 	return rd_sendrecv_msg(rd, seq);
@@ -294,24 +294,24 @@ static int res_frmr_pools_one_set_pinned(struct rd *rd)
 		return -EINVAL;
 	}
 
-	rd_prepare_msg(rd, RDMA_NLDEV_CMD_RES_FRMR_POOLS_SET, &seq,
+	rd_prepare_msg(rd, RDMA_NLDEV_CMD_FRMR_POOLS_SET, &seq,
 		       (NLM_F_REQUEST | NLM_F_ACK));
 	mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_DEV_INDEX, rd->dev_idx);
 
-	mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED,
+	mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES,
 			 pinned_value);
 
 	key_attr =
-		mnl_attr_nest_start(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY);
-	mnl_attr_put_u8(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS,
+		mnl_attr_nest_start(rd->nlh, RDMA_NLDEV_ATTR_FRMR_POOL_KEY);
+	mnl_attr_put_u8(rd->nlh, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS,
 			pool_key.ats);
 	mnl_attr_put_u32(rd->nlh,
-			 RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS,
+			 RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS,
 			 pool_key.access_flags);
-	mnl_attr_put_u64(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY,
+	mnl_attr_put_u64(rd->nlh, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY,
 			 pool_key.vendor_key);
 	mnl_attr_put_u64(rd->nlh,
-			 RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS,
+			 RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS,
 			 pool_key.num_dma_blocks);
 	mnl_attr_nest_end(rd->nlh, key_attr);
 
diff --git a/rdma/res.h b/rdma/res.h
index 8d7b4a0b..1f71115b 100644
--- a/rdma/res.h
+++ b/rdma/res.h
@@ -200,7 +200,7 @@ struct filters frmr_pools_valid_filters[MAX_NUMBER_OF_FILTERS] = {
 	{ .name = "pinned", .is_number = true },
 };
 
-RES_FUNC(res_frmr_pools, RDMA_NLDEV_CMD_RES_FRMR_POOLS_GET,
+RES_FUNC(res_frmr_pools, RDMA_NLDEV_CMD_FRMR_POOLS_GET,
 	 frmr_pools_valid_filters, true, 0);
 
 int res_frmr_pools_set(struct rd *rd);
-- 
2.53.0


             reply	other threads:[~2026-05-05 18:10 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-05 18:10 Stephen Hemminger [this message]
2026-05-12 11:45 ` [PATCH iproute2] rdma: sync kernel headers and fix build Leon Romanovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260505181045.748088-1-stephen@networkplumber.org \
    --to=stephen@networkplumber.org \
    --cc=netdev@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox