From: Edward Srouji <edwards@nvidia.com>
To: Jason Gunthorpe <jgg@ziepe.ca>, Leon Romanovsky <leon@kernel.org>,
"Saeed Mahameed" <saeedm@nvidia.com>,
Tariq Toukan <tariqt@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: <linux-kernel@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
<netdev@vger.kernel.org>,
Michael Guralnik <michaelgur@nvidia.com>,
"Edward Srouji" <edwards@nvidia.com>,
Patrisious Haddad <phaddad@nvidia.com>
Subject: [PATCH rdma-next v3 11/11] RDMA/nldev: Expose kernel-internal FRMR pools in netlink
Date: Mon, 2 Feb 2026 18:00:03 +0200 [thread overview]
Message-ID: <20260202-frmr_pools-v3-11-b8405ed9deba@nvidia.com> (raw)
In-Reply-To: <20260202-frmr_pools-v3-0-b8405ed9deba@nvidia.com>
From: Michael Guralnik <michaelgur@nvidia.com>
Allow netlink users, through the use of the driver-details netlink
attribute, to get information about internal FRMR pools that use the
kernel_vendor_key FRMR key member.
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Edward Srouji <edwards@nvidia.com>
---
drivers/infiniband/core/nldev.c | 28 +++++++++++++++++++++++-----
include/uapi/rdma/rdma_netlink.h | 1 +
2 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0b0f689eadd7..80b0079f63ae 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -186,6 +186,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY] = { .type = NLA_U64 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -2671,6 +2672,12 @@ static int fill_frmr_pool_key(struct sk_buff *msg, struct ib_frmr_key *key)
key->num_dma_blocks, RDMA_NLDEV_ATTR_PAD))
goto err;
+ if (key->kernel_vendor_key &&
+ nla_put_u64_64bit(msg,
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY,
+ key->kernel_vendor_key, RDMA_NLDEV_ATTR_PAD))
+ goto err;
+
nla_nest_end(msg, key_attr);
return 0;
@@ -2705,9 +2712,9 @@ static int fill_frmr_pool_entry(struct sk_buff *msg, struct ib_frmr_pool *pool)
return -EMSGSIZE;
}
-static void nldev_frmr_pools_parse_key(struct nlattr *tb[],
- struct ib_frmr_key *key,
- struct netlink_ext_ack *extack)
+static int nldev_frmr_pools_parse_key(struct nlattr *tb[],
+ struct ib_frmr_key *key,
+ struct netlink_ext_ack *extack)
{
if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS])
key->ats = nla_get_u8(tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS]);
@@ -2723,6 +2730,11 @@ static void nldev_frmr_pools_parse_key(struct nlattr *tb[],
if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS])
key->num_dma_blocks = nla_get_u64(
tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS]);
+
+ if (tb[RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY])
+ return -EINVAL;
+
+ return 0;
}
static int nldev_frmr_pools_set_pinned(struct ib_device *device,
@@ -2746,7 +2758,9 @@ static int nldev_frmr_pools_set_pinned(struct ib_device *device,
if (err)
return err;
- nldev_frmr_pools_parse_key(key_tb, &key, extack);
+ err = nldev_frmr_pools_parse_key(key_tb, &key, extack);
+ if (err)
+ return err;
err = ib_frmr_pools_set_pinned(device, &key, pinned_handles);
@@ -2762,6 +2776,7 @@ static int nldev_frmr_pools_get_dumpit(struct sk_buff *skb,
struct ib_frmr_pool *pool;
struct nlattr *table_attr;
struct nlattr *entry_attr;
+ bool show_details = false;
struct ib_device *device;
int start = cb->args[0];
struct rb_node *node;
@@ -2778,6 +2793,9 @@ static int nldev_frmr_pools_get_dumpit(struct sk_buff *skb,
if (!device)
return -EINVAL;
+ if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
+ show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
+
pools = device->frmr_pools;
if (!pools) {
ib_device_put(device);
@@ -2803,7 +2821,7 @@ static int nldev_frmr_pools_get_dumpit(struct sk_buff *skb,
read_lock(&pools->rb_lock);
for (node = rb_first(&pools->rb_root); node; node = rb_next(node)) {
pool = rb_entry(node, struct ib_frmr_pool, node);
- if (pool->key.kernel_vendor_key)
+ if (pool->key.kernel_vendor_key && !show_details)
continue;
if (idx < start) {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 39178df104f0..aac9782ddc09 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -602,6 +602,7 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE, /* u64 */
RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD, /* u32 */
RDMA_NLDEV_ATTR_FRMR_POOL_PINNED_HANDLES, /* u32 */
+ RDMA_NLDEV_ATTR_FRMR_POOL_KEY_KERNEL_VENDOR_KEY, /* u64 */
/*
* Always the end
--
2.47.1
next prev parent reply other threads:[~2026-02-02 16:01 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-02 15:59 [PATCH rdma-next v3 00/11] RDMA/core: Introduce FRMR pools infrastructure Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 01/11] RDMA/mlx5: Move device async_ctx initialization Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 02/11] IB/core: Introduce FRMR pools Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 03/11] RDMA/core: Add aging to " Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 04/11] RDMA/core: Add FRMR pools statistics Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 05/11] RDMA/core: Add pinned handles to FRMR pools Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 06/11] RDMA/mlx5: Switch from MR cache " Edward Srouji
2026-02-02 15:59 ` [PATCH rdma-next v3 07/11] net/mlx5: Drop MR cache related code Edward Srouji
2026-02-02 16:00 ` [PATCH rdma-next v3 08/11] RDMA/nldev: Add command to get FRMR pools Edward Srouji
2026-02-02 16:00 ` [PATCH rdma-next v3 09/11] RDMA/core: Add netlink command to modify FRMR aging Edward Srouji
2026-02-02 16:00 ` [PATCH rdma-next v3 10/11] RDMA/nldev: Add command to set pinned FRMR handles Edward Srouji
2026-02-02 16:00 ` Edward Srouji [this message]
2026-02-25 11:47 ` [PATCH rdma-next v3 00/11] RDMA/core: Introduce FRMR pools infrastructure Leon Romanovsky
2026-02-26 13:32 ` Edward Srouji
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260202-frmr_pools-v3-11-b8405ed9deba@nvidia.com \
--to=edwards@nvidia.com \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=jgg@ziepe.ca \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=mbloch@nvidia.com \
--cc=michaelgur@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=phaddad@nvidia.com \
--cc=saeedm@nvidia.com \
--cc=tariqt@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox