From: Chiara Meiohas <cmeiohas@nvidia.com>
To: <leon@kernel.org>, <dsahern@gmail.com>, <stephen@networkplumber.org>
Cc: <michaelgur@nvidia.com>, <jgg@nvidia.com>,
<linux-rdma@vger.kernel.org>, <netdev@vger.kernel.org>,
Patrisious Haddad <phaddad@nvidia.com>,
"Chiara Meiohas" <cmeiohas@nvidia.com>
Subject: [PATCH v2 iproute2-next 4/4] rdma: Add FRMR pools set pinned command
Date: Mon, 30 Mar 2026 20:31:18 +0300 [thread overview]
Message-ID: <20260330173118.766885-5-cmeiohas@nvidia.com> (raw)
In-Reply-To: <20260330173118.766885-1-cmeiohas@nvidia.com>
From: Michael Guralnik <michaelgur@nvidia.com>
Add an option to set the number of pinned handles for an FRMR pool.
Pinned handles are not affected by aging and remain available for reuse
in the FRMR pool.
The pool is identified by a colon-separated key of hexadecimal fields
(vendor_key:num_dma_blocks:access_flags:ats) as shown in the 'show'
command output.
Usage:
Set 250 pinned handles to FRMR pool with key 0:800:0:0 on
device rocep8s0f0
$ rdma resource set frmr_pools dev rocep8s0f0 pinned 0:800:0:0 250
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Reviewed-by: Chiara Meiohas <cmeiohas@nvidia.com>
---
man/man8/rdma-resource.8 | 21 +++++++
rdma/res-frmr-pools.c | 121 ++++++++++++++++++++++++++++++++++++++-
rdma/res.c | 1 +
rdma/res.h | 1 +
4 files changed, 143 insertions(+), 1 deletion(-)
diff --git a/man/man8/rdma-resource.8 b/man/man8/rdma-resource.8
index a6dc33f3..1138cd23 100644
--- a/man/man8/rdma-resource.8
+++ b/man/man8/rdma-resource.8
@@ -33,6 +33,14 @@ rdma-resource \- rdma resource configuration
.BR aging
.IR AGING_PERIOD
+.ti -8
+.B rdma resource set frmr_pools
+.BR dev
+.IR DEV
+.BR pinned
+.IR POOL_KEY
+.IR PINNED_VALUE
+
.ti -8
.B rdma resource help
@@ -54,6 +62,14 @@ If this argument is omitted all links are listed.
.I "AGING_PERIOD"
- specifies the aging period in seconds for unused FRMR handles. Handles unused for this period will be freed.
+.PP
+.I "POOL_KEY"
+- specifies the pool key that identifies a specific FRMR pool. The key is a colon-separated list of hexadecimal fields in the format vendor_key:num_dma_blocks:access_flags:ats.
+
+.PP
+.I "PINNED_VALUE"
+- specifies the pinned value for the FRMR pool. A non-zero value pins handles to the pool, preventing them from being freed by the aging mechanism.
+
.SH "EXAMPLES"
.PP
rdma resource show
@@ -141,6 +157,11 @@ rdma resource set frmr_pools dev rocep8s0f0 aging 120
Set the aging period for FRMR pools on device rocep8s0f0 to 120 seconds.
.RE
.PP
+rdma resource set frmr_pools dev rocep8s0f0 pinned 0:1000:0:0 25000
+.RS 4
+Pin 25000 handles to the FRMR pool identified by key 0:1000:0:0 on device rocep8s0f0 to prevent them from being freed.
+.RE
+.PP
.SH SEE ALSO
.BR rdma (8),
diff --git a/rdma/res-frmr-pools.c b/rdma/res-frmr-pools.c
index c9d80c4b..abcd2188 100644
--- a/rdma/res-frmr-pools.c
+++ b/rdma/res-frmr-pools.c
@@ -17,14 +17,68 @@ struct frmr_pool_key {
/* vendor_key(16) + ':' + num_dma_blocks(16) + ':' + access_flags(8) + ':' + ats(1) + '\0' */
#define FRMR_POOL_KEY_MAX_LEN 45
+static int decode_pool_key(const char *str, struct frmr_pool_key *key)
+{
+ const char *p = str;
+ char *end;
+ int i = 0;
+
+ while (*p) {
+ uint64_t val;
+
+ errno = 0;
+ val = strtoull(p, &end, 16);
+ if (errno == ERANGE || end == p || (*end != ':' && *end != '\0')) {
+ pr_err("Invalid pool key: %s\n", str);
+ return -EINVAL;
+ }
+
+ switch (i) {
+ case 0:
+ key->vendor_key = val;
+ break;
+ case 1:
+ key->num_dma_blocks = val;
+ break;
+ case 2:
+ if (val > UINT32_MAX)
+ goto out_of_range;
+ key->access_flags = val;
+ break;
+ case 3:
+ if (val != 0 && val != 1)
+ goto out_of_range;
+ key->ats = val;
+ break;
+ default:
+ if (val) {
+ pr_err("Unsupported pool attributes passed in pool key\n");
+ return -EINVAL;
+ }
+ }
+ i++;
+ p = *end ? end + 1 : end;
+ }
+
+ if (i < 4) {
+ pr_err("Invalid pool key: %s, expected 4 fields\n", str);
+ return -EINVAL;
+ }
+ return 0;
+
+out_of_range:
+ pr_err("Pool key field at index %d value out of range\n", i);
+ return -EINVAL;
+}
+
static int res_frmr_pools_line(struct rd *rd, const char *name, int idx,
struct nlattr **nla_line)
{
uint64_t in_use = 0, max_in_use = 0, kernel_vendor_key = 0;
struct nlattr *key_tb[RDMA_NLDEV_ATTR_MAX] = {};
+ uint32_t queue_handles = 0, pinned_handles = 0;
char key_str[FRMR_POOL_KEY_MAX_LEN];
struct frmr_pool_key key = { 0 };
- uint32_t queue_handles = 0;
if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY]) {
if (mnl_attr_parse_nested(
@@ -92,6 +146,13 @@ static int res_frmr_pools_line(struct rd *rd, const char *name, int idx,
nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]))
goto out;
+ if (nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED])
+ pinned_handles = mnl_attr_get_u32(
+ nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED]);
+ if (rd_is_filtered_attr(rd, "pinned", pinned_handles,
+ nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED]))
+ goto out;
+
open_json_object(NULL);
print_dev(idx, name);
@@ -127,6 +188,8 @@ static int res_frmr_pools_line(struct rd *rd, const char *name, int idx,
nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_IN_USE]);
res_print_u64("max_in_use", max_in_use,
nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_MAX_IN_USE]);
+ res_print_u32("pinned", pinned_handles,
+ nla_line[RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED]);
print_driver_table(rd, nla_line[RDMA_NLDEV_ATTR_DRIVER]);
close_json_object();
@@ -202,10 +265,65 @@ static int res_frmr_pools_one_set_aging(struct rd *rd)
return rd_sendrecv_msg(rd, seq);
}
+static int res_frmr_pools_one_set_pinned(struct rd *rd)
+{
+ struct frmr_pool_key pool_key = { 0 };
+ struct nlattr *key_attr;
+ uint32_t pinned_value;
+ const char *key_str;
+ uint32_t seq;
+
+ if (rd_no_arg(rd)) {
+ pr_err("Please provide pool key and pinned value.\n");
+ return -EINVAL;
+ }
+
+ key_str = rd_argv(rd);
+ rd_arg_inc(rd);
+
+ if (decode_pool_key(key_str, &pool_key))
+ return -EINVAL;
+
+ if (rd_no_arg(rd)) {
+ pr_err("Please provide pinned value.\n");
+ return -EINVAL;
+ }
+
+ if (get_u32(&pinned_value, rd_argv(rd), 10)) {
+ pr_err("Invalid pinned value: %s\n", rd_argv(rd));
+ return -EINVAL;
+ }
+
+ rd_prepare_msg(rd, RDMA_NLDEV_CMD_RES_FRMR_POOLS_SET, &seq,
+ (NLM_F_REQUEST | NLM_F_ACK));
+ mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_DEV_INDEX, rd->dev_idx);
+
+ mnl_attr_put_u32(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_PINNED,
+ pinned_value);
+
+ key_attr =
+ mnl_attr_nest_start(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY);
+ mnl_attr_put_u8(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ATS,
+ pool_key.ats);
+ mnl_attr_put_u32(rd->nlh,
+ RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_ACCESS_FLAGS,
+ pool_key.access_flags);
+ mnl_attr_put_u64(rd->nlh, RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_VENDOR_KEY,
+ pool_key.vendor_key);
+ mnl_attr_put_u64(rd->nlh,
+ RDMA_NLDEV_ATTR_RES_FRMR_POOL_KEY_NUM_DMA_BLOCKS,
+ pool_key.num_dma_blocks);
+ mnl_attr_nest_end(rd->nlh, key_attr);
+
+ return rd_sendrecv_msg(rd, seq);
+}
+
static int res_frmr_pools_one_set_help(struct rd *rd)
{
pr_out("Usage: %s set frmr_pools dev DEV aging AGING_PERIOD\n",
rd->filename);
+ pr_out("Usage: %s set frmr_pools dev DEV pinned POOL_KEY PINNED_VALUE\n",
+ rd->filename);
return 0;
}
@@ -215,6 +333,7 @@ static int res_frmr_pools_one_set(struct rd *rd)
{ NULL, res_frmr_pools_one_set_help },
{ "help", res_frmr_pools_one_set_help },
{ "aging", res_frmr_pools_one_set_aging },
+ { "pinned", res_frmr_pools_one_set_pinned },
{ 0 }
};
diff --git a/rdma/res.c b/rdma/res.c
index 63d8386a..062f0007 100644
--- a/rdma/res.c
+++ b/rdma/res.c
@@ -29,6 +29,7 @@ static int res_help(struct rd *rd)
pr_out(" resource show frmr_pools dev [DEV]\n");
pr_out(" resource show frmr_pools dev [DEV] [FILTER-NAME FILTER-VALUE]\n");
pr_out(" resource set frmr_pools dev DEV aging AGING_PERIOD\n");
+ pr_out(" resource set frmr_pools dev DEV pinned POOL_KEY PINNED_VALUE\n");
return 0;
}
diff --git a/rdma/res.h b/rdma/res.h
index dffbdb52..4758f2ea 100644
--- a/rdma/res.h
+++ b/rdma/res.h
@@ -198,6 +198,7 @@ struct filters frmr_pools_valid_filters[MAX_NUMBER_OF_FILTERS] = {
{ .name = "queue", .is_number = true },
{ .name = "in_use", .is_number = true },
{ .name = "max_in_use", .is_number = true },
+ { .name = "pinned", .is_number = true },
};
RES_FUNC(res_frmr_pools, RDMA_NLDEV_CMD_RES_FRMR_POOLS_GET,
--
2.38.1
next prev parent reply other threads:[~2026-03-30 17:33 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-30 17:31 [PATCH v2 iproute2-next 0/4] Introduce FRMR pools Chiara Meiohas
2026-03-30 17:31 ` [PATCH v2 iproute2-next 1/4] rdma: Update headers Chiara Meiohas
2026-03-30 17:31 ` [PATCH v2 iproute2-next 2/4] rdma: Add resource FRMR pools show command Chiara Meiohas
2026-03-30 17:31 ` [PATCH v2 iproute2-next 3/4] rdma: Add FRMR pools set aging command Chiara Meiohas
2026-03-30 17:31 ` Chiara Meiohas [this message]
2026-04-05 17:09 ` [PATCH v2 iproute2-next 0/4] Introduce FRMR pools David Ahern
2026-04-05 17:44 ` Chiara Meiohas
2026-04-05 17:10 ` patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260330173118.766885-5-cmeiohas@nvidia.com \
--to=cmeiohas@nvidia.com \
--cc=dsahern@gmail.com \
--cc=jgg@nvidia.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=michaelgur@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=phaddad@nvidia.com \
--cc=stephen@networkplumber.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox