From: Aurelien Aptel <aaptel@nvidia.com>
To: linux-nvme@lists.infradead.org, Christoph Hellwig <hch@lst.de>,
Sagi Grimberg <sagi@grimberg.me>,
Chaitanya Kulkarni <kch@nvidia.com>
Cc: smalin@nvidia.com, linux-kernel@vger.kernel.org,
Aurelien Aptel <aaptel@nvidia.com>,
Max Gurtovoy <mgurtovoy@nvidia.com>
Subject: [PATCH v2] nvmet: introduce new mdts configuration entry
Date: Thu, 2 Apr 2026 13:21:08 +0000 [thread overview]
Message-ID: <20260402132108.250386-1-aaptel@nvidia.com> (raw)
In-Reply-To: <253ikaa1woe.fsf@mtr-vdi-124.i-did-not-set--mail-host-address--so-tickle-me>
Using this port configuration entry, one will be able to set the Maximum
Data Transfer Size (MDTS) for any controller that will be associated with
the configured port. The default value remains 0 (no limit).
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
---
drivers/nvme/target/admin-cmd.c | 8 ++------
drivers/nvme/target/configfs.c | 27 +++++++++++++++++++++++++++
drivers/nvme/target/core.c | 8 ++++++++
drivers/nvme/target/nvmet.h | 15 +++++++++++++++
drivers/nvme/target/zns.c | 6 +-----
5 files changed, 53 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ca5b08ce1211..4b759fc96cfd 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -687,12 +687,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
- /* Limit MDTS according to transport capability */
- if (ctrl->ops->get_mdts)
- id->mdts = ctrl->ops->get_mdts(ctrl);
- else
- id->mdts = 0;
-
+ /* Limit MDTS according to port config or transport capability */
+ id->mdts = nvmet_ctrl_mdts(req);
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 3088e044dbcb..63d72fbf4d9d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -302,6 +302,31 @@ static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+static ssize_t nvmet_param_mdts_show(struct config_item *item, char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->mdts);
+}
+
+static ssize_t nvmet_param_mdts_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->mdts);
+ if (ret) {
+ pr_err("Invalid value '%s' for mdts\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_mdts);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -1996,6 +2021,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
&nvmet_attr_param_max_queue_size,
+ &nvmet_attr_param_mdts,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -2054,6 +2080,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->mdts = -1; /* < 0 == let the transport choose */
port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 9238e13bd480..779d8a130619 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -370,6 +370,14 @@ int nvmet_enable_port(struct nvmet_port *port)
NVMET_MIN_QUEUE_SIZE,
NVMET_MAX_QUEUE_SIZE);
+ /*
+ * If the user didn't configure mdts, or configured a value outside the
+ * target limits, fall back to the default of 0 (no limit imposed by the
+ * port; the transport capability still applies via get_mdts).
+ */
+ if (port->mdts < 0 || port->mdts > NVMET_MAX_MDTS)
+ port->mdts = 0;
+
port->enabled = true;
port->tr_ops = ops;
return 0;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 319d6a5e9cf0..b2dccf0a4ef2 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -214,6 +214,7 @@ struct nvmet_port {
bool enabled;
int inline_data_size;
int max_queue_size;
+ int mdts;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -671,6 +672,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
+#define NVMET_MAX_MDTS 255
/*
* Nice round number that makes a list of nsids fit into a page.
@@ -759,6 +761,19 @@ static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
}
+static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u8 mdts;
+
+ /* Limit MDTS according to port config or transport capability */
+ mdts = req->port->mdts;
+ if (ctrl->ops->get_mdts)
+ mdts = min_not_zero(ctrl->ops->get_mdts(ctrl), mdts);
+
+ return mdts;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index aeaf73b54c3a..f00921931eb6 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -69,7 +69,6 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
{
u8 zasl = req->sq->ctrl->subsys->zasl;
- struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl_zns *id;
u16 status;
@@ -79,10 +78,7 @@ void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
goto out;
}
- if (ctrl->ops->get_mdts)
- id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
- else
- id->zasl = zasl;
+ id->zasl = min_not_zero(nvmet_ctrl_mdts(req), zasl);
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
--
2.51.0
next prev parent reply other threads:[~2026-04-02 13:23 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-01 10:13 [PATCH] nvmet: introduce new mdts configuration entry Aurelien Aptel
2026-04-01 14:10 ` Christoph Hellwig
2026-04-01 16:18 ` Aurelien Aptel
2026-04-02 13:21 ` Aurelien Aptel [this message]
2026-04-07 5:23 ` [PATCH v2] " Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260402132108.250386-1-aaptel@nvidia.com \
--to=aaptel@nvidia.com \
--cc=hch@lst.de \
--cc=kch@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=mgurtovoy@nvidia.com \
--cc=sagi@grimberg.me \
--cc=smalin@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox