* [PATCH] nvmet: introduce new mdts configuration entry
@ 2026-04-01 10:13 Aurelien Aptel
2026-04-01 14:10 ` Christoph Hellwig
0 siblings, 1 reply; 5+ messages in thread
From: Aurelien Aptel @ 2026-04-01 10:13 UTC (permalink / raw)
To: linux-nvme, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: smalin, linux-kernel, Aurelien Aptel, Max Gurtovoy
Using this port configuration, one will be able to set the Maximum Data
Transfer Size (MDTS) for any controller that will be associated to the
configured port.
The default value remains 0 (no limit), but each transport will be able to
set its own value before enabling the port.
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
---
drivers/nvme/target/admin-cmd.c | 7 +++----
drivers/nvme/target/configfs.c | 27 +++++++++++++++++++++++++++
drivers/nvme/target/core.c | 8 ++++++++
drivers/nvme/target/nvmet.h | 2 ++
4 files changed, 40 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ca5b08ce1211..057ac62f6f63 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -687,11 +687,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
- /* Limit MDTS according to transport capability */
+ /* Limit MDTS according to port config or transport capability */
+ id->mdts = req->port->mdts;
if (ctrl->ops->get_mdts)
- id->mdts = ctrl->ops->get_mdts(ctrl);
- else
- id->mdts = 0;
+ id->mdts = min_not_zero(ctrl->ops->get_mdts(ctrl), id->mdts);
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 3088e044dbcb..63d72fbf4d9d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -302,6 +302,31 @@ static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+static ssize_t nvmet_param_mdts_show(struct config_item *item, char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->mdts);
+}
+
+static ssize_t nvmet_param_mdts_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->mdts);
+ if (ret) {
+ pr_err("Invalid value '%s' for mdts\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_mdts);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -1996,6 +2021,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
&nvmet_attr_param_max_queue_size,
+ &nvmet_attr_param_mdts,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -2054,6 +2080,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->mdts = -1; /* < 0 == let the transport choose */
port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 9238e13bd480..779d8a130619 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -370,6 +370,14 @@ int nvmet_enable_port(struct nvmet_port *port)
NVMET_MIN_QUEUE_SIZE,
NVMET_MAX_QUEUE_SIZE);
+ /*
+ * If the transport didn't set the mdts properly, then clamp it to the
+ * target limits. Also set default values in case the transport didn't
+ * set it at all.
+ */
+ if (port->mdts < 0 || port->mdts > NVMET_MAX_MDTS)
+ port->mdts = 0;
+
port->enabled = true;
port->tr_ops = ops;
return 0;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 319d6a5e9cf0..90ca10cd9438 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -214,6 +214,7 @@ struct nvmet_port {
bool enabled;
int inline_data_size;
int max_queue_size;
+ int mdts;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -671,6 +672,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
+#define NVMET_MAX_MDTS 255
/*
* Nice round number that makes a list of nsids fit into a page.
--
2.51.0
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH] nvmet: introduce new mdts configuration entry
2026-04-01 10:13 [PATCH] nvmet: introduce new mdts configuration entry Aurelien Aptel
@ 2026-04-01 14:10 ` Christoph Hellwig
2026-04-01 16:18 ` Aurelien Aptel
0 siblings, 1 reply; 5+ messages in thread
From: Christoph Hellwig @ 2026-04-01 14:10 UTC (permalink / raw)
To: Aurelien Aptel
Cc: linux-nvme, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
smalin, linux-kernel, Max Gurtovoy
On Wed, Apr 01, 2026 at 10:13:55AM +0000, Aurelien Aptel wrote:
> Using this port configuration, one will be able to set the Maximum Data
> Transfer Size (MDTS) for any controller that will be associated to the
> configured port.
>
> The default value stayed 0 (no limit) but each transport will be able to
> set its own values before enabling the port.
The ZASL calculation in zns.c also needs to take this limit into account.
So maybe add a helper to calculate it?
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] nvmet: introduce new mdts configuration entry
2026-04-01 14:10 ` Christoph Hellwig
@ 2026-04-01 16:18 ` Aurelien Aptel
2026-04-02 13:21 ` [PATCH v2] " Aurelien Aptel
0 siblings, 1 reply; 5+ messages in thread
From: Aurelien Aptel @ 2026-04-01 16:18 UTC (permalink / raw)
To: Christoph Hellwig
Cc: linux-nvme, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
smalin, linux-kernel, Max Gurtovoy
Christoph Hellwig <hch@lst.de> writes:
> The ZASL calculation in zns.c also needs to take this limit into account.
> So maybe add a helper to calculate it?
Ok I will make an inline helper in nvmet.h and use it in both places.
Thanks
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v2] nvmet: introduce new mdts configuration entry
2026-04-01 16:18 ` Aurelien Aptel
@ 2026-04-02 13:21 ` Aurelien Aptel
2026-04-07 5:23 ` Christoph Hellwig
0 siblings, 1 reply; 5+ messages in thread
From: Aurelien Aptel @ 2026-04-02 13:21 UTC (permalink / raw)
To: linux-nvme, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: smalin, linux-kernel, Aurelien Aptel, Max Gurtovoy
Using this port configuration, one will be able to set the Maximum Data
Transfer Size (MDTS) for any controller that will be associated to the
configured port. The default value remains 0 (no limit).
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
---
drivers/nvme/target/admin-cmd.c | 8 ++------
drivers/nvme/target/configfs.c | 27 +++++++++++++++++++++++++++
drivers/nvme/target/core.c | 8 ++++++++
drivers/nvme/target/nvmet.h | 15 +++++++++++++++
drivers/nvme/target/zns.c | 6 +-----
5 files changed, 53 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ca5b08ce1211..4b759fc96cfd 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -687,12 +687,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
- /* Limit MDTS according to transport capability */
- if (ctrl->ops->get_mdts)
- id->mdts = ctrl->ops->get_mdts(ctrl);
- else
- id->mdts = 0;
-
+ /* Limit MDTS according to port config or transport capability */
+ id->mdts = nvmet_ctrl_mdts(req);
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 3088e044dbcb..63d72fbf4d9d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -302,6 +302,31 @@ static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+static ssize_t nvmet_param_mdts_show(struct config_item *item, char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->mdts);
+}
+
+static ssize_t nvmet_param_mdts_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->mdts);
+ if (ret) {
+ pr_err("Invalid value '%s' for mdts\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_mdts);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -1996,6 +2021,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
&nvmet_attr_param_max_queue_size,
+ &nvmet_attr_param_mdts,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -2054,6 +2080,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->mdts = -1; /* < 0 == let the transport choose */
port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 9238e13bd480..779d8a130619 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -370,6 +370,14 @@ int nvmet_enable_port(struct nvmet_port *port)
NVMET_MIN_QUEUE_SIZE,
NVMET_MAX_QUEUE_SIZE);
+ /*
+ * If the transport didn't set the mdts properly, then clamp it to the
+ * target limits. Also set default values in case the transport didn't
+ * set it at all.
+ */
+ if (port->mdts < 0 || port->mdts > NVMET_MAX_MDTS)
+ port->mdts = 0;
+
port->enabled = true;
port->tr_ops = ops;
return 0;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 319d6a5e9cf0..b2dccf0a4ef2 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -214,6 +214,7 @@ struct nvmet_port {
bool enabled;
int inline_data_size;
int max_queue_size;
+ int mdts;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -671,6 +672,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
+#define NVMET_MAX_MDTS 255
/*
* Nice round number that makes a list of nsids fit into a page.
@@ -759,6 +761,19 @@ static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
}
+static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u8 mdts;
+
+ /* Limit MDTS according to port config or transport capability */
+ mdts = req->port->mdts;
+ if (ctrl->ops->get_mdts)
+ mdts = min_not_zero(ctrl->ops->get_mdts(ctrl), mdts);
+
+ return mdts;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index aeaf73b54c3a..f00921931eb6 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -69,7 +69,6 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
{
u8 zasl = req->sq->ctrl->subsys->zasl;
- struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl_zns *id;
u16 status;
@@ -79,10 +78,7 @@ void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
goto out;
}
- if (ctrl->ops->get_mdts)
- id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
- else
- id->zasl = zasl;
+ id->zasl = min_not_zero(nvmet_ctrl_mdts(req), zasl);
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
--
2.51.0
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH v2] nvmet: introduce new mdts configuration entry
2026-04-02 13:21 ` [PATCH v2] " Aurelien Aptel
@ 2026-04-07 5:23 ` Christoph Hellwig
0 siblings, 0 replies; 5+ messages in thread
From: Christoph Hellwig @ 2026-04-07 5:23 UTC (permalink / raw)
To: Aurelien Aptel
Cc: linux-nvme, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
smalin, linux-kernel, Max Gurtovoy
On Thu, Apr 02, 2026 at 01:21:08PM +0000, Aurelien Aptel wrote:
> +static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req)
> +{
> + struct nvmet_ctrl *ctrl = req->sq->ctrl;
> + u8 mdts;
> +
> + /* Limit MDTS according to port config or transport capability */
> + mdts = req->port->mdts;
> + if (ctrl->ops->get_mdts)
> + mdts = min_not_zero(ctrl->ops->get_mdts(ctrl), mdts);
> +
> + return mdts;
This could be simplified a little more:
/* Limit MDTS according to port config or transport capability */
+static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
u8 mdts = req->port->mdts;
if (!ctrl->ops->get_mdts)
return mdts;
return min_not_zero(ctrl->ops->get_mdts(ctrl), mdts);
}
Otherwise looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2026-04-07 5:24 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-01 10:13 [PATCH] nvmet: introduce new mdts configuration entry Aurelien Aptel
2026-04-01 14:10 ` Christoph Hellwig
2026-04-01 16:18 ` Aurelien Aptel
2026-04-02 13:21 ` [PATCH v2] " Aurelien Aptel
2026-04-07 5:23 ` Christoph Hellwig
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox