From: Caleb Sander Mateos <csander@purestorage.com>
To: Keith Busch <kbusch@kernel.org>, Jens Axboe <axboe@kernel.dk>,
Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
Chaitanya Kulkarni <kch@nvidia.com>
Cc: linux-nvme@lists.infradead.org, linux-kernel@vger.kernel.org,
Caleb Sander Mateos <csander@purestorage.com>
Subject: [PATCH v2 2/7] nvme: fold nvme_config_discard() into nvme_update_disk_info()
Date: Fri, 20 Feb 2026 20:32:57 -0700 [thread overview]
Message-ID: <20260221033302.1451669-3-csander@purestorage.com> (raw)
In-Reply-To: <20260221033302.1451669-1-csander@purestorage.com>
The choice of what queue limits are set in nvme_update_disk_info() vs.
nvme_config_discard() seems a bit arbitrary. A subsequent commit will
compute the discard_granularity limit using struct nvme_id_ns, which is
currently passed only to nvme_update_disk_info(). So move the logic
from nvme_config_discard() into nvme_update_disk_info(). Replace several
instances of ns->ctrl in nvme_update_disk_info() with the local ctrl
variable carried over from nvme_config_discard().
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
---
drivers/nvme/host/core.c | 43 ++++++++++++++++++----------------------
1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3a2126584a23..8dda2fe69789 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1880,30 +1880,10 @@ static bool nvme_init_integrity(struct nvme_ns_head *head,
bi->pi_offset = info->pi_offset;
}
return true;
}
-static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
-{
- struct nvme_ctrl *ctrl = ns->ctrl;
-
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
- lim->max_hw_discard_sectors =
- nvme_lba_to_sect(ns->head, ctrl->dmrsl);
- else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
- lim->max_hw_discard_sectors = UINT_MAX;
- else
- lim->max_hw_discard_sectors = 0;
-
- lim->discard_granularity = lim->logical_block_size;
-
- if (ctrl->dmrl)
- lim->max_discard_segments = ctrl->dmrl;
- else
- lim->max_discard_segments = NVME_DSM_MAX_RANGES;
-}
-
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
@@ -2078,10 +2058,11 @@ static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
struct queue_limits *lim)
{
struct nvme_ns_head *head = ns->head;
+ struct nvme_ctrl *ctrl = ns->ctrl;
u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
bool valid = true;
/*
@@ -2112,15 +2093,30 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
*/
lim->logical_block_size = bs;
lim->physical_block_size = min(phys_bs, atomic_bs);
lim->io_min = phys_bs;
lim->io_opt = io_opt;
- if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
- (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
+ if ((ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
+ (ctrl->oncs & NVME_CTRL_ONCS_DSM))
lim->max_write_zeroes_sectors = UINT_MAX;
else
- lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+ lim->max_write_zeroes_sectors = ctrl->max_zeroes_sectors;
+
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+ lim->max_hw_discard_sectors =
+ nvme_lba_to_sect(ns->head, ctrl->dmrsl);
+ else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ lim->max_hw_discard_sectors = UINT_MAX;
+ else
+ lim->max_hw_discard_sectors = 0;
+
+ lim->discard_granularity = lim->logical_block_size;
+
+ if (ctrl->dmrl)
+ lim->max_discard_segments = ctrl->dmrl;
+ else
+ lim->max_discard_segments = NVME_DSM_MAX_RANGES;
return valid;
}
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
{
@@ -2381,11 +2377,10 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
if (!nvme_update_disk_info(ns, id, &lim))
capacity = 0;
- nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
--
2.45.2
next prev parent reply other threads:[~2026-02-21 3:33 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-21 3:32 [PATCH v2 0/7] nvme: set discard_granularity from NPDG/NPDA Caleb Sander Mateos
2026-02-21 3:32 ` [PATCH v2 1/7] nvme: add preferred I/O size fields to struct nvme_id_ns_nvm Caleb Sander Mateos
2026-02-21 3:32 ` Caleb Sander Mateos [this message]
2026-02-24 14:29 ` [PATCH v2 2/7] nvme: fold nvme_config_discard() into nvme_update_disk_info() Christoph Hellwig
2026-02-21 3:32 ` [PATCH v2 3/7] nvme: update nvme_id_ns OPTPERF constants Caleb Sander Mateos
2026-02-24 14:30 ` Christoph Hellwig
2026-02-21 3:32 ` [PATCH v2 4/7] nvme: always issue I/O Command Set specific Identify Namespace Caleb Sander Mateos
2026-02-21 3:33 ` [PATCH v2 5/7] nvme: set discard_granularity from NPDG/NPDA Caleb Sander Mateos
2026-02-24 14:33 ` Christoph Hellwig
2026-02-24 15:15 ` Keith Busch
2026-02-24 16:05 ` Caleb Sander Mateos
2026-02-21 3:33 ` [PATCH v2 6/7] nvmet: use NVME_NS_FEAT_OPTPERF_SHIFT Caleb Sander Mateos
2026-02-21 3:33 ` [PATCH v2 7/7] nvmet: report NPDGL and NPDAL Caleb Sander Mateos
2026-02-24 14:34 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260221033302.1451669-3-csander@purestorage.com \
--to=csander@purestorage.com \
--cc=axboe@kernel.dk \
--cc=hch@lst.de \
--cc=kbusch@kernel.org \
--cc=kch@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox