From: Sam Li <faithilikerun@gmail.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, "Kevin Wolf" <kwolf@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
stefanha@redhat.com, "Peter Xu" <peterx@redhat.com>,
"David Hildenbrand" <david@redhat.com>,
dmitry.fomichev@wdc.com, hare@suse.de,
"Hanna Reitz" <hreitz@redhat.com>,
"Eric Blake" <eblake@redhat.com>,
"Markus Armbruster" <armbru@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
dlemoal@kernel.org, "Keith Busch" <kbusch@kernel.org>,
"Klaus Jensen" <its@irrelevant.dk>,
"Sam Li" <faithilikerun@gmail.com>
Subject: [RFC v3 3/7] hw/nvme: use blk_get_*() to access zone info in the block layer
Date: Mon, 22 Jan 2024 20:00:09 +0100 [thread overview]
Message-ID: <20240122190013.41302-4-faithilikerun@gmail.com> (raw)
In-Reply-To: <20240122190013.41302-1-faithilikerun@gmail.com>
The zone information is contained in the BlockLimits fields. Add blk_get_*() functions
to access the block layer, and update zone information accesses in the NVMe device emulation.
Signed-off-by: Sam Li <faithilikerun@gmail.com>
---
block/block-backend.c | 72 +++++++++++++++++++++++++++++++
hw/nvme/ctrl.c | 34 +++++----------
hw/nvme/ns.c | 61 ++++++++------------------
hw/nvme/nvme.h | 3 --
include/sysemu/block-backend-io.h | 9 ++++
5 files changed, 111 insertions(+), 68 deletions(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 209eb07528..c23f2a731b 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -2359,6 +2359,78 @@ int blk_get_max_iov(BlockBackend *blk)
return blk->root->bs->bl.max_iov;
}
+uint8_t blk_get_zone_model(BlockBackend *blk)
+{
+    BlockDriverState *bs = blk_bs(blk);
+    IO_CODE();
+
+    return bs ? bs->bl.zoned : 0;
+}
+
+uint32_t blk_get_zone_size(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.zone_size : 0;
+}
+
+uint32_t blk_get_zone_capacity(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.zone_capacity : 0;
+}
+
+uint32_t blk_get_max_open_zones(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.max_open_zones : 0;
+}
+
+uint32_t blk_get_max_active_zones(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.max_active_zones : 0;
+}
+
+uint32_t blk_get_max_append_sectors(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.max_append_sectors : 0;
+}
+
+uint32_t blk_get_nr_zones(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.nr_zones : 0;
+}
+
+uint32_t blk_get_write_granularity(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->bl.write_granularity : 0;
+}
+
+BlockZoneWps *blk_get_zone_wps(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
+
+ return bs ? bs->wps : NULL;
+}
+
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
IO_CODE();
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index f026245d1e..e64b021454 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -417,18 +417,6 @@ static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
static uint16_t nvme_zns_check_resources(NvmeNamespace *ns, uint32_t act,
uint32_t opn, uint32_t zrwa)
{
- if (ns->params.max_active_zones != 0 &&
- ns->nr_active_zones + act > ns->params.max_active_zones) {
- trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
- return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
- }
-
- if (ns->params.max_open_zones != 0 &&
- ns->nr_open_zones + opn > ns->params.max_open_zones) {
- trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
- return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
- }
-
if (zrwa > ns->zns.numzrwa) {
return NVME_NOZRWA | NVME_DNR;
}
@@ -1988,9 +1976,9 @@ static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone)
static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns)
{
NvmeZone *zone;
+ int moz = blk_get_max_open_zones(ns->blkconf.blk);
- if (ns->params.max_open_zones &&
- ns->nr_open_zones == ns->params.max_open_zones) {
+ if (moz && ns->nr_open_zones == moz) {
zone = QTAILQ_FIRST(&ns->imp_open_zones);
if (zone) {
/*
@@ -2160,7 +2148,7 @@ void nvme_rw_complete_cb(void *opaque, int ret)
block_acct_done(stats, acct);
}
- if (ns->params.zoned && nvme_is_write(req)) {
+ if (blk_get_zone_model(blk) && nvme_is_write(req)) {
nvme_finalize_zoned_write(ns, req);
}
@@ -2882,7 +2870,7 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret)
goto out;
}
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
nvme_advance_zone_wp(ns, iocb->zone, nlb);
}
@@ -2994,7 +2982,7 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
goto invalid;
}
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb);
if (status) {
goto invalid;
@@ -3088,7 +3076,7 @@ static void nvme_do_copy(NvmeCopyAIOCB *iocb)
}
}
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
status = nvme_check_zone_read(ns, slba, nlb);
if (status) {
goto invalid;
@@ -3164,7 +3152,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
iocb->slba = le64_to_cpu(copy->sdlba);
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba);
if (!iocb->zone) {
status = NVME_LBA_RANGE | NVME_DNR;
@@ -3434,7 +3422,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
goto invalid;
}
- if (ns->params.zoned) {
+ if (blk_get_zone_model(blk)) {
status = nvme_check_zone_read(ns, slba, nlb);
if (status) {
trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);
@@ -3549,7 +3537,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
goto invalid;
}
- if (ns->params.zoned) {
+ if (blk_get_zone_model(blk)) {
zone = nvme_get_zone_by_slba(ns, slba);
assert(zone);
@@ -3667,7 +3655,7 @@ static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c,
uint32_t dw10 = le32_to_cpu(c->cdw10);
uint32_t dw11 = le32_to_cpu(c->cdw11);
- if (!ns->params.zoned) {
+ if (!blk_get_zone_model(ns->blkconf.blk)) {
trace_pci_nvme_err_invalid_opc(c->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
@@ -6527,7 +6515,7 @@ done:
static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
{
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
return NVME_INVALID_FORMAT | NVME_DNR;
}
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 0eabcf5cf5..82d4f7932d 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -25,7 +25,6 @@
#include "trace.h"
#define MIN_DISCARD_GRANULARITY (4 * KiB)
-#define NVME_DEFAULT_ZONE_SIZE (128 * MiB)
void nvme_ns_init_format(NvmeNamespace *ns)
{
@@ -177,19 +176,11 @@ static int nvme_ns_init_blk(NvmeNamespace *ns, Error **errp)
static int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp)
{
- uint64_t zone_size, zone_cap;
+ BlockBackend *blk = ns->blkconf.blk;
+ uint64_t zone_size = blk_get_zone_size(blk);
+ uint64_t zone_cap = blk_get_zone_capacity(blk);
/* Make sure that the values of ZNS properties are sane */
- if (ns->params.zone_size_bs) {
- zone_size = ns->params.zone_size_bs;
- } else {
- zone_size = NVME_DEFAULT_ZONE_SIZE;
- }
- if (ns->params.zone_cap_bs) {
- zone_cap = ns->params.zone_cap_bs;
- } else {
- zone_cap = zone_size;
- }
if (zone_cap > zone_size) {
error_setg(errp, "zone capacity %"PRIu64"B exceeds "
"zone size %"PRIu64"B", zone_cap, zone_size);
@@ -266,6 +257,7 @@ static void nvme_ns_zoned_init_state(NvmeNamespace *ns)
static void nvme_ns_init_zoned(NvmeNamespace *ns)
{
+ BlockBackend *blk = ns->blkconf.blk;
NvmeIdNsZoned *id_ns_z;
int i;
@@ -274,8 +266,8 @@ static void nvme_ns_init_zoned(NvmeNamespace *ns)
id_ns_z = g_new0(NvmeIdNsZoned, 1);
/* MAR/MOR are zeroes-based, FFFFFFFFFh means no limit */
- id_ns_z->mar = cpu_to_le32(ns->params.max_active_zones - 1);
- id_ns_z->mor = cpu_to_le32(ns->params.max_open_zones - 1);
+ id_ns_z->mar = cpu_to_le32(blk_get_max_active_zones(blk) - 1);
+ id_ns_z->mor = cpu_to_le32(blk_get_max_open_zones(blk) - 1);
id_ns_z->zoc = 0;
id_ns_z->ozcs = ns->params.cross_zone_read ?
NVME_ID_NS_ZONED_OZCS_RAZB : 0x00;
@@ -539,6 +531,7 @@ static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp)
static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
{
+ BlockBackend *blk = ns->blkconf.blk;
unsigned int pi_size;
if (!ns->blkconf.blk) {
@@ -577,25 +570,12 @@ static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
return -1;
}
- if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) {
+ if (blk_get_zone_model(blk) && ns->endgrp && ns->endgrp->fdp.enabled) {
error_setg(errp, "cannot be a zoned- in an FDP configuration");
return -1;
}
- if (ns->params.zoned) {
- if (ns->params.max_active_zones) {
- if (ns->params.max_open_zones > ns->params.max_active_zones) {
- error_setg(errp, "max_open_zones (%u) exceeds "
- "max_active_zones (%u)", ns->params.max_open_zones,
- ns->params.max_active_zones);
- return -1;
- }
-
- if (!ns->params.max_open_zones) {
- ns->params.max_open_zones = ns->params.max_active_zones;
- }
- }
-
+ if (blk_get_zone_model(blk)) {
if (ns->params.zd_extension_size) {
if (ns->params.zd_extension_size & 0x3f) {
error_setg(errp, "zone descriptor extension size must be a "
@@ -630,14 +610,14 @@ static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
return -1;
}
- if (ns->params.max_active_zones) {
- if (ns->params.numzrwa > ns->params.max_active_zones) {
+ int maz = blk_get_max_active_zones(blk);
+ if (maz) {
+ if (ns->params.numzrwa > maz) {
error_setg(errp, "number of zone random write area "
"resources (zoned.numzrwa, %d) must be less "
"than or equal to maximum active resources "
"(zoned.max_active_zones, %d)",
- ns->params.numzrwa,
- ns->params.max_active_zones);
+ ns->params.numzrwa, maz);
return -1;
}
}
@@ -660,7 +640,7 @@ int nvme_ns_setup(NvmeNamespace *ns, Error **errp)
if (nvme_ns_init(ns, errp)) {
return -1;
}
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
if (nvme_ns_zoned_check_calc_geometry(ns, errp) != 0) {
return -1;
}
@@ -683,15 +663,17 @@ void nvme_ns_drain(NvmeNamespace *ns)
void nvme_ns_shutdown(NvmeNamespace *ns)
{
- blk_flush(ns->blkconf.blk);
- if (ns->params.zoned) {
+
+ BlockBackend *blk = ns->blkconf.blk;
+ blk_flush(blk);
+ if (blk_get_zone_model(blk)) {
nvme_zoned_ns_shutdown(ns);
}
}
void nvme_ns_cleanup(NvmeNamespace *ns)
{
- if (ns->params.zoned) {
+ if (blk_get_zone_model(ns->blkconf.blk)) {
g_free(ns->id_ns_zoned);
g_free(ns->zone_array);
g_free(ns->zd_extensions);
@@ -806,11 +788,6 @@ static Property nvme_ns_props[] = {
DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128),
DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128),
DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127),
- DEFINE_PROP_BOOL("zoned", NvmeNamespace, params.zoned, false),
- DEFINE_PROP_SIZE("zoned.zone_size", NvmeNamespace, params.zone_size_bs,
- NVME_DEFAULT_ZONE_SIZE),
- DEFINE_PROP_SIZE("zoned.zone_capacity", NvmeNamespace, params.zone_cap_bs,
- 0),
DEFINE_PROP_BOOL("zoned.cross_read", NvmeNamespace,
params.cross_zone_read, false),
DEFINE_PROP_UINT32("zoned.max_active", NvmeNamespace,
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 5f2ae7b28b..76677a86e9 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -189,10 +189,7 @@ typedef struct NvmeNamespaceParams {
uint32_t mcl;
uint8_t msrc;
- bool zoned;
bool cross_zone_read;
- uint64_t zone_size_bs;
- uint64_t zone_cap_bs;
uint32_t max_active_zones;
uint32_t max_open_zones;
uint32_t zd_extension_size;
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
index d174275a5c..44e44954fa 100644
--- a/include/sysemu/block-backend-io.h
+++ b/include/sysemu/block-backend-io.h
@@ -99,6 +99,15 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action,
void blk_iostatus_set_err(BlockBackend *blk, int error);
int blk_get_max_iov(BlockBackend *blk);
int blk_get_max_hw_iov(BlockBackend *blk);
+uint8_t blk_get_zone_model(BlockBackend *blk);
+uint32_t blk_get_zone_size(BlockBackend *blk);
+uint32_t blk_get_zone_capacity(BlockBackend *blk);
+uint32_t blk_get_max_open_zones(BlockBackend *blk);
+uint32_t blk_get_max_active_zones(BlockBackend *blk);
+uint32_t blk_get_max_append_sectors(BlockBackend *blk);
+uint32_t blk_get_nr_zones(BlockBackend *blk);
+uint32_t blk_get_write_granularity(BlockBackend *blk);
+BlockZoneWps *blk_get_zone_wps(BlockBackend *blk);
AioContext *blk_get_aio_context(BlockBackend *blk);
BlockAcctStats *blk_get_stats(BlockBackend *blk);
--
2.40.1
next prev parent reply other threads:[~2024-01-22 19:01 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-22 19:00 [RFC v3 0/7] Add persistence to NVMe ZNS emulation Sam Li
2024-01-22 19:00 ` [RFC v3 1/7] docs/qcow2: add zd_extension_size option to the zoned format feature Sam Li
2024-01-22 19:00 ` [RFC v3 2/7] qcow2: add zd_extension configurations to zoned metadata Sam Li
2024-01-22 19:00 ` Sam Li [this message]
2024-01-22 19:00 ` [RFC v3 4/7] hw/nvme: add blk_get_zone_extension to access zd_extensions Sam Li
2024-01-22 19:00 ` [RFC v3 5/7] hw/nvme: make the metadata of ZNS emulation persistent Sam Li
2024-01-22 19:00 ` [RFC v3 6/7] hw/nvme: refactor zone append write using block layer APIs Sam Li
2024-01-22 19:00 ` [RFC v3 7/7] hw/nvme: make ZDED persistent Sam Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240122190013.41302-4-faithilikerun@gmail.com \
--to=faithilikerun@gmail.com \
--cc=armbru@redhat.com \
--cc=david@redhat.com \
--cc=dlemoal@kernel.org \
--cc=dmitry.fomichev@wdc.com \
--cc=eblake@redhat.com \
--cc=hare@suse.de \
--cc=hreitz@redhat.com \
--cc=its@irrelevant.dk \
--cc=kbusch@kernel.org \
--cc=kwolf@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).