From: Minwoo Im <minwoo.im.dev@gmail.com>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Cc: Keith Busch <kbusch@kernel.org>, Klaus Jensen <its@irrelevant.dk>,
Minwoo Im <minwoo.im.dev@gmail.com>,
Kevin Wolf <kwolf@redhat.com>, Max Reitz <mreitz@redhat.com>
Subject: [PATCH 4/6] hw/block/nvme: support allocated namespace type
Date: Sat, 6 Feb 2021 12:36:27 +0900
Message-ID: <20210206033629.4278-5-minwoo.im.dev@gmail.com>
In-Reply-To: <20210206033629.4278-1-minwoo.im.dev@gmail.com>
NVMe spec 1.4b, section 6.1.5 "NSID and Namespace Relationships",
defines the valid namespace types:
- Unallocated: Does not exist in the NVMe subsystem
- Allocated: Exists in the NVMe subsystem
- Inactive: Not attached to the controller
- Active: Attached to the controller
This patch adds support for the allocated, but not attached (inactive)
namespace type, detected by:
  !nvme_ns(n, nsid) && nvme_subsys_ns(n->subsys, nsid)
nvme_ns() returns the namespace instance attached to the given
controller, and nvme_subsys_ns() returns the namespace instance
allocated in the subsystem.
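For illustration only, the lookup can be read as the condensed sketch
below; nvme_ns_or_allocated() is a hypothetical helper that does not
exist in this patch, which instead open-codes the same fallback in each
identify handler changed in the hunks that follow:
    /*
     * Hypothetical helper (illustration only): resolve a namespace for
     * an Identify command.  The "active" flavors only see namespaces
     * attached to this controller; the "present" (allocated) flavors
     * fall back to the subsystem-wide namespace array.
     */
    static NvmeNamespace *nvme_ns_or_allocated(NvmeCtrl *n, uint32_t nsid,
                                               bool active)
    {
        NvmeNamespace *ns = nvme_ns(n, nsid);  /* attached to controller */
        if (!ns && !active) {
            ns = nvme_subsys_ns(n->subsys, nsid);  /* allocated in subsystem */
        }
        return ns;  /* NULL: unallocated, or inactive when active == true */
    }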
Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
---
hw/block/nvme-subsys.h | 13 +++++++++
hw/block/nvme.c | 63 +++++++++++++++++++++++++++++++-----------
2 files changed, 60 insertions(+), 16 deletions(-)
diff --git a/hw/block/nvme-subsys.h b/hw/block/nvme-subsys.h
index 8a0732b22316..14627f9ccb41 100644
--- a/hw/block/nvme-subsys.h
+++ b/hw/block/nvme-subsys.h
@@ -30,4 +30,17 @@ typedef struct NvmeSubsystem {
int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
int nvme_subsys_register_ns(NvmeNamespace *ns, Error **errp);
+/*
+ * Return allocated namespace of the specified nsid in the subsystem.
+ */
+static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
+ uint32_t nsid)
+{
+ if (!subsys) {
+ return NULL;
+ }
+
+ return subsys->namespaces[nsid];
+}
+
#endif /* NVME_SUBSYS_H */
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index a1e930f7c8e4..d1761a82731f 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -3124,7 +3124,7 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
-static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3138,7 +3138,14 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
- return nvme_rpt_empty_id_struct(n, req);
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
}
if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
@@ -3149,7 +3156,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_CMD_SET | NVME_DNR;
}
-static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3163,7 +3171,14 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
- return nvme_rpt_empty_id_struct(n, req);
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
}
if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
@@ -3176,7 +3191,8 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3201,7 +3217,14 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
for (i = 1; i <= n->num_namespaces; i++) {
ns = nvme_ns(n, i);
if (!ns) {
- continue;
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+ } else {
+ continue;
+ }
}
if (ns->params.nsid <= min_nsid) {
continue;
@@ -3215,7 +3238,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
}
-static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3241,7 +3265,14 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
for (i = 1; i <= n->num_namespaces; i++) {
ns = nvme_ns(n, i);
if (!ns) {
- continue;
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+ } else {
+ continue;
+ }
}
if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
continue;
@@ -3321,25 +3352,25 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
switch (le32_to_cpu(c->cns)) {
case NVME_ID_CNS_NS:
- /* fall through */
+ return nvme_identify_ns(n, req, true);
case NVME_ID_CNS_NS_PRESENT:
- return nvme_identify_ns(n, req);
+ return nvme_identify_ns(n, req, false);
case NVME_ID_CNS_CS_NS:
- /* fall through */
+ return nvme_identify_ns_csi(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT:
- return nvme_identify_ns_csi(n, req);
+ return nvme_identify_ns_csi(n, req, false);
case NVME_ID_CNS_CTRL:
return nvme_identify_ctrl(n, req);
case NVME_ID_CNS_CS_CTRL:
return nvme_identify_ctrl_csi(n, req);
case NVME_ID_CNS_NS_ACTIVE_LIST:
- /* fall through */
+ return nvme_identify_nslist(n, req, true);
case NVME_ID_CNS_NS_PRESENT_LIST:
- return nvme_identify_nslist(n, req);
+ return nvme_identify_nslist(n, req, false);
case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
- /* fall through */
+ return nvme_identify_nslist_csi(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT_LIST:
- return nvme_identify_nslist_csi(n, req);
+ return nvme_identify_nslist_csi(n, req, false);
case NVME_ID_CNS_NS_DESCR_LIST:
return nvme_identify_ns_descr_list(n, req);
case NVME_ID_CNS_IO_COMMAND_SET:
--
2.17.1
Thread overview: 7+ messages
2021-02-06 3:36 [PATCH 0/6] hw/block/nvme: support namespace attachment Minwoo Im
2021-02-06 3:36 ` [PATCH 1/6] hw/block/nvme: support namespace detach Minwoo Im
2021-02-06 3:36 ` [PATCH 2/6] hw/block/nvme: fix namespaces array to 1-based Minwoo Im
2021-02-06 3:36 ` [PATCH 3/6] hw/block/nvme: fix allocated namespace list to 256 Minwoo Im
2021-02-06 3:36 ` [PATCH 4/6] hw/block/nvme: support allocated namespace type Minwoo Im [this message]
2021-02-06 3:36 ` [PATCH 5/6] hw/block/nvme: refactor nvme_select_ns_iocs Minwoo Im
2021-02-06 3:36 ` [PATCH 6/6] hw/block/nvme: support namespace attachment command Minwoo Im