From: Minwoo Im <minwoo.im.dev@gmail.com>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
Klaus Jensen <k.jensen@samsung.com>,
Max Reitz <mreitz@redhat.com>, Klaus Jensen <its@irrelevant.dk>,
Minwoo Im <minwoo.im.dev@gmail.com>,
Keith Busch <kbusch@kernel.org>
Subject: [PATCH V4 4/8] hw/block/nvme: support allocated namespace type
Date: Tue, 2 Mar 2021 22:26:13 +0900
Message-ID: <20210302132617.18495-5-minwoo.im.dev@gmail.com>
In-Reply-To: <20210302132617.18495-1-minwoo.im.dev@gmail.com>
NVMe spec 1.4b, section 6.1.5 "NSID and Namespace Relationships",
defines the valid namespace types:

- Unallocated: Does not exist in the NVMe subsystem
- Allocated: Exists in the NVMe subsystem
- Inactive: Not attached to the controller
- Active: Attached to the controller
This patch adds support for the allocated, but not attached (inactive)
namespace type, detected by:

    !nvme_ns(n, nsid) && nvme_subsys_ns(n->subsys, nsid)

nvme_ns() returns the namespace instance attached to the given
controller, while nvme_subsys_ns() returns the namespace instance
allocated in the subsystem.
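
For illustration only (this helper is hypothetical and not part of the
patch), the two lookups combine into an "allocated but inactive" test
like this:

    /*
     * Sketch: an NSID is inactive if it is allocated in the subsystem
     * but not attached to this controller.
     */
    static inline bool nvme_nsid_inactive(NvmeCtrl *n, uint32_t nsid)
    {
        return !nvme_ns(n, nsid) && nvme_subsys_ns(n->subsys, nsid);
    }

This is the condition the Identify handlers below apply when the caller
asks for "present" (allocated) rather than "active" namespaces.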
Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
Tested-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/block/nvme-subsys.h | 13 +++++++++
hw/block/nvme.c | 63 +++++++++++++++++++++++++++++++-----------
2 files changed, 60 insertions(+), 16 deletions(-)
diff --git a/hw/block/nvme-subsys.h b/hw/block/nvme-subsys.h
index 8a0732b22316..14627f9ccb41 100644
--- a/hw/block/nvme-subsys.h
+++ b/hw/block/nvme-subsys.h
@@ -30,4 +30,17 @@ typedef struct NvmeSubsystem {
int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
int nvme_subsys_register_ns(NvmeNamespace *ns, Error **errp);
+/*
+ * Return allocated namespace of the specified nsid in the subsystem.
+ */
+static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
+ uint32_t nsid)
+{
+ if (!subsys) {
+ return NULL;
+ }
+
+ return subsys->namespaces[nsid];
+}
+
#endif /* NVME_SUBSYS_H */
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index f6aeae081840..53c4d59e09a7 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -3225,7 +3225,7 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
-static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3239,7 +3239,14 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
- return nvme_rpt_empty_id_struct(n, req);
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
}
if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
@@ -3250,7 +3257,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_CMD_SET | NVME_DNR;
}
-static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3264,7 +3272,14 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
- return nvme_rpt_empty_id_struct(n, req);
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, nsid);
+ if (!ns) {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
+ } else {
+ return nvme_rpt_empty_id_struct(n, req);
+ }
}
if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
@@ -3277,7 +3292,8 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3302,7 +3318,14 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
for (i = 1; i <= n->num_namespaces; i++) {
ns = nvme_ns(n, i);
if (!ns) {
- continue;
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+ } else {
+ continue;
+ }
}
if (ns->params.nsid <= min_nsid) {
continue;
@@ -3316,7 +3339,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
}
-static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
+ bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -3342,7 +3366,14 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
for (i = 1; i <= n->num_namespaces; i++) {
ns = nvme_ns(n, i);
if (!ns) {
- continue;
+ if (!active) {
+ ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+ } else {
+ continue;
+ }
}
if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
continue;
@@ -3422,25 +3453,25 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
switch (le32_to_cpu(c->cns)) {
case NVME_ID_CNS_NS:
- /* fall through */
+ return nvme_identify_ns(n, req, true);
case NVME_ID_CNS_NS_PRESENT:
- return nvme_identify_ns(n, req);
+ return nvme_identify_ns(n, req, false);
case NVME_ID_CNS_CS_NS:
- /* fall through */
+ return nvme_identify_ns_csi(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT:
- return nvme_identify_ns_csi(n, req);
+ return nvme_identify_ns_csi(n, req, false);
case NVME_ID_CNS_CTRL:
return nvme_identify_ctrl(n, req);
case NVME_ID_CNS_CS_CTRL:
return nvme_identify_ctrl_csi(n, req);
case NVME_ID_CNS_NS_ACTIVE_LIST:
- /* fall through */
+ return nvme_identify_nslist(n, req, true);
case NVME_ID_CNS_NS_PRESENT_LIST:
- return nvme_identify_nslist(n, req);
+ return nvme_identify_nslist(n, req, false);
case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
- /* fall through */
+ return nvme_identify_nslist_csi(n, req, true);
case NVME_ID_CNS_CS_NS_PRESENT_LIST:
- return nvme_identify_nslist_csi(n, req);
+ return nvme_identify_nslist_csi(n, req, false);
case NVME_ID_CNS_NS_DESCR_LIST:
return nvme_identify_ns_descr_list(n, req);
case NVME_ID_CNS_IO_COMMAND_SET:
--
2.27.0
Thread overview: 11+ messages
2021-03-02 13:26 [PATCH V4 0/8] hw/block/nvme: support namespace attachment Minwoo Im
2021-03-02 13:26 ` [PATCH V4 1/8] hw/block/nvme: support namespace detach Minwoo Im
2021-03-02 13:26 ` [PATCH V4 2/8] hw/block/nvme: fix namespaces array to 1-based Minwoo Im
2021-03-02 13:26 ` [PATCH V4 3/8] hw/block/nvme: fix allocated namespace list to 256 Minwoo Im
2021-03-02 13:26 ` [PATCH V4 4/8] hw/block/nvme: support allocated namespace type Minwoo Im [this message]
2021-03-02 13:26 ` [PATCH V4 5/8] hw/block/nvme: refactor nvme_select_ns_iocs Minwoo Im
2021-03-02 13:26 ` [PATCH V4 6/8] hw/block/nvme: support namespace attachment command Minwoo Im
2021-03-02 13:26 ` [PATCH V4 7/8] hw/block/nvme: support changed namespace asynchronous event Minwoo Im
2021-03-02 13:26 ` [PATCH V4 8/8] hw/block/nvme: support Identify NS Attached Controller List Minwoo Im
2021-03-04 19:33 ` [PATCH V4 0/8] hw/block/nvme: support namespace attachment Keith Busch
2021-03-08 11:30 ` Klaus Jensen