From: Klaus Jensen <its@irrelevant.dk>
To: Dmitry Fomichev <dmitry.fomichev@wdc.com>
Cc: "Fam Zheng" <fam@euphon.net>, "Kevin Wolf" <kwolf@redhat.com>,
	"Damien Le Moal" <damien.lemoal@wdc.com>,
	qemu-block@nongnu.org, "Niklas Cassel" <niklas.cassel@wdc.com>,
	"Klaus Jensen" <k.jensen@samsung.com>,
	qemu-devel@nongnu.org, "Maxim Levitsky" <mlevitsk@redhat.com>,
	"Alistair Francis" <alistair.francis@wdc.com>,
	"Keith Busch" <kbusch@kernel.org>,
	"Philippe Mathieu-Daudé" <philmd@redhat.com>,
	"Matias Bjorling" <matias.bjorling@wdc.com>
Subject: Re: [PATCH v7 03/11] hw/block/nvme: Add support for Namespace Types
Date: Mon, 19 Oct 2020 22:53:38 +0200
Message-ID: <20201019205338.GI10549@apples.localdomain>
In-Reply-To: <20201019021726.12048-4-dmitry.fomichev@wdc.com>

On Oct 19 11:17, Dmitry Fomichev wrote:
> From: Niklas Cassel <niklas.cassel@wdc.com>
> 
> Define the structures and constants required to implement
> Namespace Types support.
> 
> Namespace Types introduce a new command set, "I/O Command Sets",
> that allows the host to retrieve the command sets associated with
> a namespace. Introduce support for the command set and enable
> detection for the NVM Command Set.
> 
> The new workflows for identify commands rely heavily on zero-filled
> identify structs. E.g., certain CNS commands are defined to return
> a zero-filled identify struct when an inactive namespace NSID
> is supplied.
> 
> Add a helper function in order to avoid code duplication when
> reporting zero-filled identify structures.
> 
> Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
> Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
> ---
>  hw/block/nvme-ns.c    |   2 +
>  hw/block/nvme-ns.h    |   1 +
>  hw/block/nvme.c       | 169 +++++++++++++++++++++++++++++++++++-------
>  hw/block/trace-events |   7 ++
>  include/block/nvme.h  |  65 ++++++++++++----
>  5 files changed, 202 insertions(+), 42 deletions(-)
> 
> diff --git a/hw/block/nvme-ns.c b/hw/block/nvme-ns.c
> index de735eb9f3..c0362426cc 100644
> --- a/hw/block/nvme-ns.c
> +++ b/hw/block/nvme-ns.c
> @@ -41,6 +41,8 @@ static void nvme_ns_init(NvmeNamespace *ns)
>  
>      id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(ns));
>  
> +    ns->csi = NVME_CSI_NVM;
> +
>      /* no thin provisioning */
>      id_ns->ncap = id_ns->nsze;
>      id_ns->nuse = id_ns->ncap;
> diff --git a/hw/block/nvme-ns.h b/hw/block/nvme-ns.h
> index a38071884a..d795e44bab 100644
> --- a/hw/block/nvme-ns.h
> +++ b/hw/block/nvme-ns.h
> @@ -31,6 +31,7 @@ typedef struct NvmeNamespace {
>      int64_t      size;
>      NvmeIdNs     id_ns;
>      const uint32_t *iocs;
> +    uint8_t      csi;
>  
>      NvmeNamespaceParams params;
>  } NvmeNamespace;
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 29139d8a17..ca0d0abf5c 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -1503,6 +1503,13 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
>      return NVME_SUCCESS;
>  }
>  
> +static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

[-pedantic] empty initializer list
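
For reference, the portable spelling (pre-C23 ISO C treats '{}' as a GNU
extension) would be something like:

    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {0}; /* zero-fills the whole array */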

> +
> +    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
> +}
> +
>  static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
>  {
>      trace_pci_nvme_identify_ctrl();
> @@ -1511,11 +1518,23 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
>                      DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> +static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> +
> +    trace_pci_nvme_identify_ctrl_csi(c->csi);
> +
> +    if (c->csi == NVME_CSI_NVM) {
> +        return nvme_rpt_empty_id_struct(n, req);
> +    }
> +
> +    return NVME_INVALID_FIELD | NVME_DNR;
> +}
> +
>  static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
>  {
>      NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> -    NvmeIdNs *id_ns, inactive = { 0 };
>      uint32_t nsid = le32_to_cpu(c->nsid);
>  
>      trace_pci_nvme_identify_ns(nsid);
> @@ -1526,23 +1545,46 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
>  
>      ns = nvme_ns(n, nsid);
>      if (unlikely(!ns)) {
> -        id_ns = &inactive;
> -    } else {
> -        id_ns = &ns->id_ns;
> +        return nvme_rpt_empty_id_struct(n, req);
>      }
>  
> -    return nvme_dma(n, (uint8_t *)id_ns, sizeof(NvmeIdNs),
> +    return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
>                      DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> +static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    NvmeNamespace *ns;
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> +    uint32_t nsid = le32_to_cpu(c->nsid);
> +
> +    trace_pci_nvme_identify_ns_csi(nsid, c->csi);
> +
> +    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
> +        return NVME_INVALID_NSID | NVME_DNR;
> +    }
> +
> +    ns = nvme_ns(n, nsid);
> +    if (unlikely(!ns)) {
> +        return nvme_rpt_empty_id_struct(n, req);
> +    }
> +
> +    if (c->csi == NVME_CSI_NVM) {
> +        return nvme_rpt_empty_id_struct(n, req);
> +    }
> +
> +    return NVME_INVALID_FIELD | NVME_DNR;
> +}
> +
>  static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> -    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
>      uint32_t min_nsid = le32_to_cpu(c->nsid);
> -    uint32_t *list;
> -    uint16_t ret;
> -    int j = 0;
> +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};

[-pedantic] empty initializer list

> +    static const int data_len = sizeof(list);
> +    uint32_t *list_ptr = (uint32_t *)list;
> +    int i, j = 0;
>  
>      trace_pci_nvme_identify_nslist(min_nsid);
>  
> @@ -1556,20 +1598,54 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
>          return NVME_INVALID_NSID | NVME_DNR;
>      }
>  
> -    list = g_malloc0(data_len);
> -    for (int i = 1; i <= n->num_namespaces; i++) {
> -        if (i <= min_nsid || !nvme_ns(n, i)) {
> +    for (i = 1; i <= n->num_namespaces; i++) {
> +        ns = nvme_ns(n, i);
> +        if (!ns) {
>              continue;
>          }
> -        list[j++] = cpu_to_le32(i);
> +        if (ns->params.nsid < min_nsid) {

Since i == ns->params.nsid, this should be '<=' like the code you
removed. It really shouldn't be called min_nsid, but oh well.
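
That is, something like this sketch:

    if (ns->params.nsid <= min_nsid) {
        continue;
    }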

> +            continue;
> +        }
> +        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
>          if (j == data_len / sizeof(uint32_t)) {
>              break;
>          }
>      }
> -    ret = nvme_dma(n, (uint8_t *)list, data_len, DMA_DIRECTION_FROM_DEVICE,
> -                   req);
> -    g_free(list);
> -    return ret;
> +
> +    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
> +}
> +
> +static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    NvmeNamespace *ns;
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> +    uint32_t min_nsid = le32_to_cpu(c->nsid);
> +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
> +    static const int data_len = sizeof(list);
> +    uint32_t *list_ptr = (uint32_t *)list;
> +    int i, j = 0;
> +
> +    trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);
> +
> +    if (c->csi != NVME_CSI_NVM) {
> +        return NVME_INVALID_FIELD | NVME_DNR;
> +    }
> +

This is missing the check for 0xffffffff and 0xfffffffe that
nvme_identify_nslist() does above.
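
Something along the lines of this sketch (mirroring the check above):

    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }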

> +    for (i = 1; i <= n->num_namespaces; i++) {
> +        ns = nvme_ns(n, i);
> +        if (!ns) {
> +            continue;
> +        }
> +        if (ns->params.nsid < min_nsid) {

Should be '<='.

> +            continue;
> +        }
> +        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
> +        if (j == data_len / sizeof(uint32_t)) {
> +            break;
> +        }
> +    }
> +
> +    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
>  static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
> @@ -1577,13 +1653,17 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
>      NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
>      uint32_t nsid = le32_to_cpu(c->nsid);
> -    uint8_t list[NVME_IDENTIFY_DATA_SIZE];
> +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};

[-pedantic] empty initializer list

>  
>      struct data {
>          struct {
>              NvmeIdNsDescr hdr;
> -            uint8_t v[16];
> +            uint8_t v[NVME_NIDL_UUID];
>          } uuid;
> +        struct {
> +            NvmeIdNsDescr hdr;
> +            uint8_t v;
> +        } csi;
>      };
>  
>      struct data *ns_descrs = (struct data *)list;
> @@ -1599,19 +1679,31 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
>          return NVME_INVALID_FIELD | NVME_DNR;
>      }
>  
> -    memset(list, 0x0, sizeof(list));
> -
>      /*
>       * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
>       * structure, a Namespace UUID (nidt = 0x3) must be reported in the
>       * Namespace Identification Descriptor. Add the namespace UUID here.
>       */
>      ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
> -    ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
> -    memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDT_UUID_LEN);
> +    ns_descrs->uuid.hdr.nidl = NVME_NIDL_UUID;
> +    memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);
>  
> -    return nvme_dma(n, list, NVME_IDENTIFY_DATA_SIZE,
> -                    DMA_DIRECTION_FROM_DEVICE, req);
> +    ns_descrs->csi.hdr.nidt = NVME_NIDT_CSI;
> +    ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
> +    ns_descrs->csi.v = ns->csi;
> +
> +    return nvme_dma(n, list, sizeof(list), DMA_DIRECTION_FROM_DEVICE, req);
> +}
> +
> +static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};

[-pedantic] empty initializer list

> +    static const int data_len = sizeof(list);
> +
> +    trace_pci_nvme_identify_cmd_set();
> +
> +    NVME_SET_CSI(*list, NVME_CSI_NVM);
> +    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  

-- 
One of us - No more doubt, silence or taboo about mental illness.
