From: Minwoo Im <minwoo.im.dev@gmail.com>
To: Klaus Jensen <its@irrelevant.dk>
Cc: Kevin Wolf <kwolf@redhat.com>, Fam Zheng <fam@euphon.net>,
	qemu-block@nongnu.org, Klaus Jensen <k.jensen@samsung.com>,
	Gollu Appalanaidu <anaidu.gollu@samsung.com>,
	qemu-devel@nongnu.org, Max Reitz <mreitz@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Keith Busch <kbusch@kernel.org>
Subject: Re: [PATCH 3/3] hw/block/nvme: report non-mdts command size limit for dsm
Date: Mon, 22 Feb 2021 21:11:05 +0900
Message-ID: <20210222121105.GD2856@localhost.localdomain>
In-Reply-To: <20210222070615.9177-4-its@irrelevant.dk>

On 21-02-22 08:06:15, Klaus Jensen wrote:
> From: Gollu Appalanaidu <anaidu.gollu@samsung.com>
> 
> Dataset Management is not subject to MDTS, but exceeding a certain size
> per range causes internal looping. Report this limit (DMRSL) in the NVM
> command set specific identify controller data structure.
> 
> Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com>
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
>  hw/block/nvme.h       |  1 +
>  include/block/nvme.h  | 11 +++++++++++
>  hw/block/nvme.c       | 30 ++++++++++++++++++++----------
>  hw/block/trace-events |  1 +
>  4 files changed, 33 insertions(+), 10 deletions(-)
> 
> diff --git a/hw/block/nvme.h b/hw/block/nvme.h
> index cb2b5175f1a1..3046b82b3da1 100644
> --- a/hw/block/nvme.h
> +++ b/hw/block/nvme.h
> @@ -172,6 +172,7 @@ typedef struct NvmeCtrl {
>      int         aer_queued;
>  
>      uint8_t     zasl;
> +    uint32_t    dmrsl;
>  
>      NvmeSubsystem   *subsys;
>  
> diff --git a/include/block/nvme.h b/include/block/nvme.h
> index b23f3ae2279f..16d8c4c90f7e 100644
> --- a/include/block/nvme.h
> +++ b/include/block/nvme.h
> @@ -1041,6 +1041,16 @@ typedef struct NvmeIdCtrlZoned {
>      uint8_t     rsvd1[4095];
>  } NvmeIdCtrlZoned;
>  
> +typedef struct NvmeIdCtrlNvm {
> +    uint8_t     vsl;
> +    uint8_t     wzsl;
> +    uint8_t     wusl;
> +    uint8_t     dmrl;
> +    uint32_t    dmrsl;
> +    uint64_t    dmsl;
> +    uint8_t     rsvd16[4080];
> +} NvmeIdCtrlNvm;
> +
>  enum NvmeIdCtrlOacs {
>      NVME_OACS_SECURITY  = 1 << 0,
>      NVME_OACS_FORMAT    = 1 << 1,
> @@ -1396,6 +1406,7 @@ static inline void _nvme_check_size(void)
>      QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096);
> +    QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlNvm) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
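
The new NvmeIdCtrlNvm struct looks good to me.  Purely as a sanity check on
my end (assuming I am reading the NVM Command Set spec figure right, DMRSL
sits at bytes 7:4 and DMSL at bytes 15:8), something like the following in
_nvme_check_size() would also catch accidental padding, although the sizeof
check you already add probably covers this in practice:

	QEMU_BUILD_BUG_ON(offsetof(NvmeIdCtrlNvm, dmrsl) != 4);
	QEMU_BUILD_BUG_ON(offsetof(NvmeIdCtrlNvm, dmsl) != 8);
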
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 897b9ff0db91..5d6bba5fcb0d 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -1777,6 +1777,10 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
>              trace_pci_nvme_dsm_deallocate(nvme_cid(req), nvme_nsid(ns), slba,
>                                            nlb);
>  
> +            if (nlb > n->dmrsl) {
> +                trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl);
> +            }
> +
>              offset = nvme_l2b(ns, slba);
>              len = nvme_l2b(ns, nlb);
>  
> @@ -3202,21 +3206,24 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
>  static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
>  {
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> -    NvmeIdCtrlZoned id = {};
> +    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
>  
>      trace_pci_nvme_identify_ctrl_csi(c->csi);
>  
> -    if (c->csi == NVME_CSI_NVM) {
> -        return nvme_rpt_empty_id_struct(n, req);
> -    } else if (c->csi == NVME_CSI_ZONED) {
> -        if (n->params.zasl_bs) {
> -            id.zasl = n->zasl;
> -        }
> -        return nvme_dma(n, (uint8_t *)&id, sizeof(id),
> -                        DMA_DIRECTION_FROM_DEVICE, req);
> +    switch (c->csi) {
> +    case NVME_CSI_NVM:
> +        ((NvmeIdCtrlNvm *)&id)->dmrsl = cpu_to_le32(n->dmrsl);
> +        break;
> +
> +    case NVME_CSI_ZONED:
> +        ((NvmeIdCtrlZoned *)&id)->zasl = n->zasl;

Question: are we okay without checking this like above? :)

	if (n->params.zasl_bs) {
		((NvmeIdCtrlZoned *)&id)->zasl = n->zasl;
	}

> +        break;
> +
> +    default:
> +        return NVME_INVALID_FIELD | NVME_DNR;
>      }
>  
> -    return NVME_INVALID_FIELD | NVME_DNR;
> +    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
>  static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
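
Apart from the question above, the switch reads cleaner than the previous
if/else chain.  Just to confirm I follow how the value surfaces to the host,
a minimal consumer would look something like this (illustrative only; it
assumes CNS 06h/CSI 00h returns the buffer filled here and that the layout
matches NvmeIdCtrlNvm):

	uint8_t buf[NVME_IDENTIFY_DATA_SIZE];

	/* ... issue Identify with CNS 0x06, CSI 0x00 and DMA into buf ... */

	uint32_t dmrsl = le32_to_cpu(((NvmeIdCtrlNvm *)buf)->dmrsl);
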
> @@ -4670,6 +4677,9 @@ int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
>  
>      n->namespaces[nsid - 1] = ns;
>  
> +    n->dmrsl = MIN_NON_ZERO(n->dmrsl,
> +                            BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
> +
>      return 0;
>  }
>  
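
Not a comment on the change itself, but to double-check my understanding of
the resulting value, assuming BDRV_REQUEST_MAX_BYTES is INT_MAX rounded down
to a 512-byte boundary:

	/*
	 * rough numbers only, assuming BDRV_REQUEST_MAX_BYTES == 0x7ffffe00:
	 *
	 *   512-byte  LBAs: dmrsl = 0x7ffffe00 / 512  = 4194303 blocks
	 *   4096-byte LBAs: dmrsl = 0x7ffffe00 / 4096 =  524287 blocks
	 */

and with namespaces of mixed block sizes, MIN_NON_ZERO() keeps the most
restrictive (largest LBA size) value, right?
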
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index 1f958d09d2a9..27940fe2e98a 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -51,6 +51,7 @@ pci_nvme_copy_cb(uint16_t cid) "cid %"PRIu16""
>  pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed %d"
>  pci_nvme_dsm(uint16_t cid, uint32_t nsid, uint32_t nr, uint32_t attr) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu32" attr 0x%"PRIx32""
>  pci_nvme_dsm_deallocate(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
> +pci_nvme_dsm_single_range_limit_exceeded(uint32_t nlb, uint32_t dmrsl) "nlb %"PRIu32" dmrsl %"PRIu32""
>  pci_nvme_compare(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32""
>  pci_nvme_compare_cb(uint16_t cid) "cid %"PRIu16""
>  pci_nvme_aio_discard_cb(uint16_t cid) "cid %"PRIu16""
> -- 
> 2.30.1
> 
> 

