From: Andrzej Jakowski <andrzej.jakowski@linux.intel.com>
To: Klaus Jensen <its@irrelevant.dk>, qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	Klaus Jensen <k.jensen@samsung.com>,
	Keith Busch <kbusch@kernel.org>,
	qemu-block@nongnu.org, Max Reitz <mreitz@redhat.com>
Subject: Re: [PATCH 02/16] hw/block/nvme: add mapping helpers
Date: Wed, 29 Jul 2020 13:40:05 -0700	[thread overview]
Message-ID: <b5e82d6a-471e-8894-f418-73f7d99094da@linux.intel.com> (raw)
In-Reply-To: <20200720113748.322965-3-its@irrelevant.dk>

On 7/20/20 4:37 AM, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> use them in nvme_map_prp.
> 
> This fixes a bug where in the case of a CMB transfer, the device would
> map to the buffer with a wrong length.
> 
> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in CMBs.")
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
>  hw/block/nvme.c       | 109 +++++++++++++++++++++++++++++++++++-------
>  hw/block/trace-events |   2 +
>  2 files changed, 94 insertions(+), 17 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 4d7b730a62b6..9b1a080cdc70 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -109,6 +109,11 @@ static uint16_t nvme_sqid(NvmeRequest *req)
>      return le16_to_cpu(req->sq->sqid);
>  }
>  
> +static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
> +{
> +    return &n->cmbuf[addr - n->ctrl_mem.addr];
> +}
> +
>  static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  {
>      hwaddr low = n->ctrl_mem.addr;
> @@ -120,7 +125,7 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
>  {
>      if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
> -        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
> +        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
>          return;
>      }
>  
> @@ -203,29 +208,91 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
>      }
>  }
>  
> +static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
> +                                  size_t len)
> +{
> +    if (!len) {
> +        return NVME_SUCCESS;
> +    }
> +
> +    trace_pci_nvme_map_addr_cmb(addr, len);
> +
> +    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
> +        return NVME_DATA_TRAS_ERROR;
> +    }
> +
> +    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
> +
> +    return NVME_SUCCESS;
> +}
> +
> +static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> +                              hwaddr addr, size_t len)
> +{
> +    if (!len) {
> +        return NVME_SUCCESS;
> +    }
> +
> +    trace_pci_nvme_map_addr(addr, len);
> +
> +    if (nvme_addr_is_cmb(n, addr)) {
> +        if (qsg && qsg->sg) {
> +            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> +        }
> +
> +        assert(iov);
> +
> +        if (!iov->iov) {
> +            qemu_iovec_init(iov, 1);
> +        }
> +
> +        return nvme_map_addr_cmb(n, iov, addr, len);
> +    }
> +
> +    if (iov && iov->iov) {
> +        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> +    }
> +
> +    assert(qsg);
> +
> +    if (!qsg->sg) {
> +        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
> +    }
> +
> +    qemu_sglist_add(qsg, addr, len);
> +
> +    return NVME_SUCCESS;
> +}
> +
>  static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
>                               uint64_t prp2, uint32_t len, NvmeCtrl *n)
>  {
>      hwaddr trans_len = n->page_size - (prp1 % n->page_size);
>      trans_len = MIN(len, trans_len);
>      int num_prps = (len >> n->page_bits) + 1;
> +    uint16_t status;
>  
>      if (unlikely(!prp1)) {
>          trace_pci_nvme_err_invalid_prp();
>          return NVME_INVALID_FIELD | NVME_DNR;
> -    } else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
> -               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
> -        qsg->nsg = 0;
> +    }
> +
> +    if (nvme_addr_is_cmb(n, prp1)) {
>          qemu_iovec_init(iov, num_prps);
> -        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
>      } else {
>          pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
> -        qemu_sglist_add(qsg, prp1, trans_len);
>      }
> +
> +    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> +    if (status) {
> +        goto unmap;
> +    }
> +
>      len -= trans_len;
>      if (len) {
>          if (unlikely(!prp2)) {
>              trace_pci_nvme_err_invalid_prp2_missing();
> +            status = NVME_INVALID_FIELD | NVME_DNR;
>              goto unmap;
>          }
>          if (len > n->page_size) {
> @@ -242,6 +309,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
>                  if (i == n->max_prp_ents - 1 && len > n->page_size) {
>                      if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>                          trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +                        status = NVME_INVALID_FIELD | NVME_DNR;
>                          goto unmap;
>                      }
>  
> @@ -255,14 +323,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
>  
>                  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>                      trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +                    status = NVME_INVALID_FIELD | NVME_DNR;
>                      goto unmap;
>                  }
>  
>                  trans_len = MIN(len, n->page_size);
> -                if (qsg->nsg){
> -                    qemu_sglist_add(qsg, prp_ent, trans_len);
> -                } else {
> -                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
> +                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
> +                if (status) {
> +                    goto unmap;
>                  }
>                  len -= trans_len;
>                  i++;
> @@ -270,20 +338,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
>          } else {
>              if (unlikely(prp2 & (n->page_size - 1))) {
>                  trace_pci_nvme_err_invalid_prp2_align(prp2);
> +                status = NVME_INVALID_FIELD | NVME_DNR;
>                  goto unmap;
>              }
> -            if (qsg->nsg) {
> -                qemu_sglist_add(qsg, prp2, len);
> -            } else {
> -                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
> +            status = nvme_map_addr(n, qsg, iov, prp2, len);
> +            if (status) {
> +                goto unmap;
>              }
>          }
>      }
>      return NVME_SUCCESS;
>  
> - unmap:
> -    qemu_sglist_destroy(qsg);
> -    return NVME_INVALID_FIELD | NVME_DNR;
> +unmap:
> +    if (iov && iov->iov) {
> +        qemu_iovec_destroy(iov);
> +    }
> +
> +    if (qsg && qsg->sg) {
> +        qemu_sglist_destroy(qsg);
> +    }
> +
> +    return status;

I think it would make sense to move the whole unmap block into a separate helper
function. That helper could then be called both from here and after I/O completion,
so the deinitialization lives in one place and no code needs to be repeated -
something along the lines of the sketch below.
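
To illustrate, a rough sketch of what I have in mind (the name
nvme_unmap_qsg_iov is made up, and the exact signature/placement is of
course up to you):

    static void nvme_unmap_qsg_iov(QEMUSGList *qsg, QEMUIOVector *iov)
    {
        /*
         * Only one of the two is populated for a given request; tear down
         * whichever one nvme_map_addr() actually initialized.
         */
        if (iov && iov->iov) {
            qemu_iovec_destroy(iov);
        }

        if (qsg && qsg->sg) {
            qemu_sglist_destroy(qsg);
        }
    }

The unmap label in nvme_map_prp() would then reduce to calling
nvme_unmap_qsg_iov(qsg, iov) and returning status, and the request
completion path could call the same helper instead of open-coding the
qsg/iov teardown a second time.
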
Other than that it looks good to me. Thanks!

Reviewed-by: Andrzej Jakowski <andrzej.jakowski@linux.intel.com>

>  }
>  
>  static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index 7b7303cab1dd..f3b2d004e078 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -33,6 +33,8 @@ pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
>  pci_nvme_irq_pin(void) "pulsing IRQ pin"
>  pci_nvme_irq_masked(void) "IRQ is masked"
>  pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
> +pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
> +pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
>  pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8""
>  pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8""
>  pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
> 




Thread overview: 58+ messages
2020-07-20 11:37 [PATCH 00/16] hw/block/nvme: dma handling and address mapping cleanup Klaus Jensen
2020-07-20 11:37 ` [PATCH 01/16] hw/block/nvme: memset preallocated requests structures Klaus Jensen
2020-07-20 11:37 ` [PATCH 02/16] hw/block/nvme: add mapping helpers Klaus Jensen
2020-07-29 13:57   ` Maxim Levitsky
2020-07-29 18:23     ` Klaus Jensen
2020-07-29 15:19   ` Minwoo Im
2020-07-29 20:40   ` Andrzej Jakowski [this message]
2020-07-29 21:24     ` Klaus Jensen
2020-07-29 21:51       ` Andrzej Jakowski
2020-07-29 21:53         ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 03/16] hw/block/nvme: replace dma_acct with blk_acct equivalent Klaus Jensen
2020-07-29 15:23   ` Minwoo Im
2020-07-20 11:37 ` [PATCH 04/16] hw/block/nvme: remove redundant has_sg member Klaus Jensen
2020-07-29 15:29   ` Minwoo Im
2020-07-29 18:29     ` Klaus Jensen
     [not found]     ` <CGME20200729182946epcas2p1bef465a70c1a815654a07814aa379dc3@epcms2p5>
2020-07-30  0:34       ` Minwoo Im
2020-07-20 11:37 ` [PATCH 05/16] hw/block/nvme: refactor dma read/write Klaus Jensen
2020-07-29 15:35   ` Minwoo Im
2020-07-29 17:35   ` Maxim Levitsky
2020-07-29 18:38     ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 06/16] hw/block/nvme: pass request along for tracing Klaus Jensen
2020-07-29 15:49   ` Minwoo Im
2020-07-29 19:49     ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 07/16] hw/block/nvme: add request mapping helper Klaus Jensen
2020-07-29 15:52   ` Minwoo Im
2020-07-29 18:31     ` Maxim Levitsky
2020-07-29 19:22       ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 08/16] hw/block/nvme: verify validity of prp lists in the cmb Klaus Jensen
2020-07-29 15:54   ` Minwoo Im
2020-07-20 11:37 ` [PATCH 09/16] hw/block/nvme: refactor request bounds checking Klaus Jensen
2020-07-29 15:56   ` Minwoo Im
2020-07-20 11:37 ` [PATCH 10/16] hw/block/nvme: add check for mdts Klaus Jensen
2020-07-29 16:00   ` Minwoo Im
2020-07-29 19:30     ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 11/16] hw/block/nvme: be consistent about zeros vs zeroes Klaus Jensen
2020-07-29 16:01   ` Minwoo Im
2020-07-29 17:39   ` Maxim Levitsky
2020-07-20 11:37 ` [PATCH 12/16] hw/block/nvme: refactor NvmeRequest clearing Klaus Jensen
2020-07-29 16:04   ` Minwoo Im
2020-07-29 17:47   ` Maxim Levitsky
2020-07-29 19:02     ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 13/16] hw/block/nvme: add a namespace reference in NvmeRequest Klaus Jensen
2020-07-29 16:06   ` Minwoo Im
2020-07-29 17:53   ` Maxim Levitsky
2020-07-20 11:37 ` [PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing Klaus Jensen
2020-07-29 16:08   ` Minwoo Im
2020-07-29 18:18   ` Maxim Levitsky
2020-07-29 19:49     ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 15/16] hw/block/nvme: remove NvmeCmd parameter Klaus Jensen
2020-07-29 16:10   ` Minwoo Im
2020-07-29 19:44     ` Klaus Jensen
2020-07-29 18:25   ` Maxim Levitsky
2020-07-29 20:00     ` Klaus Jensen
2020-07-20 11:37 ` [PATCH 16/16] hw/block/nvme: use preallocated qsg/iov in nvme_dma_prp Klaus Jensen
2020-07-29 16:15   ` Minwoo Im
2020-07-29 19:57     ` Klaus Jensen
2020-07-27  9:42 ` [PATCH 00/16] hw/block/nvme: dma handling and address mapping cleanup Klaus Jensen
2020-07-27 20:44   ` Keith Busch
