From: Maxim Levitsky <mlevitsk@redhat.com>
To: Klaus Jensen <its@irrelevant.dk>, qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	Klaus Jensen <k.jensen@samsung.com>,
	Keith Busch <kbusch@kernel.org>,
	qemu-block@nongnu.org, Max Reitz <mreitz@redhat.com>
Subject: Re: [PATCH 15/16] hw/block/nvme: remove NvmeCmd parameter
Date: Wed, 29 Jul 2020 21:25:46 +0300
Message-ID: <64fae709b5869e3201d4016c2b17e984e3aa4605.camel@redhat.com>
In-Reply-To: <20200720113748.322965-16-its@irrelevant.dk>

On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Keep a copy of the raw nvme command in the NvmeRequest and remove the
> now redundant NvmeCmd parameter.

Shouldn't you clear req->cmd in nvme_req_clear() too, for consistency? The
command is now persistent per-request state in NvmeRequest, so I would
expect it to be reset together with the other fields.
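Something like the following, as a rough sketch (I am guessing at the
exact set of fields the helper resets after patch 12):

    static void nvme_req_clear(NvmeRequest *req)
    {
        req->ns = NULL;
        memset(&req->cqe, 0x0, sizeof(req->cqe));
        memset(&req->cmd, 0x0, sizeof(req->cmd)); /* proposed addition */
    }

That would keep the request fully reinitialized between uses, matching
the memcpy() you add in nvme_process_sq().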

Other than that, this looks OK to me, but I might have missed something.

Best regards,
	Maxim Levitsky
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
>  hw/block/nvme.c | 177 +++++++++++++++++++++++++-----------------------
>  hw/block/nvme.h |   1 +
>  2 files changed, 93 insertions(+), 85 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index b53afdeb3fb6..0b3dceccc89b 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -425,9 +425,9 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
>      return status;
>  }
>  
> -static uint16_t nvme_map(NvmeCtrl *n, NvmeCmd *cmd, size_t len,
> -                         NvmeRequest *req)
> +static uint16_t nvme_map(NvmeCtrl *n, size_t len, NvmeRequest *req)
>  {
> +    NvmeCmd *cmd = &req->cmd;
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>  
> @@ -597,7 +597,7 @@ static void nvme_rw_cb(void *opaque, int ret)
>      nvme_enqueue_req_completion(cq, req);
>  }
>  
> -static uint16_t nvme_flush(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
>  {
>      block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
>           BLOCK_ACCT_FLUSH);
> @@ -606,9 +606,9 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      return NVME_NO_COMPLETE;
>  }
>  
> -static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
> +    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
>      NvmeNamespace *ns = req->ns;
>      const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
>      const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
> @@ -633,9 +633,9 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      return NVME_NO_COMPLETE;
>  }
>  
> -static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
> +    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
>      NvmeNamespace *ns = req->ns;
>      uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
>      uint64_t slba = le64_to_cpu(rw->slba);
> @@ -664,7 +664,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>          return status;
>      }
>  
> -    if (nvme_map(n, cmd, data_size, req)) {
> +    if (nvme_map(n, data_size, req)) {
>          block_acct_invalid(blk_get_stats(n->conf.blk), acct);
>          return NVME_INVALID_FIELD | NVME_DNR;
>      }
> @@ -690,11 +690,12 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      return NVME_NO_COMPLETE;
>  }
>  
> -static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    uint32_t nsid = le32_to_cpu(cmd->nsid);
> +    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
>  
> -    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req), cmd->opcode);
> +    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
> +                          req->cmd.opcode);
>  
>      if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
>          trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
> @@ -702,16 +703,16 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      }
>  
>      req->ns = &n->namespaces[nsid - 1];
> -    switch (cmd->opcode) {
> +    switch (req->cmd.opcode) {
>      case NVME_CMD_FLUSH:
> -        return nvme_flush(n, cmd, req);
> +        return nvme_flush(n, req);
>      case NVME_CMD_WRITE_ZEROES:
> -        return nvme_write_zeroes(n, cmd, req);
> +        return nvme_write_zeroes(n, req);
>      case NVME_CMD_WRITE:
>      case NVME_CMD_READ:
> -        return nvme_rw(n, cmd, req);
> +        return nvme_rw(n, req);
>      default:
> -        trace_pci_nvme_err_invalid_opc(cmd->opcode);
> +        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
>          return NVME_INVALID_OPCODE | NVME_DNR;
>      }
>  }
> @@ -727,10 +728,10 @@ static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
>      }
>  }
>  
> -static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
> +static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
> -    NvmeRequest *req, *next;
> +    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
> +    NvmeRequest *r, *next;
>      NvmeSQueue *sq;
>      NvmeCQueue *cq;
>      uint16_t qid = le16_to_cpu(c->qid);
> @@ -744,19 +745,19 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
>  
>      sq = n->sq[qid];
>      while (!QTAILQ_EMPTY(&sq->out_req_list)) {
> -        req = QTAILQ_FIRST(&sq->out_req_list);
> -        assert(req->aiocb);
> -        blk_aio_cancel(req->aiocb);
> +        r = QTAILQ_FIRST(&sq->out_req_list);
> +        assert(r->aiocb);
> +        blk_aio_cancel(r->aiocb);
>      }
>      if (!nvme_check_cqid(n, sq->cqid)) {
>          cq = n->cq[sq->cqid];
>          QTAILQ_REMOVE(&cq->sq_list, sq, entry);
>  
>          nvme_post_cqes(cq);
> -        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
> -            if (req->sq == sq) {
> -                QTAILQ_REMOVE(&cq->req_list, req, entry);
> -                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
> +        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
> +            if (r->sq == sq) {
> +                QTAILQ_REMOVE(&cq->req_list, r, entry);
> +                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
>              }
>          }
>      }
> @@ -793,10 +794,10 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
>      n->sq[sqid] = sq;
>  }
>  
> -static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
> +static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
>  {
>      NvmeSQueue *sq;
> -    NvmeCreateSq *c = (NvmeCreateSq *)cmd;
> +    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;
>  
>      uint16_t cqid = le16_to_cpu(c->cqid);
>      uint16_t sqid = le16_to_cpu(c->sqid);
> @@ -831,10 +832,10 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
>      return NVME_SUCCESS;
>  }
>  
> -static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> -                                uint32_t buf_len, uint64_t off,
> -                                NvmeRequest *req)
> +static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
> +                                uint64_t off, NvmeRequest *req)
>  {
> +    NvmeCmd *cmd = &req->cmd;
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>      uint32_t nsid = le32_to_cpu(cmd->nsid);
> @@ -889,10 +890,11 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>                          DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
> -                                 uint64_t off, NvmeRequest *req)
> +static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
> +                                 NvmeRequest *req)
>  {
>      uint32_t trans_len;
> +    NvmeCmd *cmd = &req->cmd;
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>      NvmeFwSlotInfoLog fw_log = {
> @@ -911,11 +913,11 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
>                          DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> -                                uint32_t buf_len, uint64_t off,
> -                                NvmeRequest *req)
> +static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
> +                                uint64_t off, NvmeRequest *req)
>  {
>      uint32_t trans_len;
> +    NvmeCmd *cmd = &req->cmd;
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>      NvmeErrorLog errlog;
> @@ -936,8 +938,10 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>                          DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeCmd *cmd = &req->cmd;
> +
>      uint32_t dw10 = le32_to_cpu(cmd->cdw10);
>      uint32_t dw11 = le32_to_cpu(cmd->cdw11);
>      uint32_t dw12 = le32_to_cpu(cmd->cdw12);
> @@ -972,11 +976,11 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>  
>      switch (lid) {
>      case NVME_LOG_ERROR_INFO:
> -        return nvme_error_info(n, cmd, rae, len, off, req);
> +        return nvme_error_info(n, rae, len, off, req);
>      case NVME_LOG_SMART_INFO:
> -        return nvme_smart_info(n, cmd, rae, len, off, req);
> +        return nvme_smart_info(n, rae, len, off, req);
>      case NVME_LOG_FW_SLOT_INFO:
> -        return nvme_fw_log_info(n, cmd, len, off, req);
> +        return nvme_fw_log_info(n, len, off, req);
>      default:
>          trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
>          return NVME_INVALID_FIELD | NVME_DNR;
> @@ -994,9 +998,9 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
>      }
>  }
>  
> -static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
> +static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
> +    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
>      NvmeCQueue *cq;
>      uint16_t qid = le16_to_cpu(c->qid);
>  
> @@ -1037,10 +1041,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
>      cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
>  }
>  
> -static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
> +static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
>  {
>      NvmeCQueue *cq;
> -    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
> +    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
>      uint16_t cqid = le16_to_cpu(c->cqid);
>      uint16_t vector = le16_to_cpu(c->irq_vector);
>      uint16_t qsize = le16_to_cpu(c->qsize);
> @@ -1088,9 +1092,9 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
>      return NVME_SUCCESS;
>  }
>  
> -static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c,
> -                                   NvmeRequest *req)
> +static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
>      uint64_t prp1 = le64_to_cpu(c->prp1);
>      uint64_t prp2 = le64_to_cpu(c->prp2);
>  
> @@ -1100,10 +1104,10 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c,
>                          prp2, DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c,
> -                                 NvmeRequest *req)
> +static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
>  {
>      NvmeNamespace *ns;
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
>      uint32_t nsid = le32_to_cpu(c->nsid);
>      uint64_t prp1 = le64_to_cpu(c->prp1);
>      uint64_t prp2 = le64_to_cpu(c->prp2);
> @@ -1121,9 +1125,9 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c,
>                          prp2, DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c,
> -                                     NvmeRequest *req)
> +static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
>      static const int data_len = NVME_IDENTIFY_DATA_SIZE;
>      uint32_t min_nsid = le32_to_cpu(c->nsid);
>      uint64_t prp1 = le64_to_cpu(c->prp1);
> @@ -1160,9 +1164,9 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c,
>      return ret;
>  }
>  
> -static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeIdentify *c,
> -                                            NvmeRequest *req)
> +static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
>      uint32_t nsid = le32_to_cpu(c->nsid);
>      uint64_t prp1 = le64_to_cpu(c->prp1);
>      uint64_t prp2 = le64_to_cpu(c->prp2);
> @@ -1201,28 +1205,28 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeIdentify *c,
>                          DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeIdentify *c = (NvmeIdentify *)cmd;
> +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
>  
>      switch (le32_to_cpu(c->cns)) {
>      case NVME_ID_CNS_NS:
> -        return nvme_identify_ns(n, c, req);
> +        return nvme_identify_ns(n, req);
>      case NVME_ID_CNS_CTRL:
> -        return nvme_identify_ctrl(n, c, req);
> +        return nvme_identify_ctrl(n, req);
>      case NVME_ID_CNS_NS_ACTIVE_LIST:
> -        return nvme_identify_nslist(n, c, req);
> +        return nvme_identify_nslist(n, req);
>      case NVME_ID_CNS_NS_DESCR_LIST:
> -        return nvme_identify_ns_descr_list(n, c, req);
> +        return nvme_identify_ns_descr_list(n, req);
>      default:
>          trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
>          return NVME_INVALID_FIELD | NVME_DNR;
>      }
>  }
>  
> -static uint16_t nvme_abort(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    uint16_t sqid = le32_to_cpu(cmd->cdw10) & 0xffff;
> +    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
>  
>      req->cqe.result = 1;
>      if (nvme_check_sqid(n, sqid)) {
> @@ -1272,9 +1276,9 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
>      return cpu_to_le64(ts.all);
>  }
>  
> -static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
> -                                           NvmeRequest *req)
> +static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeCmd *cmd = &req->cmd;
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>  
> @@ -1284,8 +1288,9 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
>                          prp2, DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
> -static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeCmd *cmd = &req->cmd;
>      uint32_t dw10 = le32_to_cpu(cmd->cdw10);
>      uint32_t dw11 = le32_to_cpu(cmd->cdw11);
>      uint32_t nsid = le32_to_cpu(cmd->nsid);
> @@ -1359,7 +1364,7 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>          result = n->features.async_config;
>          goto out;
>      case NVME_TIMESTAMP:
> -        return nvme_get_feature_timestamp(n, cmd, req);
> +        return nvme_get_feature_timestamp(n, req);
>      default:
>          break;
>      }
> @@ -1405,11 +1410,11 @@ out:
>      return NVME_SUCCESS;
>  }
>  
> -static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
> -                                           NvmeRequest *req)
> +static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
>  {
>      uint16_t ret;
>      uint64_t timestamp;
> +    NvmeCmd *cmd = &req->cmd;
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>  
> @@ -1424,8 +1429,9 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
>      return NVME_SUCCESS;
>  }
>  
> -static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
>  {
> +    NvmeCmd *cmd = &req->cmd;
>      uint32_t dw10 = le32_to_cpu(cmd->cdw10);
>      uint32_t dw11 = le32_to_cpu(cmd->cdw11);
>      uint32_t nsid = le32_to_cpu(cmd->nsid);
> @@ -1516,14 +1522,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>          n->features.async_config = dw11;
>          break;
>      case NVME_TIMESTAMP:
> -        return nvme_set_feature_timestamp(n, cmd, req);
> +        return nvme_set_feature_timestamp(n, req);
>      default:
>          return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
>      }
>      return NVME_SUCCESS;
>  }
>  
> -static uint16_t nvme_aer(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
>  {
>      trace_pci_nvme_aer(nvme_cid(req));
>  
> @@ -1542,33 +1548,33 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      return NVME_NO_COMPLETE;
>  }
>  
> -static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> +static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), cmd->opcode);
> +    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode);
>  
> -    switch (cmd->opcode) {
> +    switch (req->cmd.opcode) {
>      case NVME_ADM_CMD_DELETE_SQ:
> -        return nvme_del_sq(n, cmd);
> +        return nvme_del_sq(n, req);
>      case NVME_ADM_CMD_CREATE_SQ:
> -        return nvme_create_sq(n, cmd);
> +        return nvme_create_sq(n, req);
>      case NVME_ADM_CMD_GET_LOG_PAGE:
> -        return nvme_get_log(n, cmd, req);
> +        return nvme_get_log(n, req);
>      case NVME_ADM_CMD_DELETE_CQ:
> -        return nvme_del_cq(n, cmd);
> +        return nvme_del_cq(n, req);
>      case NVME_ADM_CMD_CREATE_CQ:
> -        return nvme_create_cq(n, cmd);
> +        return nvme_create_cq(n, req);
>      case NVME_ADM_CMD_IDENTIFY:
> -        return nvme_identify(n, cmd, req);
> +        return nvme_identify(n, req);
>      case NVME_ADM_CMD_ABORT:
> -        return nvme_abort(n, cmd, req);
> +        return nvme_abort(n, req);
>      case NVME_ADM_CMD_SET_FEATURES:
> -        return nvme_set_feature(n, cmd, req);
> +        return nvme_set_feature(n, req);
>      case NVME_ADM_CMD_GET_FEATURES:
> -        return nvme_get_feature(n, cmd, req);
> +        return nvme_get_feature(n, req);
>      case NVME_ADM_CMD_ASYNC_EV_REQ:
> -        return nvme_aer(n, cmd, req);
> +        return nvme_aer(n, req);
>      default:
> -        trace_pci_nvme_err_invalid_admin_opc(cmd->opcode);
> +        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
>          return NVME_INVALID_OPCODE | NVME_DNR;
>      }
>  }
> @@ -1593,9 +1599,10 @@ static void nvme_process_sq(void *opaque)
>          QTAILQ_REMOVE(&sq->req_list, req, entry);
>          QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
>          req->cqe.cid = cmd.cid;
> +        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
>  
> -        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
> -            nvme_admin_cmd(n, &cmd, req);
> +        status = sq->sqid ? nvme_io_cmd(n, req) :
> +            nvme_admin_cmd(n, req);
>          if (status != NVME_NO_COMPLETE) {
>              req->status = status;
>              nvme_enqueue_req_completion(cq, req);
> diff --git a/hw/block/nvme.h b/hw/block/nvme.h
> index 586fd3d62700..52ba794f2e9a 100644
> --- a/hw/block/nvme.h
> +++ b/hw/block/nvme.h
> @@ -25,6 +25,7 @@ typedef struct NvmeRequest {
>      BlockAIOCB              *aiocb;
>      uint16_t                status;
>      NvmeCqe                 cqe;
> +    NvmeCmd                 cmd;
>      BlockAcctCookie         acct;
>      QEMUSGList              qsg;
>      QEMUIOVector            iov;



