From: Klaus Jensen <its@irrelevant.dk>
To: qemu-devel@nongnu.org, Peter Maydell <peter.maydell@linaro.org>
Cc: Fam Zheng <fam@euphon.net>, Kevin Wolf <kwolf@redhat.com>,
qemu-block@nongnu.org, Niklas Cassel <Niklas.Cassel@wdc.com>,
Dmitry Fomichev <dmitry.fomichev@wdc.com>,
Klaus Jensen <k.jensen@samsung.com>,
Max Reitz <mreitz@redhat.com>, Klaus Jensen <its@irrelevant.dk>,
Stefan Hajnoczi <stefanha@redhat.com>,
Keith Busch <kbusch@kernel.org>
Subject: [PULL 10/56] hw/block/nvme: Separate read and write handlers
Date: Tue, 9 Feb 2021 08:30:15 +0100
Message-ID: <20210209073101.548811-11-its@irrelevant.dk>
In-Reply-To: <20210209073101.548811-1-its@irrelevant.dk>
From: Dmitry Fomichev <dmitry.fomichev@wdc.com>
The majority of the code in nvme_rw() is becoming read- or write-specific.
Move these parts into two separate handlers, nvme_read() and nvme_write(),
to make the code more readable and to remove the multiple is_write checks
that have been present in the I/O path.
This is a refactoring patch; there is no change in functionality.
Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
Reviewed-by: Niklas Cassel <Niklas.Cassel@wdc.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
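Note for reviewers skimming the diff: both new handlers follow the same
shape. The condensed outline below is not the literal code (declarations
and the error path are elided); it sketches the read side, with
nvme_write() differing only in skipping the DULBE check and using the
write variants of the accounting and aio submission calls:

    static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
    {
        /* decode slba/nlb from the command, derive byte size and offset */
        status = nvme_check_mdts(n, data_size);     /* MDTS limit */
        status = nvme_check_bounds(ns, slba, nlb);  /* namespace bounds */
        status = nvme_map_dptr(n, data_size, req);  /* map the data buffer */
        if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
            status = nvme_check_dulbe(ns, slba, nlb);  /* read-specific */
        }
        block_acct_start(blk_get_stats(blk), &req->acct, data_size,
                         BLOCK_ACCT_READ);
        /* submit dma_blk_read() or blk_aio_preadv() with nvme_rw_cb */
        return NVME_NO_COMPLETE;
    }

On any failing check, both handlers account the failure with
block_acct_invalid() before returning.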
hw/block/nvme.c | 107 ++++++++++++++++++++++++++++--------------
hw/block/trace-events | 3 +-
2 files changed, 74 insertions(+), 36 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 7b243a56efdf..905fd1ba93f5 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1176,6 +1176,61 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
+static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+ NvmeNamespace *ns = req->ns;
+ uint64_t slba = le64_to_cpu(rw->slba);
+ uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
+ uint64_t data_size = nvme_l2b(ns, nlb);
+ uint64_t data_offset;
+ BlockBackend *blk = ns->blkconf.blk;
+ uint16_t status;
+
+ trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, data_size, slba);
+
+ status = nvme_check_mdts(n, data_size);
+ if (status) {
+ trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
+ goto invalid;
+ }
+
+ status = nvme_check_bounds(ns, slba, nlb);
+ if (status) {
+ trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
+ goto invalid;
+ }
+
+ status = nvme_map_dptr(n, data_size, req);
+ if (status) {
+ goto invalid;
+ }
+
+ if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
+ status = nvme_check_dulbe(ns, slba, nlb);
+ if (status) {
+ goto invalid;
+ }
+ }
+
+ data_offset = nvme_l2b(ns, slba);
+
+ block_acct_start(blk_get_stats(blk), &req->acct, data_size,
+ BLOCK_ACCT_READ);
+ if (req->qsg.sg) {
+ req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
+ BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+ } else {
+ req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
+ nvme_rw_cb, req);
+ }
+ return NVME_NO_COMPLETE;
+
+invalid:
+ block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
+ return status | NVME_DNR;
+}
+
static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
@@ -1201,22 +1256,19 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req)
{
NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
NvmeNamespace *ns = req->ns;
- uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
uint64_t slba = le64_to_cpu(rw->slba);
-
+ uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
uint64_t data_size = nvme_l2b(ns, nlb);
- uint64_t data_offset = nvme_l2b(ns, slba);
- enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
- BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+ uint64_t data_offset;
BlockBackend *blk = ns->blkconf.blk;
uint16_t status;
- trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
- nvme_nsid(ns), nlb, data_size, slba);
+ trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
+ nvme_nsid(ns), nlb, data_size, slba);
status = nvme_check_mdts(n, data_size);
if (status) {
@@ -1230,43 +1282,27 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
goto invalid;
}
- if (acct == BLOCK_ACCT_READ) {
- if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
- status = nvme_check_dulbe(ns, slba, nlb);
- if (status) {
- goto invalid;
- }
- }
- }
-
status = nvme_map_dptr(n, data_size, req);
if (status) {
goto invalid;
}
- block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
+ data_offset = nvme_l2b(ns, slba);
+
+ block_acct_start(blk_get_stats(blk), &req->acct, data_size,
+ BLOCK_ACCT_WRITE);
if (req->qsg.sg) {
- if (acct == BLOCK_ACCT_WRITE) {
- req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- } else {
- req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- }
+ req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
+ BDRV_SECTOR_SIZE, nvme_rw_cb, req);
} else {
- if (acct == BLOCK_ACCT_WRITE) {
- req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
- nvme_rw_cb, req);
- } else {
- req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
- nvme_rw_cb, req);
- }
+ req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
+ nvme_rw_cb, req);
}
return NVME_NO_COMPLETE;
invalid:
- block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
- return status;
+ block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
+ return status | NVME_DNR;
}
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
@@ -1295,8 +1331,9 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
case NVME_CMD_WRITE_ZEROES:
return nvme_write_zeroes(n, req);
case NVME_CMD_WRITE:
+ return nvme_write(n, req);
case NVME_CMD_READ:
- return nvme_rw(n, req);
+ return nvme_read(n, req);
case NVME_CMD_COMPARE:
return nvme_compare(n, req);
case NVME_CMD_DSM:
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 68a4c8ed35e0..ec1b43220eff 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -40,7 +40,8 @@ pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2,
pci_nvme_map_sgl(uint16_t cid, uint8_t typ, uint64_t len) "cid %"PRIu16" type 0x%"PRIx8" len %"PRIu64""
pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
-pci_nvme_rw(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
+pci_nvme_read(uint16_t cid, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
+pci_nvme_write(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
pci_nvme_write_zeroes(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed %d"
--
2.30.0
Thread overview: 62+ messages
2021-02-09 7:30 [PULL 00/56] emulated nvme patches Klaus Jensen
2021-02-09 7:30 ` [PULL 01/56] hw/block/nvme: remove superfluous NvmeCtrl parameter Klaus Jensen
2021-02-09 7:30 ` [PULL 02/56] hw/block/nvme: pull aio error handling Klaus Jensen
2021-02-09 7:30 ` [PULL 03/56] hw/block/nvme: add dulbe support Klaus Jensen
2021-02-09 7:30 ` [PULL 04/56] nvme: add namespace I/O optimization fields to shared header Klaus Jensen
2021-02-09 7:30 ` [PULL 05/56] hw/block/nvme: add the dataset management command Klaus Jensen
2021-02-09 7:30 ` [PULL 06/56] hw/block/nvme: add compare command Klaus Jensen
2021-02-09 7:30 ` [PULL 07/56] hw/block/nvme: fix bad clearing of CAP Klaus Jensen
2021-02-09 7:30 ` [PULL 08/56] hw/block/nvme: Process controller reset and shutdown differently Klaus Jensen
2021-02-09 7:30 ` [PULL 09/56] hw/block/nvme: Generate namespace UUIDs Klaus Jensen
2021-02-09 7:30 ` Klaus Jensen [this message]
2021-02-09 7:30 ` [PULL 11/56] hw/block/nvme: Combine nvme_write_zeroes() and nvme_write() Klaus Jensen
2021-02-09 7:30 ` [PULL 12/56] hw/block/nvme: Add Commands Supported and Effects log Klaus Jensen
2021-02-09 7:30 ` [PULL 13/56] hw/block/nvme: Add support for Namespace Types Klaus Jensen
2021-02-09 7:30 ` [PULL 14/56] hw/block/nvme: Support allocated CNS command variants Klaus Jensen
2021-02-09 7:30 ` [PULL 15/56] nvme: Make ZNS-related definitions Klaus Jensen
2021-02-09 7:30 ` [PULL 16/56] hw/block/nvme: Support Zoned Namespace Command Set Klaus Jensen
2021-02-09 7:30 ` [PULL 17/56] hw/block/nvme: Introduce max active and open zone limits Klaus Jensen
2021-02-09 7:30 ` [PULL 18/56] hw/block/nvme: Support Zone Descriptor Extensions Klaus Jensen
2021-02-09 7:30 ` [PULL 19/56] hw/block/nvme: Document zoned parameters in usage text Klaus Jensen
2021-02-09 7:30 ` [PULL 20/56] hw/block/nvme: fix for non-msix machines Klaus Jensen
2021-02-09 7:30 ` [PULL 21/56] hw/block/nvme: conditionally enable DULBE for zoned namespaces Klaus Jensen
2021-02-09 7:30 ` [PULL 22/56] hw/block/nvme: fix shutdown/reset logic Klaus Jensen
2021-02-09 7:30 ` [PULL 23/56] hw/block/nvme: merge implicitly/explicitly opened processing masks Klaus Jensen
2021-02-09 7:30 ` [PULL 24/56] hw/block/nvme: enum style fix Klaus Jensen
2021-02-09 7:30 ` [PULL 25/56] hw/block/nvme: zero out zones on reset Klaus Jensen
2021-02-09 7:30 ` [PULL 26/56] hw/block/nvme: add missing string representations for commands Klaus Jensen
2021-02-09 7:30 ` [PULL 27/56] hw/block/nvme: remove unnecessary check for append Klaus Jensen
2021-02-09 7:30 ` [PULL 28/56] hw/block/nvme: Correct error status for unaligned ZA Klaus Jensen
2021-02-09 7:30 ` [PULL 29/56] hw/block/nvme: remove unused argument in nvme_ns_init_zoned Klaus Jensen
2021-02-09 7:30 ` [PULL 30/56] hw/block/nvme: open code for volatile write cache Klaus Jensen
2021-02-09 7:30 ` [PULL 31/56] hw/block/nvme: remove unused argument in nvme_ns_init_blk Klaus Jensen
2021-02-09 7:30 ` [PULL 32/56] hw/block/nvme: split setup and register for namespace Klaus Jensen
2021-02-11 9:53 ` Alexander Graf
2021-02-11 10:41 ` Klaus Jensen
2021-02-11 11:40 ` Philippe Mathieu-Daudé
2021-02-11 11:46 ` Klaus Jensen
2021-02-09 7:30 ` [PULL 33/56] hw/block/nvme: remove unused argument in nvme_ns_setup Klaus Jensen
2021-02-09 7:30 ` [PULL 34/56] hw/block/nvme: fix zone write finalize Klaus Jensen
2021-02-09 7:30 ` [PULL 35/56] nvme: introduce bit 5 for critical warning Klaus Jensen
2021-02-09 7:30 ` [PULL 36/56] hw/block/nvme: add smart_critical_warning property Klaus Jensen
2021-02-09 7:30 ` [PULL 37/56] hw/block/nvme: trigger async event during injecting smart warning Klaus Jensen
2021-02-09 7:30 ` [PULL 38/56] hw/block/nvme: add size to mmio read/write trace events Klaus Jensen
2021-02-09 7:30 ` [PULL 39/56] hw/block/nvme: fix 64 bit register hi/lo split writes Klaus Jensen
2021-02-09 7:30 ` [PULL 40/56] hw/block/nvme: indicate CMB support through controller capabilities register Klaus Jensen
2021-02-09 7:30 ` [PULL 41/56] hw/block/nvme: move msix table and pba to BAR 0 Klaus Jensen
2021-02-09 7:30 ` [PULL 42/56] hw/block/nvme: allow cmb and pmr to coexist Klaus Jensen
2021-02-09 7:30 ` [PULL 43/56] hw/block/nvme: rename PMR/CMB shift/mask fields Klaus Jensen
2021-02-09 7:30 ` [PULL 44/56] hw/block/nvme: remove redundant zeroing of PMR registers Klaus Jensen
2021-02-09 7:30 ` [PULL 45/56] hw/block/nvme: disable PMR at boot up Klaus Jensen
2021-02-09 7:30 ` [PULL 46/56] hw/block/nvme: add PMR RDS/WDS support Klaus Jensen
2021-02-09 7:30 ` [PULL 47/56] hw/block/nvme: move cmb logic to v1.4 Klaus Jensen
2021-02-09 7:30 ` [PULL 48/56] hw/block/nvme: bump " Klaus Jensen
2021-02-09 7:30 ` [PULL 49/56] hw/block/nvme: lift cmb restrictions Klaus Jensen
2021-02-09 7:30 ` [PULL 50/56] hw/block/nvme: error if drive less than a zone size Klaus Jensen
2021-02-09 7:30 ` [PULL 51/56] hw/block/nvme: fix set feature for error recovery Klaus Jensen
2021-02-09 7:30 ` [PULL 52/56] hw/block/nvme: fix set feature save field check Klaus Jensen
2021-02-09 7:30 ` [PULL 53/56] hw/block/nvme: align with existing style Klaus Jensen
2021-02-09 7:30 ` [PULL 54/56] hw/block/nvme: fix wrong parameter name 'cross_read' Klaus Jensen
2021-02-09 7:31 ` [PULL 55/56] hw/block/nvme: fix zone boundary check for append Klaus Jensen
2021-02-09 7:31 ` [PULL 56/56] hw/block/nvme: refactor the logic for zone write checks Klaus Jensen
2021-02-09 14:52 ` [PULL 00/56] emulated nvme patches Peter Maydell