From: "Philippe Mathieu-Daudé" <philmd@redhat.com>
To: qemu-devel@nongnu.org, Stefan Hajnoczi <stefanha@redhat.com>
Cc: "Fam Zheng" <fam@euphon.net>, "Kevin Wolf" <kwolf@redhat.com>,
qemu-block@nongnu.org, "Max Reitz" <mreitz@redhat.com>,
"Maxim Levitsky" <mlevitsk@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@redhat.com>
Subject: [PATCH 03/17] block/nvme: Define QUEUE_INDEX macros to ease code review
Date: Thu, 25 Jun 2020 20:48:24 +0200 [thread overview]
Message-ID: <20200625184838.28172-4-philmd@redhat.com> (raw)
In-Reply-To: <20200625184838.28172-1-philmd@redhat.com>
Use definitions instead of the magic '0' and '1' indices. This will
also be useful later, when using multiple I/O queues.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
block/nvme.c | 33 +++++++++++++++++++--------------
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index ec0dd21b6e..71f8cf27a8 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -89,6 +89,9 @@ typedef volatile struct {
QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
+#define QUEUE_INDEX_ADMIN 0
+#define QUEUE_INDEX_IO(n) (1 + n)
+
typedef struct {
AioContext *aio_context;
QEMUVFIOState *vfio;
@@ -459,7 +462,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
}
cmd.prp1 = cpu_to_le64(iova);
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+ if (nvme_cmd_sync(bs, s->queues[QUEUE_INDEX_ADMIN], &cmd)) {
error_setg(errp, "Failed to identify controller");
goto out;
}
@@ -483,7 +486,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
cmd.cdw10 = 0;
cmd.nsid = cpu_to_le32(namespace);
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+ if (nvme_cmd_sync(bs, s->queues[QUEUE_INDEX_ADMIN], &cmd)) {
error_setg(errp, "Failed to identify namespace");
goto out;
}
@@ -560,7 +563,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
.cdw11 = cpu_to_le32(0x3),
};
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+ if (nvme_cmd_sync(bs, s->queues[QUEUE_INDEX_ADMIN], &cmd)) {
error_setg(errp, "Failed to create io queue [%d]", n);
nvme_free_queue_pair(bs, q);
return false;
@@ -571,7 +574,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
.cdw11 = cpu_to_le32(0x1 | (n << 16)),
};
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+ if (nvme_cmd_sync(bs, s->queues[QUEUE_INDEX_ADMIN], &cmd)) {
error_setg(errp, "Failed to create io queue [%d]", n);
nvme_free_queue_pair(bs, q);
return false;
@@ -655,16 +658,18 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
/* Set up admin queue. */
s->queues = g_new(NVMeQueuePair *, 1);
- s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
- if (!s->queues[0]) {
+ s->queues[QUEUE_INDEX_ADMIN] = nvme_create_queue_pair(bs, 0,
+ NVME_QUEUE_SIZE,
+ errp);
+ if (!s->queues[QUEUE_INDEX_ADMIN]) {
ret = -EINVAL;
goto out;
}
s->nr_queues = 1;
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
- s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
- s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);
+ s->regs->asq = cpu_to_le64(s->queues[QUEUE_INDEX_ADMIN]->sq.iova);
+ s->regs->acq = cpu_to_le64(s->queues[QUEUE_INDEX_ADMIN]->cq.iova);
/* After setting up all control registers we can enable device now. */
s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
@@ -755,7 +760,7 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
.cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
};
- ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
+ ret = nvme_cmd_sync(bs, s->queues[QUEUE_INDEX_ADMIN], &cmd);
if (ret) {
error_setg(errp, "Failed to configure NVMe write cache");
}
@@ -972,7 +977,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
{
int r;
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[QUEUE_INDEX_IO(0)];
NVMeRequest *req;
uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
@@ -1087,7 +1092,7 @@ static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[QUEUE_INDEX_IO(0)];
NVMeRequest *req;
NvmeCmd cmd = {
.opcode = NVME_CMD_FLUSH,
@@ -1118,7 +1123,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
BdrvRequestFlags flags)
{
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[QUEUE_INDEX_IO(0)];
NVMeRequest *req;
uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
@@ -1171,7 +1176,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
int bytes)
{
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[QUEUE_INDEX_IO(0)];
NVMeRequest *req;
NvmeDsmRange *buf;
QEMUIOVector local_qiov;
@@ -1300,7 +1305,7 @@ static void nvme_aio_unplug(BlockDriverState *bs)
BDRVNVMeState *s = bs->opaque;
assert(s->plugged);
s->plugged = false;
- for (i = 1; i < s->nr_queues; i++) {
+ for (i = QUEUE_INDEX_IO(0); i < s->nr_queues; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock);
nvme_kick(s, q);
--
2.21.3
next prev parent reply other threads:[~2020-06-25 18:53 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-25 18:48 [PATCH 00/17] block/nvme: Various cleanups required to use multiple queues Philippe Mathieu-Daudé
2020-06-25 18:48 ` [PATCH 01/17] block/nvme: Avoid further processing if trace event not enabled Philippe Mathieu-Daudé
2020-06-26 10:36 ` Stefan Hajnoczi
2020-06-26 14:02 ` Philippe Mathieu-Daudé
2020-06-29 13:02 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 02/17] block/nvme: Let nvme_create_queue_pair() fail gracefully Philippe Mathieu-Daudé
2020-06-26 11:11 ` Stefan Hajnoczi
2020-06-25 18:48 ` Philippe Mathieu-Daudé [this message]
2020-06-26 11:12 ` [PATCH 03/17] block/nvme: Define QUEUE_INDEX macros to ease code review Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 04/17] block/nvme: Be explicit we share NvmeIdCtrl / NvmeIdNs structures Philippe Mathieu-Daudé
2020-06-26 11:19 ` Stefan Hajnoczi
2020-06-26 12:45 ` Philippe Mathieu-Daudé
2020-06-25 18:48 ` [PATCH 05/17] block/nvme: Replace qemu_try_blockalign0 by qemu_try_blockalign/memset Philippe Mathieu-Daudé
2020-06-26 12:20 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 06/17] block/nvme: Replace qemu_try_blockalign(bs) by qemu_try_memalign(pg_sz) Philippe Mathieu-Daudé
2020-06-26 12:24 ` Stefan Hajnoczi
2020-06-26 12:48 ` Philippe Mathieu-Daudé
2020-06-29 13:07 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 07/17] block/nvme: Move code around Philippe Mathieu-Daudé
2020-06-26 12:25 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 08/17] block/nvme: Use correct type void* Philippe Mathieu-Daudé
2020-06-26 12:31 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 09/17] block/nvme: Remove unused argument from nvme_free_queue_pair() Philippe Mathieu-Daudé
2020-06-26 12:31 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 10/17] block/nvme: Simplify nvme_init_queue() arguments Philippe Mathieu-Daudé
2020-06-26 12:31 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 11/17] block/nvme: Simplify nvme_create_queue_pair() arguments Philippe Mathieu-Daudé
2020-06-26 12:31 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 12/17] block/nvme: Simplify nvme_kick trace event Philippe Mathieu-Daudé
2020-06-26 12:33 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 13/17] block/nvme: Simplify completion trace events Philippe Mathieu-Daudé
2020-06-26 12:34 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 14/17] block/nvme: Replace BDRV_POLL_WHILE by AIO_WAIT_WHILE Philippe Mathieu-Daudé
2020-06-26 12:35 ` Stefan Hajnoczi
2020-06-25 18:48 ` [RFC PATCH 15/17] block/nvme: Use per-queue AIO context Philippe Mathieu-Daudé
2020-06-26 12:42 ` Stefan Hajnoczi
2020-06-26 12:59 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 16/17] block/nvme: Check BDRVNVMeState::plugged out of nvme_kick() Philippe Mathieu-Daudé
2020-06-26 12:43 ` Stefan Hajnoczi
2020-06-25 18:48 ` [PATCH 17/17] block/nvme: Check BDRVNVMeState::plugged out of nvme_process_completion Philippe Mathieu-Daudé
2020-06-26 12:46 ` Stefan Hajnoczi
2020-06-25 19:27 ` [PATCH 00/17] block/nvme: Various cleanups required to use multiple queues no-reply
2020-06-26 9:18 ` Philippe Mathieu-Daudé
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200625184838.28172-4-philmd@redhat.com \
--to=philmd@redhat.com \
--cc=fam@euphon.net \
--cc=kwolf@redhat.com \
--cc=mlevitsk@redhat.com \
--cc=mreitz@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).