From: "Philippe Mathieu-Daudé" <philmd@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Kevin Wolf" <kwolf@redhat.com>, "Fam Zheng" <fam@euphon.net>,
qemu-block@nongnu.org, "Max Reitz" <mreitz@redhat.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@redhat.com>
Subject: [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier
Date: Wed, 19 Aug 2020 18:03:14 +0200
Message-ID: <20200819160318.835292-2-philmd@redhat.com>
In-Reply-To: <20200819160318.835292-1-philmd@redhat.com>

In preparation for using multiple IRQs (thus multiple eventfds),
make BDRVNVMeState::irq_notifier an array (for now of a single
element, the admin queue notifier).
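
A note on the container_of() changes below: container_of() on
irq_notifier[MSIX_SHARED_IRQ_IDX] still recovers the enclosing
BDRVNVMeState, because with index 0 the element sits at the same
offset as the array member itself. The following is a minimal
standalone sketch of that idiom, not QEMU code: the EventNotifier
and State types and the simplified container_of() macro are
illustrative stand-ins.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified form of the container_of() macro; QEMU's version
     * additionally type-checks the pointer. */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    typedef struct { int fd; } EventNotifier;   /* stand-in type */

    typedef struct {
        int other_state;
        EventNotifier irq_notifier[1];  /* array of one, as in this patch */
    } State;

    int main(void)
    {
        State s;
        EventNotifier *n = &s.irq_notifier[0];
        /* Same idiom as nvme_handle_event()/nvme_poll_cb() below:
         * element 0 is at the offset of the array member, so the
         * enclosing struct is recovered correctly. */
        State *back = container_of(n, State, irq_notifier[0]);
        printf("%s\n", back == &s ? "recovered enclosing struct" : "bug");
        return 0;
    }
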
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 block/nvme.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index a61e86a83eb..fe8a40b7ede 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -106,6 +106,12 @@ QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
 #define INDEX_ADMIN 0
 #define INDEX_IO(n) (1 + n)
 
+/* This driver shares a single MSIX IRQ for the admin and I/O queues */
+enum {
+    MSIX_SHARED_IRQ_IDX = 0,
+    MSIX_IRQ_COUNT = 1
+};
+
 struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
@@ -120,7 +126,7 @@ struct BDRVNVMeState {
     /* How many uint32_t elements does each doorbell entry take. */
     size_t doorbell_scale;
     bool write_cache_supported;
-    EventNotifier irq_notifier;
+    EventNotifier irq_notifier[MSIX_IRQ_COUNT];
 
     uint64_t nsze; /* Namespace size reported by identify command */
     int nsid;      /* The namespace id to read/write data. */
@@ -631,7 +637,8 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
 
 static void nvme_handle_event(EventNotifier *n)
 {
-    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
+    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
+                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
 
     trace_nvme_handle_event(s);
     event_notifier_test_and_clear(n);
@@ -683,7 +690,8 @@ out_error:
 static bool nvme_poll_cb(void *opaque)
 {
     EventNotifier *e = opaque;
-    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
+    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
 
     trace_nvme_poll_cb(s);
     return nvme_poll_queues(s);
@@ -705,7 +713,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     s->device = g_strdup(device);
     s->nsid = namespace;
     s->aio_context = bdrv_get_aio_context(bs);
-    ret = event_notifier_init(&s->irq_notifier, 0);
+    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
     if (ret) {
         error_setg(errp, "Failed to init event notifier");
         return ret;
@@ -784,12 +792,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         }
     }
 
-    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
+    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                  VFIO_PCI_MSIX_IRQ_INDEX, errp);
     if (ret) {
         goto out;
     }
-    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+    aio_set_event_notifier(bdrv_get_aio_context(bs),
+                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                            false, nvme_handle_event, nvme_poll_cb);
 
     nvme_identify(bs, namespace, &local_err);
@@ -872,9 +881,10 @@ static void nvme_close(BlockDriverState *bs)
         nvme_free_queue_pair(s->queues[i]);
     }
     g_free(s->queues);
-    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+    aio_set_event_notifier(bdrv_get_aio_context(bs),
+                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                            false, NULL, NULL);
-    event_notifier_cleanup(&s->irq_notifier);
+    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
     qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
     qemu_vfio_close(s->vfio);
 
@@ -1381,7 +1391,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
         q->completion_bh = NULL;
     }
 
-    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+    aio_set_event_notifier(bdrv_get_aio_context(bs),
+                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                            false, NULL, NULL);
 }
 
@@ -1391,7 +1402,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
     BDRVNVMeState *s = bs->opaque;
 
     s->aio_context = new_context;
-    aio_set_event_notifier(new_context, &s->irq_notifier,
+    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                            false, nvme_handle_event, nvme_poll_cb);
 
     for (int i = 0; i < s->nr_queues; i++) {
--
2.26.2
Thread overview: 8+ messages
2020-08-19 16:03 [RFC PATCH v4 0/5] util/vfio-helpers: Add support for multiple IRQs Philippe Mathieu-Daudé
2020-08-19 16:03 ` Philippe Mathieu-Daudé [this message]
2020-08-20 15:01 ` [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier Stefan Hajnoczi
2020-08-19 16:03 ` [RFC PATCH v4 2/5] util/vfio-helpers: Improve reporting unsupported IOMMU type Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 3/5] util/vfio-helpers: Report error when IOMMU page size is not supported Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 4/5] util/vfio-helpers: Introduce qemu_vfio_pci_init_msix_irqs() Philippe Mathieu-Daudé
2020-08-19 17:12 ` Alex Williamson
2020-08-19 16:03 ` [RFC PATCH v4 5/5] block/nvme: Use qemu_vfio_pci_init_msix_irqs() to initialize our IRQ Philippe Mathieu-Daudé