* [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier
2020-08-19 16:03 [RFC PATCH v4 0/5] util/vfio-helpers: Add support for multiple IRQs Philippe Mathieu-Daudé
@ 2020-08-19 16:03 ` Philippe Mathieu-Daudé
2020-08-20 15:01 ` Stefan Hajnoczi
2020-08-19 16:03 ` [RFC PATCH v4 2/5] util/vfio-helpers: Improve reporting unsupported IOMMU type Philippe Mathieu-Daudé
` (3 subsequent siblings)
4 siblings, 1 reply; 8+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-08-19 16:03 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, Fam Zheng, qemu-block, Max Reitz, Alex Williamson,
Stefan Hajnoczi, Philippe Mathieu-Daudé
In preparation for using multiple IRQs (thus multiple eventfds),
make BDRVNVMeState::irq_notifier an array (for now with a single
element, the admin queue notifier).
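
For illustration only (not part of the patch), here is a minimal
stand-alone sketch of why container_of() still recovers the enclosing
state once the member is an array element, which is what the hunks
below rely on. The pared-down struct and the simplified container_of
macro are stand-ins for QEMU's definitions, not the real ones:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for QEMU's EventNotifier */
typedef struct EventNotifier { int rfd, wfd; } EventNotifier;

enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

/* Stand-in for BDRVNVMeState, keeping only the field that matters here */
typedef struct BDRVNVMeState {
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];
} BDRVNVMeState;

/* Simplified container_of (QEMU's version adds type checking) */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    BDRVNVMeState s;
    EventNotifier *n = &s.irq_notifier[MSIX_SHARED_IRQ_IDX];

    /* The member designator must name the element, not the whole array */
    BDRVNVMeState *back = container_of(n, BDRVNVMeState,
                                       irq_notifier[MSIX_SHARED_IRQ_IDX]);
    printf("%s\n", back == &s ? "recovered enclosing state" : "bug");
    return 0;
}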
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
block/nvme.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index a61e86a83eb..fe8a40b7ede 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -106,6 +106,12 @@ QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
#define INDEX_ADMIN 0
#define INDEX_IO(n) (1 + n)
+/* This driver shares a single MSIX IRQ for the admin and I/O queues */
+enum {
+ MSIX_SHARED_IRQ_IDX = 0,
+ MSIX_IRQ_COUNT = 1
+};
+
struct BDRVNVMeState {
AioContext *aio_context;
QEMUVFIOState *vfio;
@@ -120,7 +126,7 @@ struct BDRVNVMeState {
/* How many uint32_t elements does each doorbell entry take. */
size_t doorbell_scale;
bool write_cache_supported;
- EventNotifier irq_notifier;
+ EventNotifier irq_notifier[MSIX_IRQ_COUNT];
uint64_t nsze; /* Namespace size reported by identify command */
int nsid; /* The namespace id to read/write data. */
@@ -631,7 +637,8 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
static void nvme_handle_event(EventNotifier *n)
{
- BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
+ BDRVNVMeState *s = container_of(n, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
trace_nvme_handle_event(s);
event_notifier_test_and_clear(n);
@@ -683,7 +690,8 @@ out_error:
static bool nvme_poll_cb(void *opaque)
{
EventNotifier *e = opaque;
- BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
+ BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
trace_nvme_poll_cb(s);
return nvme_poll_queues(s);
@@ -705,7 +713,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->device = g_strdup(device);
s->nsid = namespace;
s->aio_context = bdrv_get_aio_context(bs);
- ret = event_notifier_init(&s->irq_notifier, 0);
+ ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
if (ret) {
error_setg(errp, "Failed to init event notifier");
return ret;
@@ -784,12 +792,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
}
}
- ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
+ ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
VFIO_PCI_MSIX_IRQ_INDEX, errp);
if (ret) {
goto out;
}
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
nvme_identify(bs, namespace, &local_err);
@@ -872,9 +881,10 @@ static void nvme_close(BlockDriverState *bs)
nvme_free_queue_pair(s->queues[i]);
}
g_free(s->queues);
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, NULL, NULL);
- event_notifier_cleanup(&s->irq_notifier);
+ event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
qemu_vfio_close(s->vfio);
@@ -1381,7 +1391,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
q->completion_bh = NULL;
}
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, NULL, NULL);
}
@@ -1391,7 +1402,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
BDRVNVMeState *s = bs->opaque;
s->aio_context = new_context;
- aio_set_event_notifier(new_context, &s->irq_notifier,
+ aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
for (int i = 0; i < s->nr_queues; i++) {
--
2.26.2
* Re: [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier
2020-08-19 16:03 ` [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier Philippe Mathieu-Daudé
@ 2020-08-20 15:01 ` Stefan Hajnoczi
0 siblings, 0 replies; 8+ messages in thread
From: Stefan Hajnoczi @ 2020-08-20 15:01 UTC (permalink / raw)
To: Philippe Mathieu-Daudé
Cc: Kevin Wolf, Fam Zheng, qemu-block, qemu-devel, Max Reitz,
Alex Williamson
On Wed, Aug 19, 2020 at 06:03:14PM +0200, Philippe Mathieu-Daudé wrote:
> In preparation for using multiple IRQs (thus multiple eventfds),
> make BDRVNVMeState::irq_notifier an array (for now with a single
> element, the admin queue notifier).
>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
> block/nvme.c | 31 +++++++++++++++++++++----------
> 1 file changed, 21 insertions(+), 10 deletions(-)
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
* [RFC PATCH v4 2/5] util/vfio-helpers: Improve reporting unsupported IOMMU type
2020-08-19 16:03 [RFC PATCH v4 0/5] util/vfio-helpers: Add support for multiple IRQs Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier Philippe Mathieu-Daudé
@ 2020-08-19 16:03 ` Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 3/5] util/vfio-helpers: Report error when IOMMU page size is not supported Philippe Mathieu-Daudé
` (2 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-08-19 16:03 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, Fam Zheng, qemu-block, Max Reitz, Alex Williamson,
Stefan Hajnoczi, Philippe Mathieu-Daudé
Replace the confusing "VFIO IOMMU check failed" error message
with the explicit "VFIO IOMMU Type1 is not supported" one.
Example on POWER:
$ qemu-system-ppc64 -drive if=none,id=nvme0,file=nvme://0001:01:00.0/1,format=raw
qemu-system-ppc64: -drive if=none,id=nvme0,file=nvme://0001:01:00.0/1,format=raw: VFIO IOMMU Type1 is not supported
Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
util/vfio-helpers.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index e399e330e26..9a83e6084e5 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -261,7 +261,7 @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
}
if (!ioctl(s->container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
- error_setg_errno(errp, errno, "VFIO IOMMU check failed");
+ error_setg_errno(errp, errno, "VFIO IOMMU Type1 is not supported");
ret = -EINVAL;
goto fail_container;
}
--
2.26.2
* [RFC PATCH v4 3/5] util/vfio-helpers: Report error when IOMMU page size is not supported
2020-08-19 16:03 [RFC PATCH v4 0/5] util/vfio-helpers: Add support for multiple IRQs Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 1/5] block/nvme: Use an array of EventNotifier Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 2/5] util/vfio-helpers: Improve reporting unsupported IOMMU type Philippe Mathieu-Daudé
@ 2020-08-19 16:03 ` Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 4/5] util/vfio-helpers: Introduce qemu_vfio_pci_init_msix_irqs() Philippe Mathieu-Daudé
2020-08-19 16:03 ` [RFC PATCH v4 5/5] block/nvme: Use qemu_vfio_pci_init_msix_irqs() to initialize our IRQ Philippe Mathieu-Daudé
4 siblings, 0 replies; 8+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-08-19 16:03 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, Fam Zheng, qemu-block, Max Reitz, Alex Williamson,
Stefan Hajnoczi, Philippe Mathieu-Daudé
This driver uses the host page size to align its memory regions,
but this size is not always compatible with the IOMMU. Add a check
that the host page size is supported, and bail out, listing the page
sizes the IOMMU does support, if it is not.
Example on Aarch64:
$ qemu-system-aarch64 -M virt -drive if=none,id=nvme0,file=nvme://0006:90:00.0/1,format=raw
qemu-system-aarch64: -drive if=none,id=nvme0,file=nvme://0006:90:00.0/1,format=raw: Unsupported IOMMU page size: 4 KiB
Available page size:
64 KiB
512 MiB
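
For illustration only (not part of the patch), a plain-C sketch of the
bit test the hunk below expresses with extract64()/ctz64(): each set
bit N in iova_pgsizes advertises support for a (1 << N)-byte page. The
bitmap value here is made up to mirror the Aarch64 example above, and
the helper name is hypothetical:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Bit ctz(page_size) of the IOMMU bitmap tells whether that size is usable */
static bool pgsize_supported(uint64_t iova_pgsizes, uint64_t host_page_size)
{
    return (iova_pgsizes >> __builtin_ctzll(host_page_size)) & 1;
}

int main(void)
{
    /* e.g. an IOMMU advertising only 64 KiB (bit 16) and 512 MiB (bit 29) */
    uint64_t iova_pgsizes = (1ULL << 16) | (1ULL << 29);

    printf("4 KiB host pages supported:  %d\n",
           pgsize_supported(iova_pgsizes, 4 * 1024));   /* prints 0 */
    printf("64 KiB host pages supported: %d\n",
           pgsize_supported(iova_pgsizes, 64 * 1024));  /* prints 1 */
    return 0;
}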
Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
util/vfio-helpers.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 9a83e6084e5..8f4a3d452ed 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/cutils.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include "qapi/error.h"
@@ -316,6 +317,25 @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
ret = -errno;
goto fail;
}
+ if (!(iommu_info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
+ error_setg(errp, "Failed to get IOMMU page size info");
+ ret = -errno;
+ goto fail;
+ }
+ if (!extract64(iommu_info.iova_pgsizes,
+ ctz64(qemu_real_host_page_size), 1)) {
+ g_autofree char *host_page_size = size_to_str(qemu_real_host_page_size);
+ error_setg(errp, "Unsupported IOMMU page size: %s", host_page_size);
+ error_append_hint(errp, "Available page size:\n");
+ for (int i = 0; i < 64; i++) {
+ if (extract64(iommu_info.iova_pgsizes, i, 1)) {
+ g_autofree char *iova_pgsizes = size_to_str(1UL << i);
+ error_append_hint(errp, " %s\n", iova_pgsizes);
+ }
+ }
+ ret = -EINVAL;
+ goto fail;
+ }
s->device = ioctl(s->group, VFIO_GROUP_GET_DEVICE_FD, device);
--
2.26.2
* [RFC PATCH v4 4/5] util/vfio-helpers: Introduce qemu_vfio_pci_init_msix_irqs()
2020-08-19 16:03 [RFC PATCH v4 0/5] util/vfio-helpers: Add support for multiple IRQs Philippe Mathieu-Daudé
` (2 preceding siblings ...)
2020-08-19 16:03 ` [RFC PATCH v4 3/5] util/vfio-helpers: Report error when IOMMU page size is not supported Philippe Mathieu-Daudé
@ 2020-08-19 16:03 ` Philippe Mathieu-Daudé
2020-08-19 17:12 ` Alex Williamson
2020-08-19 16:03 ` [RFC PATCH v4 5/5] block/nvme: Use qemu_vfio_pci_init_msix_irqs() to initialize our IRQ Philippe Mathieu-Daudé
4 siblings, 1 reply; 8+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-08-19 16:03 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, Fam Zheng, qemu-block, Max Reitz, Alex Williamson,
Stefan Hajnoczi, Philippe Mathieu-Daudé
qemu_vfio_pci_init_irq() allows us to initialize any type of IRQ,
but only one. Introduce qemu_vfio_pci_init_msix_irqs(), which is
specific to the MSIX IRQ type and allows us to use multiple IRQs
(thus passing multiple eventfd notifiers).
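
For illustration only (not part of the patch), a sketch of how a caller
might drive the new helper with several notifiers, essentially what the
last patch of this series does for a single vector. The wrapper function
and the count of four vectors are hypothetical; the QEMU types, headers
and the helper's signature are taken from the hunks below:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/event_notifier.h"
#include "qemu/vfio-helpers.h"

#define MY_MSIX_IRQ_COUNT 4 /* hypothetical: one admin + three I/O queues */

/* Create the eventfds, then bind them to the device's MSIX vectors */
static int init_msix_notifiers(QEMUVFIOState *vfio,
                               EventNotifier notifiers[MY_MSIX_IRQ_COUNT],
                               Error **errp)
{
    unsigned irq_count = MY_MSIX_IRQ_COUNT;

    for (unsigned i = 0; i < MY_MSIX_IRQ_COUNT; i++) {
        int ret = event_notifier_init(&notifiers[i], 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to init event notifier %u", i);
            return ret;
        }
    }
    /*
     * On -EOVERFLOW the helper stores the number of vectors the device
     * actually provides in irq_count, so the caller can retry with fewer.
     */
    return qemu_vfio_pci_init_msix_irqs(vfio, notifiers, &irq_count, errp);
}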
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
include/qemu/vfio-helpers.h | 2 ++
util/vfio-helpers.c | 57 +++++++++++++++++++++++++++++++++++++
2 files changed, 59 insertions(+)
diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index 1f057c2b9e4..8e6bd83ea41 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -28,5 +28,7 @@ void qemu_vfio_pci_unmap_bar(QEMUVFIOState *s, int index, void *bar,
uint64_t offset, uint64_t size);
int qemu_vfio_pci_init_irq(QEMUVFIOState *s, EventNotifier *e,
int irq_type, Error **errp);
+int qemu_vfio_pci_init_msix_irqs(QEMUVFIOState *s, EventNotifier *e,
+ unsigned *irq_count, Error **errp);
#endif
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 8f4a3d452ed..6f833972587 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -216,6 +216,63 @@ int qemu_vfio_pci_init_irq(QEMUVFIOState *s, EventNotifier *e,
return 0;
}
+/**
+ * Initialize device MSIX IRQs and register event notifiers.
+ * @irq_count: pointer to number of MSIX IRQs to initialize
+ * @notifier: Array of @irq_count notifiers (each corresponding to a MSIX IRQ)
+ *
+ * If the number of IRQs requested exceeds the number available on the
+ * device, store the available count in @irq_count and return -EOVERFLOW.
+ */
+int qemu_vfio_pci_init_msix_irqs(QEMUVFIOState *s, EventNotifier *notifier,
+ unsigned *irq_count, Error **errp)
+{
+ int r;
+ size_t irq_set_size;
+ struct vfio_irq_set *irq_set;
+ struct vfio_irq_info irq_info = {
+ .argsz = sizeof(irq_info),
+ .index = VFIO_PCI_MSIX_IRQ_INDEX
+ };
+
+ if (ioctl(s->device, VFIO_DEVICE_GET_IRQ_INFO, &irq_info)) {
+ error_setg_errno(errp, errno, "Failed to get device interrupt info");
+ return -errno;
+ }
+ if (irq_info.count < *irq_count) {
+ error_setg(errp, "Not enough device interrupts available");
+ *irq_count = irq_info.count;
+ return -EOVERFLOW;
+ }
+ if (!(irq_info.flags & VFIO_IRQ_INFO_EVENTFD)) {
+ error_setg(errp, "Device interrupt doesn't support eventfd");
+ return -EINVAL;
+ }
+
+ irq_set_size = sizeof(*irq_set) + *irq_count * sizeof(int32_t);
+ irq_set = g_malloc0(irq_set_size);
+
+ /* Get to a known IRQ state */
+ *irq_set = (struct vfio_irq_set) {
+ .argsz = irq_set_size,
+ .flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
+ .index = irq_info.index,
+ .start = 0,
+ .count = *irq_count,
+ };
+
+ for (unsigned i = 0; i < *irq_count; i++) {
+ ((int32_t *)&irq_set->data)[i] = event_notifier_get_fd(¬ifier[i]);
+ }
+ r = ioctl(s->device, VFIO_DEVICE_SET_IRQS, irq_set);
+ g_free(irq_set);
+ if (r) {
+ error_setg_errno(errp, errno, "Failed to setup device interrupts");
+ return -errno;
+ }
+ return 0;
+}
+
static int qemu_vfio_pci_read_config(QEMUVFIOState *s, void *buf,
int size, int ofs)
{
--
2.26.2
* Re: [RFC PATCH v4 4/5] util/vfio-helpers: Introduce qemu_vfio_pci_init_msix_irqs()
2020-08-19 16:03 ` [RFC PATCH v4 4/5] util/vfio-helpers: Introduce qemu_vfio_pci_init_msix_irqs() Philippe Mathieu-Daudé
@ 2020-08-19 17:12 ` Alex Williamson
0 siblings, 0 replies; 8+ messages in thread
From: Alex Williamson @ 2020-08-19 17:12 UTC (permalink / raw)
To: Philippe Mathieu-Daudé
Cc: Kevin Wolf, Fam Zheng, qemu-block, qemu-devel, Max Reitz,
Stefan Hajnoczi
On Wed, 19 Aug 2020 18:03:17 +0200
Philippe Mathieu-Daudé <philmd@redhat.com> wrote:
> qemu_vfio_pci_init_irq() allows us to initialize any type of IRQ,
> but only one. Introduce qemu_vfio_pci_init_msix_irqs(), which is
> specific to the MSIX IRQ type and allows us to use multiple IRQs
> (thus passing multiple eventfd notifiers).
>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
> include/qemu/vfio-helpers.h | 2 ++
> util/vfio-helpers.c | 57 +++++++++++++++++++++++++++++++++++++
> 2 files changed, 59 insertions(+)
>
> diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
> index 1f057c2b9e4..8e6bd83ea41 100644
> --- a/include/qemu/vfio-helpers.h
> +++ b/include/qemu/vfio-helpers.h
> @@ -28,5 +28,7 @@ void qemu_vfio_pci_unmap_bar(QEMUVFIOState *s, int index, void *bar,
> uint64_t offset, uint64_t size);
> int qemu_vfio_pci_init_irq(QEMUVFIOState *s, EventNotifier *e,
> int irq_type, Error **errp);
> +int qemu_vfio_pci_init_msix_irqs(QEMUVFIOState *s, EventNotifier *e,
> + unsigned *irq_count, Error **errp);
>
> #endif
> diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
> index 8f4a3d452ed..6f833972587 100644
> --- a/util/vfio-helpers.c
> +++ b/util/vfio-helpers.c
> @@ -216,6 +216,63 @@ int qemu_vfio_pci_init_irq(QEMUVFIOState *s, EventNotifier *e,
> return 0;
> }
>
> +/**
> + * Initialize device MSIX IRQs and register event notifiers.
> + * @irq_count: pointer to number of MSIX IRQs to initialize
> + * @notifier: Array of @irq_count notifiers (each corresponding to a MSIX IRQ)
> + *
> + * If the number of IRQs requested exceeds the number available on the
> + * device, store the available count in @irq_count and return -EOVERFLOW.
> + */
> +int qemu_vfio_pci_init_msix_irqs(QEMUVFIOState *s, EventNotifier *notifier,
> + unsigned *irq_count, Error **errp)
> +{
> + int r;
> + size_t irq_set_size;
> + struct vfio_irq_set *irq_set;
> + struct vfio_irq_info irq_info = {
> + .argsz = sizeof(irq_info),
> + .index = VFIO_PCI_MSIX_IRQ_INDEX
> + };
> +
> + if (ioctl(s->device, VFIO_DEVICE_GET_IRQ_INFO, &irq_info)) {
> + error_setg_errno(errp, errno, "Failed to get device interrupt info");
> + return -errno;
> + }
> + if (irq_info.count < *irq_count) {
> + error_setg(errp, "Not enough device interrupts available");
> + *irq_count = irq_info.count;
> + return -EOVERFLOW;
> + }
> + if (!(irq_info.flags & VFIO_IRQ_INFO_EVENTFD)) {
> + error_setg(errp, "Device interrupt doesn't support eventfd");
> + return -EINVAL;
> + }
> +
> + irq_set_size = sizeof(*irq_set) + *irq_count * sizeof(int32_t);
> + irq_set = g_malloc0(irq_set_size);
> +
> + /* Get to a known IRQ state */
> + *irq_set = (struct vfio_irq_set) {
> + .argsz = irq_set_size,
> + .flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
> + .index = irq_info.index,
> + .start = 0,
> + .count = *irq_count,
> + };
> +
> + for (unsigned i = 0; i < *irq_count; i++) {
> + ((int32_t *)&irq_set->data)[i] = event_notifier_get_fd(¬ifier[i]);
> + }
> + r = ioctl(s->device, VFIO_DEVICE_SET_IRQS, irq_set);
> + g_free(irq_set);
> + if (r) {
> + error_setg_errno(errp, errno, "Failed to setup device interrupts");
> + return -errno;
FWIW, the earlier irq_info.count check reports what the device is
capable of, but the platform might only have limited vector space
available, so this ioctl can also return a value indicating the number
of vectors *actually* available. So if r > 0 you could return it in
*irq_count (which also makes me wonder whether errno would be set in
that case). Thanks,
Alex
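
For illustration only (this is not a follow-up patch from the thread),
one way the suggestion could look inside the helper. Whether errno is
meaningful when the ioctl returns a positive count is exactly the open
question raised above, so this sketch relies on the return value alone:

    r = ioctl(s->device, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (r > 0) {
        /*
         * The kernel reported how many vectors the platform can actually
         * provide; hand that back, as the irq_info.count check does.
         */
        *irq_count = r;
        error_setg(errp, "Not enough free interrupt vectors available");
        return -EOVERFLOW;
    } else if (r < 0) {
        error_setg_errno(errp, errno, "Failed to setup device interrupts");
        return -errno;
    }
    return 0;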
> + }
> + return 0;
> +}
> +
> static int qemu_vfio_pci_read_config(QEMUVFIOState *s, void *buf,
> int size, int ofs)
> {
* [RFC PATCH v4 5/5] block/nvme: Use qemu_vfio_pci_init_msix_irqs() to initialize our IRQ
2020-08-19 16:03 [RFC PATCH v4 0/5] util/vfio-helpers: Add support for multiple IRQs Philippe Mathieu-Daudé
` (3 preceding siblings ...)
2020-08-19 16:03 ` [RFC PATCH v4 4/5] util/vfio-helpers: Introduce qemu_vfio_pci_init_msix_irqs() Philippe Mathieu-Daudé
@ 2020-08-19 16:03 ` Philippe Mathieu-Daudé
4 siblings, 0 replies; 8+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-08-19 16:03 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, Fam Zheng, qemu-block, Max Reitz, Alex Williamson,
Stefan Hajnoczi, Philippe Mathieu-Daudé
Instead of initializing one MSIX IRQ with the generic
qemu_vfio_pci_init_irq() function, use the MSIX-specific one, which
will allow us to use multiple IRQs. For now we provide an array of
a single IRQ.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
block/nvme.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index fe8a40b7ede..cafa2cb3f95 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -707,6 +707,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
uint64_t timeout_ms;
uint64_t deadline, now;
Error *local_err = NULL;
+ unsigned irq_count = MSIX_IRQ_COUNT;
qemu_co_mutex_init(&s->dma_map_lock);
qemu_co_queue_init(&s->dma_flush_queue);
@@ -792,9 +793,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
}
}
- ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
- VFIO_PCI_MSIX_IRQ_INDEX, errp);
+ ret = qemu_vfio_pci_init_msix_irqs(s->vfio, s->irq_notifier,
+ &irq_count, errp);
if (ret) {
+ if (ret == -EOVERFLOW) {
+ error_append_hint(errp, "%u IRQs requested but only %u available\n",
+ MSIX_IRQ_COUNT, irq_count);
+ }
goto out;
}
aio_set_event_notifier(bdrv_get_aio_context(bs),
--
2.26.2