From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>, Cindy Lu <lulu@redhat.com>
Subject: [PULL 05/52] virtio-pci: decouple the single vector from the interrupt process
Date: Thu, 6 Jan 2022 08:16:29 -0500 [thread overview]
Message-ID: <20220106131534.423671-6-mst@redhat.com> (raw)
In-Reply-To: <20220106131534.423671-1-mst@redhat.com>
From: Cindy Lu <lulu@redhat.com>
To reuse the interrupt process for the configure interrupt, we need to
decouple the single vector from the interrupt process. Add the new functions
kvm_virtio_pci_vector_use_one and _release_one. These functions are used
for a single vector; the whole process finishes in a loop over the vq numbers.
Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20211104164827.21911-4-lulu@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
hw/virtio/virtio-pci.c | 131 +++++++++++++++++++++++------------------
1 file changed, 73 insertions(+), 58 deletions(-)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 4bdb6e5694..7201cf3dc1 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -677,7 +677,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
unsigned int vector)
{
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -740,87 +739,103 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
return 0;
}
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
+ unsigned int vector;
+ int ret;
+ EventNotifier *n;
PCIDevice *dev = &proxy->pci_dev;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
- unsigned int vector;
- int ret, queue_no;
- EventNotifier *n;
- for (queue_no = 0; queue_no < nvqs; queue_no++) {
- if (!virtio_queue_get_num(vdev, queue_no)) {
- break;
- }
- ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
- if (ret < 0) {
- break;
- }
- if (vector >= msix_nr_vectors_allocated(dev)) {
- continue;
- }
- ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
+
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ return ret;
+ }
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return 0;
+ }
+ ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+ if (ret < 0) {
+ goto undo;
+ }
+ /*
+ * If guest supports masking, set up irqfd now.
+ * Otherwise, delay until unmasked in the frontend.
+ */
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
if (ret < 0) {
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
goto undo;
}
- /* If guest supports masking, set up irqfd now.
- * Otherwise, delay until unmasked in the frontend.
- */
- if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
- if (ret < 0) {
- kvm_virtio_pci_vq_vector_release(proxy, vector);
- goto undo;
- }
- }
}
- return 0;
+ return 0;
undo:
- while (--queue_no >= 0) {
- vector = virtio_queue_vector(vdev, queue_no);
- if (vector >= msix_nr_vectors_allocated(dev)) {
- continue;
+
+ vector = virtio_queue_vector(vdev, queue_no);
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return ret;
+ }
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ return ret;
}
- if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
- if (ret < 0) {
- break;
- }
- kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ }
+ return ret;
+}
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+ int queue_no;
+ int ret = 0;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ for (queue_no = 0; queue_no < nvqs; queue_no++) {
+ if (!virtio_queue_get_num(vdev, queue_no)) {
+ return -1;
}
- kvm_virtio_pci_vq_vector_release(proxy, vector);
+ ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
}
return ret;
}
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+
+static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
+ int queue_no)
{
- PCIDevice *dev = &proxy->pci_dev;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
unsigned int vector;
- int queue_no;
- VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
EventNotifier *n;
- int ret ;
+ int ret;
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ PCIDevice *dev = &proxy->pci_dev;
+
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ return;
+ }
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return;
+ }
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ }
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+ int queue_no;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
for (queue_no = 0; queue_no < nvqs; queue_no++) {
if (!virtio_queue_get_num(vdev, queue_no)) {
break;
}
- ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
- if (ret < 0) {
- break;
- }
- if (vector >= msix_nr_vectors_allocated(dev)) {
- continue;
- }
- /* If guest supports masking, clean up irqfd now.
- * Otherwise, it was cleaned when masked in the frontend.
- */
- if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- kvm_virtio_pci_irqfd_release(proxy, n, vector);
- }
- kvm_virtio_pci_vq_vector_release(proxy, vector);
+ kvm_virtio_pci_vector_release_one(proxy, queue_no);
}
}
--
MST
next prev parent reply other threads:[~2022-01-06 13:21 UTC|newest]
Thread overview: 56+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-01-06 13:16 [PULL 00/52] virtio,pci,pc: features,fixes,cleanups Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 01/52] virtio-mem: Don't skip alignment checks when warning about block size Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 02/52] acpi: validate hotplug selector on access Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 03/52] virtio: introduce macro IRTIO_CONFIG_IRQ_IDX Michael S. Tsirkin
2022-01-06 13:16 ` Michael S. Tsirkin [this message]
2022-01-06 13:16 ` [PULL 06/52] vhost: introduce new VhostOps vhost_set_config_call Michael S. Tsirkin
2022-01-06 13:22 ` Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 07/52] vhost-vdpa: add support for config interrupt Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 08/52] virtio: add support for configure interrupt Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 09/52] vhost: " Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 10/52] virtio-net: " Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 11/52] virtio-mmio: " Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 12/52] virtio-pci: " Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 13/52] trace-events,pci: unify trace events format Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 14/52] vhost-user-blk: reconnect on any error during realize Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 15/52] chardev/char-socket: tcp_chr_recv: don't clobber errno Michael S. Tsirkin
2022-01-06 13:16 ` [PULL 16/52] chardev/char-socket: tcp_chr_sync_read: " Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 17/52] vhost-backend: avoid overflow on memslots_limit Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 18/52] vhost-backend: stick to -errno error return convention Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 19/52] vhost-vdpa: " Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 20/52] vhost-user: " Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 21/52] vhost: " Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 22/52] vhost-user-blk: propagate error return from generic vhost Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 23/52] pci: Export the pci_intx() function Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 24/52] pcie_aer: Don't trigger a LSI if none are defined Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 25/52] smbios: Rename SMBIOS_ENTRY_POINT_* enums Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 26/52] hw/smbios: Use qapi for SmbiosEntryPointType Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 27/52] hw/i386: expose a "smbios-entry-point-type" PC machine property Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 28/52] hw/vhost-user-blk: turn on VIRTIO_BLK_F_SIZE_MAX feature for virtio blk device Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 29/52] util/oslib-posix: Let touch_all_pages() return an error Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 30/52] util/oslib-posix: Support MADV_POPULATE_WRITE for os_mem_prealloc() Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 31/52] util/oslib-posix: Introduce and use MemsetContext for touch_all_pages() Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 32/52] util/oslib-posix: Don't create too many threads with small memory or little pages Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 33/52] util/oslib-posix: Avoid creating a single thread with MADV_POPULATE_WRITE Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 34/52] util/oslib-posix: Support concurrent os_mem_prealloc() invocation Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 35/52] util/oslib-posix: Forward SIGBUS to MCE handler under Linux Michael S. Tsirkin
2022-01-06 13:17 ` [PULL 36/52] virtio-mem: Support "prealloc=on" option Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 37/52] virtio: signal after wrapping packed used_idx Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 38/52] MAINTAINERS: Add a separate entry for acpi/VIOT tables Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 39/52] linux-headers: sync VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 40/52] virtio-mem: Support VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 41/52] virtio-mem: Set "unplugged-inaccessible=auto" for the 7.0 machine on x86 Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 42/52] intel-iommu: correctly check passthrough during translation Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 43/52] acpi: fix QEMU crash when started with SLIC table Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 46/52] tests: acpi: SLIC: update expected blobs Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 47/52] acpihp: simplify acpi_pcihp_disable_root_bus Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 48/52] hw/i386/pc: Add missing property descriptions Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 49/52] docs: reSTify virtio-balloon-stats documentation and move to docs/interop Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 50/52] hw/scsi/vhost-scsi: don't leak vqs on error Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 51/52] hw/scsi/vhost-scsi: don't double close vhostfd " Michael S. Tsirkin
2022-01-06 13:18 ` [PULL 52/52] virtio/vhost-vsock: don't double close vhostfd, remove redundant cleanup Michael S. Tsirkin
2022-01-06 13:21 ` [PULL 45/52] tests: acpi: add SLIC table test Michael S. Tsirkin
2022-01-06 13:18 ` Michael S. Tsirkin
2022-01-06 13:21 ` [PULL 44/52] tests: acpi: whitelist expected blobs before changing them Michael S. Tsirkin
2022-01-06 13:22 ` [PULL 04/52] virtio-pci: decouple notifier from interrupt process Michael S. Tsirkin
2022-01-06 23:06 ` [PULL 00/52] virtio,pci,pc: features,fixes,cleanups Richard Henderson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220106131534.423671-6-mst@redhat.com \
--to=mst@redhat.com \
--cc=lulu@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).