From: Jason Wang <jasowang@redhat.com>
To: Cindy Lu <lulu@redhat.com>, mst@redhat.com, qemu-devel@nongnu.org
Subject: Re: [PATCH v4 4/4] virtio-pci: add support for configure interrupt
Date: Wed, 24 Mar 2021 14:34:06 +0800
Message-ID: <1b6d8291-0ac9-06fe-6f8a-efcaad542bc3@redhat.com>
In-Reply-To: <20210323015641.10820-5-lulu@redhat.com>
On 2021/3/23 9:56 AM, Cindy Lu wrote:
> Add support for the configure interrupt: use kvm_irqfd_assign to set
> the gsi in the kernel. When the config notifier is signalled
> (eventfd_signal) by the host kernel, this finally injects an MSI-X
> interrupt into the guest.
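Just to double check my understanding, the flow this series wires up for
the config interrupt, condensed into a sketch (error handling elided;
virtio_get_config_notifier() comes from patch 1/4, and
kvm_irqchip_add_msi_route() is what kvm_virtio_pci_vq_vector_use() wraps
below):

    EventNotifier *n = virtio_get_config_notifier(vdev);
    unsigned int vector = vdev->config_vector;

    /* eventfd backing the config notifier */
    event_notifier_init(n, 0);
    /* route the MSI-X vector to a virq (gsi) in the kernel irqchip */
    int virq = kvm_irqchip_add_msi_route(kvm_state, vector,
                                         &proxy->pci_dev);
    /* attach the eventfd to that virq */
    kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, virq);

From here on, an eventfd_signal() on the host side (e.g. from vhost-vdpa)
injects the MSI-X interrupt straight into the guest without going through
QEMU.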
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
> hw/virtio/virtio-pci.c | 171 +++++++++++++++++++++++++++++++++--------
> 1 file changed, 137 insertions(+), 34 deletions(-)
>
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 36524a5728..b0c190caba 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -664,7 +664,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
> }
>
> static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
> - unsigned int queue_no,
> unsigned int vector)
Let's use a separate patch for decoupling queue_no from those irqfd
helpers.
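E.g. a preparatory patch that just moves the
virtio_queue_get_guest_notifier() lookup to the callers and switches the
helpers to take the notifier, i.e. the signatures this patch already ends
up with (sketch only):

    static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                        EventNotifier *n,
                                        unsigned int vector);

    static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                             EventNotifier *n,
                                             unsigned int vector);

Then the config interrupt support can go on top as its own patch, which
keeps each change small and bisectable.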
Thanks
> {
> VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> @@ -691,23 +690,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
> }
>
> static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
> - unsigned int queue_no,
> + EventNotifier *n,
> unsigned int vector)
> {
> VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> - VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> - EventNotifier *n = virtio_queue_get_guest_notifier(vq);
> return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
> }
>
> static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
> - unsigned int queue_no,
> + EventNotifier *n,
> unsigned int vector)
> {
> - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> - VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> - EventNotifier *n = virtio_queue_get_guest_notifier(vq);
> VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> int ret;
>
> @@ -722,7 +715,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> unsigned int vector;
> int ret, queue_no;
> -
> + VirtQueue *vq;
> + EventNotifier *n;
> for (queue_no = 0; queue_no < nvqs; queue_no++) {
> if (!virtio_queue_get_num(vdev, queue_no)) {
> break;
> @@ -731,7 +725,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> if (vector >= msix_nr_vectors_allocated(dev)) {
> continue;
> }
> - ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
> + ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
> if (ret < 0) {
> goto undo;
> }
> @@ -739,7 +733,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> * Otherwise, delay until unmasked in the frontend.
> */
> if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> - ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> + vq = virtio_get_queue(vdev, queue_no);
> + n = virtio_queue_get_guest_notifier(vq);
> + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> if (ret < 0) {
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> goto undo;
> @@ -755,13 +751,69 @@ undo:
> continue;
> }
> if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> - kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> + vq = virtio_get_queue(vdev, queue_no);
> + n = virtio_queue_get_guest_notifier(vq);
> + kvm_virtio_pci_irqfd_release(proxy, n, vector);
> }
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> }
> return ret;
> }
>
> +static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
> +{
> + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> + unsigned int vector;
> + int ret;
> + EventNotifier *n = virtio_get_config_notifier(vdev);
> +
> + vector = vdev->config_vector;
> + ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
> + if (ret < 0) {
> + goto undo;
> + }
> + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> + if (ret < 0) {
> + goto undo;
> + }
> + return 0;
> +undo:
> + kvm_virtio_pci_irqfd_release(proxy, n, vector);
> + return ret;
> +}
> +
> +static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
> +{
> + PCIDevice *dev = &proxy->pci_dev;
> + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> + unsigned int vector;
> + EventNotifier *n = virtio_get_config_notifier(vdev);
> + vector = vdev->config_vector;
> + if (vector >= msix_nr_vectors_allocated(dev)) {
> + return;
> + }
> + kvm_virtio_pci_irqfd_release(proxy, n, vector);
> + kvm_virtio_pci_vq_vector_release(proxy, vector);
> +}
> +
> +static int virtio_pci_set_config_notifier(DeviceState *d, bool assign)
> +{
> + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
> + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> + EventNotifier *notifier = virtio_get_config_notifier(vdev);
> + int r = 0;
> + if (assign) {
> + r = event_notifier_init(notifier, 0);
> + virtio_set_config_notifier_fd_handler(vdev, true, true);
> + kvm_virtio_pci_vector_config_use(proxy);
> + } else {
> + virtio_set_config_notifier_fd_handler(vdev, false, true);
> + kvm_virtio_pci_vector_config_release(proxy);
> + event_notifier_cleanup(notifier);
> + }
> + return r;
> +}
> +
> static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> {
> PCIDevice *dev = &proxy->pci_dev;
> @@ -769,7 +821,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> unsigned int vector;
> int queue_no;
> VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -
> + VirtQueue *vq;
> + EventNotifier *n;
> for (queue_no = 0; queue_no < nvqs; queue_no++) {
> if (!virtio_queue_get_num(vdev, queue_no)) {
> break;
> @@ -782,7 +835,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> * Otherwise, it was cleaned when masked in the frontend.
> */
> if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> - kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> + vq = virtio_get_queue(vdev, queue_no);
> + n = virtio_queue_get_guest_notifier(vq);
> + kvm_virtio_pci_irqfd_release(proxy, n, vector);
> }
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> }
> @@ -791,15 +846,14 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> unsigned int queue_no,
> unsigned int vector,
> - MSIMessage msg)
> + MSIMessage msg,
> + int type,
> + EventNotifier *n)
> {
> VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> - VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> - EventNotifier *n = virtio_queue_get_guest_notifier(vq);
> VirtIOIRQFD *irqfd;
> int ret = 0;
> -
> if (proxy->vector_irqfd) {
> irqfd = &proxy->vector_irqfd[vector];
> if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
> @@ -816,32 +870,33 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> * Otherwise, set it up now.
> */
> if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> - k->guest_notifier_mask(vdev, queue_no, false);
> + k->guest_notifier_mask(vdev, queue_no, false, type);
> /* Test after unmasking to avoid losing events. */
> if (k->guest_notifier_pending &&
> - k->guest_notifier_pending(vdev, queue_no)) {
> + k->guest_notifier_pending(vdev, queue_no, type)) {
> event_notifier_set(n);
> }
> } else {
> - ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> }
> return ret;
> }
>
> static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
> unsigned int queue_no,
> - unsigned int vector)
> + unsigned int vector,
> + int type,
> + EventNotifier *n)
> {
> VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -
> /* If guest supports masking, keep irqfd but mask it.
> * Otherwise, clean it up now.
> */
> if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> - k->guest_notifier_mask(vdev, queue_no, true);
> + k->guest_notifier_mask(vdev, queue_no, true, type);
> } else {
> - kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> + kvm_virtio_pci_irqfd_release(proxy, n, vector);
> }
> }
>
> @@ -851,15 +906,26 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
> VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
> VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> + EventNotifier *n;
> int ret, index, unmasked = 0;
>
> + if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> + n = virtio_get_config_notifier(vdev);
> + ret = virtio_pci_vq_vector_unmask(proxy, 0, vector, msg,
> + VIRTIO_CONFIG_VECTOR, n);
> + if (ret < 0) {
> + goto config_undo;
> + }
> + }
> while (vq) {
> index = virtio_get_queue_index(vq);
> if (!virtio_queue_get_num(vdev, index)) {
> break;
> }
> if (index < proxy->nvqs_with_notifiers) {
> - ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
> + n = virtio_queue_get_guest_notifier(vq);
> + ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg,
> + VIRTIO_VQ_VECTOR, n);
> if (ret < 0) {
> goto undo;
> }
> @@ -875,11 +941,17 @@ undo:
> while (vq && unmasked >= 0) {
> index = virtio_get_queue_index(vq);
> if (index < proxy->nvqs_with_notifiers) {
> - virtio_pci_vq_vector_mask(proxy, index, vector);
> + n = virtio_queue_get_guest_notifier(vq);
> + virtio_pci_vq_vector_mask(proxy, index, vector,
> + VIRTIO_VQ_VECTOR, n);
> --unmasked;
> }
> vq = virtio_vector_next_queue(vq);
> }
> +config_undo:
> + n = virtio_get_config_notifier(vdev);
> + virtio_pci_vq_vector_mask(proxy, 0, vector,
> + VIRTIO_CONFIG_VECTOR, n);
> return ret;
> }
>
> @@ -888,18 +960,26 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
> VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
> VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> + EventNotifier *n;
> int index;
>
> + if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> + n = virtio_get_config_notifier(vdev);
> + virtio_pci_vq_vector_mask(proxy, 0, vector, VIRTIO_CONFIG_VECTOR, n);
> + }
> while (vq) {
> index = virtio_get_queue_index(vq);
> + n = virtio_queue_get_guest_notifier(vq);
> if (!virtio_queue_get_num(vdev, index)) {
> break;
> }
> if (index < proxy->nvqs_with_notifiers) {
> - virtio_pci_vq_vector_mask(proxy, index, vector);
> + virtio_pci_vq_vector_mask(proxy, index, vector,
> + VIRTIO_VQ_VECTOR, n);
> }
> vq = virtio_vector_next_queue(vq);
> }
> }
>
> static void virtio_pci_vector_poll(PCIDevice *dev,
> @@ -918,6 +998,7 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
> if (!virtio_queue_get_num(vdev, queue_no)) {
> break;
> }
> vector = virtio_queue_vector(vdev, queue_no);
> if (vector < vector_start || vector >= vector_end ||
> !msix_is_masked(dev, vector)) {
> @@ -926,7 +1007,22 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
> vq = virtio_get_queue(vdev, queue_no);
> notifier = virtio_queue_get_guest_notifier(vq);
> if (k->guest_notifier_pending) {
> - if (k->guest_notifier_pending(vdev, queue_no)) {
> + if (k->guest_notifier_pending(vdev, queue_no, VIRTIO_VQ_VECTOR)) {
> + msix_set_pending(dev, vector);
> + }
> + } else if (event_notifier_test_and_clear(notifier)) {
> + msix_set_pending(dev, vector);
> + }
> + }
> + if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> + vector = vdev->config_vector;
> + notifier = virtio_get_config_notifier(vdev);
> + if (vector < vector_start || vector >= vector_end ||
> + !msix_is_masked(dev, vector)) {
> + return;
> + }
> + if (k->guest_notifier_pending) {
> + if (k->guest_notifier_pending(vdev, 0, VIRTIO_CONFIG_VECTOR)) {
> msix_set_pending(dev, vector);
> }
> } else if (event_notifier_test_and_clear(notifier)) {
> @@ -958,7 +1054,7 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> if (!msix_enabled(&proxy->pci_dev) &&
> vdev->use_guest_notifier_mask &&
> vdc->guest_notifier_mask) {
> - vdc->guest_notifier_mask(vdev, n, !assign);
> + vdc->guest_notifier_mask(vdev, n, !assign, VIRTIO_VQ_VECTOR);
> }
>
> return 0;
> @@ -1008,7 +1104,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> goto assign_error;
> }
> }
> -
> /* Must set vector notifier after guest notifier has been assigned */
> if ((with_irqfd || k->guest_notifier_mask) && assign) {
> if (with_irqfd) {
> @@ -1020,6 +1115,12 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> goto assign_error;
> }
> }
> + if (vdev->use_config_notifier == VIRTIO_CONFIG_SUPPORT) {
> + r = virtio_pci_set_config_notifier(d, assign);
> + if (r < 0) {
> + goto config_error;
> + }
> + }
> r = msix_set_vector_notifiers(&proxy->pci_dev,
> virtio_pci_vector_unmask,
> virtio_pci_vector_mask,
> @@ -1028,7 +1129,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> goto notifiers_error;
> }
> }
> -
> return 0;
>
> notifiers_error:
> @@ -1036,13 +1136,16 @@ notifiers_error:
> assert(assign);
> kvm_virtio_pci_vector_release(proxy, nvqs);
> }
> -
> +config_error:
> + /* Release the config vector's MSI route and irqfd on failure. */
> + kvm_virtio_pci_vector_config_release(proxy);
> assign_error:
> /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
> assert(assign);
> while (--n >= 0) {
> virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
> }
> return r;
> }
>
Thread overview: 13+ messages
2021-03-23 1:56 [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt Cindy Lu
2021-03-23 1:56 ` [PATCH v4 1/4] virtio:add support in " Cindy Lu
2021-03-24 6:30 ` Jason Wang
2021-03-25 7:15 ` Cindy Lu
2021-03-26 8:29 ` Jason Wang
2021-03-29 6:05 ` Cindy Lu
2021-03-23 1:56 ` [PATCH v4 2/4] vhost-vdpa: add callback function for " Cindy Lu
2021-03-24 6:33 ` Jason Wang
2021-03-25 7:17 ` Cindy Lu
2021-03-23 1:56 ` [PATCH v4 3/4] virtio-mmio: add support " Cindy Lu
2021-03-23 1:56 ` [PATCH v4 4/4] virtio-pci: " Cindy Lu
2021-03-24 6:34 ` Jason Wang [this message]
2021-03-25 6:07 ` Cindy Lu