qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Jason Wang <jasowang@redhat.com>
To: Cindy Lu <lulu@redhat.com>, mst@redhat.com, qemu-devel@nongnu.org
Subject: Re: [PATCH v7 08/10] virtio-pci: decouple virtqueue from kvm_virtio_pci_vector_use
Date: Thu, 3 Jun 2021 14:39:23 +0800	[thread overview]
Message-ID: <a4dae2fa-a023-5a81-7e9e-5c6164962324@redhat.com> (raw)
In-Reply-To: <20210602034750.23377-9-lulu@redhat.com>


在 2021/6/2 上午11:47, Cindy Lu 写道:
> inorder


s/inorder/In order/


> to support configure interrupt, we need to decouple
> virtqueue from vector use and vector release function
> this patch introduce vector_release_one and vector_use_one
> to support one vector.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>


I think we need to reorder the patches to let such decoupling come 
first in this series.


> ---
>   hw/virtio/virtio-pci.c | 122 ++++++++++++++++++++---------------------
>   1 file changed, 61 insertions(+), 61 deletions(-)
>
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 6a4ef413a4..f863c89de6 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -666,7 +666,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
>   }
>   
>   static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
> -                                        unsigned int queue_no,
>                                           unsigned int vector)
>   {
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> @@ -710,85 +709,86 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
>       ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
>       assert(ret == 0);
>   }
> -
> -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> +static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
> +                                      EventNotifier **n, unsigned int *vector)
>   {
>       PCIDevice *dev = &proxy->pci_dev;
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> -    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -    unsigned int vector;
> -    int ret, queue_no;
>       VirtQueue *vq;
> -    EventNotifier *n;
> -    for (queue_no = 0; queue_no < nvqs; queue_no++) {
> +
> +    if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
> +        return -1;
> +    } else {
>           if (!virtio_queue_get_num(vdev, queue_no)) {
> -            break;
> -        }
> -        vector = virtio_queue_vector(vdev, queue_no);
> -        if (vector >= msix_nr_vectors_allocated(dev)) {
> -            continue;
> -        }
> -        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
> -        if (ret < 0) {
> -            goto undo;
> -        }
> -        /* If guest supports masking, set up irqfd now.
> -         * Otherwise, delay until unmasked in the frontend.
> -         */
> -        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            vq = virtio_get_queue(vdev, queue_no);
> -            n = virtio_queue_get_guest_notifier(vq);
> -            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> -            if (ret < 0) {
> -                kvm_virtio_pci_vq_vector_release(proxy, vector);
> -                goto undo;
> -            }
> +            return -1;
>           }
> +        *vector = virtio_queue_vector(vdev, queue_no);
> +        vq = virtio_get_queue(vdev, queue_no);
> +        *n = virtio_queue_get_guest_notifier(vq);
> +    }
> +    if (*vector >= msix_nr_vectors_allocated(dev)) {
> +        return -1;
>       }
>       return 0;
> +}
>   
> +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
> +{


To ease the reviewer, let's separate this patch into two.

1) factoring out the core logic
2) decouple the vq

Thanks


> +    unsigned int vector;
> +    int ret;
> +    EventNotifier *n;
> +    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
> +    if (ret < 0) {
> +        return ret;
> +    }
> +    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
> +    if (ret < 0) {
> +        goto undo;
> +    }
> +    ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> +    if (ret < 0) {
> +        goto undo;
> +    }
> +    return 0;
>   undo:
> -    while (--queue_no >= 0) {
> -        vector = virtio_queue_vector(vdev, queue_no);
> -        if (vector >= msix_nr_vectors_allocated(dev)) {
> -            continue;
> -        }
> -        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            vq = virtio_get_queue(vdev, queue_no);
> -            n = virtio_queue_get_guest_notifier(vq);
> -            kvm_virtio_pci_irqfd_release(proxy, n, vector);
> -        }
> -        kvm_virtio_pci_vq_vector_release(proxy, vector);
> +    kvm_virtio_pci_irqfd_release(proxy, n, vector);
> +    return ret;
> +}
> +static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> +{
> +    int queue_no;
> +    int ret = 0;
> +    for (queue_no = 0; queue_no < nvqs; queue_no++) {
> +        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
>       }
>       return ret;
>   }
>   
> -static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> +
> +static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
> +                        int queue_no)
>   {
> -    PCIDevice *dev = &proxy->pci_dev;
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       unsigned int vector;
> -    int queue_no;
> -    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -    VirtQueue *vq;
>       EventNotifier *n;
> +    int ret;
> +    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> +    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
> +    if (ret < 0) {
> +        return;
> +    }
> +
> +    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> +        kvm_virtio_pci_irqfd_release(proxy, n, vector);
> +    }
> +    kvm_virtio_pci_vq_vector_release(proxy, vector);
> +}
> +static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> +{
> +    int queue_no;
> +
>       for (queue_no = 0; queue_no < nvqs; queue_no++) {
> -        if (!virtio_queue_get_num(vdev, queue_no)) {
> -            break;
> -        }
> -        vector = virtio_queue_vector(vdev, queue_no);
> -        if (vector >= msix_nr_vectors_allocated(dev)) {
> -            continue;
> -        }
> -        /* If guest supports masking, clean up irqfd now.
> -         * Otherwise, it was cleaned when masked in the frontend.
> -         */
> -        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            vq = virtio_get_queue(vdev, queue_no);
> -            n = virtio_queue_get_guest_notifier(vq);
> -            kvm_virtio_pci_irqfd_release(proxy, n, vector);
> -        }
> -        kvm_virtio_pci_vq_vector_release(proxy, vector);
> +        kvm_virtio_pci_vector_release_one(proxy, queue_no);
>       }
>   }
>   



  reply	other threads:[~2021-06-03  6:40 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-06-02  3:47 [PATCH v7 00/10] vhost-vdpa: add support for configure interrupt Cindy Lu
2021-06-02  3:47 ` [PATCH v7 01/10] virtio: introduce macro VIRTIO_CONFIG_IRQ_IDX Cindy Lu
2021-06-02  3:47 ` [PATCH v7 02/10] virtio-pci:decouple virtqueue from interrupt setting process Cindy Lu
2021-06-03  5:51   ` Jason Wang
2021-06-02  3:47 ` [PATCH v7 03/10] virtio: decouple virtqueue from set notifier fd handler Cindy Lu
2021-06-03  6:01   ` Jason Wang
2021-06-02  3:47 ` [PATCH v7 04/10] vhost: add new call back function for config interrupt Cindy Lu
2021-06-03  6:04   ` Jason Wang
2021-06-02  3:47 ` [PATCH v7 05/10] vhost-vdpa: add support for config interrupt call back Cindy Lu
2021-06-03  6:06   ` Jason Wang
2021-06-07  6:34     ` Cindy Lu
2021-06-02  3:47 ` [PATCH v7 06/10] vhost:add support for configure interrupt Cindy Lu
2021-06-03  6:28   ` Jason Wang
2021-06-08  3:20     ` Cindy Lu
2021-06-02  3:47 ` [PATCH v7 07/10] virtio-mmio: add " Cindy Lu
2021-06-03  6:35   ` Jason Wang
2021-06-07  6:35     ` Cindy Lu
2021-06-02  3:47 ` [PATCH v7 08/10] virtio-pci: decouple virtqueue from kvm_virtio_pci_vector_use Cindy Lu
2021-06-03  6:39   ` Jason Wang [this message]
2021-06-07  6:36     ` Cindy Lu
2021-06-02  3:47 ` [PATCH v7 09/10] virtio-pci: add support for configure interrupt Cindy Lu
2021-06-03  6:45   ` Jason Wang
2021-06-07  6:44     ` Cindy Lu
2021-06-02  3:47 ` [PATCH v7 10/10] virtio-net: add peer_deleted check in virtio_net_handle_rx Cindy Lu
2021-06-03  6:58   ` Jason Wang
2021-06-07  6:20     ` Cindy Lu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a4dae2fa-a023-5a81-7e9e-5c6164962324@redhat.com \
    --to=jasowang@redhat.com \
    --cc=lulu@redhat.com \
    --cc=mst@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).