From: Si-Wei Liu <si-wei.liu@oracle.com>
To: Jason Wang <jasowang@redhat.com>, mst@redhat.com, qemu-devel@nongnu.org
Cc: elic@nvidia.com, lingshan.zhu@intel.com
Subject: Re: [PATCH V2 2/2] vhost-vdpa: map virtqueue notification area if possible
Date: Thu, 10 Jun 2021 13:53:47 -0700	[thread overview]
Message-ID: <b1b4dbfa-1227-d34d-9183-a19d0a1e7efd@oracle.com> (raw)
In-Reply-To: <20210602084106.43186-3-jasowang@redhat.com>


Looks good.

On 6/2/2021 1:41 AM, Jason Wang wrote:
> This patch implements vq notification area mapping support for
> vhost-vDPA. This is done simply by calling mmap()/munmap() on the
> vhost-vDPA fd during device start/stop. For devices without
> notification area mapping support, we gracefully fall back to
> eventfd-based notification.
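
The direct doorbell mapping is a nice win, since a guest kick lands on
the device's notification page without bouncing through the eventfd /
ioeventfd path. For context, here is a minimal userspace sketch of the
mapping contract this patch relies on. This is my own illustration, not
part of the patch: the per-vq page at offset vq_index * page_size
matches what the patch does, while the "write the 16-bit vq index to
the doorbell" semantics are an assumption borrowed from virtio-pci
style notification and may differ per device.

    /*
     * Illustration only: map one queue's notification area from a
     * vhost-vDPA fd and kick the queue with a plain doorbell write.
     */
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/types.h>
    #include <unistd.h>

    static void *map_vq_doorbell(int vdpa_fd, unsigned vq_index)
    {
        size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

        /* One page-aligned doorbell per vq, at vq_index * page_size. */
        void *addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
                          vdpa_fd, (off_t)vq_index * page_size);
        return addr == MAP_FAILED ? NULL : addr;
    }

    static void kick_vq(void *doorbell, uint16_t vq_index)
    {
        /* A single store notifies the device directly, no eventfd hop.
         * (Assumes virtio-pci style notify data; device dependent.) */
        *(volatile uint16_t *)doorbell = vq_index;
    }

If the device does not expose the area, the mmap() simply fails and the
existing eventfd kick keeps working, which matches the graceful
fallback described above.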
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com>

> ---
> Changes since v1:
> - use dev->vq_index to calculate the virtqueue index
> - remove the unused host_notifier_set
> ---
>   hw/virtio/vhost-vdpa.c         | 85 ++++++++++++++++++++++++++++++++++
>   include/hw/virtio/vhost-vdpa.h |  6 +++
>   2 files changed, 91 insertions(+)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index dd4321bac2..f9a86afe64 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -285,12 +285,95 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
>       return 0;
>   }
>   
> +static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
> +                                            int queue_index)
> +{
> +    size_t page_size = qemu_real_host_page_size;
> +    struct vhost_vdpa *v = dev->opaque;
> +    VirtIODevice *vdev = dev->vdev;
> +    VhostVDPAHostNotifier *n;
> +
> +    n = &v->notifier[queue_index];
> +
> +    if (n->addr) {
> +        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
> +        object_unparent(OBJECT(&n->mr));
> +        munmap(n->addr, page_size);
> +        n->addr = NULL;
> +    }
> +}
> +
> +static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
> +{
> +    int i;
> +
> +    for (i = 0; i < n; i++) {
> +        vhost_vdpa_host_notifier_uninit(dev, i);
> +    }
> +}
> +
> +static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
> +{
> +    size_t page_size = qemu_real_host_page_size;
> +    struct vhost_vdpa *v = dev->opaque;
> +    VirtIODevice *vdev = dev->vdev;
> +    VhostVDPAHostNotifier *n;
> +    int fd = v->device_fd;
> +    void *addr;
> +    char *name;
> +
> +    vhost_vdpa_host_notifier_uninit(dev, queue_index);
> +
> +    n = &v->notifier[queue_index];
> +
> +    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
> +                queue_index * page_size);
> +    if (addr == MAP_FAILED) {
> +        goto err;
> +    }
> +
> +    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
> +                           v, queue_index);
> +    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
> +                                      page_size, addr);
> +    g_free(name);
> +
> +    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
> +        munmap(addr, page_size);
> +        goto err;
> +    }
> +    n->addr = addr;
> +
> +    return 0;
> +
> +err:
> +    return -1;
> +}
> +
> +static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
> +{
> +    int i;
> +
> +    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
> +        if (vhost_vdpa_host_notifier_init(dev, i)) {
> +            goto err;
> +        }
> +    }
> +
> +    return;
> +
> +err:
> +    vhost_vdpa_host_notifiers_uninit(dev, i);
> +    return;
> +}
> +
>   static int vhost_vdpa_cleanup(struct vhost_dev *dev)
>   {
>       struct vhost_vdpa *v;
>       assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
>       v = dev->opaque;
>       trace_vhost_vdpa_cleanup(dev, v);
> +    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
>       memory_listener_unregister(&v->listener);
>   
>       dev->opaque = NULL;
> @@ -467,6 +550,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>       if (started) {
>           uint8_t status = 0;
>           memory_listener_register(&v->listener, &address_space_memory);
> +        vhost_vdpa_host_notifiers_init(dev);
>           vhost_vdpa_set_vring_ready(dev);
>           vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
>           vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
> @@ -476,6 +560,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>           vhost_vdpa_reset_device(dev);
>           vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
>                                      VIRTIO_CONFIG_S_DRIVER);
> +        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
>           memory_listener_unregister(&v->listener);
>   
>           return 0;
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 9b81a409da..56bef30ec2 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -14,11 +14,17 @@
>   
>   #include "hw/virtio/virtio.h"
>   
> +typedef struct VhostVDPAHostNotifier {
> +    MemoryRegion mr;
> +    void *addr;
> +} VhostVDPAHostNotifier;
> +
>   typedef struct vhost_vdpa {
>       int device_fd;
>       uint32_t msg_type;
>       MemoryListener listener;
>       struct vhost_dev *dev;
> +    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
>   } VhostVDPA;
>   
>   extern AddressSpace address_space_memory;


