From: Christophe de Dinechin <dinechin@redhat.com>
To: Vivek Goyal <vgoyal@redhat.com>
Cc: virtio-fs@redhat.com, qemu-devel@nongnu.org, stefanha@redhat.com,
	miklos@szeredi.hu
Subject: Re: [Virtio-fs] [PATCH 06/13] vhost-user-fs: Use helpers to create/cleanup virtqueue
Date: Wed, 06 Oct 2021 15:35:30 +0200
Message-ID: <lywnmqi89e.fsf@redhat.com>
In-Reply-To: <20210930153037.1194279-7-vgoyal@redhat.com>


On 2021-09-30 at 11:30 -04, Vivek Goyal <vgoyal@redhat.com> wrote...
> Add helpers to create/cleanup virtuqueues and use those helpers. I will

Typo, virtuqueues -> virtqueues

Also, while I'm nitpicking, "virtqueue" could be plural in the subject line too ;-)

> need to reconfigure queues in later patches and using helpers will allow
> reusing the code.
>
> Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
> ---
>  hw/virtio/vhost-user-fs.c | 87 +++++++++++++++++++++++----------------
>  1 file changed, 52 insertions(+), 35 deletions(-)
>
> diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
> index c595957983..d1efbc5b18 100644
> --- a/hw/virtio/vhost-user-fs.c
> +++ b/hw/virtio/vhost-user-fs.c
> @@ -139,6 +139,55 @@ static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
>      }
>  }
>
> +static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> +{
> +    /*
> +     * Not normally called; it's the daemon that handles the queue;
> +     * however virtio's cleanup path can call this.
> +     */
> +}
> +
> +static void vuf_create_vqs(VirtIODevice *vdev)
> +{
> +    VHostUserFS *fs = VHOST_USER_FS(vdev);
> +    unsigned int i;
> +
> +    /* Hiprio queue */
> +    fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size,
> +                                     vuf_handle_output);
> +
> +    /* Request queues */
> +    fs->req_vqs = g_new(VirtQueue *, fs->conf.num_request_queues);
> +    for (i = 0; i < fs->conf.num_request_queues; i++) {
> +        fs->req_vqs[i] = virtio_add_queue(vdev, fs->conf.queue_size,
> +                                          vuf_handle_output);
> +    }
> +
> +    /* 1 high prio queue, plus the number configured */
> +    fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues;
> +    fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs);
> +}
> +
> +static void vuf_cleanup_vqs(VirtIODevice *vdev)
> +{
> +    VHostUserFS *fs = VHOST_USER_FS(vdev);
> +    unsigned int i;
> +
> +    virtio_delete_queue(fs->hiprio_vq);
> +    fs->hiprio_vq = NULL;
> +
> +    for (i = 0; i < fs->conf.num_request_queues; i++) {
> +        virtio_delete_queue(fs->req_vqs[i]);
> +    }
> +
> +    g_free(fs->req_vqs);
> +    fs->req_vqs = NULL;
> +
> +    fs->vhost_dev.nvqs = 0;
> +    g_free(fs->vhost_dev.vqs);
> +    fs->vhost_dev.vqs = NULL;
> +}
> +
>  static uint64_t vuf_get_features(VirtIODevice *vdev,
>                                   uint64_t features,
>                                   Error **errp)
> @@ -148,14 +197,6 @@ static uint64_t vuf_get_features(VirtIODevice *vdev,
>      return vhost_get_features(&fs->vhost_dev, user_feature_bits, features);
>  }
>
> -static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> -{
> -    /*
> -     * Not normally called; it's the daemon that handles the queue;
> -     * however virtio's cleanup path can call this.
> -     */
> -}
> -
>  static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
>                                              bool mask)
>  {
> @@ -175,7 +216,6 @@ static void vuf_device_realize(DeviceState *dev, Error **errp)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>      VHostUserFS *fs = VHOST_USER_FS(dev);
> -    unsigned int i;
>      size_t len;
>      int ret;
>
> @@ -222,18 +262,7 @@ static void vuf_device_realize(DeviceState *dev, Error **errp)
>      virtio_init(vdev, "vhost-user-fs", VIRTIO_ID_FS,
>                  sizeof(struct virtio_fs_config));
>
> -    /* Hiprio queue */
> -    fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);
> -
> -    /* Request queues */
> -    fs->req_vqs = g_new(VirtQueue *, fs->conf.num_request_queues);
> -    for (i = 0; i < fs->conf.num_request_queues; i++) {
> -        fs->req_vqs[i] = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);
> -    }
> -
> -    /* 1 high prio queue, plus the number configured */
> -    fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues;
> -    fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs);
> +    vuf_create_vqs(vdev);
>      ret = vhost_dev_init(&fs->vhost_dev, &fs->vhost_user,
>                           VHOST_BACKEND_TYPE_USER, 0, errp);
>      if (ret < 0) {
> @@ -244,13 +273,8 @@ static void vuf_device_realize(DeviceState *dev, Error **errp)
>
>  err_virtio:
>      vhost_user_cleanup(&fs->vhost_user);
> -    virtio_delete_queue(fs->hiprio_vq);
> -    for (i = 0; i < fs->conf.num_request_queues; i++) {
> -        virtio_delete_queue(fs->req_vqs[i]);
> -    }
> -    g_free(fs->req_vqs);
> +    vuf_cleanup_vqs(vdev);
>      virtio_cleanup(vdev);
> -    g_free(fs->vhost_dev.vqs);
>      return;
>  }
>
> @@ -258,7 +282,6 @@ static void vuf_device_unrealize(DeviceState *dev)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>      VHostUserFS *fs = VHOST_USER_FS(dev);
> -    int i;
>
>      /* This will stop vhost backend if appropriate. */
>      vuf_set_status(vdev, 0);
> @@ -267,14 +290,8 @@ static void vuf_device_unrealize(DeviceState *dev)
>
>      vhost_user_cleanup(&fs->vhost_user);
>
> -    virtio_delete_queue(fs->hiprio_vq);
> -    for (i = 0; i < fs->conf.num_request_queues; i++) {
> -        virtio_delete_queue(fs->req_vqs[i]);
> -    }
> -    g_free(fs->req_vqs);
> +    vuf_cleanup_vqs(vdev);
>      virtio_cleanup(vdev);
> -    g_free(fs->vhost_dev.vqs);
> -    fs->vhost_dev.vqs = NULL;
>  }
>
>  static const VMStateDescription vuf_vmstate = {
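
As an aside, since the commit message mentions reusing these helpers to
reconfigure queues in later patches: here is a hypothetical sketch of what
that reuse could look like, just to illustrate the point. The function
vuf_reconfigure_vqs and its parameter are illustrative names, not something
in this series:

    static void vuf_reconfigure_vqs(VirtIODevice *vdev,
                                    uint16_t num_request_queues)
    {
        VHostUserFS *fs = VHOST_USER_FS(vdev);

        /* Tear down the hiprio/request queues and the vhost vq array */
        vuf_cleanup_vqs(vdev);

        /* Apply the new configuration, then rebuild everything */
        fs->conf.num_request_queues = num_request_queues;
        vuf_create_vqs(vdev);
    }

With creation and cleanup behind single entry points, that kind of change
stays a few lines instead of duplicating the add/delete loops.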


--
Cheers,
Christophe de Dinechin (IRC c3d)


