From: "Michael S. Tsirkin" <mst@redhat.com>
To: Xueming Li <xuemingl@nvidia.com>
Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>,
	qemu-devel@nongnu.org, qemu-stable@nongnu.org
Subject: Re: [PATCH v6 2/2] vhost-user: fix VirtQ notifier cleanup
Date: Mon, 1 Nov 2021 17:00:48 -0400	[thread overview]
Message-ID: <20211101165311-mutt-send-email-mst@kernel.org> (raw)
In-Reply-To: <20211101083813.1224522-3-xuemingl@nvidia.com>

On Mon, Nov 01, 2021 at 04:38:13PM +0800, Xueming Li wrote:
> When vhost-user device cleanup runs and un-mmaps the notifier address,
> a VM CPU thread that is still writing to the notifier fails with an
> invalid-address access error.
> 
> To avoid this concurrency issue, defer the un-mmap to an RCU callback,
> so it only happens after the memory flatview update has propagated.
> 
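In other words the flow is roughly this (just a condensed sketch of the
code further down in the patch, field names as in the patch):

    /* RCU callback: by the time this runs no reader can still see the
     * old flatview entry that referenced the mapping, so unmapping is
     * safe here. */
    static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
    {
        munmap(n->old_addr, qemu_real_host_page_size);
        n->old_addr = NULL;
    }

    /* at removal time: detach the MR, stash the address, defer munmap */
    virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
    n->old_addr = n->addr;
    n->addr = NULL;
    call_rcu(n, vhost_user_host_notifier_free, rcu);
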
> Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
> Cc: qemu-stable@nongnu.org
> Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> ---
>  hw/virtio/vhost-user.c         | 50 +++++++++++++++++++++-------------
>  include/hw/virtio/vhost-user.h |  2 ++
>  2 files changed, 33 insertions(+), 19 deletions(-)
> 
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index c671719e9b..5adad4d029 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -25,6 +25,7 @@
>  #include "migration/migration.h"
>  #include "migration/postcopy-ram.h"
>  #include "trace.h"
> +#include "exec/ramblock.h"
>  
>  #include <sys/ioctl.h>
>  #include <sys/socket.h>
> @@ -1143,15 +1144,27 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
>      return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
>  }
>  
> -static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
> -                                            int queue_idx)
> +static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
>  {
> -    struct vhost_user *u = dev->opaque;
> -    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
> -    VirtIODevice *vdev = dev->vdev;
> +    assert(n && n->old_addr);
> +    munmap(n->old_addr, qemu_real_host_page_size);
> +    n->old_addr = NULL;
> +}
> +
> +static void vhost_user_host_notifier_remove(VhostUserState *user,
> +                                            VirtIODevice *vdev, int queue_idx)
> +{
> +    VhostUserHostNotifier *n = &user->notifier[queue_idx];
>  
>      if (n->addr) {
> -        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> +        if (vdev) {
> +            virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> +        }
> +        assert(n->addr);
> +        assert(!n->old_addr);
> +        n->old_addr = n->addr;
> +        n->addr = NULL;
> +        call_rcu(n, vhost_user_host_notifier_free, rcu);
>      }
>  }
>  
> @@ -1190,8 +1203,9 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
>          .payload.state = *ring,
>          .hdr.size = sizeof(msg.payload.state),
>      };
> +    struct vhost_user *u = dev->opaque;
>  
> -    vhost_user_host_notifier_remove(dev, ring->index);
> +    vhost_user_host_notifier_remove(u->user, dev->vdev, ring->index);
>  
>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>          return -1;
> @@ -1486,12 +1500,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
>  
>      n = &user->notifier[queue_idx];
>  
> -    if (n->addr) {
> -        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> -        object_unparent(OBJECT(&n->mr));
> -        munmap(n->addr, page_size);
> -        n->addr = NULL;
> -    }
> +    vhost_user_host_notifier_remove(user, vdev, queue_idx);
>  
>      if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
>          return 0;
> @@ -1510,9 +1519,12 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
>  
>      name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
>                             user, queue_idx);
> -    if (!n->mr.ram) /* Don't init again after suspend. */
> +    if (!n->mr.ram) { /* Don't init again after suspend. */
>          memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
>                                            page_size, addr);
> +    } else {
> +        n->mr.ram_block->host = addr;
> +    }
>      g_free(name);
>  
>      if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
> @@ -2460,17 +2472,17 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
>  void vhost_user_cleanup(VhostUserState *user)
>  {
>      int i;
> +    VhostUserHostNotifier *n;
>  
>      if (!user->chr) {
>          return;
>      }
>      memory_region_transaction_begin();
>      for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
> -        if (user->notifier[i].addr) {
> -            object_unparent(OBJECT(&user->notifier[i].mr));
> -            munmap(user->notifier[i].addr, qemu_real_host_page_size);
> -            user->notifier[i].addr = NULL;
> -        }
> +        n = &user->notifier[i];
> +        assert(!n->addr);

I'm pretty confused as to why this assert holds.
Add a comment?
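E.g. something along these lines (just a sketch, guessing at the
intended reasoning):

    n = &user->notifier[i];
    /*
     * Every vring went through GET_VRING_BASE during stop, which
     * already moved addr to old_addr, so addr must be NULL here.
     * (Is that actually guaranteed on all paths?)
     */
    assert(!n->addr);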

> +        vhost_user_host_notifier_remove(user, NULL, i);
> +        object_unparent(OBJECT(&n->mr));
>      }
>      memory_region_transaction_commit();
>      user->chr = NULL;

I'm also confused about why it is safe to unparent notifiers that have
never been set up. Won't n->mr be invalid then?
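Maybe guard it, e.g. (untested sketch, reusing the n->mr.ram check the
patch already relies on above):

    n = &user->notifier[i];
    vhost_user_host_notifier_remove(user, NULL, i);
    if (n->mr.ram) {
        /* only unparent memory regions that were actually created */
        object_unparent(OBJECT(&n->mr));
    }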


> diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
> index f6012b2078..03aa22d450 100644
> --- a/include/hw/virtio/vhost-user.h
> +++ b/include/hw/virtio/vhost-user.h
> @@ -12,8 +12,10 @@
>  #include "hw/virtio/virtio.h"
>  
>  typedef struct VhostUserHostNotifier {
> +    struct rcu_head rcu;
>      MemoryRegion mr;
>      void *addr;
> +    void *old_addr;

That's not a very clear name. Is this literally just
"address for the rcu callback to unmap"?
Maybe unmap_addr then?
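I.e. something like:

    typedef struct VhostUserHostNotifier {
        struct rcu_head rcu;   /* deferred cleanup via call_rcu() */
        MemoryRegion mr;
        void *addr;            /* currently mapped notifier address */
        void *unmap_addr;      /* address for the RCU callback to munmap */
    } VhostUserHostNotifier;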

>  } VhostUserHostNotifier;
>  
>  typedef struct VhostUserState {
> -- 
> 2.33.0



Thread overview: 16+ messages
2021-11-01  8:38 [PATCH v6 0/2] Improve vhost-user VQ notifier unmap Xueming Li
2021-11-01  8:38 ` [PATCH v6 1/2] vhost-user: remove VirtQ notifier restore Xueming Li
2021-11-01 21:06   ` Michael S. Tsirkin
2021-11-02  6:08     ` Xueming(Steven) Li
2021-11-02  6:49       ` Michael S. Tsirkin
2021-11-03 14:48         ` Xueming(Steven) Li
2021-11-03 20:30           ` Michael S. Tsirkin
2022-01-12 15:05             ` Xueming(Steven) Li
2022-02-04 12:25               ` Michael S. Tsirkin
2022-02-07 13:49                 ` Xueming(Steven) Li
2022-02-07 13:49                 ` Xueming(Steven) Li
2021-11-01  8:38 ` [PATCH v6 2/2] vhost-user: fix VirtQ notifier cleanup Xueming Li
2021-11-01 21:00   ` Michael S. Tsirkin [this message]
2021-11-02  6:00     ` Xueming(Steven) Li
2021-11-02  6:47       ` Michael S. Tsirkin
2022-01-12 15:36         ` Xueming(Steven) Li
