From: Greg Kurz <groug@kaod.org>
To: Vivek Goyal <vgoyal@redhat.com>
Cc: mst@redhat.com, qemu-devel@nongnu.org, dgilbert@redhat.com,
	virtio-fs@redhat.com, stefanha@redhat.com,
	marcandre.lureau@redhat.com
Subject: Re: [PATCH 2/6] libvhost-user: Use slave_mutex in all slave messages
Date: Thu, 28 Jan 2021 15:31:23 +0100
Message-ID: <20210128153123.4aba231c@bahia.lan>
In-Reply-To: <20210125180115.22936-3-vgoyal@redhat.com>

On Mon, 25 Jan 2021 13:01:11 -0500
Vivek Goyal <vgoyal@redhat.com> wrote:

> dev->slave_mutex needs to be taken when sending messages on slave_fd.
> Currently _vu_queue_notify() does not do that.
> 
> Introduce a helper, vu_message_slave_send_receive(), which sends a message
> and, when a reply is requested, also receives the response. Use this helper
> in all the paths that send messages on the slave_fd channel.
> 
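FWIW, the race this closes, as I understand it, is that _vu_queue_notify()
could write to slave_fd (and read the ack) without holding slave_mutex while
e.g. vu_fs_cache_request() was in the middle of its own request/reply, so the
two could end up consuming each other's replies. The pattern the helper
enforces is roughly the following (a compilable sketch with hypothetical
names and message layout, not the libvhost-user code itself):

/*
 * Sketch only: the whole write + read-reply sequence happens under one
 * mutex, so concurrent users of the slave channel cannot steal each
 * other's replies.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

struct slave_msg {
    uint64_t flags;
    uint64_t payload;
};

#define SLAVE_NEED_REPLY (1u << 3)   /* stands in for VHOST_USER_NEED_REPLY_MASK */

static pthread_mutex_t slave_mutex = PTHREAD_MUTEX_INITIALIZER;

static bool slave_send_receive(int slave_fd, struct slave_msg *m, uint64_t *payload)
{
    bool ok = false;

    pthread_mutex_lock(&slave_mutex);

    if (write(slave_fd, m, sizeof(*m)) != sizeof(*m)) {
        goto out;                    /* send failed */
    }

    if (!(m->flags & SLAVE_NEED_REPLY)) {
        ok = true;                   /* fire and forget, no reply expected */
        goto out;
    }

    if (read(slave_fd, m, sizeof(*m)) != sizeof(*m)) {
        goto out;                    /* reply missing or truncated */
    }

    if (payload) {
        *payload = m->payload;       /* only report the payload if the caller wants it */
    }
    ok = true;

out:
    pthread_mutex_unlock(&slave_mutex);
    return ok;
}

With all three users going through the same helper, the notify path gets the
same serialization as the host notifier and fs cache paths.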

Does this fix any known bug?

> Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
> ---

LGTM

Reviewed-by: Greg Kurz <groug@kaod.org>

>  subprojects/libvhost-user/libvhost-user.c | 50 ++++++++++++-----------
>  1 file changed, 27 insertions(+), 23 deletions(-)
> 
> diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
> index 4cf4aef63d..7a56c56dc8 100644
> --- a/subprojects/libvhost-user/libvhost-user.c
> +++ b/subprojects/libvhost-user/libvhost-user.c
> @@ -403,7 +403,7 @@ vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
>   * Processes a reply on the slave channel.
>   * Entered with slave_mutex held and releases it before exit.
>   * Returns true on success.
> - * *payload is written on success
> + * *payload is written on success, if payload is not NULL.
>   */
>  static bool
>  vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg,
> @@ -427,7 +427,9 @@ vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg,
>          goto out;
>      }
>  
> -    *payload = msg_reply.payload.u64;
> +    if (payload) {
> +        *payload = msg_reply.payload.u64;
> +    }
>      result = true;
>  
>  out:
> @@ -435,6 +437,25 @@ out:
>      return result;
>  }
>  
> +/* Returns true on success, false otherwise */
> +static bool
> +vu_message_slave_send_receive(VuDev *dev, VhostUserMsg *vmsg, uint64_t *payload)
> +{
> +    pthread_mutex_lock(&dev->slave_mutex);
> +    if (!vu_message_write(dev, dev->slave_fd, vmsg)) {
> +        pthread_mutex_unlock(&dev->slave_mutex);
> +        return false;
> +    }
> +
> +    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
> +        pthread_mutex_unlock(&dev->slave_mutex);
> +        return true;
> +    }
> +
> +    /* Also unlocks the slave_mutex */
> +    return vu_process_message_reply(dev, vmsg, payload);
> +}
> +
>  /* Kick the log_call_fd if required. */
>  static void
>  vu_log_kick(VuDev *dev)
> @@ -1340,16 +1361,8 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
>          return false;
>      }
>  
> -    pthread_mutex_lock(&dev->slave_mutex);
> -    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
> -        pthread_mutex_unlock(&dev->slave_mutex);
> -        return false;
> -    }
> -
> -    /* Also unlocks the slave_mutex */
> -    res = vu_process_message_reply(dev, &vmsg, &payload);
> +    res = vu_message_slave_send_receive(dev, &vmsg, &payload);
>      res = res && (payload == 0);
> -
>      return res;
>  }
>  
> @@ -2395,10 +2408,7 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
>              vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
>          }
>  
> -        vu_message_write(dev, dev->slave_fd, &vmsg);
> -        if (ack) {
> -            vu_message_read_default(dev, dev->slave_fd, &vmsg);
> -        }
> +        vu_message_slave_send_receive(dev, &vmsg, NULL);
>          return;
>      }
>  
> @@ -2942,17 +2952,11 @@ int64_t vu_fs_cache_request(VuDev *dev, VhostUserSlaveRequest req, int fd,
>          return -EINVAL;
>      }
>  
> -    pthread_mutex_lock(&dev->slave_mutex);
> -    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
> -        pthread_mutex_unlock(&dev->slave_mutex);
> -        return -EIO;
> -    }
> -
> -    /* Also unlocks the slave_mutex */
> -    res = vu_process_message_reply(dev, &vmsg, &payload);
> +    res = vu_message_slave_send_receive(dev, &vmsg, &payload);
>      if (!res) {
>          return -EIO;
>      }
> +
>      /*
>       * Payload is delivered as uint64_t but is actually signed for
>       * errors.
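And a tiny illustration of that signed-in-u64 convention on the caller side
(not the patch code, just what I'd expect the decoding to boil down to):

#include <stdint.h>

/* Hypothetical helper: the slave packs either a result or a negative errno
 * into the u64 payload, so the caller reinterprets it as signed. */
static int64_t decode_slave_payload(uint64_t payload)
{
    return (int64_t)payload;   /* negative values are -errno */
}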



