From: Vivek Goyal <vgoyal@redhat.com>
To: qemu-devel@nongnu.org, virtio-fs@redhat.com
Cc: marcandre.lureau@redhat.com, stefanha@redhat.com,
dgilbert@redhat.com, vgoyal@redhat.com, mst@redhat.com
Subject: [PATCH 5/6] libvhost-user: Add support to start/stop/flush slave channel
Date: Mon, 25 Jan 2021 13:01:14 -0500
Message-ID: <20210125180115.22936-6-vgoyal@redhat.com>
In-Reply-To: <20210125180115.22936-1-vgoyal@redhat.com>
This patch adds support for starting, stopping and flushing the slave
channel. VHOST_USER_START_SLAVE_CHANNEL marks the channel open; while it
is open, slave requests are sent on slave_fd under slave_mutex, and once
it is closed new requests fail instead of being sent. On
VHOST_USER_STOP_SLAVE_CHANNEL the slave replies on the main socket right
away, then takes slave_mutex (which waits for any in-flight slave
request), clears slave_channel_open, and finally sends
VHOST_USER_SLAVE_STOP_CHANNEL_COMPLETE on slave_fd and waits for the
master's ack to confirm the channel has been flushed.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
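The key invariant behind the stop path is that a sender holds slave_mutex
for the whole request/reply exchange on slave_fd, so taking the mutex and
clearing slave_channel_open is enough to flush the channel. A minimal
standalone sketch of that reasoning (illustration only, not code from this
patch; all names are made up):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool chan_open = true;

    /* Sender: holds the lock for the whole request/reply exchange, the
     * same way vu_message_slave_send_receive() does. */
    static bool send_request(void)
    {
        pthread_mutex_lock(&chan_lock);
        if (!chan_open) {
            pthread_mutex_unlock(&chan_lock);
            return false;   /* channel stopped: fail instead of sending */
        }
        /* ... write the request and wait for the reply ... */
        pthread_mutex_unlock(&chan_lock);
        return true;
    }

    /* Stop: mirrors what vu_finish_stop_slave() does before notifying
     * the master.  Acquiring the lock waits out any in-flight exchange;
     * clearing chan_open makes later senders fail fast. */
    static void stop_channel(void)
    {
        pthread_mutex_lock(&chan_lock);
        chan_open = false;
        pthread_mutex_unlock(&chan_lock);
        /* now safe to report that the channel is flushed */
    }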
subprojects/libvhost-user/libvhost-user.c | 103 ++++++++++++++++++++--
subprojects/libvhost-user/libvhost-user.h | 8 +-
2 files changed, 105 insertions(+), 6 deletions(-)
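On the negotiation side, the master is only expected to use these messages
once the new VHOST_USER_PROTOCOL_F_SLAVE_CH_START_STOP bit has been
negotiated; patch 6/6 presumably opts virtiofsd in. A sketch of what such
an opt-in could look like for a device built on libvhost-user (the my_*
names are hypothetical; only the feature bit and the get_protocol_features
callback come from libvhost-user.h):

    static uint64_t my_get_protocol_features(VuDev *dev)
    {
        /* opt in to slave channel start/stop/flush */
        return 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_CH_START_STOP;
    }

    static const VuDevIface my_iface = {
        .get_protocol_features = my_get_protocol_features,
        /* ... other device callbacks ... */
    };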
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 7a56c56dc8..b4c795c63e 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -140,6 +140,8 @@ vu_request_to_string(unsigned int req)
REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
REQ(VHOST_USER_ADD_MEM_REG),
REQ(VHOST_USER_REM_MEM_REG),
+ REQ(VHOST_USER_START_SLAVE_CHANNEL),
+ REQ(VHOST_USER_STOP_SLAVE_CHANNEL),
REQ(VHOST_USER_MAX),
};
#undef REQ
@@ -437,11 +439,11 @@ out:
return result;
}
-/* Returns true on success, false otherwise */
+/* Caller must hold slave_mutex; it will be unlocked before this returns */
static bool
-vu_message_slave_send_receive(VuDev *dev, VhostUserMsg *vmsg, uint64_t *payload)
+vu_message_slave_send_receive_locked(VuDev *dev, VhostUserMsg *vmsg,
+ uint64_t *payload)
{
- pthread_mutex_lock(&dev->slave_mutex);
if (!vu_message_write(dev, dev->slave_fd, vmsg)) {
pthread_mutex_unlock(&dev->slave_mutex);
return false;
@@ -456,6 +458,46 @@ vu_message_slave_send_receive(VuDev *dev, VhostUserMsg *vmsg, uint64_t *payload)
return vu_process_message_reply(dev, vmsg, payload);
}
+/* Returns true on success, false otherwise */
+static bool
+vu_message_slave_send_receive(VuDev *dev, VhostUserMsg *vmsg,
+ uint64_t *payload)
+{
+ pthread_mutex_lock(&dev->slave_mutex);
+ if (!dev->slave_channel_open) {
+ pthread_mutex_unlock(&dev->slave_mutex);
+ return false;
+ }
+ return vu_message_slave_send_receive_locked(dev, vmsg, payload);
+}
+
+static bool
+vu_finish_stop_slave(VuDev *dev)
+{
+ bool res;
+ uint64_t payload = 0;
+ VhostUserMsg vmsg = {
+ .request = VHOST_USER_SLAVE_STOP_CHANNEL_COMPLETE,
+ .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
+ .size = sizeof(vmsg.payload.u64),
+ .payload.u64 = 0,
+ };
+
+ /*
+ * Taking slave_mutex here ensures that no other caller is in the middle
+ * of sending or receiving a message on slave_fd. Setting
+ * slave_channel_open to false while holding it ensures that any new
+ * caller gets an error back instead of sending a message. So it is now
+ * safe to tell the master that the stop has finished.
+ */
+ pthread_mutex_lock(&dev->slave_mutex);
+ dev->slave_channel_open = false;
+ /* This also drops slave_mutex */
+ res = vu_message_slave_send_receive_locked(dev, &vmsg, &payload);
+ res = res && (payload == 0);
+ return res;
+}
+
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
@@ -1529,6 +1571,35 @@ vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
return false;
}
+static bool
+vu_slave_channel_start(VuDev *dev, VhostUserMsg *vmsg)
+{
+ pthread_mutex_lock(&dev->slave_mutex);
+ dev->slave_channel_open = true;
+ pthread_mutex_unlock(&dev->slave_mutex);
+ /* Caller (vu_dispatch()) will send a reply */
+ return false;
+}
+
+static bool
+vu_slave_channel_stop(VuDev *dev, VhostUserMsg *vmsg, bool *reply_sent,
+ bool *reply_status)
+{
+ vmsg_set_reply_u64(vmsg, 0);
+ *reply_sent = true;
+ *reply_status = false;
+ if (!vu_send_reply(dev, dev->sock, vmsg)) {
+ return false;
+ }
+
+ if (!vu_finish_stop_slave(dev)) {
+ return false;
+ }
+
+ *reply_status = true;
+ return false;
+}
+
static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
@@ -1823,7 +1894,8 @@ static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
}
static bool
-vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
+vu_process_message(VuDev *dev, VhostUserMsg *vmsg, bool *reply_sent,
+ bool *reply_status)
{
int do_reply = 0;
@@ -1843,6 +1915,14 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
DPRINT("\n");
}
+ if (reply_sent) {
+ *reply_sent = false;
+ }
+
+ if (reply_status) {
+ *reply_status = false;
+ }
+
if (dev->iface->process_msg &&
dev->iface->process_msg(dev, vmsg, &do_reply)) {
return do_reply;
@@ -1912,6 +1992,10 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
return vu_add_mem_reg(dev, vmsg);
case VHOST_USER_REM_MEM_REG:
return vu_rem_mem_reg(dev, vmsg);
+ case VHOST_USER_START_SLAVE_CHANNEL:
+ return vu_slave_channel_start(dev, vmsg);
+ case VHOST_USER_STOP_SLAVE_CHANNEL:
+ return vu_slave_channel_stop(dev, vmsg, reply_sent, reply_status);
default:
vmsg_close_fds(vmsg);
vu_panic(dev, "Unhandled request: %d", vmsg->request);
@@ -1926,6 +2010,7 @@ vu_dispatch(VuDev *dev)
VhostUserMsg vmsg = { 0, };
int reply_requested;
bool need_reply, success = false;
+ bool reply_sent = false, reply_status = false;
if (!dev->read_msg(dev, dev->sock, &vmsg)) {
goto end;
@@ -1933,7 +2018,14 @@ vu_dispatch(VuDev *dev)
need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;
- reply_requested = vu_process_message(dev, &vmsg);
+ reply_requested = vu_process_message(dev, &vmsg, &reply_sent,
+ &reply_status);
+ /* reply has already been sent, if needed */
+ if (reply_sent) {
+ success = reply_status;
+ goto end;
+ }
+
if (!reply_requested && need_reply) {
vmsg_set_reply_u64(&vmsg, 0);
reply_requested = 1;
@@ -2051,6 +2143,7 @@ vu_init(VuDev *dev,
dev->log_call_fd = -1;
pthread_mutex_init(&dev->slave_mutex, NULL);
dev->slave_fd = -1;
+ dev->slave_channel_open = false;
dev->max_queues = max_queues;
dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index ee75d4931f..1d0ef54f69 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -64,6 +64,7 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+ VHOST_USER_PROTOCOL_F_SLAVE_CH_START_STOP = 16,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -109,6 +110,8 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_MAX_MEM_SLOTS = 36,
VHOST_USER_ADD_MEM_REG = 37,
VHOST_USER_REM_MEM_REG = 38,
+ VHOST_USER_START_SLAVE_CHANNEL = 39,
+ VHOST_USER_STOP_SLAVE_CHANNEL = 40,
VHOST_USER_MAX
} VhostUserRequest;
@@ -123,6 +126,7 @@ typedef enum VhostUserSlaveRequest {
VHOST_USER_SLAVE_FS_UNMAP = 7,
VHOST_USER_SLAVE_FS_SYNC = 8,
VHOST_USER_SLAVE_FS_IO = 9,
+ VHOST_USER_SLAVE_STOP_CHANNEL_COMPLETE = 10,
VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;
@@ -405,9 +409,11 @@ struct VuDev {
VuVirtq *vq;
VuDevInflightInfo inflight_info;
int log_call_fd;
- /* Must be held while using slave_fd */
+ /* Must be held while using slave_fd, slave_channel_open */
pthread_mutex_t slave_mutex;
int slave_fd;
+ /* If not set, do not send more requests on slave fd. */
+ bool slave_channel_open;
uint64_t log_size;
uint8_t *log_table;
uint64_t features;
--
2.25.4