From: Albert Esteve <aesteve@redhat.com>
To: qemu-devel@nongnu.org
Cc: marcandre.lureau@gmail.com, cohuck@redhat.com, kraxel@redhat.com,
"Michael S. Tsirkin" <mst@redhat.com>, Fam Zheng <fam@euphon.net>,
philmd@linaro.org, Albert Esteve <aesteve@redhat.com>
Subject: [PATCH v7 4/4] libvhost-user: handle shared_object msg
Date: Thu, 7 Sep 2023 09:43:18 +0200
Message-ID: <20230907074318.528064-5-aesteve@redhat.com>
In-Reply-To: <20230907074318.528064-1-aesteve@redhat.com>

In the libvhost-user library we need to handle VHOST_USER_GET_SHARED_OBJECT
requests, and add helper functions that send the backend messages used to
add, remove, and look up entries in the virtio shared objects hash table.
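
A rough usage sketch for the new helpers, from a backend's point of view
(the wrapper functions and their names below are illustrative assumptions,
not part of this patch):

    /* Hypothetical exporter path: advertise a dma-buf under a UUID so that
     * other devices can find it in the front-end's hash table. */
    static bool export_dmabuf_uuid(VuDev *dev, unsigned char uuid[UUID_LEN])
    {
        /* Fails if VHOST_USER_PROTOCOL_F_SHARED_OBJECT was not negotiated. */
        return vu_add_shared_object(dev, uuid);
    }

    /* Hypothetical importer path: resolve a UUID into a dma-buf fd that was
     * exported by another backend. Returns -1 if the UUID is unknown. */
    static int import_dmabuf_uuid(VuDev *dev, unsigned char uuid[UUID_LEN])
    {
        int dmabuf_fd = -1;

        if (!vu_lookup_shared_object(dev, uuid, &dmabuf_fd)) {
            return -1;
        }
        return dmabuf_fd;
    }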
Signed-off-by: Albert Esteve <aesteve@redhat.com>
---
subprojects/libvhost-user/libvhost-user.c | 120 ++++++++++++++++++++++
subprojects/libvhost-user/libvhost-user.h | 55 +++++++++-
2 files changed, 174 insertions(+), 1 deletion(-)
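
For completeness, a minimal sketch of how a device backend might wire up the
new get_shared_object callback so it can answer VHOST_USER_GET_SHARED_OBJECT
requests from the front-end (the backend-local lookup function is an
assumption for illustration):

    /* Hypothetical callback: map a UUID to a dma-buf fd owned by this
     * backend, or return -1 if the UUID is unknown. */
    static int my_get_shared_object(VuDev *dev, const unsigned char *uuid)
    {
        return my_find_exported_fd(uuid); /* backend-local bookkeeping */
    }

    static const VuDevIface my_iface = {
        /* ... other callbacks ... */
        .get_shared_object = my_get_shared_object,
    };
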
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 0469a50101..676e57a468 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -161,6 +161,7 @@ vu_request_to_string(unsigned int req)
REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
REQ(VHOST_USER_ADD_MEM_REG),
REQ(VHOST_USER_REM_MEM_REG),
+ REQ(VHOST_USER_GET_SHARED_OBJECT),
REQ(VHOST_USER_MAX),
};
#undef REQ
@@ -900,6 +901,24 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
return false;
}
+static bool
+vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int fd_num = 0;
+ int dmabuf_fd = -1;
+ if (dev->iface->get_shared_object) {
+ dmabuf_fd = dev->iface->get_shared_object(
+ dev, &vmsg->payload.object.uuid[0]);
+ }
+ if (dmabuf_fd != -1) {
+ DPRINT("dmabuf_fd found for requested UUID\n");
+ vmsg->fds[fd_num++] = dmabuf_fd;
+ }
+ vmsg->fd_num = fd_num;
+
+ return true;
+}
+
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
@@ -1403,6 +1422,105 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
return vu_process_message_reply(dev, &vmsg);
}
+bool
+vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
+ int *dmabuf_fd)
+{
+ bool result = false;
+ VhostUserMsg msg_reply;
+ VhostUserMsg msg = {
+ .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP,
+ .size = sizeof(msg.payload.object),
+ .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
+ };
+
+ memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+ if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+ return false;
+ }
+
+ pthread_mutex_lock(&dev->backend_mutex);
+ if (!vu_message_write(dev, dev->backend_fd, &msg)) {
+ goto out;
+ }
+
+ if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
+ goto out;
+ }
+
+ if (msg_reply.request != msg.request) {
+ DPRINT("Received unexpected msg type. Expected %d, received %d",
+ msg.request, msg_reply.request);
+ goto out;
+ }
+
+ if (msg_reply.fd_num != 1) {
+ DPRINT("Received unexpected number of fds. Expected 1, received %d",
+ msg_reply.fd_num);
+ goto out;
+ }
+
+ *dmabuf_fd = msg_reply.fds[0];
+ result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0;
+out:
+ pthread_mutex_unlock(&dev->backend_mutex);
+
+ return result;
+}
+
+static bool
+vu_send_message(VuDev *dev, VhostUserMsg *vmsg)
+{
+ bool result = false;
+ pthread_mutex_lock(&dev->backend_mutex);
+ if (!vu_message_write(dev, dev->backend_fd, vmsg)) {
+ goto out;
+ }
+
+ result = true;
+out:
+ pthread_mutex_unlock(&dev->backend_mutex);
+
+ return result;
+}
+
+bool
+vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD,
+ .size = sizeof(msg.payload.object),
+ .flags = VHOST_USER_VERSION,
+ };
+
+ memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+ if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+ return false;
+ }
+
+ return vu_send_message(dev, &msg);
+}
+
+bool
+vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE,
+ .size = sizeof(msg.payload.object),
+ .flags = VHOST_USER_VERSION,
+ };
+
+ memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+ if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+ return false;
+ }
+
+ return vu_send_message(dev, &msg);
+}
+
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
@@ -1943,6 +2061,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
return vu_add_mem_reg(dev, vmsg);
case VHOST_USER_REM_MEM_REG:
return vu_rem_mem_reg(dev, vmsg);
+ case VHOST_USER_GET_SHARED_OBJECT:
+ return vu_get_shared_object(dev, vmsg);
default:
vmsg_close_fds(vmsg);
vu_panic(dev, "Unhandled request: %d", vmsg->request);
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index 708370c5f5..b36a42a7ca 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -64,7 +64,8 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
-
+ /* Feature 16 is reserved for VHOST_USER_PROTOCOL_F_STATUS. */
+ VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 17,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -109,6 +110,7 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_MAX_MEM_SLOTS = 36,
VHOST_USER_ADD_MEM_REG = 37,
VHOST_USER_REM_MEM_REG = 38,
+ VHOST_USER_GET_SHARED_OBJECT = 41,
VHOST_USER_MAX
} VhostUserRequest;
@@ -119,6 +121,9 @@ typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_BACKEND_VRING_CALL = 4,
VHOST_USER_BACKEND_VRING_ERR = 5,
+ VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
+ VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
+ VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;
@@ -172,6 +177,12 @@ typedef struct VhostUserInflight {
uint16_t queue_size;
} VhostUserInflight;
+#define UUID_LEN 16
+
+typedef struct VhostUserShared {
+ unsigned char uuid[UUID_LEN];
+} VhostUserShared;
+
#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
@@ -199,6 +210,7 @@ typedef struct VhostUserMsg {
VhostUserConfig config;
VhostUserVringArea area;
VhostUserInflight inflight;
+ VhostUserShared object;
} payload;
int fds[VHOST_MEMORY_BASELINE_NREGIONS];
@@ -232,6 +244,7 @@ typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
uint32_t offset, uint32_t size,
uint32_t flags);
+typedef int (*vu_get_shared_object_cb) (VuDev *dev, const unsigned char *uuid);
typedef struct VuDevIface {
/* called by VHOST_USER_GET_FEATURES to get the features bitmask */
@@ -258,6 +271,8 @@ typedef struct VuDevIface {
vu_get_config_cb get_config;
/* set the config space of the device */
vu_set_config_cb set_config;
+ /* get virtio shared object from the underlying vhost implementation. */
+ vu_get_shared_object_cb get_shared_object;
} VuDevIface;
typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);
@@ -541,6 +556,44 @@ void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
int size, int offset);
+/**
+ * vu_lookup_shared_object:
+ * @dev: a VuDev context
+ * @uuid: UUID of the shared object
+ * @dmabuf_fd: output dma-buf file descriptor
+ *
+ * Looks up a virtio shared object (i.e., dma-buf fd) associated with the
+ * received UUID. The result, if found, is stored in the dmabuf_fd argument.
+ *
+ * Returns: whether the virtio object was found.
+ */
+bool vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
+ int *dmabuf_fd);
+
+/**
+ * vu_add_shared_object:
+ * @dev: a VuDev context
+ * @uuid: UUID of the shared object
+ *
+ * Registers this back-end as the exporter for the object associated with
+ * the received UUID.
+ *
+ * Returns: TRUE on success, FALSE on failure.
+ */
+bool vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
+
+/**
+ * vu_rm_shared_object:
+ * @dev: a VuDev context
+ * @uuid: UUID of the shared object
+ *
+ * Removes a shared object entry (i.e., back-end entry) associated with the
+ * received UUID key from the hash table.
+ *
+ * Returns: TRUE on success, FALSE on failure.
+ */
+bool vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
+
/**
* vu_queue_set_notification:
* @dev: a VuDev context
--
2.41.0