From: Albert Esteve <aesteve@redhat.com>
To: qemu-devel@nongnu.org
Cc: jasowang@redhat.com, david@redhat.com, slp@redhat.com,
"Alex Bennée" <alex.bennee@linaro.org>,
stefanha@redhat.com, "Michael S. Tsirkin" <mst@redhat.com>,
"Albert Esteve" <aesteve@redhat.com>
Subject: [RFC PATCH v2 4/5] vhost_user: Add MEM_READ/WRITE backend requests
Date: Fri, 28 Jun 2024 16:57:09 +0200
Message-ID: <20240628145710.1516121-5-aesteve@redhat.com>
In-Reply-To: <20240628145710.1516121-1-aesteve@redhat.com>
With SHMEM_MAP messages, descriptors may be shared with
devices that do not see the resulting mappings, so their
accesses to those memory regions fail.

To solve this, introduce MEM_READ/WRITE requests that are
triggered as a fallback when vhost-user memory translation
fails.
Signed-off-by: Albert Esteve <aesteve@redhat.com>
---
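Usage sketch (illustrative only, not part of this patch): a backend
built on libvhost-user could fall back to these requests when local
address translation fails, roughly as below. device_read_guest() is a
hypothetical helper, vu_gpa_to_va() is the library's existing
translation function, and vu_send_mem_read() is the call added by this
patch.

    #include <string.h>
    #include "libvhost-user.h"

    /* Sketch of a backend-side read with the MEM_READ fallback. */
    static bool
    device_read_guest(VuDev *dev, uint64_t guest_addr,
                      uint8_t *buf, uint32_t size)
    {
        uint64_t len = size;
        /* Try to resolve the address against the local memory table. */
        void *va = vu_gpa_to_va(dev, &len, guest_addr);

        if (va && len == size) {
            /* Fast path: the region is mapped in this backend. */
            memcpy(buf, va, size);
            return true;
        }

        /* Fallback: ask the frontend to perform the read. */
        return vu_send_mem_read(dev, guest_addr, size, buf);
    }

A write path would mirror this with vu_send_mem_write().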
hw/virtio/vhost-user.c | 31 +++++++++
subprojects/libvhost-user/libvhost-user.c | 84 +++++++++++++++++++++++
subprojects/libvhost-user/libvhost-user.h | 38 ++++++++++
3 files changed, 153 insertions(+)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 57406dc8b4..18cacb2d68 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -118,6 +118,8 @@ typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
VHOST_USER_BACKEND_SHMEM_MAP = 9,
VHOST_USER_BACKEND_SHMEM_UNMAP = 10,
+ VHOST_USER_BACKEND_MEM_READ = 11,
+ VHOST_USER_BACKEND_MEM_WRITE = 12,
VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;
@@ -145,6 +147,12 @@ typedef struct VhostUserShMemConfig {
uint64_t memory_sizes[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserShMemConfig;
+typedef struct VhostUserMemRWMsg {
+ uint64_t guest_address;
+ uint32_t size;
+ uint8_t data[];
+} VhostUserMemRWMsg;
+
typedef struct VhostUserLog {
uint64_t mmap_size;
uint64_t mmap_offset;
@@ -253,6 +261,7 @@ typedef union {
VhostUserTransferDeviceState transfer_state;
VhostUserMMap mmap;
VhostUserShMemConfig shmem;
+ VhostUserMemRWMsg mem_rw;
} VhostUserPayload;
typedef struct VhostUserMsg {
@@ -1871,6 +1880,22 @@ vhost_user_backend_handle_shmem_unmap(struct vhost_dev *dev,
return 0;
}
+static int
+vhost_user_backend_handle_mem_read(struct vhost_dev *dev,
+ VhostUserMemRWMsg *mem_rw)
+{
+ /* TODO */
+ return -EPERM;
+}
+
+static int
+vhost_user_backend_handle_mem_write(struct vhost_dev *dev,
+ VhostUserMemRWMsg *mem_rw)
+{
+ /* TODO */
+ return -EPERM;
+}
+
static void close_backend_channel(struct vhost_user *u)
{
g_source_destroy(u->backend_src);
@@ -1946,6 +1971,12 @@ static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
case VHOST_USER_BACKEND_SHMEM_UNMAP:
ret = vhost_user_backend_handle_shmem_unmap(dev, &payload.mmap);
break;
+ case VHOST_USER_BACKEND_MEM_READ:
+ ret = vhost_user_backend_handle_mem_read(dev, &payload.mem_rw);
+ break;
+ case VHOST_USER_BACKEND_MEM_WRITE:
+ ret = vhost_user_backend_handle_mem_write(dev, &payload.mem_rw);
+ break;
default:
error_report("Received unexpected msg type: %d.", hdr.request);
ret = -EINVAL;
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 28556d183a..b5184064b5 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -1651,6 +1651,90 @@ vu_shmem_unmap(VuDev *dev, uint8_t shmid, uint64_t fd_offset,
return vu_process_message_reply(dev, &vmsg);
}
+bool
+vu_send_mem_read(VuDev *dev, uint64_t guest_addr, uint32_t size,
+ uint8_t *data)
+{
+ VhostUserMsg msg_reply;
+ VhostUserMsg msg = {
+ .request = VHOST_USER_BACKEND_MEM_READ,
+ .size = sizeof(msg.payload.mem_rw),
+ .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
+ .payload = {
+ .mem_rw = {
+ .guest_address = guest_addr,
+ .size = size,
+ }
+ }
+ };
+
+ pthread_mutex_lock(&dev->backend_mutex);
+ if (!vu_message_write(dev, dev->backend_fd, &msg)) {
+ goto out_err;
+ }
+
+ if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
+ goto out_err;
+ }
+
+ if (msg_reply.request != msg.request) {
+ DPRINT("Received unexpected msg type. Expected %d, received %d",
+ msg.request, msg_reply.request);
+ goto out_err;
+ }
+
+ if (msg_reply.payload.mem_rw.size != size) {
+ DPRINT("Received unexpected number of bytes in the response. "
+ "Expected %d, received %d",
+ size, msg_reply.payload.mem_rw.size);
+ goto out_err;
+ }
+
+    /* Copy the reply data into the caller-provided buffer. */
+    memcpy(data, msg_reply.payload.mem_rw.data, size);
+ pthread_mutex_unlock(&dev->backend_mutex);
+ return true;
+
+out_err:
+ pthread_mutex_unlock(&dev->backend_mutex);
+ return false;
+}
+
+bool
+vu_send_mem_write(VuDev *dev, uint64_t guest_addr, uint32_t size,
+ uint8_t *data)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_BACKEND_MEM_WRITE,
+        .size = sizeof(msg.payload.mem_rw) + size,
+ .flags = VHOST_USER_VERSION,
+ .payload = {
+ .mem_rw = {
+ .guest_address = guest_addr,
+ .size = size,
+ }
+ }
+ };
+ memcpy(msg.payload.mem_rw.data, data, size);
+
+ if (vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK)) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+
+    pthread_mutex_lock(&dev->backend_mutex);
+    if (!vu_message_write(dev, dev->backend_fd, &msg)) {
+ pthread_mutex_unlock(&dev->backend_mutex);
+ return false;
+ }
+
+ /* Also unlocks the backend_mutex */
+ return vu_process_message_reply(dev, &msg);
+}
+
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index 7f6c22cc1a..8ef794870d 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -129,6 +129,8 @@ typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
VHOST_USER_BACKEND_SHMEM_MAP = 9,
VHOST_USER_BACKEND_SHMEM_UNMAP = 10,
+ VHOST_USER_BACKEND_MEM_READ = 11,
+ VHOST_USER_BACKEND_MEM_WRITE = 12,
VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;
@@ -152,6 +154,12 @@ typedef struct VhostUserMemRegMsg {
VhostUserMemoryRegion region;
} VhostUserMemRegMsg;
+typedef struct VhostUserMemRWMsg {
+ uint64_t guest_address;
+ uint32_t size;
+ uint8_t data[];
+} VhostUserMemRWMsg;
+
typedef struct VhostUserLog {
uint64_t mmap_size;
uint64_t mmap_offset;
@@ -235,6 +243,7 @@ typedef struct VhostUserMsg {
VhostUserInflight inflight;
VhostUserShared object;
VhostUserMMap mmap;
+ VhostUserMemRWMsg mem_rw;
} payload;
int fds[VHOST_MEMORY_BASELINE_NREGIONS];
@@ -650,6 +659,35 @@ bool vu_shmem_map(VuDev *dev, uint8_t shmid, uint64_t fd_offset,
bool vu_shmem_unmap(VuDev *dev, uint8_t shmid, uint64_t fd_offset,
uint64_t shm_offset, uint64_t len);
+/**
+ * vu_send_mem_read:
+ * @dev: a VuDev context
+ * @guest_addr: guest physical address to read
+ * @size: number of bytes to read
+ * @data: caller-allocated buffer of at least `size` bytes
+ *
+ * Reads `size` bytes at `guest_addr` in the frontend and stores
+ * them in `data`.
+ *
+ * Returns: TRUE on success, FALSE on failure.
+ */
+bool vu_send_mem_read(VuDev *dev, uint64_t guest_addr, uint32_t size,
+ uint8_t *data);
+
+/**
+ * vu_send_mem_write:
+ * @dev: a VuDev context
+ * @guest_addr: guest physical address to write
+ * @size: number of bytes to write
+ * @data: buffer holding the `size` bytes to write
+ *
+ * Writes `size` bytes from `data` into `guest_addr` in the frontend.
+ *
+ * Returns: TRUE on success, FALSE on failure.
+ */
+bool vu_send_mem_write(VuDev *dev, uint64_t guest_addr, uint32_t size,
+ uint8_t *data);
+
/**
* vu_queue_set_notification:
* @dev: a VuDev context
--
2.45.2