From: John Levon <john.levon@nutanix.com>
To: qemu-devel@nongnu.org
Cc: "Thanos Makatos" <thanos.makatos@nutanix.com>,
"Cédric Le Goater" <clg@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Marc-André Lureau" <marcandre.lureau@redhat.com>,
"John Levon" <john.levon@nutanix.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Daniel P. Berrangé" <berrange@redhat.com>,
"John Levon" <levon@movementarian.org>,
"John Johnson" <john.g.johnson@oracle.com>,
"Jagannathan Raman" <jag.raman@oracle.com>,
"Elena Ufimtseva" <elena.ufimtseva@oracle.com>
Subject: [PATCH v4 13/19] vfio-user: implement VFIO_USER_DMA_MAP/UNMAP
Date: Thu, 19 Jun 2025 06:31:47 -0700
Message-ID: <20250619133154.264786-14-john.levon@nutanix.com>
In-Reply-To: <20250619133154.264786-1-john.levon@nutanix.com>
From: John Levon <levon@movementarian.org>

When the vfio-user container gets mapping updates, share them with the
vfio-user server by sending a message; this can include the region fd,
allowing the server to directly mmap() the region as needed.

For performance, we only wait for the message responses once we're done
with a series of updates, in the listener_commit() callback.
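
As a rough sketch of the flow (not a literal trace), a memory
transaction that adds two sections becomes:

    listener_begin()           -> proxy->async_ops = true
    region_add(iova0, size0)   -> VFIO_USER_DMA_MAP sent, no wait
    region_add(iova1, size1)   -> VFIO_USER_DMA_MAP sent, no wait
    listener_commit()          -> proxy->async_ops = false;
                                  vfio_user_wait_reqs() blocks until
                                  the last outstanding reply arrives
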
Originally-by: John Johnson <john.g.johnson@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: John Levon <john.levon@nutanix.com>
---
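
[Note for reviewers, not part of the patch: a server receiving
VFIO_USER_DMA_MAP with an fd attached could establish the mapping
roughly as sketched below. handle_dma_map() and the translation-table
step are hypothetical; only the mmap() and linux/vfio.h names are real.

    #include <sys/mman.h>
    #include <errno.h>

    static int handle_dma_map(int region_fd, VFIOUserDMAMap *msg)
    {
        int prot = PROT_READ;
        void *addr;

        if (msg->flags & VFIO_DMA_MAP_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        /* msg->offset is a file offset into region_fd, not a VA */
        addr = mmap(NULL, msg->size, prot, MAP_SHARED, region_fd,
                    msg->offset);
        if (addr == MAP_FAILED) {
            return -errno;
        }

        /* record iova -> addr in the server's DMA translation table */
        return 0;
    }
]
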
hw/vfio-user/protocol.h | 32 +++++++++++
hw/vfio-user/proxy.h | 6 +++
hw/vfio-user/container.c | 108 +++++++++++++++++++++++++++++++++++++-
hw/vfio-user/proxy.c | 77 ++++++++++++++++++++++++++-
hw/vfio-user/trace-events | 4 ++
5 files changed, 223 insertions(+), 4 deletions(-)
diff --git a/hw/vfio-user/protocol.h b/hw/vfio-user/protocol.h
index 48144b2c33..524f3d633a 100644
--- a/hw/vfio-user/protocol.h
+++ b/hw/vfio-user/protocol.h
@@ -112,6 +112,31 @@ typedef struct {
  */
 #define VFIO_USER_DEF_MAX_BITMAP (256 * 1024 * 1024)
 
+/*
+ * VFIO_USER_DMA_MAP
+ * imported from struct vfio_iommu_type1_dma_map
+ */
+typedef struct {
+    VFIOUserHdr hdr;
+    uint32_t argsz;
+    uint32_t flags;
+    uint64_t offset;    /* FD offset */
+    uint64_t iova;
+    uint64_t size;
+} VFIOUserDMAMap;
+
+/*
+ * VFIO_USER_DMA_UNMAP
+ * imported from struct vfio_iommu_type1_dma_unmap
+ */
+typedef struct {
+    VFIOUserHdr hdr;
+    uint32_t argsz;
+    uint32_t flags;
+    uint64_t iova;
+    uint64_t size;
+} VFIOUserDMAUnmap;
+
 /*
  * VFIO_USER_DEVICE_GET_INFO
  * imported from struct vfio_device_info
@@ -175,4 +200,11 @@ typedef struct {
     char data[];
 } VFIOUserRegionRW;
 
+/* imported from struct vfio_bitmap */
+typedef struct {
+    uint64_t pgsize;
+    uint64_t size;
+    char data[];
+} VFIOUserBitmap;
+
 #endif /* VFIO_USER_PROTOCOL_H */
diff --git a/hw/vfio-user/proxy.h b/hw/vfio-user/proxy.h
index e5ac558a65..a9a36e4110 100644
--- a/hw/vfio-user/proxy.h
+++ b/hw/vfio-user/proxy.h
@@ -70,6 +70,7 @@ typedef struct VFIOUserProxy {
     QemuCond close_cv;
     AioContext *ctx;
     QEMUBH *req_bh;
+    bool async_ops;
 
     /*
      * above only changed when BQL is held
@@ -99,9 +100,14 @@ void vfio_user_set_handler(VFIODevice *vbasedev,
                            void *reqarg);
 bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp);
 
+VFIOUserFDs *vfio_user_getfds(int numfds);
+
 void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd,
                            uint32_t size, uint32_t flags);
+void vfio_user_wait_reqs(VFIOUserProxy *proxy);
 void vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                          VFIOUserFDs *fds, int rsize);
+void vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+                           VFIOUserFDs *fds, int rsize);
 
 #endif /* VFIO_USER_PROXY_H */
diff --git a/hw/vfio-user/container.c b/hw/vfio-user/container.c
index f7c285ec2d..a9cc4b197e 100644
--- a/hw/vfio-user/container.c
+++ b/hw/vfio-user/container.c
@@ -12,23 +12,125 @@
 
 #include "hw/vfio-user/container.h"
 #include "hw/vfio-user/device.h"
+#include "hw/vfio-user/trace.h"
 #include "hw/vfio/vfio-cpr.h"
 #include "hw/vfio/vfio-device.h"
 #include "hw/vfio/vfio-listener.h"
 #include "qapi/error.h"
 
+/*
+ * When DMA space is the physical address space, the region add/del listeners
+ * will fire during memory update transactions. These depend on BQL being
+ * held, so do any resulting map/unmap ops async while keeping BQL.
+ */
+static void vfio_user_listener_begin(VFIOContainerBase *bcontainer)
+{
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+
+    container->proxy->async_ops = true;
+}
+
+static void vfio_user_listener_commit(VFIOContainerBase *bcontainer)
+{
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+
+    /* wait here for any async requests sent during the transaction */
+    container->proxy->async_ops = false;
+    vfio_user_wait_reqs(container->proxy);
+}
+
 static int vfio_user_dma_unmap(const VFIOContainerBase *bcontainer,
                                hwaddr iova, ram_addr_t size,
                                IOMMUTLBEntry *iotlb, bool unmap_all)
 {
-    return -ENOTSUP;
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+    VFIOUserDMAUnmap *msgp = g_malloc(sizeof(*msgp));
+    int ret;
+
+    vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_UNMAP, sizeof(*msgp), 0);
+    msgp->argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
+    msgp->flags = unmap_all ? VFIO_DMA_UNMAP_FLAG_ALL : 0;
+    msgp->iova = iova;
+    msgp->size = size;
+
+    trace_vfio_user_dma_unmap(msgp->iova, msgp->size, msgp->flags,
+                              container->proxy->async_ops);
+
+    if (container->proxy->async_ops) {
+        /* ownership of msgp passes to the proxy; freed once replied to */
+        vfio_user_send_nowait(container->proxy, &msgp->hdr, NULL, 0);
+        return 0;
+    }
+
+    vfio_user_send_wait(container->proxy, &msgp->hdr, NULL, 0);
+    ret = (msgp->hdr.flags & VFIO_USER_ERROR) ? -msgp->hdr.error_reply : 0;
+
+    g_free(msgp);
+    return ret;
 }
 
 static int vfio_user_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
                              ram_addr_t size, void *vaddr, bool readonly,
                              MemoryRegion *mrp)
 {
-    return -ENOTSUP;
+    VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer,
+                                                bcontainer);
+
+    VFIOUserProxy *proxy = container->proxy;
+    int fd = memory_region_get_fd(mrp);
+    int ret;
+
+    VFIOUserFDs *fds = NULL;
+    VFIOUserDMAMap *msgp = g_malloc0(sizeof(*msgp));
+
+    vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_MAP, sizeof(*msgp), 0);
+    msgp->argsz = sizeof(struct vfio_iommu_type1_dma_map);
+    msgp->flags = VFIO_DMA_MAP_FLAG_READ;
+    msgp->offset = 0;
+    msgp->iova = iova;
+    msgp->size = size;
+
+    /*
+     * vaddr enters as a QEMU process address; make it either a file offset
+     * for mapped areas, or leave it as 0.
+     */
+    if (fd != -1) {
+        msgp->offset = qemu_ram_block_host_offset(mrp->ram_block, vaddr);
+    }
+
+    if (!readonly) {
+        msgp->flags |= VFIO_DMA_MAP_FLAG_WRITE;
+    }
+
+    trace_vfio_user_dma_map(msgp->iova, msgp->size, msgp->offset, msgp->flags,
+                            container->proxy->async_ops);
+
+    /*
+     * The async_ops case sends without blocking; the replies are waited for
+     * later, in vfio_user_wait_reqs().
+     */
+    if (container->proxy->async_ops) {
+        /* can't use an auto variable since we don't block */
+        if (fd != -1) {
+            fds = vfio_user_getfds(1);
+            fds->send_fds = 1;
+            fds->fds[0] = fd;
+        }
+        vfio_user_send_nowait(proxy, &msgp->hdr, fds, 0);
+        ret = 0;
+    } else {
+        VFIOUserFDs local_fds = { 1, 0, &fd };
+
+        fds = fd != -1 ? &local_fds : NULL;
+        vfio_user_send_wait(proxy, &msgp->hdr, fds, 0);
+        ret = (msgp->hdr.flags & VFIO_USER_ERROR) ? -msgp->hdr.error_reply : 0;
+        g_free(msgp);
+    }
+
+    return ret;
 }
 
 static int
@@ -220,6 +322,8 @@ static void vfio_iommu_user_class_init(ObjectClass *klass, const void *data)
     VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);
 
     vioc->setup = vfio_user_setup;
+    vioc->listener_begin = vfio_user_listener_begin;
+    vioc->listener_commit = vfio_user_listener_commit;
     vioc->dma_map = vfio_user_dma_map;
     vioc->dma_unmap = vfio_user_dma_unmap;
     vioc->attach_device = vfio_user_device_attach;
diff --git a/hw/vfio-user/proxy.c b/hw/vfio-user/proxy.c
index c6b6628505..315f5fd350 100644
--- a/hw/vfio-user/proxy.c
+++ b/hw/vfio-user/proxy.c
@@ -28,7 +28,6 @@ static void vfio_user_shutdown(VFIOUserProxy *proxy);
 static int vfio_user_send_qio(VFIOUserProxy *proxy, VFIOUserMsg *msg);
 static VFIOUserMsg *vfio_user_getmsg(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                                      VFIOUserFDs *fds);
-static VFIOUserFDs *vfio_user_getfds(int numfds);
 static void vfio_user_recycle(VFIOUserProxy *proxy, VFIOUserMsg *msg);
 
 static void vfio_user_recv(void *opaque);
@@ -130,7 +129,7 @@ static void vfio_user_recycle(VFIOUserProxy *proxy, VFIOUserMsg *msg)
     QTAILQ_INSERT_HEAD(&proxy->free, msg, next);
 }
 
-static VFIOUserFDs *vfio_user_getfds(int numfds)
+VFIOUserFDs *vfio_user_getfds(int numfds)
 {
     VFIOUserFDs *fds = g_malloc0(sizeof(*fds) + (numfds * sizeof(int)));
 
@@ -606,6 +605,36 @@ static int vfio_user_send_queued(VFIOUserProxy *proxy, VFIOUserMsg *msg)
     return 0;
 }
 
+/*
+ * nowait send - vfio_user_wait_reqs() can wait for the reply later
+ */
+void vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+                           VFIOUserFDs *fds, int rsize)
+{
+    VFIOUserMsg *msg;
+    int ret;
+
+    if (hdr->flags & VFIO_USER_NO_REPLY) {
+        error_printf("vfio_user_send_nowait on async message\n");
+        return;
+    }
+
+    QEMU_LOCK_GUARD(&proxy->lock);
+
+    msg = vfio_user_getmsg(proxy, hdr, fds);
+    msg->id = hdr->id;
+    msg->rsize = rsize ? rsize : hdr->size;
+    msg->type = VFIO_MSG_NOWAIT;
+
+    ret = vfio_user_send_queued(proxy, msg);
+    if (ret < 0) {
+        vfio_user_recycle(proxy, msg);
+        return;
+    }
+
+    proxy->last_nowait = msg;
+}
+
 void vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                          VFIOUserFDs *fds, int rsize)
 {
@@ -644,6 +673,50 @@ void vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
     qemu_mutex_unlock(&proxy->lock);
 }
 
+void vfio_user_wait_reqs(VFIOUserProxy *proxy)
+{
+    VFIOUserMsg *msg;
+
+    /*
+     * Any DMA map/unmap requests sent in the middle of a memory region
+     * transaction were sent nowait. Wait for them here.
+     */
+    qemu_mutex_lock(&proxy->lock);
+    if (proxy->last_nowait != NULL) {
+        /* change type to WAIT to wait for the reply */
+        msg = proxy->last_nowait;
+        msg->type = VFIO_MSG_WAIT;
+        proxy->last_nowait = NULL;
+        while (!msg->complete) {
+            if (!qemu_cond_timedwait(&msg->cv, &proxy->lock, wait_time)) {
+                VFIOUserMsgQ *list;
+
+                list = msg->pending ? &proxy->pending : &proxy->outgoing;
+                QTAILQ_REMOVE(list, msg, next);
+                error_printf("vfio_user_wait_reqs - timed out\n");
+                break;
+            }
+        }
+
+        if (msg->hdr->flags & VFIO_USER_ERROR) {
+            error_printf("vfio_user_wait_reqs - error reply on async "
+                         "request: command %x error %s\n", msg->hdr->command,
+                         strerror(msg->hdr->error_reply));
+        }
+
+        /* change type back to NOWAIT so recycling frees the buffer */
+        msg->type = VFIO_MSG_NOWAIT;
+        vfio_user_recycle(proxy, msg);
+    }
+
+    qemu_mutex_unlock(&proxy->lock);
+}
+
 static QLIST_HEAD(, VFIOUserProxy) vfio_user_sockets =
     QLIST_HEAD_INITIALIZER(vfio_user_sockets);
diff --git a/hw/vfio-user/trace-events b/hw/vfio-user/trace-events
index 053f5932eb..7ef98813b3 100644
--- a/hw/vfio-user/trace-events
+++ b/hw/vfio-user/trace-events
@@ -11,3 +11,7 @@ vfio_user_get_region_info(uint32_t index, uint32_t flags, uint64_t size) " index
 vfio_user_region_rw(uint32_t region, uint64_t off, uint32_t count) " region %d offset 0x%"PRIx64" count %d"
 vfio_user_get_irq_info(uint32_t index, uint32_t flags, uint32_t count) " index %d flags 0x%x count %d"
 vfio_user_set_irqs(uint32_t index, uint32_t start, uint32_t count, uint32_t flags) " index %d start %d count %d flags 0x%x"
+
+# container.c
+vfio_user_dma_map(uint64_t iova, uint64_t size, uint64_t off, uint32_t flags, bool async_ops) " iova 0x%"PRIx64" size 0x%"PRIx64" off 0x%"PRIx64" flags 0x%x async_ops %d"
+vfio_user_dma_unmap(uint64_t iova, uint64_t size, uint32_t flags, bool async_ops) " iova 0x%"PRIx64" size 0x%"PRIx64" flags 0x%x async_ops %d"
--
2.43.0