From: John Levon <john.levon@nutanix.com>
To: qemu-devel@nongnu.org
Cc: "Marc-André Lureau" <marcandre.lureau@redhat.com>,
"Thanos Makatos" <thanos.makatos@nutanix.com>,
"Daniel P. Berrangé" <berrange@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Peter Xu" <peterx@redhat.com>,
"David Hildenbrand" <david@redhat.com>,
"Cédric Le Goater" <clg@redhat.com>,
"Stefano Garzarella" <sgarzare@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"John Levon" <john.levon@nutanix.com>,
"John Johnson" <john.g.johnson@oracle.com>,
"Elena Ufimtseva" <elena.ufimtseva@oracle.com>,
"Jagannathan Raman" <jag.raman@oracle.com>
Subject: [PATCH 23/27] vfio-user: implement VFIO_USER_DMA_READ/WRITE
Date: Thu, 15 May 2025 16:44:08 +0100
Message-ID: <20250515154413.210315-24-john.levon@nutanix.com>
In-Reply-To: <20250515154413.210315-1-john.levon@nutanix.com>

Unlike most other messages, these are server->client messages: they are
used when the server wants to perform "DMA" to or from guest memory via
the client. This path is slow, so normally the server has guest memory
directly mapped instead.

Originally-by: John Johnson <john.g.johnson@oracle.com>
Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
Signed-off-by: John Levon <john.levon@nutanix.com>
---
 hw/vfio-user/protocol.h |  13 ++++-
 hw/vfio-user/proxy.h    |   3 ++
 hw/vfio-user/pci.c      | 111 ++++++++++++++++++++++++++++++++++++++++
 hw/vfio-user/proxy.c    |  84 ++++++++++++++++++++++++++++++
 4 files changed, 210 insertions(+), 1 deletion(-)
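
Before the diff itself, a minimal sketch of the wire exchange this
implements may help: the server sends a VFIOUserDMARW header naming a
guest offset and byte count; for a read, the client replies with the
same header followed by the data, while for a write the payload follows
the request header instead. The sketch below shows the server side of a
DMA read request. The struct layouts mirror protocol.h in this series,
but the command numbers, flag values, and the write_all() helper are
assumptions for illustration only, not part of this patch or of QEMU's
API.

/*
 * Illustrative sketch only: how a vfio-user server might build and
 * send a VFIO_USER_DMA_READ request. Command and flag values are
 * assumed here; wire fields are little-endian, and a little-endian
 * host is assumed for brevity.
 */
#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

typedef struct {
    uint16_t id;          /* matches a reply to its request */
    uint16_t command;     /* VFIO_USER_* command number */
    uint32_t size;        /* total message size, header included */
    uint32_t flags;       /* request/reply/no-reply/error bits */
    uint32_t error_reply; /* errno, valid when VFIO_USER_ERROR is set */
} VFIOUserHdr;

typedef struct {
    VFIOUserHdr hdr;
    uint64_t offset;      /* guest address to access */
    uint32_t count;       /* bytes to transfer */
    char data[];          /* payload: reply (read) or request (write) */
} VFIOUserDMARW;

enum {
    VFIO_USER_DMA_READ  = 11, /* assumed numbering */
    VFIO_USER_DMA_WRITE = 12, /* assumed numbering */
};

#define VFIO_USER_REQUEST 0x0 /* assumed flag encoding */

/* Hypothetical helper: send exactly len bytes over the socket. */
static int write_all(int sock, const void *buf, size_t len)
{
    const char *p = buf;

    while (len > 0) {
        ssize_t n = send(sock, p, len, 0);

        if (n < 0) {
            return -1;
        }
        p += n;
        len -= n;
    }
    return 0;
}

/* Ask the client to read 'count' bytes of guest memory at 'offset'. */
static int dma_read_request(int sock, uint16_t msg_id,
                            uint64_t offset, uint32_t count)
{
    VFIOUserDMARW req = {
        .hdr = {
            .id      = msg_id,
            .command = VFIO_USER_DMA_READ,
            .size    = sizeof(req),  /* the request carries no payload */
            .flags   = VFIO_USER_REQUEST,
        },
        .offset = offset,
        .count  = count,
    };

    /* the client replies with the same header plus 'count' data bytes */
    return write_all(sock, &req, sizeof(req));
}

Note how vfio_user_dma_read() below checks msg->count against
max_xfer_size: a server must keep each transfer within the negotiated
maximum, splitting larger accesses into multiple requests.
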
diff --git a/hw/vfio-user/protocol.h b/hw/vfio-user/protocol.h
index 4fa6d03b3f..8f589faef4 100644
--- a/hw/vfio-user/protocol.h
+++ b/hw/vfio-user/protocol.h
@@ -203,7 +203,18 @@ typedef struct {
     char data[];
 } VFIOUserRegionRW;
 
-/*imported from struct vfio_bitmap */
+/*
+ * VFIO_USER_DMA_READ
+ * VFIO_USER_DMA_WRITE
+ */
+typedef struct {
+    VFIOUserHdr hdr;
+    uint64_t offset;
+    uint32_t count;
+    char data[];
+} VFIOUserDMARW;
+
+/* imported from struct vfio_bitmap */
 typedef struct {
     uint64_t pgsize;
     uint64_t size;
diff --git a/hw/vfio-user/proxy.h b/hw/vfio-user/proxy.h
index 01fed3f17d..8f2d529846 100644
--- a/hw/vfio-user/proxy.h
+++ b/hw/vfio-user/proxy.h
@@ -104,6 +104,7 @@ void vfio_user_set_handler(VFIODevice *vbasedev,
 bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp);
 
 VFIOUserFDs *vfio_user_getfds(int numfds);
+void vfio_user_putfds(VFIOUserMsg *msg);
 
 void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd,
                            uint32_t size, uint32_t flags);
@@ -112,5 +113,7 @@ void vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                          VFIOUserFDs *fds, int rsize);
 void vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                            VFIOUserFDs *fds, int rsize);
+void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size);
+void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error);
 
 #endif /* VFIO_USER_PROXY_H */
diff --git a/hw/vfio-user/pci.c b/hw/vfio-user/pci.c
index 55cc811d3c..a55a0a0972 100644
--- a/hw/vfio-user/pci.c
+++ b/hw/vfio-user/pci.c
@@ -11,6 +11,7 @@
 #include <sys/ioctl.h>
 
 #include "qemu/osdep.h"
+#include "qemu/error-report.h"
 #include "hw/qdev-properties.h"
 #include "hw/vfio/pci.h"
 
@@ -82,6 +83,95 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
     vdev->msix->pba_region = NULL;
 }
 
+static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
+{
+    PCIDevice *pdev = &vdev->pdev;
+    VFIOUserProxy *proxy = vdev->vbasedev.proxy;
+    VFIOUserDMARW *res;
+    MemTxResult r;
+    size_t size;
+
+    if (msg->hdr.size < sizeof(*msg)) {
+        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+        return;
+    }
+    if (msg->count > proxy->max_xfer_size) {
+        vfio_user_send_error(proxy, &msg->hdr, E2BIG);
+        return;
+    }
+
+    /* switch to our own message buffer */
+    size = msg->count + sizeof(VFIOUserDMARW);
+    res = g_malloc0(size);
+    memcpy(res, msg, sizeof(*res));
+    g_free(msg);
+
+    r = pci_dma_read(pdev, res->offset, &res->data, res->count);
+
+    switch (r) {
+    case MEMTX_OK:
+        if (res->hdr.flags & VFIO_USER_NO_REPLY) {
+            g_free(res);
+            return;
+        }
+        vfio_user_send_reply(proxy, &res->hdr, size);
+        break;
+    case MEMTX_ERROR:
+        vfio_user_send_error(proxy, &res->hdr, EFAULT);
+        break;
+    case MEMTX_DECODE_ERROR:
+        vfio_user_send_error(proxy, &res->hdr, ENODEV);
+        break;
+    case MEMTX_ACCESS_ERROR:
+        vfio_user_send_error(proxy, &res->hdr, EPERM);
+        break;
+    default:
+        error_printf("vfio_user_dma_read unknown error %d\n", r);
+        vfio_user_send_error(proxy, &res->hdr, EINVAL);
+    }
+}
+
+static void vfio_user_dma_write(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
+{
+    PCIDevice *pdev = &vdev->pdev;
+    VFIOUserProxy *proxy = vdev->vbasedev.proxy;
+    MemTxResult r;
+
+    if (msg->hdr.size < sizeof(*msg)) {
+        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+        return;
+    }
+    /* make sure transfer count isn't larger than the message data */
+    if (msg->count > msg->hdr.size - sizeof(*msg)) {
+        vfio_user_send_error(proxy, &msg->hdr, E2BIG);
+        return;
+    }
+
+    r = pci_dma_write(pdev, msg->offset, &msg->data, msg->count);
+
+    switch (r) {
+    case MEMTX_OK:
+        if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) {
+            vfio_user_send_reply(proxy, &msg->hdr, sizeof(msg->hdr));
+        } else {
+            g_free(msg);
+        }
+        break;
+    case MEMTX_ERROR:
+        vfio_user_send_error(proxy, &msg->hdr, EFAULT);
+        break;
+    case MEMTX_DECODE_ERROR:
+        vfio_user_send_error(proxy, &msg->hdr, ENODEV);
+        break;
+    case MEMTX_ACCESS_ERROR:
+        vfio_user_send_error(proxy, &msg->hdr, EPERM);
+        break;
+    default:
+        error_printf("vfio_user_dma_write unknown error %d\n", r);
+        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+    }
+}
+
 /*
  * Incoming request message callback.
  *
@@ -89,7 +179,28 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
  */
 static void vfio_user_pci_process_req(void *opaque, VFIOUserMsg *msg)
 {
+    VFIOPCIDevice *vdev = opaque;
+    VFIOUserHdr *hdr = msg->hdr;
+
+    /* no incoming PCI requests pass FDs */
+    if (msg->fds != NULL) {
+        vfio_user_send_error(vdev->vbasedev.proxy, hdr, EINVAL);
+        vfio_user_putfds(msg);
+        return;
+    }
+    switch (hdr->command) {
+    case VFIO_USER_DMA_READ:
+        vfio_user_dma_read(vdev, (VFIOUserDMARW *)hdr);
+        break;
+    case VFIO_USER_DMA_WRITE:
+        vfio_user_dma_write(vdev, (VFIOUserDMARW *)hdr);
+        break;
+    default:
+        error_printf("vfio_user_pci_process_req unknown cmd %d\n",
+                     hdr->command);
+        vfio_user_send_error(vdev->vbasedev.proxy, hdr, ENOSYS);
+    }
 }
 
 /*
diff --git a/hw/vfio-user/proxy.c b/hw/vfio-user/proxy.c
index 6d1446c38b..0935b72e9f 100644
--- a/hw/vfio-user/proxy.c
+++ b/hw/vfio-user/proxy.c
@@ -362,6 +362,10 @@ static int vfio_user_recv_one(VFIOUserProxy *proxy)
         *msg->hdr = hdr;
         data = (char *)msg->hdr + sizeof(hdr);
     } else {
+        if (hdr.size > proxy->max_xfer_size + sizeof(VFIOUserDMARW)) {
+            error_setg(&local_err, "vfio_user_recv request larger than max");
+            goto err;
+        }
         buf = g_malloc0(hdr.size);
         memcpy(buf, &hdr, sizeof(hdr));
         data = buf + sizeof(hdr);
@@ -676,6 +680,33 @@ void vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
     qemu_mutex_unlock(&proxy->lock);
 }
 
+/*
+ * async send - msg can be queued, but will be freed when sent
+ */
+static void vfio_user_send_async(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
+                                 VFIOUserFDs *fds)
+{
+    VFIOUserMsg *msg;
+    int ret;
+
+    if (!(hdr->flags & (VFIO_USER_NO_REPLY | VFIO_USER_REPLY))) {
+        error_printf("vfio_user_send_async on sync message\n");
+        return;
+    }
+
+    QEMU_LOCK_GUARD(&proxy->lock);
+
+    msg = vfio_user_getmsg(proxy, hdr, fds);
+    msg->id = hdr->id;
+    msg->rsize = 0;
+    msg->type = VFIO_MSG_ASYNC;
+
+    ret = vfio_user_send_queued(proxy, msg);
+    if (ret < 0) {
+        vfio_user_recycle(proxy, msg);
+    }
+}
+
 void vfio_user_wait_reqs(VFIOUserProxy *proxy)
 {
     VFIOUserMsg *msg;
@@ -720,6 +751,59 @@ void vfio_user_wait_reqs(VFIOUserProxy *proxy)
     qemu_mutex_unlock(&proxy->lock);
 }
 
+/*
+ * Reply to an incoming request.
+ */
+void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size)
+{
+
+    if (size < sizeof(VFIOUserHdr)) {
+        error_printf("vfio_user_send_reply - size too small\n");
+        g_free(hdr);
+        return;
+    }
+
+    /*
+     * convert header to associated reply
+     */
+    hdr->flags = VFIO_USER_REPLY;
+    hdr->size = size;
+
+    vfio_user_send_async(proxy, hdr, NULL);
+}
+
+/*
+ * Send an error reply to an incoming request.
+ */
+void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error)
+{
+
+    /*
+     * convert header to associated reply
+     */
+    hdr->flags = VFIO_USER_REPLY;
+    hdr->flags |= VFIO_USER_ERROR;
+    hdr->error_reply = error;
+    hdr->size = sizeof(*hdr);
+
+    vfio_user_send_async(proxy, hdr, NULL);
+}
+
+/*
+ * Close FDs erroneously received in an incoming request.
+ */
+void vfio_user_putfds(VFIOUserMsg *msg)
+{
+    VFIOUserFDs *fds = msg->fds;
+    int i;
+
+    for (i = 0; i < fds->recv_fds; i++) {
+        close(fds->fds[i]);
+    }
+    g_free(fds);
+    msg->fds = NULL;
+}
+
 static QLIST_HEAD(, VFIOUserProxy) vfio_user_sockets =
     QLIST_HEAD_INITIALIZER(vfio_user_sockets);
 
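
For the reverse direction, here is a similarly hedged sketch of how a
server might consume the reply that vfio_user_send_reply() and
vfio_user_send_error() above produce: a reply echoes the request id
with VFIO_USER_REPLY set, an error is flagged by VFIO_USER_ERROR with
an errno value in error_reply, and a successful read reply carries the
data after the fixed-size part of the message. This reuses the struct
definitions and includes from the earlier sketch; the flag values and
the read_all() helper are again assumptions, and reading the header
before allocating the full message mirrors what vfio_user_recv_one()
does on the client side.

#include <stdlib.h>
#include <string.h>

#define VFIO_USER_REPLY 0x1  /* assumed flag encoding */
#define VFIO_USER_ERROR 0x20 /* assumed flag encoding */

/* Hypothetical helper: receive exactly len bytes from the socket. */
static int read_all(int sock, void *buf, size_t len)
{
    char *p = buf;

    while (len > 0) {
        ssize_t n = recv(sock, p, len, MSG_WAITALL);

        if (n <= 0) {
            return -1;
        }
        p += n;
        len -= n;
    }
    return 0;
}

/* Collect the reply for a previously sent DMA read request. */
static int dma_read_reply(int sock, uint16_t expect_id,
                          void *out, uint32_t count)
{
    VFIOUserHdr hdr;
    VFIOUserDMARW *rsp;
    int ret = -1;

    /* read the fixed header first: an error reply is header-only */
    if (read_all(sock, &hdr, sizeof(hdr)) < 0) {
        return -1;
    }
    if (hdr.id != expect_id || !(hdr.flags & VFIO_USER_REPLY)) {
        return -1;                    /* not the reply we expected */
    }
    if (hdr.flags & VFIO_USER_ERROR) {
        return -(int)hdr.error_reply; /* client reported an errno */
    }
    if (hdr.size != sizeof(*rsp) + count) {
        return -1;                    /* malformed or short reply */
    }

    /* pull in the rest of the message, then copy out the payload */
    rsp = malloc(hdr.size);
    if (rsp == NULL) {
        return -1;
    }
    rsp->hdr = hdr;
    if (read_all(sock, (char *)rsp + sizeof(hdr),
                 hdr.size - sizeof(hdr)) == 0 && rsp->count == count) {
        memcpy(out, rsp->data, count);
        ret = 0;
    }
    free(rsp);
    return ret;
}
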
--
2.43.0