From: "Cédric Le Goater" <clg@redhat.com>
To: qemu-devel@nongnu.org
Subject: Re: [PATCH v1 17/24] vfio-user: dma map/unmap operations
Date: Thu, 15 Dec 2022 13:39:58 +0100 [thread overview]
Message-ID: <9cab42e5-54ac-0e30-bc23-b1f7838cb1d5@redhat.com> (raw)
In-Reply-To: <2f842a47474f4369b9f41b22d72277abf5b3b31b.1667542066.git.john.g.johnson@oracle.com>
On 11/9/22 00:13, John Johnson wrote:
> Add ability to do async operations during memory transactions
>
> Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
> Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
> Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
> ---
>  hw/vfio/common.c              |  63 +++++++++---
>  hw/vfio/user-protocol.h       |  32 ++++++
>  hw/vfio/user.c                | 220 ++++++++++++++++++++++++++++++++++++++++++
>  include/hw/vfio/vfio-common.h |   9 +-
>  4 files changed, 308 insertions(+), 16 deletions(-)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index e73a772..fe6eddd 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -507,7 +507,7 @@ static int vfio_dma_unmap(VFIOContainer *container,
>      return CONT_DMA_UNMAP(container, &unmap, NULL);
>  }
>
> -static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
> +static int vfio_dma_map(VFIOContainer *container, MemoryRegion *mr, hwaddr iova,
>                          ram_addr_t size, void *vaddr, bool readonly)
>  {
>      struct vfio_iommu_type1_dma_map map = {
> @@ -523,7 +523,7 @@ static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
>          map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
>      }
>
> -    ret = CONT_DMA_MAP(container, &map);
> +    ret = CONT_DMA_MAP(container, mr, &map);
>
>      if (ret < 0) {
>          error_report("VFIO_MAP_DMA failed: %s", strerror(-ret));
> @@ -586,7 +586,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
>
>  /* Called with rcu_read_lock held. */
>  static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
> -                               ram_addr_t *ram_addr, bool *read_only)
> +                               ram_addr_t *ram_addr, bool *read_only,
> +                               MemoryRegion **mrp)
>  {
This needs a small update: a memory_get_xlat_addr() routine has been introduced in the meantime, and vfio_get_xlat_addr() is now a thin wrapper around it.
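Untested sketch of what the rebase could look like; it redoes the translation that memory_get_xlat_addr() performs internally in order to recover the MemoryRegion:

    static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                                   ram_addr_t *ram_addr, bool *read_only,
                                   MemoryRegion **mrp)
    {
        bool mr_has_discard_manager;

        if (!memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
                                  &mr_has_discard_manager)) {
            return false;
        }

        if (mrp != NULL) {
            hwaddr xlat;
            hwaddr len = iotlb->addr_mask + 1;

            /* same lookup memory_get_xlat_addr() does internally */
            *mrp = address_space_translate(&address_space_memory,
                                           iotlb->translated_addr,
                                           &xlat, &len,
                                           iotlb->perm & IOMMU_WO,
                                           MEMTXATTRS_UNSPECIFIED);
        }
        return true;
    }

Alternatively, memory_get_xlat_addr() could grow a MemoryRegion ** out-parameter so the translation is only done once.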
>      MemoryRegion *mr;
>      hwaddr xlat;
> @@ -667,6 +668,10 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
>          *read_only = !writable || mr->readonly;
>      }
>
> +    if (mrp != NULL) {
> +        *mrp = mr;
> +    }
> +
>      return true;
>  }
>
> @@ -674,6 +679,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
>      VFIOContainer *container = giommu->container;
> +    MemoryRegion *mr;
>      hwaddr iova = iotlb->iova + giommu->iommu_offset;
>      void *vaddr;
>      int ret;
> @@ -692,7 +698,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>      if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
>          bool read_only;
>
> -        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
> +        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &mr)) {
>              goto out;
>          }
>          /*
> @@ -702,14 +708,14 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>           * of vaddr will always be there, even if the memory object is
>           * destroyed and its backing memory munmap-ed.
>           */
> -        ret = vfio_dma_map(container, iova,
> +        ret = vfio_dma_map(container, mr, iova,
>                             iotlb->addr_mask + 1, vaddr,
>                             read_only);
>          if (ret) {
>              error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
> -                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
> +                         "0x%"HWADDR_PRIx", %p)",
>                           container, iova,
> -                         iotlb->addr_mask + 1, vaddr, ret);
> +                         iotlb->addr_mask + 1, vaddr);
>          }
>      } else {
>          ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
> @@ -764,7 +770,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
>                 section->offset_within_address_space;
>          vaddr = memory_region_get_ram_ptr(section->mr) + start;
>
> -        ret = vfio_dma_map(vrdl->container, iova, next - start,
> +        ret = vfio_dma_map(vrdl->container, section->mr, iova, next - start,
>                             vaddr, section->readonly);
>          if (ret) {
>              /* Rollback */
> @@ -888,6 +894,29 @@ static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
>      return true;
>  }
>
> +static void vfio_listener_begin(MemoryListener *listener)
> +{
> +    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
> +
> +    /*
> +     * When DMA space is the physical address space,
> +     * the region add/del listeners will fire during
> +     * memory update transactions.  These depend on BQL
> +     * being held, so do any resulting map/demap ops async
> +     * while keeping BQL.
> +     */
> +    container->async_ops = true;
> +}
> +
> +static void vfio_listener_commit(MemoryListener *listener)
> +{
> +    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
> +
> +    /* wait here for any async requests sent during the transaction */
> +    CONT_WAIT_COMMIT(container);
> +    container->async_ops = false;
> +}
> +
>  static void vfio_listener_region_add(MemoryListener *listener,
>                                       MemoryRegionSection *section)
>  {
> @@ -1095,12 +1124,12 @@ static void vfio_listener_region_add(MemoryListener *listener,
>          }
>      }
>
> -    ret = vfio_dma_map(container, iova, int128_get64(llsize),
> +    ret = vfio_dma_map(container, section->mr, iova, int128_get64(llsize),
>                         vaddr, section->readonly);
>      if (ret) {
>          error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
> -                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
> -                   container, iova, int128_get64(llsize), vaddr, ret);
> +                   "0x%"HWADDR_PRIx", %p)",
> +                   container, iova, int128_get64(llsize), vaddr);
>          if (memory_region_is_ram_device(section->mr)) {
>              /* Allow unexpected mappings not to be fatal for RAM devices */
>              error_report_err(err);
> @@ -1369,7 +1398,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>      }
>
>      rcu_read_lock();
> -    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
> +    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL, NULL)) {
>          int ret;
>
>          ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
> @@ -1487,6 +1516,8 @@ static void vfio_listener_log_sync(MemoryListener *listener,
>
>  static const MemoryListener vfio_memory_listener = {
>      .name = "vfio",
> +    .begin = vfio_listener_begin,
> +    .commit = vfio_listener_commit,
>      .region_add = vfio_listener_region_add,
>      .region_del = vfio_listener_region_del,
>      .log_global_start = vfio_listener_log_global_start,
> @@ -2977,7 +3008,7 @@ VFIODevIO vfio_dev_io_ioctl = {
>      .region_write = vfio_io_region_write,
>  };
>
> -static int vfio_io_dma_map(VFIOContainer *container,
> +static int vfio_io_dma_map(VFIOContainer *container, MemoryRegion *mr,
>                             struct vfio_iommu_type1_dma_map *map)
>  {
>
> @@ -3037,8 +3068,14 @@ static int vfio_io_dirty_bitmap(VFIOContainer *container,
>      return ret < 0 ? -errno : ret;
>  }
>
> +static void vfio_io_wait_commit(VFIOContainer *container)
> +{
> +    /* ioctl()s are synchronous */
> +}
> +
>  VFIOContIO vfio_cont_io_ioctl = {
>      .dma_map = vfio_io_dma_map,
>      .dma_unmap = vfio_io_dma_unmap,
>      .dirty_bitmap = vfio_io_dirty_bitmap,
> +    .wait_commit = vfio_io_wait_commit,
>  };
> diff --git a/hw/vfio/user-protocol.h b/hw/vfio/user-protocol.h
> index 31704cf..e9fcf64 100644
> --- a/hw/vfio/user-protocol.h
> +++ b/hw/vfio/user-protocol.h
> @@ -114,6 +114,31 @@ typedef struct {
>  #define VFIO_USER_DEF_MAX_BITMAP (256 * 1024 * 1024)
>
>  /*
> + * VFIO_USER_DMA_MAP
> + * imported from struct vfio_iommu_type1_dma_map
> + */
> +typedef struct {
> +    VFIOUserHdr hdr;
> +    uint32_t argsz;
> +    uint32_t flags;
> +    uint64_t offset;    /* FD offset */
> +    uint64_t iova;
> +    uint64_t size;
> +} VFIOUserDMAMap;
> +
> +/*
> + * VFIO_USER_DMA_UNMAP
> + * imported from struct vfio_iommu_type1_dma_unmap
> + */
> +typedef struct {
> +    VFIOUserHdr hdr;
> +    uint32_t argsz;
> +    uint32_t flags;
> +    uint64_t iova;
> +    uint64_t size;
> +} VFIOUserDMAUnmap;
> +
> +/*
>   * VFIO_USER_DEVICE_GET_INFO
>   * imported from struct_device_info
>   */
> @@ -177,4 +202,11 @@ typedef struct {
>      char data[];
>  } VFIOUserRegionRW;
>
> +/*imported from struct vfio_bitmap */
> +typedef struct {
> +    uint64_t pgsize;
> +    uint64_t size;
> +    char data[];
> +} VFIOUserBitmap;
> +
>  #endif /* VFIO_USER_PROTOCOL_H */
> diff --git a/hw/vfio/user.c b/hw/vfio/user.c
> index 2d35f83..1fd37cc 100644
> --- a/hw/vfio/user.c
> +++ b/hw/vfio/user.c
> @@ -59,8 +59,11 @@ static void vfio_user_request(void *opaque);
>  static int vfio_user_send_queued(VFIOProxy *proxy, VFIOUserMsg *msg);
>  static void vfio_user_send_async(VFIOProxy *proxy, VFIOUserHdr *hdr,
>                                   VFIOUserFDs *fds);
> +static void vfio_user_send_nowait(VFIOProxy *proxy, VFIOUserHdr *hdr,
> +                                  VFIOUserFDs *fds, int rsize);
>  static void vfio_user_send_wait(VFIOProxy *proxy, VFIOUserHdr *hdr,
>                                  VFIOUserFDs *fds, int rsize, bool nobql);
> +static void vfio_user_wait_reqs(VFIOProxy *proxy);
>  static void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd,
>                                    uint32_t size, uint32_t flags);
>
> @@ -647,6 +650,36 @@ static void vfio_user_send_async(VFIOProxy *proxy, VFIOUserHdr *hdr,
>      }
>  }
>
> +/*
> + * nowait send - vfio_wait_reqs() can wait for it later
> + */
> +static void vfio_user_send_nowait(VFIOProxy *proxy, VFIOUserHdr *hdr,
> +                                  VFIOUserFDs *fds, int rsize)
> +{
> +    VFIOUserMsg *msg;
> +    int ret;
> +
> +    if (hdr->flags & VFIO_USER_NO_REPLY) {
> +        error_printf("vfio_user_send_nowait on async message\n");
> +        return;
> +    }
> +
> +    QEMU_LOCK_GUARD(&proxy->lock);
> +
> +    msg = vfio_user_getmsg(proxy, hdr, fds);
> +    msg->id = hdr->id;
> +    msg->rsize = rsize ? rsize : hdr->size;
> +    msg->type = VFIO_MSG_NOWAIT;
> +
> +    ret = vfio_user_send_queued(proxy, msg);
> +    if (ret < 0) {
> +        vfio_user_recycle(proxy, msg);
> +        return;
> +    }
> +
> +    proxy->last_nowait = msg;
> +}
> +
>  static void vfio_user_send_wait(VFIOProxy *proxy, VFIOUserHdr *hdr,
>                                  VFIOUserFDs *fds, int rsize, bool nobql)
>  {
> @@ -699,6 +732,60 @@ static void vfio_user_send_wait(VFIOProxy *proxy, VFIOUserHdr *hdr,
>      }
>  }
>
> +static void vfio_user_wait_reqs(VFIOProxy *proxy)
> +{
> +    VFIOUserMsg *msg;
> +    bool iolock = false;
> +
> +    /*
> +     * Any DMA map/unmap requests sent in the middle
> +     * of a memory region transaction were sent nowait.
> +     * Wait for them here.
> +     */
> +    qemu_mutex_lock(&proxy->lock);
> +    if (proxy->last_nowait != NULL) {
> +        iolock = qemu_mutex_iothread_locked();
> +        if (iolock) {
> +            qemu_mutex_unlock_iothread();
> +        }
> +
> +        /*
> +         * Change type to WAIT to wait for reply
> +         */
> +        msg = proxy->last_nowait;
> +        msg->type = VFIO_MSG_WAIT;
> +        while (!msg->complete) {
> +            if (!qemu_cond_timedwait(&msg->cv, &proxy->lock, wait_time)) {
> +                VFIOUserMsgQ *list;
> +
> +                list = msg->pending ? &proxy->pending : &proxy->outgoing;
> +                QTAILQ_REMOVE(list, msg, next);
> +                error_printf("vfio_wait_reqs - timed out\n");
> +                break;
> +            }
> +        }
> +
> +        if (msg->hdr->flags & VFIO_USER_ERROR) {
> +            error_printf("vfio_user_wait_reqs - error reply on async request ");
> +            error_printf("command %x error %s\n", msg->hdr->command,
> +                         strerror(msg->hdr->error_reply));
> +        }
> +
> +        proxy->last_nowait = NULL;
> +        /*
> +         * Change type back to NOWAIT to free
> +         */
> +        msg->type = VFIO_MSG_NOWAIT;
> +        vfio_user_recycle(proxy, msg);
> +    }
> +
> +    /* lock order is BQL->proxy - don't hold proxy when getting BQL */
> +    qemu_mutex_unlock(&proxy->lock);
> +    if (iolock) {
> +        qemu_mutex_lock_iothread();
> +    }
> +}
> +
>  static QLIST_HEAD(, VFIOProxy) vfio_user_sockets =
>      QLIST_HEAD_INITIALIZER(vfio_user_sockets);
>
> @@ -1113,6 +1200,103 @@ int vfio_user_validate_version(VFIOProxy *proxy, Error **errp)
>      return 0;
>  }
>
> +static int vfio_user_dma_map(VFIOProxy *proxy,
> +                             struct vfio_iommu_type1_dma_map *map,
> +                             int fd, bool will_commit)
> +{
> +    VFIOUserFDs *fds = NULL;
> +    VFIOUserDMAMap *msgp = g_malloc0(sizeof(*msgp));
> +    int ret;
> +
> +    vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_MAP, sizeof(*msgp), 0);
> +    msgp->argsz = map->argsz;
> +    msgp->flags = map->flags;
> +    msgp->offset = map->vaddr;
> +    msgp->iova = map->iova;
> +    msgp->size = map->size;
> +
> +    /*
> +     * The will_commit case sends without blocking or dropping BQL.
> +     * They're later waited for in vfio_send_wait_reqs.
> +     */
> +    if (will_commit) {
> +        /* can't use auto variable since we don't block */
> +        if (fd != -1) {
> +            fds = vfio_user_getfds(1);
It is difficult to see where vfio_user_putfds() is called to free 'fds'. The blocking path below uses an auto variable, but this heap allocation looks like it is leaked once the message is recycled.
> +            fds->send_fds = 1;
> +            fds->fds[0] = fd;
> +        }
> +        vfio_user_send_nowait(proxy, &msgp->hdr, fds, 0);
> +        ret = 0;
> +    } else {
> +        VFIOUserFDs local_fds = { 1, 0, &fd };
> +
> +        fds = fd != -1 ? &local_fds : NULL;
> +        vfio_user_send_wait(proxy, &msgp->hdr, fds, 0, will_commit);
> +        ret = (msgp->hdr.flags & VFIO_USER_ERROR) ? -msgp->hdr.error_reply : 0;
> +        g_free(msgp);
> +    }
> +
> +    return ret;
> +}
> +
> +static int vfio_user_dma_unmap(VFIOProxy *proxy,
> +                               struct vfio_iommu_type1_dma_unmap *unmap,
> +                               struct vfio_bitmap *bitmap, bool will_commit)
> +{
> +    struct {
> +        VFIOUserDMAUnmap msg;
> +        VFIOUserBitmap bitmap;
> +    } *msgp = NULL;
> +    int msize, rsize;
> +    bool blocking = !will_commit;
> +
> +    if (bitmap == NULL &&
> +        (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)) {
> +        error_printf("vfio_user_dma_unmap mismatched flags and bitmap\n");
> +        return -EINVAL;
> +    }
> +
> +    /*
> +     * If a dirty bitmap is returned, allocate extra space for it
> +     * and block for reply even in the will_commit case.
> +     * Otherwise, can send the unmap request without waiting.
> +     */
> +    if (bitmap != NULL) {
> +        blocking = true;
> +        msize = sizeof(*msgp);
> +        rsize = msize + bitmap->size;
> +        msgp = g_malloc0(rsize);
> +        msgp->bitmap.pgsize = bitmap->pgsize;
> +        msgp->bitmap.size = bitmap->size;
> +    } else {
> +        msize = rsize = sizeof(VFIOUserDMAUnmap);
> +        msgp = g_malloc0(rsize);
> +    }
> +
> +    vfio_user_request_msg(&msgp->msg.hdr, VFIO_USER_DMA_UNMAP, msize, 0);
> +    msgp->msg.argsz = rsize - sizeof(VFIOUserHdr);
> +    msgp->msg.argsz = unmap->argsz;
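By the way, msgp->msg.argsz is assigned twice here, so the first value is immediately overwritten; I suppose one of the two lines is a leftover.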
> +    msgp->msg.flags = unmap->flags;
> +    msgp->msg.iova = unmap->iova;
> +    msgp->msg.size = unmap->size;
> +
> +    if (blocking) {
> +        vfio_user_send_wait(proxy, &msgp->msg.hdr, NULL, rsize, will_commit);
> +        if (msgp->msg.hdr.flags & VFIO_USER_ERROR) {
> +            return -msgp->msg.hdr.error_reply;
> +        }
> +        if (bitmap != NULL) {
> +            memcpy(bitmap->data, &msgp->bitmap.data, bitmap->size);
> +        }
> +        g_free(msgp);
> +    } else {
> +        vfio_user_send_nowait(proxy, &msgp->msg.hdr, NULL, rsize);
> +    }
> +
> +    return 0;
> +}
> +
>  static int vfio_user_get_info(VFIOProxy *proxy, struct vfio_device_info *info)
>  {
>      VFIOUserDeviceInfo msg;
> @@ -1434,5 +1618,41 @@ VFIODevIO vfio_dev_io_sock = {
>  };
>
>
> +static int vfio_user_io_dma_map(VFIOContainer *container, MemoryRegion *mr,
> +                                struct vfio_iommu_type1_dma_map *map)
> +{
> +    int fd = memory_region_get_fd(mr);
> +
> +    /*
> +     * map->vaddr enters as a QEMU process address
> +     * make it either a file offset for mapped areas or 0
> +     */
> +    if (fd != -1) {
> +        void *addr = (void *)(uintptr_t)map->vaddr;
> +
> +        map->vaddr = qemu_ram_block_host_offset(mr->ram_block, addr);
> +    } else {
> +        map->vaddr = 0;
> +    }
> +
> +    return vfio_user_dma_map(container->proxy, map, fd, container->async_ops);
> +}
> +
> +static int vfio_user_io_dma_unmap(VFIOContainer *container,
> +                                  struct vfio_iommu_type1_dma_unmap *unmap,
> +                                  struct vfio_bitmap *bitmap)
> +{
> +    return vfio_user_dma_unmap(container->proxy, unmap, bitmap,
> +                               container->async_ops);
> +}
> +
> +static void vfio_user_io_wait_commit(VFIOContainer *container)
> +{
> +    vfio_user_wait_reqs(container->proxy);
> +}
> +
>  VFIOContIO vfio_cont_io_sock = {
> +    .dma_map = vfio_user_io_dma_map,
> +    .dma_unmap = vfio_user_io_dma_unmap,
> +    .wait_commit = vfio_user_io_wait_commit,
>  };
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 312ef9c..413dafc 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -90,6 +90,7 @@ typedef struct VFIOContainer {
>      VFIOContIO *io_ops;
>      bool initialized;
>      bool dirty_pages_supported;
> +    bool async_ops;
>      uint64_t dirty_pgsizes;
>      uint64_t max_dirty_bitmap_size;
>      unsigned long pgsizes;
> @@ -200,7 +201,7 @@ struct VFIODevIO {
>      ((vdev)->io_ops->region_write((vdev), (nr), (off), (size), (data), (post)))
>
>  struct VFIOContIO {
> -    int (*dma_map)(VFIOContainer *container,
> +    int (*dma_map)(VFIOContainer *container, MemoryRegion *mr,
>                     struct vfio_iommu_type1_dma_map *map);
>      int (*dma_unmap)(VFIOContainer *container,
>                       struct vfio_iommu_type1_dma_unmap *unmap,
> @@ -208,14 +209,16 @@ struct VFIOContIO {
>      int (*dirty_bitmap)(VFIOContainer *container,
>                          struct vfio_iommu_type1_dirty_bitmap *bitmap,
>                          struct vfio_iommu_type1_dirty_bitmap_get *range);
> +    void (*wait_commit)(VFIOContainer *container);
>  };
>
> -#define CONT_DMA_MAP(cont, map) \
> -    ((cont)->io_ops->dma_map((cont), (map)))
> +#define CONT_DMA_MAP(cont, mr, map) \
> +    ((cont)->io_ops->dma_map((cont), (mr), (map)))
>  #define CONT_DMA_UNMAP(cont, unmap, bitmap) \
>      ((cont)->io_ops->dma_unmap((cont), (unmap), (bitmap)))
>  #define CONT_DIRTY_BITMAP(cont, bitmap, range) \
>      ((cont)->io_ops->dirty_bitmap((cont), (bitmap), (range)))
> +#define CONT_WAIT_COMMIT(cont) ((cont)->io_ops->wait_commit(cont))
>
>  extern VFIODevIO vfio_dev_io_ioctl;
>  extern VFIOContIO vfio_cont_io_ioctl;