From: Zhenzhong Duan <zhenzhong.duan@intel.com>
To: qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, clg@redhat.com, jgg@nvidia.com,
nicolinc@nvidia.com, joao.m.martins@oracle.com,
eric.auger@redhat.com, peterx@redhat.com, jasowang@redhat.com,
kevin.tian@intel.com, yi.l.liu@intel.com, yi.y.sun@intel.com,
chao.p.peng@intel.com, Yi Sun <yi.y.sun@linux.intel.com>,
Zhenzhong Duan <zhenzhong.duan@intel.com>
Subject: [PATCH v2 04/27] vfio/container: Switch to dma_map|unmap API
Date: Mon, 16 Oct 2023 16:32:00 +0800
Message-ID: <20231016083223.1519410-5-zhenzhong.duan@intel.com>
In-Reply-To: <20231016083223.1519410-1-zhenzhong.duan@intel.com>
From: Eric Auger <eric.auger@redhat.com>

Introduce vfio_container_dma_map() and vfio_container_dma_unmap() in the
base container, dispatching into the backend ops dma_map and dma_unmap
callbacks, and convert all callers of vfio_dma_map()/vfio_dma_unmap() to
the new API. The legacy implementations become the static
vfio_legacy_dma_map()/vfio_legacy_dma_unmap() callbacks registered by
the legacy ops class, and the overflow-workaround trace point is renamed
to match.

No functional change intended.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
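Note for reviewers: the two wrappers introduced below dispatch through
the backend ops class added in patch 02. As a rough sketch only, with
the field layout assumed from its use in this patch (see patch 02 for
the real definition), the class looks something like:

    /* Sketch: layout assumed from usage in this patch, not the header. */
    struct VFIOIOMMUBackendOpsClass {
        ObjectClass parent_class;

        int (*dma_map)(VFIOContainer *bcontainer, hwaddr iova,
                       ram_addr_t size, void *vaddr, bool readonly);
        int (*dma_unmap)(VFIOContainer *bcontainer, hwaddr iova,
                         ram_addr_t size, IOMMUTLBEntry *iotlb);
    };

A backend fills in the callbacks it supports; the base-container
wrappers return -EINVAL when a callback is left NULL.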
 include/hw/vfio/vfio-common.h         |  4 ---
 include/hw/vfio/vfio-container-base.h |  7 +++++
 hw/vfio/common.c                      | 45 +++++++++++++++------------
 hw/vfio/container-base.c              | 22 +++++++++++++
 hw/vfio/container.c                   | 25 +++++++++++----
 hw/vfio/trace-events                  |  2 +-
 6 files changed, 74 insertions(+), 31 deletions(-)
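With the wrappers in place, a map request flows as follows (unmap has
the same shape):

    memory listener / IOMMU notifier
      -> vfio_container_dma_map(bcontainer, iova, size, vaddr, readonly)
         -> bcontainer->ops->dma_map()              /* backend dispatch */
            -> vfio_legacy_dma_map()                /* type1 backend    */
               -> ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)

The legacy callbacks recover their VFIOLegacyContainer from the base
pointer with container_of(). A minimal standalone illustration of that
embedding pattern (toy names, not the QEMU types):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct Base { int id; } Base;

    typedef struct Derived {
        int fd;
        Base bcontainer;    /* embedded base object */
    } Derived;

    static int derived_fd(Base *b)
    {
        /* Walk back from the embedded member to the enclosing struct. */
        Derived *d = container_of(b, Derived, bcontainer);
        return d->fd;
    }
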
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 9651cf921c..f2aa122c47 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -212,10 +212,6 @@ void vfio_put_address_space(VFIOAddressSpace *space);
 bool vfio_devices_all_running_and_saving(VFIOLegacyContainer *container);
 
 /* container->fd */
-int vfio_dma_unmap(VFIOLegacyContainer *container, hwaddr iova,
-                   ram_addr_t size, IOMMUTLBEntry *iotlb);
-int vfio_dma_map(VFIOLegacyContainer *container, hwaddr iova,
-                 ram_addr_t size, void *vaddr, bool readonly);
 int vfio_set_dirty_page_tracking(VFIOLegacyContainer *container, bool start);
 int vfio_query_dirty_bitmap(VFIOLegacyContainer *container, VFIOBitmap *vbmap,
                             hwaddr iova, hwaddr size);
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index 226e960fb5..1483e77441 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -46,6 +46,13 @@ struct VFIOContainer {
     VFIOIOMMUBackendOpsClass *ops;
 };
 
+int vfio_container_dma_map(VFIOContainer *bcontainer,
+                           hwaddr iova, ram_addr_t size,
+                           void *vaddr, bool readonly);
+int vfio_container_dma_unmap(VFIOContainer *bcontainer,
+                             hwaddr iova, ram_addr_t size,
+                             IOMMUTLBEntry *iotlb);
+
 #define TYPE_VFIO_IOMMU_BACKEND_LEGACY_OPS "vfio-iommu-backend-legacy-ops"
 #define TYPE_VFIO_IOMMU_BACKEND_OPS "vfio-iommu-backend-ops"
 
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b51ef3a15a..6be1526d79 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -337,7 +337,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
-    VFIOLegacyContainer *container = giommu->container;
+    VFIOContainer *bcontainer = &giommu->container->bcontainer;
     hwaddr iova = iotlb->iova + giommu->iommu_offset;
     void *vaddr;
     int ret;
@@ -367,21 +367,22 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
          * of vaddr will always be there, even if the memory object is
          * destroyed and its backing memory munmap-ed.
          */
-        ret = vfio_dma_map(container, iova,
-                           iotlb->addr_mask + 1, vaddr,
-                           read_only);
+        ret = vfio_container_dma_map(bcontainer, iova,
+                                     iotlb->addr_mask + 1, vaddr,
+                                     read_only);
         if (ret) {
-            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+            error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
                          "0x%"HWADDR_PRIx", %p) = %d (%s)",
-                         container, iova,
+                         bcontainer, iova,
                          iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
         }
     } else {
-        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
+        ret = vfio_container_dma_unmap(bcontainer, iova,
+                                       iotlb->addr_mask + 1, iotlb);
         if (ret) {
-            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+            error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                          "0x%"HWADDR_PRIx") = %d (%s)",
-                         container, iova,
+                         bcontainer, iova,
                          iotlb->addr_mask + 1, ret, strerror(-ret));
             vfio_set_migration_error(ret);
         }
@@ -400,9 +401,10 @@ static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
     int ret;
 
     /* Unmap with a single call. */
-    ret = vfio_dma_unmap(vrdl->container, iova, size , NULL);
+    ret = vfio_container_dma_unmap(&vrdl->container->bcontainer,
+                                   iova, size , NULL);
     if (ret) {
-        error_report("%s: vfio_dma_unmap() failed: %s", __func__,
+        error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
                      strerror(-ret));
     }
 }
@@ -430,8 +432,8 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                section->offset_within_address_space;
         vaddr = memory_region_get_ram_ptr(section->mr) + start;
 
-        ret = vfio_dma_map(vrdl->container, iova, next - start,
-                           vaddr, section->readonly);
+        ret = vfio_container_dma_map(&vrdl->container->bcontainer, iova,
+                                     next - start, vaddr, section->readonly);
         if (ret) {
             /* Rollback */
             vfio_ram_discard_notify_discard(rdl, section);
@@ -746,10 +748,11 @@ static void vfio_listener_region_add(MemoryListener *listener,
         }
     }
 
-    ret = vfio_dma_map(container, iova, int128_get64(llsize),
-                       vaddr, section->readonly);
+    ret = vfio_container_dma_map(&container->bcontainer,
+                                 iova, int128_get64(llsize), vaddr,
+                                 section->readonly);
     if (ret) {
-        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+        error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
                    "0x%"HWADDR_PRIx", %p) = %d (%s)",
                    container, iova, int128_get64(llsize), vaddr, ret,
                    strerror(-ret));
@@ -852,18 +855,20 @@ static void vfio_listener_region_del(MemoryListener *listener,
     if (int128_eq(llsize, int128_2_64())) {
         /* The unmap ioctl doesn't accept a full 64-bit span. */
         llsize = int128_rshift(llsize, 1);
-        ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
+        ret = vfio_container_dma_unmap(&container->bcontainer, iova,
+                                       int128_get64(llsize), NULL);
         if (ret) {
-            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+            error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                          "0x%"HWADDR_PRIx") = %d (%s)",
                          container, iova, int128_get64(llsize), ret,
                          strerror(-ret));
         }
         iova += int128_get64(llsize);
     }
-    ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
+    ret = vfio_container_dma_unmap(&container->bcontainer, iova,
+                                   int128_get64(llsize), NULL);
     if (ret) {
-        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+        error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                      "0x%"HWADDR_PRIx") = %d (%s)",
                      container, iova, int128_get64(llsize), ret,
                      strerror(-ret));
diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c
index 0c21e77039..78329935f6 100644
--- a/hw/vfio/container-base.c
+++ b/hw/vfio/container-base.c
@@ -26,6 +26,28 @@
 #include "qemu/error-report.h"
 #include "hw/vfio/vfio-container-base.h"
 
+int vfio_container_dma_map(VFIOContainer *bcontainer,
+                           hwaddr iova, ram_addr_t size,
+                           void *vaddr, bool readonly)
+{
+    if (!bcontainer->ops->dma_map) {
+        return -EINVAL;
+    }
+
+    return bcontainer->ops->dma_map(bcontainer, iova, size, vaddr, readonly);
+}
+
+int vfio_container_dma_unmap(VFIOContainer *bcontainer,
+                             hwaddr iova, ram_addr_t size,
+                             IOMMUTLBEntry *iotlb)
+{
+    if (!bcontainer->ops->dma_unmap) {
+        return -EINVAL;
+    }
+
+    return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
+}
+
 static const TypeInfo vfio_iommu_backend_ops_type_info = {
     .name = TYPE_VFIO_IOMMU_BACKEND_OPS,
     .parent = TYPE_OBJECT,
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index acc4a6bf8a..80aafa21ed 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -121,9 +121,13 @@ unmap_exit:
 /*
  * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
  */
-int vfio_dma_unmap(VFIOLegacyContainer *container, hwaddr iova,
-                   ram_addr_t size, IOMMUTLBEntry *iotlb)
+static int vfio_legacy_dma_unmap(VFIOContainer *bcontainer, hwaddr iova,
+                                 ram_addr_t size, IOMMUTLBEntry *iotlb)
 {
+    VFIOLegacyContainer *container = container_of(bcontainer,
+                                                  VFIOLegacyContainer,
+                                                  bcontainer);
+
     struct vfio_iommu_type1_dma_unmap unmap = {
         .argsz = sizeof(unmap),
         .flags = 0,
@@ -157,7 +161,7 @@ int vfio_dma_unmap(VFIOLegacyContainer *container, hwaddr iova,
          */
         if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
             container->iommu_type == VFIO_TYPE1v2_IOMMU) {
-            trace_vfio_dma_unmap_overflow_workaround();
+            trace_vfio_legacy_dma_unmap_overflow_workaround();
             unmap.size -= 1ULL << ctz64(container->pgsizes);
             continue;
         }
@@ -176,9 +180,13 @@ int vfio_dma_unmap(VFIOLegacyContainer *container, hwaddr iova,
     return 0;
 }
 
-int vfio_dma_map(VFIOLegacyContainer *container, hwaddr iova,
-                 ram_addr_t size, void *vaddr, bool readonly)
+static int vfio_legacy_dma_map(VFIOContainer *bcontainer, hwaddr iova,
+                               ram_addr_t size, void *vaddr, bool readonly)
 {
+    VFIOLegacyContainer *container = container_of(bcontainer,
+                                                  VFIOLegacyContainer,
+                                                  bcontainer);
+
     struct vfio_iommu_type1_dma_map map = {
         .argsz = sizeof(map),
         .flags = VFIO_DMA_MAP_FLAG_READ,
@@ -197,7 +205,8 @@ int vfio_dma_map(VFIOLegacyContainer *container, hwaddr iova,
      * the VGA ROM space.
      */
     if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
-        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
+        (errno == EBUSY &&
+         vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
          ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
         return 0;
     }
@@ -1168,6 +1177,10 @@ void vfio_detach_device(VFIODevice *vbasedev)
 
 static void vfio_iommu_backend_legacy_ops_class_init(ObjectClass *oc,
                                                      void *data) {
+    VFIOIOMMUBackendOpsClass *ops = VFIO_IOMMU_BACKEND_OPS_CLASS(oc);
+
+    ops->dma_map = vfio_legacy_dma_map;
+    ops->dma_unmap = vfio_legacy_dma_unmap;
 }
 
 static const TypeInfo vfio_iommu_backend_legacy_ops_type = {
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 0eb2387cf2..9f7fedee98 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -116,7 +116,7 @@ vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Re
 vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries"
 vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
 vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
-vfio_dma_unmap_overflow_workaround(void) ""
+vfio_legacy_dma_unmap_overflow_workaround(void) ""
 vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64
 vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
 
-- 
2.34.1