From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: qemu-devel@nongnu.org
Cc: Peter Crosthwaite <peter.crosthwaite@xilinx.com>,
Alexey Kardashevskiy <aik@ozlabs.ru>,
Michael Roth <mdroth@linux.vnet.ibm.com>,
Alex Williamson <alex.williamson@redhat.com>,
qemu-ppc@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
David Gibson <david@gibson.dropbear.id.au>
Subject: [Qemu-devel] [RFC PATCH qemu v3 4/4] vfio: spapr: Add SPAPR IOMMU v2 support (DMA memory preregistering)
Date: Tue, 14 Jul 2015 22:21:54 +1000
Message-ID: <1436876514-2946-5-git-send-email-aik@ozlabs.ru>
In-Reply-To: <1436876514-2946-1-git-send-email-aik@ozlabs.ru>
This makes use of the new "memory registering" feature. The idea is
to give userspace the ability to notify the host kernel about pages
which are going to be used for DMA. With this information, the host
kernel can pin them all once per user process, do the locked-pages
accounting once, and avoid doing that work at DMA mapping time, where
failures cannot always be handled nicely.
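For illustration, a minimal (untested) userspace sketch of the
registration step, assuming "container_fd" is an already configured
SPAPR v2 container and vaddr/size describe one chunk of RAM; the helper
name is arbitrary:

    /*
     * Illustration only, not part of this patch: preregister one chunk
     * of RAM with an SPAPR v2 container.
     */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int spapr_preregister_ram(int container_fd, void *vaddr,
                                     uint64_t size)
    {
        struct vfio_iommu_spapr_register_memory reg = {
            .argsz = sizeof(reg),
            .flags = 0,
            .vaddr = (uint64_t)(uintptr_t)vaddr,
            .size  = size,
        };

        /* Pages are pinned and accounted once here, not at DMA map time. */
        return ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
    }

Unregistration is symmetrical via VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY
with the same structure.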
This adds a guest RAM memory listener which notifies a VFIO container
about memory which needs to be pinned/unpinned. VFIO MMIO regions
(i.e. "skip dump" regions) are skipped.
The feature is only enabled for SPAPR IOMMU v2; host kernel changes are
required. Since v2 does not need/support VFIO_IOMMU_ENABLE, it is not
called when v2 is detected and enabled.
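For illustration, a minimal (untested) sketch of that container setup
flow, assuming "container_fd" is an open container with its group
already attached and a host kernel new enough to advertise
VFIO_SPAPR_TCE_v2_IOMMU; the helper name is arbitrary:

    /*
     * Illustration only, not part of this patch: pick the SPAPR TCE
     * IOMMU type and enable the container only for v1.
     */
    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int spapr_setup_container(int container_fd)
    {
        bool v2 = !!ioctl(container_fd, VFIO_CHECK_EXTENSION,
                          VFIO_SPAPR_TCE_v2_IOMMU);
        int type = v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;

        if (ioctl(container_fd, VFIO_SET_IOMMU, type)) {
            return -1;
        }
        /* v2 does not need/support VFIO_IOMMU_ENABLE, call it only for v1. */
        if (!v2 && ioctl(container_fd, VFIO_IOMMU_ENABLE)) {
            return -1;
        }
        return 0;
    }

The registration/unregistration itself is then driven from the memory
listeners added below.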
This does not change the guest visible interface.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
Changes:
v3:
* new RAM listener skips BARs (i.e. "skip dump" regions)
v2:
* added another listener for RAM
---
 hw/vfio/common.c              | 99 +++++++++++++++++++++++++++++++++++++++----
 include/hw/vfio/vfio-common.h |  1 +
 trace-events                  |  2 +
 3 files changed, 94 insertions(+), 8 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 6982b8f..d78a83f 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -406,6 +406,19 @@ static void vfio_listener_region_add(VFIOContainer *container,
             goto error_exit;
         }
         break;
+
+    case VFIO_SPAPR_TCE_v2_IOMMU: {
+        struct vfio_iommu_spapr_register_memory reg = {
+            .argsz = sizeof(reg),
+            .flags = 0,
+            .vaddr = (uint64_t) vaddr,
+            .size = end - iova
+        };
+
+        ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+        trace_vfio_ram_register(reg.vaddr, reg.size, ret ? -errno : 0);
+        break;
+    }
     }
 
     return;
@@ -486,6 +499,26 @@ static void vfio_listener_region_del(VFIOContainer *container,
                      "0x%"HWADDR_PRIx") = %d (%m)",
                      container, iova, end - iova, ret);
     }
+
+    switch (container->iommu_data.type) {
+    case VFIO_SPAPR_TCE_v2_IOMMU:
+        if (!memory_region_is_iommu(section->mr)) {
+            void *vaddr = memory_region_get_ram_ptr(section->mr) +
+                          section->offset_within_region +
+                          (iova - section->offset_within_address_space);
+            struct vfio_iommu_spapr_register_memory reg = {
+                .argsz = sizeof(reg),
+                .flags = 0,
+                .vaddr = (uint64_t) vaddr,
+                .size = end - iova
+            };
+
+            ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY,
+                        &reg);
+            trace_vfio_ram_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
+        }
+        break;
+    }
 }
 
 static void vfio_type1_iommu_listener_region_add(MemoryListener *listener,
@@ -550,8 +583,42 @@ static const MemoryListener vfio_spapr_iommu_listener = {
     .region_del = vfio_spapr_iommu_listener_region_del,
 };
 
+static void vfio_spapr_ram_listener_region_add(MemoryListener *listener,
+                                               MemoryRegionSection *section)
+{
+    VFIOContainer *container = container_of(listener, VFIOContainer,
+                                            iommu_data.spapr.ram_listener);
+
+    if (memory_region_is_skip_dump(section->mr)) {
+        return;
+    }
+    vfio_listener_region_add(container, qemu_real_host_page_mask, listener,
+                             section);
+}
+
+static void vfio_spapr_ram_listener_region_del(MemoryListener *listener,
+                                               MemoryRegionSection *section)
+{
+    VFIOContainer *container = container_of(listener, VFIOContainer,
+                                            iommu_data.spapr.ram_listener);
+
+    if (memory_region_is_skip_dump(section->mr)) {
+        return;
+    }
+    vfio_listener_region_del(container, qemu_real_host_page_mask, listener,
+                             section);
+}
+
+static const MemoryListener vfio_spapr_ram_listener = {
+    .region_add = vfio_spapr_ram_listener_region_add,
+    .region_del = vfio_spapr_ram_listener_region_del,
+};
+
 static void vfio_listener_release(VFIOContainer *container)
 {
+    if (container->iommu_data.type == VFIO_SPAPR_TCE_v2_IOMMU) {
+        memory_listener_unregister(&container->iommu_data.spapr.ram_listener);
+    }
     memory_listener_unregister(&container->iommu_data.type1.listener);
 }
@@ -765,14 +832,18 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
         container->iommu_data.type1.initialized = true;
 
-    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
+    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
+               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
+        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);
+
         ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
         if (ret) {
             error_report("vfio: failed to set group container: %m");
             ret = -errno;
             goto free_container_exit;
         }
 
-        container->iommu_data.type = VFIO_SPAPR_TCE_IOMMU;
+        container->iommu_data.type =
+            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
         ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_data.type);
         if (ret) {
             error_report("vfio: failed to set iommu for container: %m");
@@ -785,18 +856,30 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
          * when container fd is closed so we do not call it explicitly
          * in this file.
          */
-        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
-        if (ret) {
-            error_report("vfio: failed to enable container: %m");
-            ret = -errno;
-            goto free_container_exit;
+        if (!v2) {
+            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
+            if (ret) {
+                error_report("vfio: failed to enable container: %m");
+                ret = -errno;
+                goto free_container_exit;
+            }
         }
 
         container->iommu_data.spapr.common.listener = vfio_spapr_iommu_listener;
         container->iommu_data.release = vfio_listener_release;
-
         memory_listener_register(&container->iommu_data.spapr.common.listener,
                                  container->space->as);
 
+        if (v2) {
+            container->iommu_data.spapr.ram_listener = vfio_spapr_ram_listener;
+            memory_listener_register(&container->iommu_data.spapr.ram_listener,
+                                     &address_space_memory);
+            if (container->iommu_data.spapr.common.error) {
+                error_report("vfio: RAM memory listener initialization failed for container");
+                goto listener_release_exit;
+            }
+
+            container->iommu_data.spapr.common.initialized = true;
+        }
     } else {
         error_report("vfio: No available IOMMU models");
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 135ea64..8edd572 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -72,6 +72,7 @@ typedef struct VFIOType1 {
 
 typedef struct VFIOSPAPR {
     VFIOType1 common;
+    MemoryListener ram_listener;
 } VFIOSPAPR;
 
 typedef struct VFIOContainer {
diff --git a/trace-events b/trace-events
index d24d80a..f859ad0 100644
--- a/trace-events
+++ b/trace-events
@@ -1582,6 +1582,8 @@ vfio_disconnect_container(int fd) "close container->fd=%d"
 vfio_put_group(int fd) "close group->fd=%d"
 vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
 vfio_put_base_device(int fd) "close vdev->fd=%d"
+vfio_ram_register(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d"
+vfio_ram_unregister(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d"
 
 # hw/vfio/platform.c
 vfio_platform_populate_regions(int region_index, unsigned long flag, unsigned long size, int fd, unsigned long offset) "- region %d flags = 0x%lx, size = 0x%lx, fd= %d, offset = 0x%lx"
--
2.4.0.rc3.8.gfb3e7d5