* [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
Create a KVM VFIO pseudo-device for the VM and register each VFIO
group fd with it via the KVM_DEV_VFIO_FILE_ADD attribute, so that the
vm instance is associated with its VFIO fds. Follow-up patches depend
on this association.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
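For context, the KVM side of this association is a small two-step
protocol: create a VFIO pseudo-device on the VM fd, then pass it each
group fd. A minimal standalone sketch (assuming a kernel that exposes
KVM_DEV_VFIO_FILE, v6.6+; error handling trimmed, not part of this
patch):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Associate one VFIO group fd with the VM behind vm_fd. */
static int kvm_vfio_file_add(int vm_fd, int group_fd)
{
	struct kvm_create_device device = {
		.type = KVM_DEV_TYPE_VFIO,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_FILE,
		.attr = KVM_DEV_VFIO_FILE_ADD,
		/* .addr points at the fd variable, not its value */
		.addr = (__u64)(unsigned long)&group_fd,
	};

	/* KVM_CREATE_DEVICE returns 0 and fills device.fd on success */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &device) < 0)
		return -1;

	return ioctl(device.fd, KVM_SET_DEVICE_ATTR, &attr);
}

The patch below does the same thing, creating the device once in
vfio__init() and adding each group fd in vfio_configure_groups().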
vfio/core.c | 24 +++++++++++++++++++++++-
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/vfio/core.c b/vfio/core.c
index 3ff2c0b075df..c6b305c30cf7 100644
--- a/vfio/core.c
+++ b/vfio/core.c
@@ -9,6 +9,7 @@
#define IOMMU_GROUP_DIR "/sys/kernel/iommu_groups"
static int vfio_container;
+static int kvm_vfio_device;
static LIST_HEAD(vfio_groups);
static struct vfio_device *vfio_devices;
@@ -437,8 +438,19 @@ static int vfio_configure_groups(struct kvm *kvm)
ret = vfio_configure_reserved_regions(kvm, group);
if (ret)
return ret;
- }
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_ADD,
+ .addr = (__u64)&group->fd,
+ };
+
+ if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
+ pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
+ return -ENODEV;
+ }
+
+ }
return 0;
}
@@ -656,6 +668,16 @@ static int vfio__init(struct kvm *kvm)
if (!vfio_devices)
return -ENOMEM;
+ struct kvm_create_device device = {
+ .type = KVM_DEV_TYPE_VFIO,
+ };
+
+ if (ioctl(kvm->vm_fd, KVM_CREATE_DEVICE, &device)) {
+ pr_err("Failed KVM_CREATE_DEVICE ioctl");
+ return -ENODEV;
+ }
+ kvm_vfio_device = device.fd;
+
ret = vfio_container_init(kvm);
if (ret)
return ret;
--
2.43.0
* [RFC PATCH kvmtool 02/10] vfio: Rename some functions
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
We will add iommufd support in later patches. Rename the functions
implementing the old container/group interface with a legacy_ prefix.
Also make vfio_group_exit() skip the container unset and close for
groups that never obtained a valid fd.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
vfio/core.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/vfio/core.c b/vfio/core.c
index c6b305c30cf7..424dc4ed3aef 100644
--- a/vfio/core.c
+++ b/vfio/core.c
@@ -282,7 +282,7 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
}
}
-static int vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
+static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
{
int ret;
struct vfio_group *group = vdev->group;
@@ -340,12 +340,12 @@ err_close_device:
return ret;
}
-static int vfio_configure_devices(struct kvm *kvm)
+static int legacy_vfio_configure_devices(struct kvm *kvm)
{
int i, ret;
for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
- ret = vfio_configure_device(kvm, &vfio_devices[i]);
+ ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
if (ret)
return ret;
}
@@ -429,7 +429,7 @@ static int vfio_configure_reserved_regions(struct kvm *kvm,
return ret;
}
-static int vfio_configure_groups(struct kvm *kvm)
+static int legacy_vfio_configure_groups(struct kvm *kvm)
{
int ret;
struct vfio_group *group;
@@ -454,7 +454,7 @@ static int vfio_configure_groups(struct kvm *kvm)
return 0;
}
-static struct vfio_group *vfio_group_create(struct kvm *kvm, unsigned long id)
+static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
{
int ret;
struct vfio_group *group;
@@ -512,10 +512,11 @@ static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
if (--group->refs != 0)
return;
- ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER);
-
list_del(&group->list);
- close(group->fd);
+ if (group->fd != -1) {
+ ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER);
+ close(group->fd);
+ }
free(group);
}
@@ -559,14 +560,14 @@ vfio_group_get_for_dev(struct kvm *kvm, struct vfio_device *vdev)
}
}
- group = vfio_group_create(kvm, group_id);
+ group = legacy_vfio_group_create(kvm, group_id);
out_close:
close(dirfd);
return group;
}
-static int vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
+static int legacy_vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
{
int ret;
char dev_path[PATH_MAX];
@@ -610,7 +611,7 @@ static void vfio_device_exit(struct kvm *kvm, struct vfio_device *vdev)
free(vdev->sysfs_path);
}
-static int vfio_container_init(struct kvm *kvm)
+static int legacy_vfio_container_init(struct kvm *kvm)
{
int api, i, ret, iommu_type;;
@@ -638,7 +639,7 @@ static int vfio_container_init(struct kvm *kvm)
for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
- ret = vfio_device_init(kvm, &vfio_devices[i]);
+ ret = legacy_vfio_device_init(kvm, &vfio_devices[i]);
if (ret)
return ret;
}
@@ -678,15 +679,15 @@ static int vfio__init(struct kvm *kvm)
}
kvm_vfio_device = device.fd;
- ret = vfio_container_init(kvm);
+ ret = legacy_vfio_container_init(kvm);
if (ret)
return ret;
- ret = vfio_configure_groups(kvm);
+ ret = legacy_vfio_configure_groups(kvm);
if (ret)
return ret;
- ret = vfio_configure_devices(kvm);
+ ret = legacy_vfio_configure_devices(kvm);
if (ret)
return ret;
--
2.43.0
* [RFC PATCH kvmtool 03/10] vfio: Create new file legacy.c
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
Move the legacy VFIO configuration code to a new file, legacy.c. Also
add the helpers vfio_map_mem_range()/vfio_unmap_mem_range(), which will
be switched to function pointers in a later patch (a sketch of the
intended dispatch follows below).
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
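A rough sketch of the dispatch this prepares for (illustrative only:
the ops structure and its name below are hypothetical, the actual
conversion to function pointers happens in a later patch):

struct vfio_dma_ops {
	int (*map_mem_range)(struct kvm *kvm, __u64 host_addr,
			     __u64 iova, __u64 size);
	int (*unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
};

/* Legacy container backend, built from the helpers added here */
static const struct vfio_dma_ops legacy_dma_ops = {
	.map_mem_range = vfio_map_mem_range,
	.unmap_mem_range = vfio_unmap_mem_range,
};

An iommufd backend would then supply its own pair, and vfio/core.c
would call through the selected ops pointer instead of invoking the
legacy helpers directly.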
Makefile | 1 +
include/kvm/vfio.h | 14 ++
vfio/core.c | 342 ++------------------------------------------
vfio/legacy.c | 347 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 372 insertions(+), 332 deletions(-)
create mode 100644 vfio/legacy.c
diff --git a/Makefile b/Makefile
index 60e551fd0c2a..8b2720f73386 100644
--- a/Makefile
+++ b/Makefile
@@ -65,6 +65,7 @@ OBJS += pci.o
OBJS += term.o
OBJS += vfio/core.o
OBJS += vfio/pci.o
+OBJS += vfio/legacy.o
OBJS += virtio/blk.o
OBJS += virtio/scsi.o
OBJS += virtio/console.o
diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
index ac7b6226239a..67a528f18d33 100644
--- a/include/kvm/vfio.h
+++ b/include/kvm/vfio.h
@@ -126,4 +126,18 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region);
int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *device);
void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
+int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
+int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size);
+
+struct kvm_mem_bank;
+int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
+int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
+int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group);
+int legacy_vfio__init(struct kvm *kvm);
+int legacy_vfio__exit(struct kvm *kvm);
+
+extern int kvm_vfio_device;
+extern struct list_head vfio_groups;
+extern struct vfio_device *vfio_devices;
+
#endif /* KVM__VFIO_H */
diff --git a/vfio/core.c b/vfio/core.c
index 424dc4ed3aef..2af30df3b2b9 100644
--- a/vfio/core.c
+++ b/vfio/core.c
@@ -4,14 +4,11 @@
#include <linux/list.h>
-#define VFIO_DEV_DIR "/dev/vfio"
-#define VFIO_DEV_NODE VFIO_DEV_DIR "/vfio"
#define IOMMU_GROUP_DIR "/sys/kernel/iommu_groups"
-static int vfio_container;
-static int kvm_vfio_device;
-static LIST_HEAD(vfio_groups);
-static struct vfio_device *vfio_devices;
+int kvm_vfio_device;
+LIST_HEAD(vfio_groups);
+struct vfio_device *vfio_devices;
static int vfio_device_pci_parser(const struct option *opt, char *arg,
struct vfio_device_params *dev)
@@ -282,124 +279,17 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
}
}
-static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
+int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
- int ret;
- struct vfio_group *group = vdev->group;
-
- vdev->fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD,
- vdev->params->name);
- if (vdev->fd < 0) {
- vfio_dev_warn(vdev, "failed to get fd");
-
- /* The device might be a bridge without an fd */
- return 0;
- }
-
- vdev->info.argsz = sizeof(vdev->info);
- if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
- ret = -errno;
- vfio_dev_err(vdev, "failed to get info");
- goto err_close_device;
- }
-
- if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
- ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
- vfio_dev_warn(vdev, "failed to reset device");
-
- vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
- if (!vdev->regions) {
- ret = -ENOMEM;
- goto err_close_device;
- }
-
- /* Now for the bus-specific initialization... */
- switch (vdev->params->type) {
- case VFIO_DEVICE_PCI:
- BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
- ret = vfio_pci_setup_device(kvm, vdev);
- break;
- default:
- BUG_ON(1);
- ret = -EINVAL;
- }
-
- if (ret)
- goto err_free_regions;
-
- vfio_dev_info(vdev, "assigned to device number 0x%x in group %lu",
- vdev->dev_hdr.dev_num, group->id);
-
- return 0;
-
-err_free_regions:
- free(vdev->regions);
-err_close_device:
- close(vdev->fd);
-
- return ret;
-}
-
-static int legacy_vfio_configure_devices(struct kvm *kvm)
-{
- int i, ret;
-
- for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
- ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
- if (ret)
- return ret;
- }
-
- return 0;
+ return vfio_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
}
-static int vfio_get_iommu_type(void)
+int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
- if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
- return VFIO_TYPE1v2_IOMMU;
-
- if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
- return VFIO_TYPE1_IOMMU;
-
- return -ENODEV;
-}
-
-static int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
-{
- int ret = 0;
- struct vfio_iommu_type1_dma_map dma_map = {
- .argsz = sizeof(dma_map),
- .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
- .vaddr = (unsigned long)bank->host_addr,
- .iova = (u64)bank->guest_phys_addr,
- .size = bank->size,
- };
-
- /* Map the guest memory for DMA (i.e. provide isolation) */
- if (ioctl(vfio_container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
- ret = -errno;
- pr_err("Failed to map 0x%llx -> 0x%llx (%llu) for DMA",
- dma_map.iova, dma_map.vaddr, dma_map.size);
- }
-
- return ret;
-}
-
-static int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
-{
- struct vfio_iommu_type1_dma_unmap dma_unmap = {
- .argsz = sizeof(dma_unmap),
- .size = bank->size,
- .iova = bank->guest_phys_addr,
- };
-
- ioctl(vfio_container, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
-
- return 0;
+ return vfio_unmap_mem_range(kvm, bank->guest_phys_addr, bank->size);
}
-static int vfio_configure_reserved_regions(struct kvm *kvm,
- struct vfio_group *group)
+int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group)
{
FILE *file;
int ret = 0;
@@ -429,84 +319,6 @@ static int vfio_configure_reserved_regions(struct kvm *kvm,
return ret;
}
-static int legacy_vfio_configure_groups(struct kvm *kvm)
-{
- int ret;
- struct vfio_group *group;
-
- list_for_each_entry(group, &vfio_groups, list) {
- ret = vfio_configure_reserved_regions(kvm, group);
- if (ret)
- return ret;
-
- struct kvm_device_attr attr = {
- .group = KVM_DEV_VFIO_FILE,
- .attr = KVM_DEV_VFIO_FILE_ADD,
- .addr = (__u64)&group->fd,
- };
-
- if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
- pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
- return -ENODEV;
- }
-
- }
- return 0;
-}
-
-static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
-{
- int ret;
- struct vfio_group *group;
- char group_node[PATH_MAX];
- struct vfio_group_status group_status = {
- .argsz = sizeof(group_status),
- };
-
- group = calloc(1, sizeof(*group));
- if (!group)
- return NULL;
-
- group->id = id;
- group->refs = 1;
-
- ret = snprintf(group_node, PATH_MAX, VFIO_DEV_DIR "/%lu", id);
- if (ret < 0 || ret == PATH_MAX)
- return NULL;
-
- group->fd = open(group_node, O_RDWR);
- if (group->fd < 0) {
- pr_err("Failed to open IOMMU group %s", group_node);
- goto err_free_group;
- }
-
- if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &group_status)) {
- pr_err("Failed to determine status of IOMMU group %lu", id);
- goto err_close_group;
- }
-
- if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
- pr_err("IOMMU group %lu is not viable", id);
- goto err_close_group;
- }
-
- if (ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &vfio_container)) {
- pr_err("Failed to add IOMMU group %lu to VFIO container", id);
- goto err_close_group;
- }
-
- list_add(&group->list, &vfio_groups);
-
- return group;
-
-err_close_group:
- close(group->fd);
-err_free_group:
- free(group);
-
- return NULL;
-}
-
static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
{
if (--group->refs != 0)
@@ -520,78 +332,6 @@ static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
free(group);
}
-static struct vfio_group *
-vfio_group_get_for_dev(struct kvm *kvm, struct vfio_device *vdev)
-{
- int dirfd;
- ssize_t ret;
- char *group_name;
- unsigned long group_id;
- char group_path[PATH_MAX];
- struct vfio_group *group = NULL;
-
- /* Find IOMMU group for this device */
- dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
- if (dirfd < 0) {
- vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
- return NULL;
- }
-
- ret = readlinkat(dirfd, "iommu_group", group_path, PATH_MAX);
- if (ret < 0) {
- vfio_dev_err(vdev, "no iommu_group");
- goto out_close;
- }
- if (ret == PATH_MAX)
- goto out_close;
-
- group_path[ret] = '\0';
-
- group_name = basename(group_path);
- errno = 0;
- group_id = strtoul(group_name, NULL, 10);
- if (errno)
- goto out_close;
-
- list_for_each_entry(group, &vfio_groups, list) {
- if (group->id == group_id) {
- group->refs++;
- return group;
- }
- }
-
- group = legacy_vfio_group_create(kvm, group_id);
-
-out_close:
- close(dirfd);
- return group;
-}
-
-static int legacy_vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
-{
- int ret;
- char dev_path[PATH_MAX];
- struct vfio_group *group;
-
- ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
- vdev->params->bus, vdev->params->name);
- if (ret < 0 || ret == PATH_MAX)
- return -EINVAL;
-
- vdev->sysfs_path = strndup(dev_path, PATH_MAX);
- if (!vdev->sysfs_path)
- return -errno;
-
- group = vfio_group_get_for_dev(kvm, vdev);
- if (!group) {
- free(vdev->sysfs_path);
- return -EINVAL;
- }
-
- vdev->group = group;
-
- return 0;
-}
static void vfio_device_exit(struct kvm *kvm, struct vfio_device *vdev)
{
@@ -611,57 +351,8 @@ static void vfio_device_exit(struct kvm *kvm, struct vfio_device *vdev)
free(vdev->sysfs_path);
}
-static int legacy_vfio_container_init(struct kvm *kvm)
-{
- int api, i, ret, iommu_type;;
-
- /* Create a container for our IOMMU groups */
- vfio_container = open(VFIO_DEV_NODE, O_RDWR);
- if (vfio_container == -1) {
- ret = errno;
- pr_err("Failed to open %s", VFIO_DEV_NODE);
- return ret;
- }
-
- api = ioctl(vfio_container, VFIO_GET_API_VERSION);
- if (api != VFIO_API_VERSION) {
- pr_err("Unknown VFIO API version %d", api);
- return -ENODEV;
- }
-
- iommu_type = vfio_get_iommu_type();
- if (iommu_type < 0) {
- pr_err("VFIO type-1 IOMMU not supported on this platform");
- return iommu_type;
- }
-
- /* Create groups for our devices and add them to the container */
- for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
- vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
-
- ret = legacy_vfio_device_init(kvm, &vfio_devices[i]);
- if (ret)
- return ret;
- }
-
- /* Finalise the container */
- if (ioctl(vfio_container, VFIO_SET_IOMMU, iommu_type)) {
- ret = -errno;
- pr_err("Failed to set IOMMU type %d for VFIO container",
- iommu_type);
- return ret;
- } else {
- pr_info("Using IOMMU type %d for VFIO container", iommu_type);
- }
-
- return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, vfio_map_mem_bank,
- NULL);
-}
-
static int vfio__init(struct kvm *kvm)
{
- int ret;
-
if (!kvm->cfg.num_vfio_devices)
return 0;
@@ -679,19 +370,7 @@ static int vfio__init(struct kvm *kvm)
}
kvm_vfio_device = device.fd;
- ret = legacy_vfio_container_init(kvm);
- if (ret)
- return ret;
-
- ret = legacy_vfio_configure_groups(kvm);
- if (ret)
- return ret;
-
- ret = legacy_vfio_configure_devices(kvm);
- if (ret)
- return ret;
-
- return 0;
+ return legacy_vfio__init(kvm);
}
dev_base_init(vfio__init);
@@ -708,10 +387,9 @@ static int vfio__exit(struct kvm *kvm)
free(vfio_devices);
kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, vfio_unmap_mem_bank, NULL);
- close(vfio_container);
free(kvm->cfg.vfio_devices);
- return 0;
+ return legacy_vfio__exit(kvm);
}
dev_base_exit(vfio__exit);
diff --git a/vfio/legacy.c b/vfio/legacy.c
new file mode 100644
index 000000000000..92d6d0bd5c80
--- /dev/null
+++ b/vfio/legacy.c
@@ -0,0 +1,347 @@
+#include "kvm/kvm.h"
+#include "kvm/vfio.h"
+
+#include <linux/list.h>
+
+#define VFIO_DEV_DIR "/dev/vfio"
+#define VFIO_DEV_NODE VFIO_DEV_DIR "/vfio"
+static int vfio_container;
+
+static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
+{
+ int ret;
+ struct vfio_group *group = vdev->group;
+
+ vdev->fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD,
+ vdev->params->name);
+ if (vdev->fd < 0) {
+ vfio_dev_warn(vdev, "failed to get fd");
+
+ /* The device might be a bridge without an fd */
+ return 0;
+ }
+
+ vdev->info.argsz = sizeof(vdev->info);
+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to get info");
+ goto err_close_device;
+ }
+
+ if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
+ ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
+ vfio_dev_warn(vdev, "failed to reset device");
+
+ vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
+ if (!vdev->regions) {
+ ret = -ENOMEM;
+ goto err_close_device;
+ }
+
+ /* Now for the bus-specific initialization... */
+ switch (vdev->params->type) {
+ case VFIO_DEVICE_PCI:
+ BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
+ ret = vfio_pci_setup_device(kvm, vdev);
+ break;
+ default:
+ BUG_ON(1);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto err_free_regions;
+
+ vfio_dev_info(vdev, "assigned to device number 0x%x in group %lu",
+ vdev->dev_hdr.dev_num, group->id);
+
+ return 0;
+
+err_free_regions:
+ free(vdev->regions);
+err_close_device:
+ close(vdev->fd);
+
+ return ret;
+}
+
+static int legacy_vfio_configure_devices(struct kvm *kvm)
+{
+ int i, ret;
+
+ for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
+ ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vfio_get_iommu_type(void)
+{
+ if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
+ return VFIO_TYPE1v2_IOMMU;
+
+ if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
+ return VFIO_TYPE1_IOMMU;
+
+ return -ENODEV;
+}
+
+int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
+{
+ int ret = 0;
+ struct vfio_iommu_type1_dma_map dma_map = {
+ .argsz = sizeof(dma_map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ .vaddr = host_addr,
+ .iova = iova,
+ .size = size,
+ };
+
+ /* Map the guest memory for DMA (i.e. provide isolation) */
+ if (ioctl(vfio_container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
+ ret = -errno;
+ pr_err("Failed to map 0x%llx -> 0x%llx (%llu) for DMA",
+ dma_map.iova, dma_map.vaddr, dma_map.size);
+ }
+
+ return ret;
+}
+
+int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
+{
+ struct vfio_iommu_type1_dma_unmap dma_unmap = {
+ .argsz = sizeof(dma_unmap),
+ .size = size,
+ .iova = iova,
+ };
+
+ ioctl(vfio_container, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+
+ return 0;
+}
+
+static int legacy_vfio_configure_groups(struct kvm *kvm)
+{
+ int ret;
+ struct vfio_group *group;
+
+ list_for_each_entry(group, &vfio_groups, list) {
+ ret = vfio_configure_reserved_regions(kvm, group);
+ if (ret)
+ return ret;
+
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_ADD,
+ .addr = (__u64)&group->fd,
+ };
+
+ if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
+ pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
+ return -ENODEV;
+ }
+
+ }
+ return 0;
+}
+
+static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
+{
+ int ret;
+ struct vfio_group *group;
+ char group_node[PATH_MAX];
+ struct vfio_group_status group_status = {
+ .argsz = sizeof(group_status),
+ };
+
+ group = calloc(1, sizeof(*group));
+ if (!group)
+ return NULL;
+
+ group->id = id;
+ group->refs = 1;
+
+ ret = snprintf(group_node, PATH_MAX, VFIO_DEV_DIR "/%lu", id);
+ if (ret < 0 || ret == PATH_MAX)
+ return NULL;
+
+ group->fd = open(group_node, O_RDWR);
+ if (group->fd < 0) {
+ pr_err("Failed to open IOMMU group %s", group_node);
+ goto err_free_group;
+ }
+
+ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &group_status)) {
+ pr_err("Failed to determine status of IOMMU group %lu", id);
+ goto err_close_group;
+ }
+
+ if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ pr_err("IOMMU group %lu is not viable", id);
+ goto err_close_group;
+ }
+
+ if (ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &vfio_container)) {
+ pr_err("Failed to add IOMMU group %lu to VFIO container", id);
+ goto err_close_group;
+ }
+
+ list_add(&group->list, &vfio_groups);
+
+ return group;
+
+err_close_group:
+ close(group->fd);
+err_free_group:
+ free(group);
+
+ return NULL;
+}
+
+static struct vfio_group *
+vfio_group_get_for_dev(struct kvm *kvm, struct vfio_device *vdev)
+{
+ int dirfd;
+ ssize_t ret;
+ char *group_name;
+ unsigned long group_id;
+ char group_path[PATH_MAX];
+ struct vfio_group *group = NULL;
+
+ /* Find IOMMU group for this device */
+ dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
+ if (dirfd < 0) {
+ vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
+ return NULL;
+ }
+
+ ret = readlinkat(dirfd, "iommu_group", group_path, PATH_MAX);
+ if (ret < 0) {
+ vfio_dev_err(vdev, "no iommu_group");
+ goto out_close;
+ }
+ if (ret == PATH_MAX)
+ goto out_close;
+
+ group_path[ret] = '\0';
+
+ group_name = basename(group_path);
+ errno = 0;
+ group_id = strtoul(group_name, NULL, 10);
+ if (errno)
+ goto out_close;
+
+ list_for_each_entry(group, &vfio_groups, list) {
+ if (group->id == group_id) {
+ group->refs++;
+ return group;
+ }
+ }
+
+ group = legacy_vfio_group_create(kvm, group_id);
+
+out_close:
+ close(dirfd);
+ return group;
+}
+
+static int legacy_vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
+{
+ int ret;
+ char dev_path[PATH_MAX];
+ struct vfio_group *group;
+
+ ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
+ vdev->params->bus, vdev->params->name);
+ if (ret < 0 || ret == PATH_MAX)
+ return -EINVAL;
+
+ vdev->sysfs_path = strndup(dev_path, PATH_MAX);
+ if (!vdev->sysfs_path)
+ return -errno;
+
+ group = vfio_group_get_for_dev(kvm, vdev);
+ if (!group) {
+ free(vdev->sysfs_path);
+ return -EINVAL;
+ }
+
+ vdev->group = group;
+
+ return 0;
+}
+
+static int legacy_vfio_container_init(struct kvm *kvm)
+{
+ int api, i, ret, iommu_type;
+
+ /* Create a container for our IOMMU groups */
+ vfio_container = open(VFIO_DEV_NODE, O_RDWR);
+ if (vfio_container == -1) {
+ ret = errno;
+ pr_err("Failed to open %s", VFIO_DEV_NODE);
+ return ret;
+ }
+
+ api = ioctl(vfio_container, VFIO_GET_API_VERSION);
+ if (api != VFIO_API_VERSION) {
+ pr_err("Unknown VFIO API version %d", api);
+ return -ENODEV;
+ }
+
+ iommu_type = vfio_get_iommu_type();
+ if (iommu_type < 0) {
+ pr_err("VFIO type-1 IOMMU not supported on this platform");
+ return iommu_type;
+ }
+
+ /* Create groups for our devices and add them to the container */
+ for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
+ vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
+
+ ret = legacy_vfio_device_init(kvm, &vfio_devices[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Finalise the container */
+ if (ioctl(vfio_container, VFIO_SET_IOMMU, iommu_type)) {
+ ret = -errno;
+ pr_err("Failed to set IOMMU type %d for VFIO container",
+ iommu_type);
+ return ret;
+ } else {
+ pr_info("Using IOMMU type %d for VFIO container", iommu_type);
+ }
+
+ return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, vfio_map_mem_bank,
+ NULL);
+}
+
+int legacy_vfio__init(struct kvm *kvm)
+{
+ int ret;
+
+ ret = legacy_vfio_container_init(kvm);
+ if (ret)
+ return ret;
+
+ ret = legacy_vfio_configure_groups(kvm);
+ if (ret)
+ return ret;
+
+ ret = legacy_vfio_configure_devices(kvm);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int legacy_vfio__exit(struct kvm *kvm)
+{
+ close(vfio_container);
+ return 0;
+}
--
2.43.0
* [RFC PATCH kvmtool 04/10] vfio: Update vfio header from linux kernel
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
Sync with include/uapi/linux/vfio.h from v6.14.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
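One detail worth calling out: the types.h hunk adds __aligned_u64
because a bare __u64 aligns to only 4 bytes on x86_32, so an ioctl
struct can lay out differently for a 32-bit userspace and a 64-bit
kernel. A small illustration (not part of the patch):

/* On x86_32 'size' lands at offset 4 and sizeof() is 12, while a
 * 64-bit kernel sees offset 8 and sizeof() 16 -- an ABI mismatch. */
struct bad_abi {
	__u32 flags;
	__u64 size;
};

/* __aligned_u64 forces 8-byte alignment on both, so 'size' sits at
 * offset 8 and the struct is 16 bytes everywhere. */
struct good_abi {
	__u32 flags;
	__aligned_u64 size;
};

This is why vfio_region_info and related structures below switch their
__u64 fields to __aligned_u64.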
include/linux/types.h | 13 +
include/linux/vfio.h | 1131 ++++++++++++++++++++++++++++++++++++++++-
2 files changed, 1132 insertions(+), 12 deletions(-)
diff --git a/include/linux/types.h b/include/linux/types.h
index 5e20f10f8830..652c33bf5c87 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -36,6 +36,19 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architectures. The new
+ * aligned_64 type enforces 8-byte alignment so that structs containing
+ * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
+#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_be64 __be64 __attribute__((aligned(8)))
+#define __aligned_le64 __le64 __attribute__((aligned(8)))
+
struct list_head {
struct list_head *next, *prev;
};
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 4e7ab4c52a4a..c8dbf8219c4f 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* VFIO API definition
*
@@ -8,8 +9,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef VFIO_H
-#define VFIO_H
+#ifndef _UAPIVFIO_H
+#define _UAPIVFIO_H
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -34,7 +35,7 @@
#define VFIO_EEH 5
/* Two-stage IOMMU */
-#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
#define VFIO_SPAPR_TCE_v2_IOMMU 7
@@ -45,6 +46,16 @@
*/
#define VFIO_NOIOMMU_IOMMU 8
+/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
+#define VFIO_UNMAP_ALL 9
+
+/*
+ * Supports the vaddr flag for DMA map and unmap. Not supported for mediated
+ * devices, so this capability is subject to change as groups are added or
+ * removed.
+ */
+#define VFIO_UPDATE_VADDR 10
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -199,8 +210,14 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
+#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
+#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */
+#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */
+#define VFIO_DEVICE_FLAGS_CDX (1 << 8) /* vfio-cdx device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
+ __u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
@@ -214,6 +231,30 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
+#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
+
+/*
+ * The following capabilities are unique to s390 zPCI devices. Their contents
+ * are further-defined in vfio_zdev.h
+ */
+#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE 1
+#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP 2
+#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3
+#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4
+
+/*
+ * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
+ * completion to the root bus with supported widths provided via flags.
+ */
+#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP 5
+struct vfio_device_info_cap_pci_atomic_comp {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+#define VFIO_PCI_ATOMIC_COMP32 (1 << 0)
+#define VFIO_PCI_ATOMIC_COMP64 (1 << 1)
+#define VFIO_PCI_ATOMIC_COMP128 (1 << 2)
+ __u32 reserved;
+};
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
@@ -236,8 +277,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 cap_offset; /* Offset within info struct of first cap */
- __u64 size; /* Region size (bytes) */
- __u64 offset; /* Region offset from start of device fd */
+ __aligned_u64 size; /* Region size (bytes) */
+ __aligned_u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
@@ -253,8 +294,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
- __u64 offset; /* Offset of mmap'able area within region */
- __u64 size; /* Size of mmap'able area */
+ __aligned_u64 offset; /* Offset of mmap'able area within region */
+ __aligned_u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
@@ -292,14 +333,169 @@ struct vfio_region_info_cap_type {
__u32 subtype; /* type specific */
};
+/*
+ * List of region types, global per bus driver.
+ * If you introduce a new type, please add it here.
+ */
+
+/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
+#define VFIO_REGION_TYPE_GFX (1)
+#define VFIO_REGION_TYPE_CCW (2)
+#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3)
+
+/* sub-types for VFIO_REGION_TYPE_PCI_* */
-/* 8086 Vendor sub-types */
+/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
+/* 10de vendor PCI sub-types */
+/*
+ * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ *
+ * Deprecated, region no longer provided
+ */
+#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
+
+/* 1014 vendor PCI sub-types */
+/*
+ * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
+ * to do TLB invalidation on a GPU.
+ *
+ * Deprecated, region no longer provided
+ */
+#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
+
+/* sub-types for VFIO_REGION_TYPE_GFX */
+#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
+
+/**
+ * struct vfio_region_gfx_edid - EDID region layout.
+ *
+ * Set display link state and EDID blob.
+ *
+ * The EDID blob has monitor information such as brand, name, serial
+ * number, physical size, supported video modes and more.
+ *
+ * This special region allows userspace (typically qemu) set a virtual
+ * EDID for the virtual monitor, which allows a flexible display
+ * configuration.
+ *
+ * For the edid blob spec look here:
+ * https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
+ *
+ * On linux systems you can find the EDID blob in sysfs:
+ * /sys/class/drm/${card}/${connector}/edid
+ *
+ * You can use the edid-decode utility (comes with xorg-x11-utils) to
+ * decode the EDID blob.
+ *
+ * @edid_offset: location of the edid blob, relative to the
+ * start of the region (readonly).
+ * @edid_max_size: max size of the edid blob (readonly).
+ * @edid_size: actual edid size (read/write).
+ * @link_state: display link state (read/write).
+ * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
+ * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
+ * @max_xres: max display width (0 == no limitation, readonly).
+ * @max_yres: max display height (0 == no limitation, readonly).
+ *
+ * EDID update protocol:
+ * (1) set link-state to down.
+ * (2) update edid blob and size.
+ * (3) set link-state to up.
+ */
+struct vfio_region_gfx_edid {
+ __u32 edid_offset;
+ __u32 edid_max_size;
+ __u32 edid_size;
+ __u32 max_xres;
+ __u32 max_yres;
+ __u32 link_state;
+#define VFIO_DEVICE_GFX_LINK_STATE_UP 1
+#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
+};
+
+/* sub-types for VFIO_REGION_TYPE_CCW */
+#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
+#define VFIO_REGION_SUBTYPE_CCW_SCHIB (2)
+#define VFIO_REGION_SUBTYPE_CCW_CRW (3)
+
+/* sub-types for VFIO_REGION_TYPE_MIGRATION */
+#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
+
+struct vfio_device_migration_info {
+ __u32 device_state; /* VFIO device state */
+#define VFIO_DEVICE_STATE_V1_STOP (0)
+#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0)
+#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1)
+#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2)
+#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \
+ VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING)
+
+#define VFIO_DEVICE_STATE_VALID(state) \
+ (state & VFIO_DEVICE_STATE_V1_RESUMING ? \
+ (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
+
+#define VFIO_DEVICE_STATE_IS_ERROR(state) \
+ ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING))
+
+#define VFIO_DEVICE_STATE_SET_ERROR(state) \
+ ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
+ VFIO_DEVICE_STATE_V1_RESUMING)
+
+ __u32 reserved;
+ __aligned_u64 pending_bytes;
+ __aligned_u64 data_offset;
+ __aligned_u64 data_size;
+};
+
+/*
+ * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
+ * which allows direct access to non-MSIX registers which happened to be within
+ * the same system page.
+ *
+ * Even though the userspace gets direct access to the MSIX data, the existing
+ * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
+ */
+#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
+
+/*
+ * Capability with compressed real address (aka SSA - small system address)
+ * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
+ * and by the userspace to associate a NVLink bridge with a GPU.
+ *
+ * Deprecated, capability no longer provided
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
+
+struct vfio_region_info_cap_nvlink2_ssatgt {
+ struct vfio_info_cap_header header;
+ __aligned_u64 tgt;
+};
+
+/*
+ * Capability with an NVLink link speed. The value is read by
+ * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
+ * property in the device tree. The value is fixed in the hardware
+ * and failing to provide the correct value results in the link
+ * not working with no indication from the driver why.
+ *
+ * Deprecated, capability no longer provided
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
+
+struct vfio_region_info_cap_nvlink2_lnkspd {
+ struct vfio_info_cap_header header;
+ __u32 link_speed;
+ __u32 __pad;
+};
+
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
@@ -331,6 +527,9 @@ struct vfio_region_info_cap_type {
* then add and unmask vectors, it's up to userspace to make the decision
* whether to allocate the maximum supported number of vectors or tear
* down setup and incrementally increase the vectors as each is enabled.
+ * Absence of the NORESIZE flag indicates that vectors can be enabled
+ * and disabled dynamically without impacting other vectors within the
+ * index.
*/
struct vfio_irq_info {
__u32 argsz;
@@ -461,18 +660,78 @@ enum {
enum {
VFIO_CCW_IO_IRQ_INDEX,
+ VFIO_CCW_CRW_IRQ_INDEX,
+ VFIO_CCW_REQ_IRQ_INDEX,
VFIO_CCW_NUM_IRQS
};
+/*
+ * The vfio-ap bus driver makes use of the following IRQ index mapping.
+ * Unimplemented IRQ types return a count of zero.
+ */
+enum {
+ VFIO_AP_REQ_IRQ_INDEX,
+ VFIO_AP_NUM_IRQS
+};
+
/**
- * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
+ * This command is used to query the affected devices in the hot reset for
+ * a given device.
+ *
+ * This command always reports the segment, bus, and devfn information for
+ * each affected device, and selectively reports the group_id or devid per
+ * the way how the calling device is opened.
+ *
+ * - If the calling device is opened via the traditional group/container
+ * API, group_id is reported. User should check if it has owned all
+ * the affected devices and provides a set of group fds to prove the
+ * ownership in VFIO_DEVICE_PCI_HOT_RESET ioctl.
+ *
+ * - If the calling device is opened as a cdev, devid is reported.
+ * Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
+ * data type. All the affected devices should be represented in
+ * the dev_set, ex. bound to a vfio driver, and also be owned by
+ * this interface which is determined by the following conditions:
+ * 1) Has a valid devid within the iommufd_ctx of the calling device.
+ * Ownership cannot be determined across separate iommufd_ctx and
+ * the cdev calling conventions do not support a proof-of-ownership
+ * model as provided in the legacy group interface. In this case
+ * valid devid with value greater than zero is provided in the return
+ * structure.
+ * 2) Does not have a valid devid within the iommufd_ctx of the calling
+ * device, but belongs to the same IOMMU group as the calling device
+ * or another opened device that has a valid devid within the
+ * iommufd_ctx of the calling device. This provides implicit ownership
+ * for devices within the same DMA isolation context. In this case
+ * the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
+ * structure.
+ *
+ * A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
+ * structure for affected devices where device is NOT represented in the
+ * dev_set or ownership is not available. Such devices prevent the use
+ * of VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the proof-of-ownership
+ * calling conventions (ie. via legacy group accessed devices). Flag
+ * VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED would be set when all the
+ * affected devices are represented in the dev_set and also owned by
+ * the user. This flag is available only when
+ * flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set, otherwise reserved.
+ * When set, user could invoke VFIO_DEVICE_PCI_HOT_RESET with a zero
+ * length fd array on the calling device as the ownership is validated
+ * by iommufd_ctx.
+ *
* Return: 0 on success, -errno on failure:
* -enospc = insufficient buffer, -enodev = unsupported for device.
*/
struct vfio_pci_dependent_device {
- __u32 group_id;
+ union {
+ __u32 group_id;
+ __u32 devid;
+#define VFIO_PCI_DEVID_OWNED 0
+#define VFIO_PCI_DEVID_NOT_OWNED -1
+ };
__u16 segment;
__u8 bus;
__u8 devfn; /* Use PCI_SLOT/PCI_FUNC */
@@ -481,6 +740,8 @@ struct vfio_pci_dependent_device {
struct vfio_pci_hot_reset_info {
__u32 argsz;
__u32 flags;
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID (1 << 0)
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED (1 << 1)
__u32 count;
struct vfio_pci_dependent_device devices[];
};
@@ -491,6 +752,24 @@ struct vfio_pci_hot_reset_info {
* VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
* struct vfio_pci_hot_reset)
*
+ * A PCI hot reset results in either a bus or slot reset which may affect
+ * other devices sharing the bus/slot. The calling user must have
+ * ownership of the full set of affected devices as determined by the
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
+ *
+ * When called on a device file descriptor acquired through the vfio
+ * group interface, the user is required to provide proof of ownership
+ * of those affected devices via the group_fds array in struct
+ * vfio_pci_hot_reset.
+ *
+ * When called on a direct cdev opened vfio device, the flags field of
+ * struct vfio_pci_hot_reset_info reports the ownership status of the
+ * affected devices and this ioctl must be called with an empty group_fds
+ * array. See above INFO ioctl definition for ownership requirements.
+ *
+ * Mixed usage of legacy groups and cdevs across the set of affected
+ * devices is not supported.
+ *
* Return: 0 on success, -errno on failure.
*/
struct vfio_pci_hot_reset {
@@ -502,6 +781,683 @@ struct vfio_pci_hot_reset {
#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
+/**
+ * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
+ * struct vfio_device_query_gfx_plane)
+ *
+ * Set the drm_plane_type and flags, then retrieve the gfx plane info.
+ *
+ * flags supported:
+ * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
+ * to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
+ * support for dma-buf.
+ * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
+ * to ask if the mdev supports region. 0 on support, -EINVAL on no
+ * support for region.
+ * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
+ * with each call to query the plane info.
+ * - Others are invalid and return -EINVAL.
+ *
+ * Note:
+ * 1. Plane could be disabled by guest. In that case, success will be
+ * returned with zero-initialized drm_format, size, width and height
+ * fields.
+ * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information available
+ *
+ * Return: 0 on success, -errno on other failure.
+ */
+struct vfio_device_gfx_plane_info {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
+#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
+#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
+ /* in */
+ __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
+ /* out */
+ __u32 drm_format; /* drm format of plane */
+ __aligned_u64 drm_format_mod; /* tiled mode */
+ __u32 width; /* width of plane */
+ __u32 height; /* height of plane */
+ __u32 stride; /* stride of plane */
+ __u32 size; /* size of plane in bytes, align on page*/
+ __u32 x_pos; /* horizontal position of cursor plane */
+ __u32 y_pos; /* vertical position of cursor plane*/
+ __u32 x_hot; /* horizontal position of cursor hotspot */
+ __u32 y_hot; /* vertical position of cursor hotspot */
+ union {
+ __u32 region_index; /* region index */
+ __u32 dmabuf_id; /* dma-buf id */
+ };
+ __u32 reserved;
+};
+
+#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
+
+/**
+ * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
+ *
+ * Return a new dma-buf file descriptor for an exposed guest framebuffer
+ * described by the provided dmabuf_id. The dmabuf_id is returned from VFIO_
+ * DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
+ */
+
+#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
+
+/**
+ * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
+ * struct vfio_device_ioeventfd)
+ *
+ * Perform a write to the device at the specified device fd offset, with
+ * the specified data and width when the provided eventfd is triggered.
+ * vfio bus drivers may not support this for all regions, for all widths,
+ * or at all. vfio-pci currently only enables support for BAR regions,
+ * excluding the MSI-X vector table.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_ioeventfd {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_IOEVENTFD_8 (1 << 0) /* 1-byte write */
+#define VFIO_DEVICE_IOEVENTFD_16 (1 << 1) /* 2-byte write */
+#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
+#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
+#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
+ __aligned_u64 offset; /* device fd offset of write */
+ __aligned_u64 data; /* data to be written */
+ __s32 fd; /* -1 for de-assignment */
+ __u32 reserved;
+};
+
+#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
+
+/**
+ * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
+ * struct vfio_device_feature)
+ *
+ * Get, set, or probe feature data of the device. The feature is selected
+ * using the FEATURE_MASK portion of the flags field. Support for a feature
+ * can be probed by setting both the FEATURE_MASK and PROBE bits. A probe
+ * may optionally include the GET and/or SET bits to determine read vs write
+ * access of the feature respectively. Probing a feature will return success
+ * if the feature is supported and all of the optionally indicated GET/SET
+ * methods are supported. The format of the data portion of the structure is
+ * specific to the given feature. The data portion is not required for
+ * probing. GET and SET are mutually exclusive, except for use with PROBE.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+struct vfio_device_feature {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_FEATURE_MASK (0xffff) /* 16-bit feature index */
+#define VFIO_DEVICE_FEATURE_GET (1 << 16) /* Get feature into data[] */
+#define VFIO_DEVICE_FEATURE_SET (1 << 17) /* Set feature from data[] */
+#define VFIO_DEVICE_FEATURE_PROBE (1 << 18) /* Probe feature support */
+ __u8 data[];
+};
+
+#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
+
+/*
+ * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
+ * struct vfio_device_bind_iommufd)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @iommufd: iommufd to bind.
+ * @out_devid: The device id generated by this bind. devid is a handle for
+ * this device/iommufd bond and can be used in IOMMUFD commands.
+ *
+ * Bind a vfio_device to the specified iommufd.
+ *
+ * User is restricted from accessing the device before the binding operation
+ * is completed. Only allowed on cdev fds.
+ *
+ * Unbind is automatically conducted when device fd is closed.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_bind_iommufd {
+ __u32 argsz;
+ __u32 flags;
+ __s32 iommufd;
+ __u32 out_devid;
+};
+
+#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/*
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
+ * struct vfio_device_attach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @pt_id: Input the target id which can represent an ioas or a hwpt
+ * allocated via iommufd subsystem.
+ * Output the input ioas id or the attached hwpt id which could
+ * be the specified hwpt itself or a hwpt automatically created
+ * for the specified ioas by kernel during the attachment.
+ *
+ * Associate the device with an address space within the bound iommufd.
+ * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
+ * allowed on cdev fds.
+ *
+ * If a vfio device is currently attached to a valid hw_pagetable, without doing
+ * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
+ * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
+ * as a hw_pagetable replacement, will replace the device's currently attached
+ * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_attach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+ __u32 pt_id;
+};
+
+#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
+
+/*
+ * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
+ * struct vfio_device_detach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ *
+ * Remove the association of the device and its current associated address
+ * space. After it, the device should be in a blocking DMA state. This is only
+ * allowed on cdev fds.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_detach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+};
+
+#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
+
+/*
+ * Provide support for setting a PCI VF Token, which is used as a shared
+ * secret between PF and VF drivers. This feature may only be set on a
+ * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
+ * open VFs. Data provided when setting this feature is a 16-byte array
+ * (__u8 b[16]), representing a UUID.
+ */
+#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0)
+
+/*
+ * Indicates the device can support the migration API through
+ * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
+ * ERROR states are always supported. Support for additional states is
+ * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
+ * set.
+ *
+ * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
+ * RESUMING are supported.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
+ * is supported in addition to the STOP_COPY states.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
+ * PRE_COPY is supported in addition to the STOP_COPY states.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
+ * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
+ * in addition to the STOP_COPY states.
+ *
+ * Other combinations of flags have behavior to be defined in the future.
+ */
+struct vfio_device_feature_migration {
+ __aligned_u64 flags;
+#define VFIO_MIGRATION_STOP_COPY (1 << 0)
+#define VFIO_MIGRATION_P2P (1 << 1)
+#define VFIO_MIGRATION_PRE_COPY (1 << 2)
+};
+#define VFIO_DEVICE_FEATURE_MIGRATION 1
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
+ * device. The new state is supplied in device_state, see enum
+ * vfio_device_mig_state for details
+ *
+ * The kernel migration driver must fully transition the device to the new state
+ * value before the operation returns to the user.
+ *
+ * The kernel migration driver must not generate asynchronous device state
+ * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
+ * ioctl as described above.
+ *
+ * If this function fails then current device_state may be the original
+ * operating state or some other state along the combination transition path.
+ * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
+ * to return to the original state, or attempt to return to some other state
+ * such as RUNNING or STOP.
+ *
+ * If the new_state starts a new data transfer session then the FD associated
+ * with that session is returned in data_fd. The user is responsible to close
+ * this FD when it is finished. The user must consider the migration data stream
+ * carried over the FD to be opaque and must preserve the byte order of the
+ * stream. The user is not required to preserve buffer segmentation when writing
+ * the data stream during the RESUMING operation.
+ *
+ * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
+ * device, data_fd will be -1.
+ */
+struct vfio_device_feature_mig_state {
+ __u32 device_state; /* From enum vfio_device_mig_state */
+ __s32 data_fd;
+};
+#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
+
+/*
+ * The device migration Finite State Machine is described by the enum
+ * vfio_device_mig_state. Some of the FSM arcs will create a migration data
+ * transfer session by returning a FD, in this case the migration data will
+ * flow over the FD using read() and write() as discussed below.
+ *
+ * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
+ * RUNNING - The device is running normally
+ * STOP - The device does not change the internal or external state
+ * STOP_COPY - The device internal state can be read out
+ * RESUMING - The device is stopped and is loading a new internal state
+ * ERROR - The device has failed and must be reset
+ *
+ * And optional states to support VFIO_MIGRATION_P2P:
+ * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
+ * And VFIO_MIGRATION_PRE_COPY:
+ * PRE_COPY - The device is running normally but tracking internal state
+ * changes
+ * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
+ * PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
+ *
+ * The FSM takes actions on the arcs between FSM states. The driver implements
+ * the following behavior for the FSM arcs:
+ *
+ * RUNNING_P2P -> STOP
+ * STOP_COPY -> STOP
+ * While in STOP the device must stop the operation of the device. The device
+ * must not generate interrupts, DMA, or any other change to external state.
+ * It must not change its internal state. When stopped the device and kernel
+ * migration driver must accept and respond to interaction to support external
+ * subsystems in the STOP state, for example PCI MSI-X and PCI config space.
+ * Failure by the user to restrict device access while in STOP must not result
+ * in error conditions outside the user context (ex. host system faults).
+ *
+ * The STOP_COPY arc will terminate a data transfer session.
+ *
+ * RESUMING -> STOP
+ * Leaving RESUMING terminates a data transfer session and indicates the
+ * device should complete processing of the data delivered by write(). The
+ * kernel migration driver should complete the incorporation of data written
+ * to the data transfer FD into the device internal state and perform
+ * final validity and consistency checking of the new device state. If the
+ * user provided data is found to be incomplete, inconsistent, or otherwise
+ * invalid, the migration driver must fail the SET_STATE ioctl and
+ * optionally go to the ERROR state as described below.
+ *
+ * While in STOP the device has the same behavior as other STOP states
+ * described above.
+ *
+ * To abort a RESUMING session the device must be reset.
+ *
+ * PRE_COPY -> RUNNING
+ * RUNNING_P2P -> RUNNING
+ * While in RUNNING the device is fully operational, the device may generate
+ * interrupts, DMA, respond to MMIO, all vfio device regions are functional,
+ * and the device may advance its internal state.
+ *
+ * The PRE_COPY arc will terminate a data transfer session.
+ *
+ * PRE_COPY_P2P -> RUNNING_P2P
+ * RUNNING -> RUNNING_P2P
+ * STOP -> RUNNING_P2P
+ * While in RUNNING_P2P the device is partially running in the P2P quiescent
+ * state defined below.
+ *
+ * The PRE_COPY_P2P arc will terminate a data transfer session.
+ *
+ * RUNNING -> PRE_COPY
+ * RUNNING_P2P -> PRE_COPY_P2P
+ * STOP -> STOP_COPY
+ * PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
+ * which share a data transfer session. Moving between these states alters
+ * what is streamed in session, but does not terminate or otherwise affect
+ * the associated fd.
+ *
+ * These arcs begin the process of saving the device state and will return a
+ * new data_fd. The migration driver may perform actions such as enabling
+ * dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
+ *
+ * Each arc does not change the device operation, the device remains
+ * RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
+ * in PRE_COPY_P2P -> STOP_COPY.
+ *
+ * PRE_COPY -> PRE_COPY_P2P
+ * Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
+ * However, while in the PRE_COPY_P2P state, the device is partially running
+ * in the P2P quiescent state defined below, like RUNNING_P2P.
+ *
+ * PRE_COPY_P2P -> PRE_COPY
+ * This arc allows returning the device to a full RUNNING behavior while
+ * continuing all the behaviors of PRE_COPY.
+ *
+ * PRE_COPY_P2P -> STOP_COPY
+ * While in the STOP_COPY state the device has the same behavior as STOP
+ * with the addition that the data transfer session continues to stream the
+ * migration state. End of stream on the FD indicates the entire device
+ * state has been transferred.
+ *
+ * The user should take steps to restrict access to vfio device regions while
+ * the device is in STOP_COPY or risk corruption of the device migration data
+ * stream.
+ *
+ * STOP -> RESUMING
+ * Entering the RESUMING state starts a process of restoring the device state
+ * and will return a new data_fd. The data stream fed into the data_fd should
+ * be taken from the data transfer output of a single FD during saving from
+ * a compatible device. The migration driver may alter/reset the internal
+ * device state for this arc if required to prepare the device to receive the
+ * migration data.
+ *
+ * STOP_COPY -> PRE_COPY
+ * STOP_COPY -> PRE_COPY_P2P
+ * These arcs are not permitted and return an error if requested. Future
+ * revisions of this API may define behaviors for these arcs, in this case
+ * support will be discoverable by a new flag in
+ * VFIO_DEVICE_FEATURE_MIGRATION.
+ *
+ * any -> ERROR
+ * ERROR cannot be specified as a device state, however any transition request
+ * can be failed with an errno return and may then move the device_state into
+ * ERROR. In this case the device was unable to execute the requested arc and
+ * was also unable to restore the device to any valid device_state.
+ * To recover from ERROR VFIO_DEVICE_RESET must be used to return the
+ * device_state back to RUNNING.
+ *
+ * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
+ * state for the device for the purposes of managing multiple devices within a
+ * user context where peer-to-peer DMA between devices may be active. The
+ * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
+ * any new P2P DMA transactions. If the device can identify P2P transactions
+ * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
+ * driver must complete any such outstanding operations prior to completing the
+ * FSM arc into a P2P state. For the purposes of this specification, if the
+ * P2P states are not supported they behave as though the device is fully
+ * running. As in STOP or STOP_COPY, the user must not touch the device while
+ * in a P2P state, otherwise the quiescent state can be exited.
+ *
+ * The remaining possible transitions are interpreted as combinations of the
+ * above FSM arcs. As there are multiple paths through the FSM arcs the path
+ * should be selected based on the following rules:
+ * - Select the shortest path.
+ * - The path cannot have saving group states as interior arcs, only
+ * starting/end states.
+ * Refer to vfio_mig_get_next_state() for the result of the algorithm.
+ *
+ * The automatic transit through the FSM arcs that make up the combination
+ * transition is invisible to the user. When working with combination arcs the
+ * user may see any step along the path in the device_state if SET_STATE
+ * fails. When handling these types of errors users should anticipate future
+ * revisions of this protocol using new states and those states becoming
+ * visible in this case.
+ *
+ * The optional states cannot be used with SET_STATE if the device does not
+ * support them. The user can discover if these states are supported by using
+ * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
+ * avoid knowing about these optional states if the kernel driver supports them.
+ *
+ * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
+ * is not present.
+ */
+enum vfio_device_mig_state {
+ VFIO_DEVICE_STATE_ERROR = 0,
+ VFIO_DEVICE_STATE_STOP = 1,
+ VFIO_DEVICE_STATE_RUNNING = 2,
+ VFIO_DEVICE_STATE_STOP_COPY = 3,
+ VFIO_DEVICE_STATE_RESUMING = 4,
+ VFIO_DEVICE_STATE_RUNNING_P2P = 5,
+ VFIO_DEVICE_STATE_PRE_COPY = 6,
+ VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
+ VFIO_DEVICE_STATE_NR,
+};
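
As a usage sketch (not part of this patch): userspace drives these arcs through
the VFIO_DEVICE_FEATURE ioctl, defined earlier in this header, with a struct
vfio_device_feature_mig_state payload. A minimal example, assuming device_fd is
an open VFIO device file descriptor:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Move the device to STOP_COPY; returns the saved-state data_fd or -1. */
static int mig_enter_stop_copy(int device_fd)
{
	/* The feature header is immediately followed by its payload. */
	__u8 buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_mig_state)] = {};
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_mig_state *mig = (void *)feat->data;

	feat->argsz = sizeof(buf);
	feat->flags = VFIO_DEVICE_FEATURE_SET |
		      VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;

	/*
	 * The kernel walks the combination arc (e.g. RUNNING -> STOP ->
	 * STOP_COPY) internally. On success the final arc opened a data
	 * transfer session and mig->data_fd streams the device state.
	 */
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat))
		return -1;
	return mig->data_fd;
}

The destination side is the mirror image: enter RESUMING, write() the stream
into the returned data_fd, then move to STOP to commit it.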
+
+/**
+ * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
+ *
+ * This ioctl is used on the migration data FD in the precopy phase of the
+ * migration data transfer. It returns an estimate of the current data sizes
+ * remaining to be transferred. It allows the user to judge when it is
+ * appropriate to leave PRE_COPY for STOP_COPY.
+ *
+ * This ioctl is valid only in the PRE_COPY states, and the kernel driver should
+ * return -EINVAL from any other migration state.
+ *
+ * The vfio_precopy_info data structure returned by this ioctl provides
+ * estimates of data available from the device during the PRE_COPY states.
+ * This estimate is split into two categories, initial_bytes and
+ * dirty_bytes.
+ *
+ * The initial_bytes field indicates the amount of initial precopy
+ * data available from the device. This field should have a non-zero initial
+ * value and decrease as migration data is read from the device.
+ * It is recommended to leave PRE_COPY for STOP_COPY only after this field
+ * reaches zero. Leaving PRE_COPY earlier may leave more state to be
+ * transferred in STOP_COPY, lengthening the stopped phase of the migration.
+ *
+ * The dirty_bytes field tracks device state changes relative to data
+ * previously retrieved. This field starts at zero and may increase as
+ * the internal device state is modified or decrease as that modified
+ * state is read from the device.
+ *
+ * Userspace may use the combination of these fields to estimate the
+ * potential data size available during the PRE_COPY phases, as well as
+ * trends relative to the rate the device is dirtying its internal
+ * state, but these fields are not required to have any bearing relative
+ * to the data size available during the STOP_COPY phase.
+ *
+ * Drivers have a lot of flexibility in when and what they transfer during the
+ * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
+ *
+ * During pre-copy the migration data FD has a temporary "end of stream" that is
+ * reached when both initial_bytes and dirty_bytes are zero. For instance, this
+ * may indicate that the device is idle and not currently dirtying any internal
+ * state. When read() is done on this temporary end of stream the kernel driver
+ * should return ENOMSG from read(). Userspace can wait for more data (which may
+ * never come) by using poll.
+ *
+ * Once in STOP_COPY the migration data FD has a permanent end of stream
+ * signaled in the usual way by read() always returning 0 and poll always
+ * returning readable. ENOMSG may not be returned in STOP_COPY.
+ *
+ * Support for this ioctl is mandatory if a driver claims to support
+ * VFIO_MIGRATION_PRE_COPY.
+ *
+ * Return: 0 on success, -1 and errno set on failure.
+ */
+struct vfio_precopy_info {
+ __u32 argsz;
+ __u32 flags;
+ __aligned_u64 initial_bytes;
+ __aligned_u64 dirty_bytes;
+};
+
+#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)
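
A sketch of the PRE_COPY drain loop described above, assuming data_fd is the FD
returned when entering PRE_COPY and emit() is a placeholder that forwards bytes
to the destination:

#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int precopy_drain(int data_fd, void (*emit)(const void *, size_t))
{
	struct vfio_precopy_info info = { .argsz = sizeof(info) };
	char buf[65536];
	ssize_t n;

	for (;;) {
		if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
			return -1;
		/* Recommended point to switch the device to STOP_COPY. */
		if (!info.initial_bytes && !info.dirty_bytes)
			return 0;
		n = read(data_fd, buf, sizeof(buf));
		if (n > 0)
			emit(buf, n);
		else if (n < 0 && errno != ENOMSG)
			return -1;
		/* ENOMSG is the temporary end of stream: poll() or re-query. */
	}
}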
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
+ * state with the platform-based power management. Device use of lower power
+ * states depends on factors managed by the runtime power management core,
+ * including system level support and coordinating support among dependent
+ * devices. Enabling device low power entry does not guarantee lower power
+ * usage by the device, nor is a mechanism provided through this feature to
+ * know the current power state of the device. If any device access happens
+ * (either from the host or through the vfio uAPI) when the device is in the
+ * low power state, then the host will move the device out of the low power
+ * state as necessary prior to the access. Once the access is completed, the
+ * device may re-enter the low power state. For single shot low power support
+ * with wake-up notification, see
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below. Access to mmap'd
+ * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
+ * calling LOW_POWER_EXIT.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
+
+/*
+ * This device feature has the same behavior as
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
+ * provides an eventfd for wake-up notification. When the device moves out of
+ * the low power state for the wake-up, the host will not allow the device to
+ * re-enter a low power state without a subsequent user call to one of the low
+ * power entry device feature IOCTLs. Access to mmap'd device regions is
+ * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
+ * low power exit. The low power exit can happen either through LOW_POWER_EXIT
+ * or through any other access (where the wake-up notification has been
+ * generated). The access to mmap'd device regions will not trigger low power
+ * exit.
+ *
+ * The notification through the provided eventfd will be generated only when
+ * the device has entered and is resumed from a low power state after
+ * calling this device feature IOCTL. A device that has not entered low power
+ * state, as managed through the runtime power management core, will not
+ * generate a notification through the provided eventfd on access. Calling the
+ * LOW_POWER_EXIT feature is optional in the case where notification has been
+ * signaled on the provided eventfd that a resume from low power has occurred.
+ */
+struct vfio_device_low_power_entry_with_wakeup {
+ __s32 wakeup_eventfd;
+ __u32 reserved;
+};
+
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
+ * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
+ * This device feature IOCTL may itself generate a wakeup eventfd notification
+ * in the latter case if the device had previously entered a low power state.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
+ * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
+ * DMA logging.
+ *
+ * DMA logging allows a device to internally record what DMAs the device is
+ * initiating and report them back to userspace. It is part of the VFIO
+ * migration infrastructure that allows implementing dirty page tracking
+ * during the pre copy phase of live migration. Only DMA WRITEs are logged,
+ * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
+ *
+ * When DMA logging is started a range of IOVAs to monitor is provided and the
+ * device can optimize its logging to cover only the IOVA range given. Each
+ * DMA that the device initiates inside the range will be logged by the device
+ * for later retrieval.
+ *
+ * page_size is an input that hints what tracking granularity the device
+ * should try to achieve. If the device cannot do the hinted page size then
+ * it's the driver choice which page size to pick based on its support.
+ * On output the device will return the page size it selected.
+ *
+ * ranges is a pointer to an array of
+ * struct vfio_device_feature_dma_logging_range.
+ *
+ * The core kernel code guarantees to support, at minimum, a num_ranges that
+ * fits into a single kernel page. User space can try higher values, but should
+ * fall back to that minimum if the request fails due to driver limitations.
+ *
+ * A single call to start device DMA logging can be issued and a matching stop
+ * should follow at the end. Another start is not allowed in the meantime.
+ */
+struct vfio_device_feature_dma_logging_control {
+ __aligned_u64 page_size;
+ __u32 num_ranges;
+ __u32 __reserved;
+ __aligned_u64 ranges;
+};
+
+struct vfio_device_feature_dma_logging_range {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
+ * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
+ */
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
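
A sketch of starting and stopping device dirty tracking with these features,
assuming device_fd is a VFIO device fd and a single IOVA range covers guest
memory:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int dma_logging_start(int device_fd, __u64 iova, __u64 length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	__u8 buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_dma_logging_control)] = {};
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_dma_logging_control *ctrl =
		(void *)feat->data;

	feat->argsz = sizeof(buf);
	feat->flags = VFIO_DEVICE_FEATURE_SET |
		      VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	ctrl->page_size = 4096;	/* only a hint; read back the chosen size */
	ctrl->num_ranges = 1;
	ctrl->ranges = (uintptr_t)&range;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feat);
}

static int dma_logging_stop(int device_fd)
{
	struct vfio_device_feature feat = {
		.argsz = sizeof(feat),
		.flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP,
	};

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, &feat);
}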
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
+ *
+ * Query the device's DMA log for written pages within the given IOVA range.
+ * During querying the log is cleared for the IOVA range.
+ *
+ * bitmap is a pointer to an array of u64s that will hold the output bitmap
+ * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
+ * is given by:
+ * bitmap[((addr - iova) / page_size) / 64] &
+ * (1ULL << (((addr - iova) / page_size) % 64))
+ *
+ * The input page_size can be any power of two value and does not have to
+ * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
+ * will format its internal logging to match the reporting page size, possibly
+ * by replicating bits if the internal page size is lower than requested.
+ *
+ * The LOGGING_REPORT will only set bits in the bitmap and never clear or
+ * perform any initialization of the user provided bitmap.
+ *
+ * If any error is returned userspace should assume that the dirty log is
+ * corrupted. Error recovery is to consider all memory dirty and try to
+ * restart the dirty tracking, or to abort/restart the whole migration.
+ *
+ * If DMA logging is not enabled, an error will be returned.
+ *
+ */
+struct vfio_device_feature_dma_logging_report {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 bitmap;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
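
A sketch of reading (and implicitly clearing) the device DMA log for one range,
assuming tracking was started as above:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static __u64 *dma_logging_report(int device_fd, __u64 iova, __u64 length,
				 __u64 page_size)
{
	__u64 npages = length / page_size;
	__u64 *bitmap = calloc((npages + 63) / 64, sizeof(__u64));
	__u8 buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_dma_logging_report)] = {};
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_dma_logging_report *report =
		(void *)feat->data;

	if (!bitmap)
		return NULL;
	feat->argsz = sizeof(buf);
	feat->flags = VFIO_DEVICE_FEATURE_GET |
		      VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
	report->iova = iova;
	report->length = length;
	report->page_size = page_size;
	report->bitmap = (uintptr_t)bitmap;

	/* On error, treat the whole range as dirty and restart tracking. */
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat)) {
		free(bitmap);
		return NULL;
	}
	/* Bit i set => page at iova + i * page_size was written. */
	return bitmap;
}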
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
+ * be required to complete stop copy.
+ *
+ * Note: Can be called in any device state.
+ */
+
+struct vfio_device_feature_mig_data_size {
+ __aligned_u64 stop_copy_length;
+};
+
+#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
+
+/**
+ * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
+ * based on the operation specified in op flag.
+ *
+ * This functionality is provided for devices that need bus master control but
+ * whose in-band device interface lacks such support. Consequently, it is not
+ * applicable to PCI devices, where bus master control is managed in-band
+ * through the configuration space. At present, this feature is supported only
+ * for CDX devices.
+ * Configuring the device's BUS MASTER setting as CLEAR blocks all incoming
+ * DMA requests from the device, while configuring it as SET (enable) grants
+ * the device the capability to perform DMA to host memory.
+ */
+struct vfio_device_feature_bus_master {
+ __u32 op;
+#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */
+#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */
+};
+#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@@ -516,7 +1472,70 @@ struct vfio_iommu_type1_info {
__u32 argsz;
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
- __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
+ __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */
+ __u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
+};
+
+/*
+ * The IOVA capability allows reporting the valid IOVA range(s),
+ * excluding any non-relaxable reserved regions exposed by
+ * devices attached to the container. Any DMA map attempt
+ * outside the valid IOVA ranges will fail with an error.
+ *
+ * The structures below define version 1 of this capability.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE 1
+
+struct vfio_iova_range {
+ __u64 start;
+ __u64 end;
+};
+
+struct vfio_iommu_type1_info_cap_iova_range {
+ struct vfio_info_cap_header header;
+ __u32 nr_iovas;
+ __u32 reserved;
+ struct vfio_iova_range iova_ranges[];
+};
+
+/*
+ * The migration capability allows reporting the supported migration features.
+ *
+ * The structures below define version 1 of this capability.
+ *
+ * The existence of this capability indicates that IOMMU kernel driver supports
+ * dirty page logging.
+ *
+ * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
+ * page logging.
+ * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
+ * size in bytes that can be used by user applications when getting the dirty
+ * bitmap.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 2
+
+struct vfio_iommu_type1_info_cap_migration {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+ __u64 pgsize_bitmap;
+ __u64 max_dirty_bitmap_size; /* in bytes */
+};
+
+/*
+ * The DMA available capability allows reporting the current number of
+ * simultaneously outstanding DMA mappings that are allowed.
+ *
+ * The structure below defines version 1 of this capability.
+ *
+ * avail: specifies the current number of outstanding DMA mappings allowed.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
+
+struct vfio_iommu_type1_info_dma_avail {
+ struct vfio_info_cap_header header;
+ __u32 avail;
};
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
@@ -526,12 +1545,21 @@ struct vfio_iommu_type1_info {
*
* Map process virtual addresses to IO virtual addresses using the
* provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+ *
+ * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
+ * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To
+ * maintain memory consistency within the user application, the updated vaddr
+ * must address the same memory object as originally mapped. Failure to do so
+ * will result in user memory corruption and/or device misbehavior. iova and
+ * size must match those in the original MAP_DMA call. Protection is not
+ * changed, and the READ & WRITE flags must be 0.
*/
struct vfio_iommu_type1_dma_map {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
+#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
__u64 vaddr; /* Process virtual address */
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
@@ -539,6 +1567,12 @@ struct vfio_iommu_type1_dma_map {
#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
+struct vfio_bitmap {
+ __u64 pgsize; /* page size for bitmap in bytes */
+ __u64 size; /* in bytes */
+ __u64 __user *data; /* one bit per page */
+};
+
/**
* VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_dma_unmap)
@@ -548,12 +1582,34 @@ struct vfio_iommu_type1_dma_map {
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
+ *
+ * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
+ * before unmapping IO virtual addresses. When this flag is set, the user must
+ * provide a struct vfio_bitmap in data[]. The user must provide zeroed
+ * memory via vfio_bitmap.data and its size in the vfio_bitmap.size field.
+ * Each bit in the bitmap represents one page of the user-provided page size
+ * (vfio_bitmap.pgsize), consecutively starting from the iova offset. A set
+ * bit indicates that the page at that offset from iova is dirty. A bitmap of
+ * the pages in the unmapped range is returned in the user-provided
+ * vfio_bitmap.data.
+ *
+ * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses. iova and size
+ * must be 0. This cannot be combined with the get-dirty-bitmap flag.
+ *
+ * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
+ * virtual addresses in the iova range. DMA to already-mapped pages continues.
+ * Groups may not be added to the container while any addresses are invalid.
+ * This cannot be combined with the get-dirty-bitmap flag.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
+#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
+#define VFIO_DMA_UNMAP_FLAG_ALL (1 << 1)
+#define VFIO_DMA_UNMAP_FLAG_VADDR (1 << 2)
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
+ __u8 data[];
};
#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
@@ -565,6 +1621,57 @@ struct vfio_iommu_type1_dma_unmap {
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)
+/**
+ * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
+ * struct vfio_iommu_type1_dirty_bitmap)
+ * IOCTL is used for dirty pages logging.
+ * Caller should set flag depending on which operation to perform, details as
+ * below:
+ *
+ * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set, instructs
+ * the IOMMU driver to log pages that are dirtied or potentially dirtied by
+ * the device; designed to be used when a migration is in progress. Dirty pages
+ * are logged until logging is disabled by user application by calling the IOCTL
+ * with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
+ *
+ * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set, instructs
+ * the IOMMU driver to stop logging dirtied pages.
+ *
+ * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
+ * returns the dirty pages bitmap for IOMMU container for a given IOVA range.
+ * The user must specify the IOVA range and the pgsize through the structure
+ * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
+ * supports getting a bitmap of the smallest supported pgsize only and may be
+ * extended in the future to get a bitmap of any specified supported pgsize. The
+ * user must provide a zeroed memory area for the bitmap memory and specify its
+ * size in bitmap.size. One bit is used to represent one page consecutively
+ * starting from iova offset. The user should provide page size in bitmap.pgsize
+ * field. A bit set in the bitmap indicates that the page at that offset from
+ * iova is dirty. The caller must set argsz to a value including the size of
+ * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the
+ * actual bitmap. If dirty pages logging is not enabled, an error will be
+ * returned.
+ *
+ * Only one of the flags _START, _STOP and _GET may be specified at a time.
+ *
+ */
+struct vfio_iommu_type1_dirty_bitmap {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
+ __u8 data[];
+};
+
+struct vfio_iommu_type1_dirty_bitmap_get {
+ __u64 iova; /* IO virtual address */
+ __u64 size; /* Size of iova range */
+ struct vfio_bitmap bitmap;
+};
+
+#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17)
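
A sketch of the GET_BITMAP operation, assuming container_fd is a type1
container on which _START was already issued and bitmap points at
caller-zeroed memory sized as described above:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int type1_get_dirty(int container_fd, __u64 iova, __u64 size,
			   __u64 pgsize, __u64 *bitmap)
{
	/* argsz covers the _get struct but not the bitmap memory itself. */
	size_t argsz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		       sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
	struct vfio_iommu_type1_dirty_bitmap *db = calloc(1, argsz);
	struct vfio_iommu_type1_dirty_bitmap_get *get;
	int ret;

	if (!db)
		return -1;
	db->argsz = argsz;
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	get = (void *)db->data;
	get->iova = iova;
	get->size = size;
	get->bitmap.pgsize = pgsize;
	/* One bit per page, rounded up to whole __u64 words. */
	get->bitmap.size = ((size / pgsize + 63) / 64) * sizeof(__u64);
	get->bitmap.data = bitmap;

	ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, db);
	free(db);
	return ret;
}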
+
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
@@ -716,4 +1823,4 @@ struct vfio_iommu_spapr_tce_remove {
/* ***************************************************************** */
-#endif /* VFIO_H */
+#endif /* _UAPIVFIO_H */
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
* [RFC PATCH kvmtool 05/10] vfio: Add dma map/unmap handlers
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (2 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 04/10] vfio: Update vfio header from linux kernel Aneesh Kumar K.V (Arm)
@ 2025-05-25 7:49 ` Aneesh Kumar K.V (Arm)
2025-07-27 18:25 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 06/10] vfio/iommufd: Import iommufd header from kernel Aneesh Kumar K.V (Arm)
` (5 subsequent siblings)
9 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC (permalink / raw)
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
include/kvm/vfio.h | 4 ++--
vfio/core.c | 7 +++++--
vfio/legacy.c | 7 +++++--
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
index 67a528f18d33..fed692b0f265 100644
--- a/include/kvm/vfio.h
+++ b/include/kvm/vfio.h
@@ -126,8 +126,8 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region);
int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *device);
void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
-int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
-int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size);
+extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
+extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
struct kvm_mem_bank;
int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
diff --git a/vfio/core.c b/vfio/core.c
index 2af30df3b2b9..32a8e0fe67c0 100644
--- a/vfio/core.c
+++ b/vfio/core.c
@@ -10,6 +10,9 @@ int kvm_vfio_device;
LIST_HEAD(vfio_groups);
struct vfio_device *vfio_devices;
+int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
+int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
+
static int vfio_device_pci_parser(const struct option *opt, char *arg,
struct vfio_device_params *dev)
{
@@ -281,12 +284,12 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
- return vfio_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
+ return dma_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
}
int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
- return vfio_unmap_mem_range(kvm, bank->guest_phys_addr, bank->size);
+ return dma_unmap_mem_range(kvm, bank->guest_phys_addr, bank->size);
}
int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group)
diff --git a/vfio/legacy.c b/vfio/legacy.c
index 92d6d0bd5c80..5b35d6ebff69 100644
--- a/vfio/legacy.c
+++ b/vfio/legacy.c
@@ -89,7 +89,7 @@ static int vfio_get_iommu_type(void)
return -ENODEV;
}
-int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
+static int legacy_vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
{
int ret = 0;
struct vfio_iommu_type1_dma_map dma_map = {
@@ -110,7 +110,7 @@ int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
return ret;
}
-int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
+static int legacy_vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
{
struct vfio_iommu_type1_dma_unmap dma_unmap = {
.argsz = sizeof(dma_unmap),
@@ -325,6 +325,9 @@ int legacy_vfio__init(struct kvm *kvm)
{
int ret;
+ dma_map_mem_range = legacy_vfio_map_mem_range;
+ dma_unmap_mem_range = legacy_vfio_unmap_mem_range;
+
ret = legacy_vfio_container_init(kvm);
if (ret)
return ret;
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
* [RFC PATCH kvmtool 06/10] vfio/iommufd: Import iommufd header from kernel
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (3 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 05/10] vfio: Add dma map/unmap handlers Aneesh Kumar K.V (Arm)
@ 2025-05-25 7:49 ` Aneesh Kumar K.V (Arm)
2025-07-27 18:25 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support Aneesh Kumar K.V (Arm)
` (4 subsequent siblings)
9 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC (permalink / raw)
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
sync with include/uapi/linux/iommufd.h from v6.14
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
include/linux/iommufd.h | 1017 +++++++++++++++++++++++++++++++++++++++
1 file changed, 1017 insertions(+)
create mode 100644 include/linux/iommufd.h
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
new file mode 100644
index 000000000000..78747b24bd0f
--- /dev/null
+++ b/include/linux/iommufd.h
@@ -0,0 +1,1017 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
+ */
+#ifndef _UAPI_IOMMUFD_H
+#define _UAPI_IOMMUFD_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define IOMMUFD_TYPE (';')
+
+/**
+ * DOC: General ioctl format
+ *
+ * The ioctl interface follows a general format to allow for extensibility. Each
+ * ioctl is passed in a structure pointer as the argument providing the size of
+ * the structure in the first u32. The kernel checks that any structure space
+ * beyond what it understands is 0. This allows userspace to use the backward
+ * compatible portion while consistently using the newer, larger, structures.
+ *
+ * ioctls use a standard meaning for common errnos:
+ *
+ * - ENOTTY: The IOCTL number itself is not supported at all
+ * - E2BIG: The IOCTL number is supported, but the provided structure has
+ * non-zero in a part the kernel does not understand.
+ * - EOPNOTSUPP: The IOCTL number is supported, and the structure is
+ * understood, however a known field has a value the kernel does not
+ * understand or support.
+ * - EINVAL: Everything about the IOCTL was understood, but a field is not
+ * correct.
+ * - ENOENT: An ID or IOVA provided does not exist.
+ * - ENOMEM: Out of memory.
+ * - EOVERFLOW: Mathematics overflowed.
+ *
+ * As well as additional errnos, within specific ioctls.
+ */
+enum {
+ IOMMUFD_CMD_BASE = 0x80,
+ IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE,
+ IOMMUFD_CMD_IOAS_ALLOC = 0x81,
+ IOMMUFD_CMD_IOAS_ALLOW_IOVAS = 0x82,
+ IOMMUFD_CMD_IOAS_COPY = 0x83,
+ IOMMUFD_CMD_IOAS_IOVA_RANGES = 0x84,
+ IOMMUFD_CMD_IOAS_MAP = 0x85,
+ IOMMUFD_CMD_IOAS_UNMAP = 0x86,
+ IOMMUFD_CMD_OPTION = 0x87,
+ IOMMUFD_CMD_VFIO_IOAS = 0x88,
+ IOMMUFD_CMD_HWPT_ALLOC = 0x89,
+ IOMMUFD_CMD_GET_HW_INFO = 0x8a,
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING = 0x8b,
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP = 0x8c,
+ IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d,
+ IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e,
+ IOMMUFD_CMD_IOAS_MAP_FILE = 0x8f,
+ IOMMUFD_CMD_VIOMMU_ALLOC = 0x90,
+ IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
+ IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
+};
+
+/**
+ * struct iommu_destroy - ioctl(IOMMU_DESTROY)
+ * @size: sizeof(struct iommu_destroy)
+ * @id: iommufd object ID to destroy. Can be any destroyable object type.
+ *
+ * Destroy any object held within iommufd.
+ */
+struct iommu_destroy {
+ __u32 size;
+ __u32 id;
+};
+#define IOMMU_DESTROY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_DESTROY)
+
+/**
+ * struct iommu_ioas_alloc - ioctl(IOMMU_IOAS_ALLOC)
+ * @size: sizeof(struct iommu_ioas_alloc)
+ * @flags: Must be 0
+ * @out_ioas_id: Output IOAS ID for the allocated object
+ *
+ * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA)
+ * to memory mapping.
+ */
+struct iommu_ioas_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 out_ioas_id;
+};
+#define IOMMU_IOAS_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOC)
+
+/**
+ * struct iommu_iova_range - ioctl(IOMMU_IOVA_RANGE)
+ * @start: First IOVA
+ * @last: Inclusive last IOVA
+ *
+ * An interval in IOVA space.
+ */
+struct iommu_iova_range {
+ __aligned_u64 start;
+ __aligned_u64 last;
+};
+
+/**
+ * struct iommu_ioas_iova_ranges - ioctl(IOMMU_IOAS_IOVA_RANGES)
+ * @size: sizeof(struct iommu_ioas_iova_ranges)
+ * @ioas_id: IOAS ID to read ranges from
+ * @num_iovas: Input/Output total number of ranges in the IOAS
+ * @__reserved: Must be 0
+ * @allowed_iovas: Pointer to the output array of struct iommu_iova_range
+ * @out_iova_alignment: Minimum alignment required for mapping IOVA
+ *
+ * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
+ * is not allowed. num_iovas will be set to the total number of iovas and
+ * the allowed_iovas[] will be filled in as space permits.
+ *
+ * The allowed ranges are dependent on the HW path the DMA operation takes, and
+ * can change during the lifetime of the IOAS. A fresh empty IOAS will have a
+ * full range, and each attached device will narrow the ranges based on that
+ * device's HW restrictions. Detaching a device can widen the ranges. Userspace
+ * should query ranges after every attach/detach to know what IOVAs are valid
+ * for mapping.
+ *
+ * On input num_iovas is the length of the allowed_iovas array. On output it is
+ * the total number of iovas filled in. The ioctl will return -EMSGSIZE and set
+ * num_iovas to the required value if num_iovas is too small. In this case the
+ * caller should allocate a larger output array and re-issue the ioctl.
+ *
+ * out_iova_alignment returns the minimum IOVA alignment that can be given
+ * to IOMMU_IOAS_MAP/COPY. IOVA's must satisfy::
+ *
+ * starting_iova % out_iova_alignment == 0
+ * (starting_iova + length) % out_iova_alignment == 0
+ *
+ * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot
+ * be higher than the system PAGE_SIZE.
+ */
+struct iommu_ioas_iova_ranges {
+ __u32 size;
+ __u32 ioas_id;
+ __u32 num_iovas;
+ __u32 __reserved;
+ __aligned_u64 allowed_iovas;
+ __aligned_u64 out_iova_alignment;
+};
+#define IOMMU_IOAS_IOVA_RANGES _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_IOVA_RANGES)
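
The two-call pattern described above, as a sketch; iommufd is the /dev/iommu
fd and ioas_id comes from IOMMU_IOAS_ALLOC:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static struct iommu_iova_range *get_iova_ranges(int iommufd, __u32 ioas_id,
						__u32 *nr)
{
	struct iommu_ioas_iova_ranges cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
	};
	struct iommu_iova_range *ranges;

	/* First call with num_iovas == 0; EMSGSIZE reports the needed count. */
	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd) && errno != EMSGSIZE)
		return NULL;
	ranges = calloc(cmd.num_iovas, sizeof(*ranges));
	if (!ranges)
		return NULL;
	cmd.allowed_iovas = (uintptr_t)ranges;
	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd)) {
		free(ranges);
		return NULL;
	}
	*nr = cmd.num_iovas;
	return ranges;
}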
+
+/**
+ * struct iommu_ioas_allow_iovas - ioctl(IOMMU_IOAS_ALLOW_IOVAS)
+ * @size: sizeof(struct iommu_ioas_allow_iovas)
+ * @ioas_id: IOAS ID to allow IOVAs from
+ * @num_iovas: Input/Output total number of ranges in the IOAS
+ * @__reserved: Must be 0
+ * @allowed_iovas: Pointer to array of struct iommu_iova_range
+ *
+ * Ensure a range of IOVAs are always available for allocation. If this call
+ * succeeds then IOMMU_IOAS_IOVA_RANGES will never return a list of IOVA ranges
+ * that are narrower than the ranges provided here. This call will fail if
+ * IOMMU_IOAS_IOVA_RANGES is currently narrower than the given ranges.
+ *
+ * When an IOAS is first created the IOVA_RANGES will be maximally sized, and as
+ * devices are attached the IOVA will narrow based on the device restrictions.
+ * When an allowed range is specified any narrowing will be refused, i.e. device
+ * attachment can fail if the device requires limiting within the allowed range.
+ *
+ * Automatic IOVA allocation is also impacted by this call. MAP will only
+ * allocate within the allowed IOVAs if they are present.
+ *
+ * This call replaces the entire allowed list with the given list.
+ */
+struct iommu_ioas_allow_iovas {
+ __u32 size;
+ __u32 ioas_id;
+ __u32 num_iovas;
+ __u32 __reserved;
+ __aligned_u64 allowed_iovas;
+};
+#define IOMMU_IOAS_ALLOW_IOVAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOW_IOVAS)
+
+/**
+ * enum iommufd_ioas_map_flags - Flags for map and copy
+ * @IOMMU_IOAS_MAP_FIXED_IOVA: If clear the kernel will compute an appropriate
+ * IOVA to place the mapping at
+ * @IOMMU_IOAS_MAP_WRITEABLE: DMA is allowed to write to this mapping
+ * @IOMMU_IOAS_MAP_READABLE: DMA is allowed to read from this mapping
+ */
+enum iommufd_ioas_map_flags {
+ IOMMU_IOAS_MAP_FIXED_IOVA = 1 << 0,
+ IOMMU_IOAS_MAP_WRITEABLE = 1 << 1,
+ IOMMU_IOAS_MAP_READABLE = 1 << 2,
+};
+
+/**
+ * struct iommu_ioas_map - ioctl(IOMMU_IOAS_MAP)
+ * @size: sizeof(struct iommu_ioas_map)
+ * @flags: Combination of enum iommufd_ioas_map_flags
+ * @ioas_id: IOAS ID to change the mapping of
+ * @__reserved: Must be 0
+ * @user_va: Userspace pointer to start mapping from
+ * @length: Number of bytes to map
+ * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set
+ * then this must be provided as input.
+ *
+ * Set an IOVA mapping from a user pointer. If FIXED_IOVA is specified then the
+ * mapping will be established at iova, otherwise a suitable location based on
+ * the reserved and allowed lists will be automatically selected and returned in
+ * iova.
+ *
+ * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must currently
+ * be unused, existing IOVA cannot be replaced.
+ */
+struct iommu_ioas_map {
+ __u32 size;
+ __u32 flags;
+ __u32 ioas_id;
+ __u32 __reserved;
+ __aligned_u64 user_va;
+ __aligned_u64 length;
+ __aligned_u64 iova;
+};
+#define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP)
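
This is roughly the shape of an iommufd-backed dma_map_mem_range handler for
the hooks added in patch 5; a sketch, assuming iommufd is the /dev/iommu fd
and ioas_id an IOAS the devices are attached to:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Map one guest memory bank at a fixed IOVA. */
static int ioas_map_fixed(int iommufd, __u32 ioas_id, void *host_addr,
			  __u64 iova, __u64 size)
{
	struct iommu_ioas_map map = {
		.size = sizeof(map),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA |
			 IOMMU_IOAS_MAP_READABLE |
			 IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)host_addr,
		.length = size,
		.iova = iova,	/* input, because FIXED_IOVA is set */
	};

	return ioctl(iommufd, IOMMU_IOAS_MAP, &map);
}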
+
+/**
+ * struct iommu_ioas_map_file - ioctl(IOMMU_IOAS_MAP_FILE)
+ * @size: sizeof(struct iommu_ioas_map_file)
+ * @flags: same as for iommu_ioas_map
+ * @ioas_id: same as for iommu_ioas_map
+ * @fd: the memfd to map
+ * @start: byte offset from start of file to map from
+ * @length: same as for iommu_ioas_map
+ * @iova: same as for iommu_ioas_map
+ *
+ * Set an IOVA mapping from a memfd file. All other arguments and semantics
+ * match those of IOMMU_IOAS_MAP.
+ */
+struct iommu_ioas_map_file {
+ __u32 size;
+ __u32 flags;
+ __u32 ioas_id;
+ __s32 fd;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 iova;
+};
+#define IOMMU_IOAS_MAP_FILE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP_FILE)
+
+/**
+ * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY)
+ * @size: sizeof(struct iommu_ioas_copy)
+ * @flags: Combination of enum iommufd_ioas_map_flags
+ * @dst_ioas_id: IOAS ID to change the mapping of
+ * @src_ioas_id: IOAS ID to copy from
+ * @length: Number of bytes to copy and map
+ * @dst_iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is
+ * set then this must be provided as input.
+ * @src_iova: IOVA to start the copy
+ *
+ * Copy an already existing mapping from src_ioas_id and establish it in
+ * dst_ioas_id. The src iova/length must exactly match a range used with
+ * IOMMU_IOAS_MAP.
+ *
+ * This may be used to efficiently clone a subset of an IOAS to another, or as a
+ * kind of 'cache' to speed up mapping. Copy has an efficiency advantage over
+ * establishing equivalent new mappings, as internal resources are shared, and
+ * the kernel will pin the user memory only once.
+ */
+struct iommu_ioas_copy {
+ __u32 size;
+ __u32 flags;
+ __u32 dst_ioas_id;
+ __u32 src_ioas_id;
+ __aligned_u64 length;
+ __aligned_u64 dst_iova;
+ __aligned_u64 src_iova;
+};
+#define IOMMU_IOAS_COPY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_COPY)
+
+/**
+ * struct iommu_ioas_unmap - ioctl(IOMMU_IOAS_UNMAP)
+ * @size: sizeof(struct iommu_ioas_unmap)
+ * @ioas_id: IOAS ID to change the mapping of
+ * @iova: IOVA to start the unmapping at
+ * @length: Number of bytes to unmap, and return back the bytes unmapped
+ *
+ * Unmap an IOVA range. The iova/length must be a superset of a previously
+ * mapped range used with IOMMU_IOAS_MAP or IOMMU_IOAS_COPY. Splitting or
+ * truncating ranges is not allowed. The values 0 to U64_MAX will unmap
+ * everything.
+ */
+struct iommu_ioas_unmap {
+ __u32 size;
+ __u32 ioas_id;
+ __aligned_u64 iova;
+ __aligned_u64 length;
+};
+#define IOMMU_IOAS_UNMAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_UNMAP)
+
+/**
+ * enum iommufd_option - ioctl(IOMMU_OPTION_RLIMIT_MODE) and
+ * ioctl(IOMMU_OPTION_HUGE_PAGES)
+ * @IOMMU_OPTION_RLIMIT_MODE:
+ * Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
+ * to invoke this. Value 0 (default) is user based accounting, 1 uses process
+ * based accounting. Global option, object_id must be 0
+ * @IOMMU_OPTION_HUGE_PAGES:
+ * Value 1 (default) allows contiguous pages to be combined when generating
+ * iommu mappings. Value 0 disables combining, everything is mapped to
+ * PAGE_SIZE. This can be useful for benchmarking. This is a per-IOAS
+ * option, the object_id must be the IOAS ID.
+ */
+enum iommufd_option {
+ IOMMU_OPTION_RLIMIT_MODE = 0,
+ IOMMU_OPTION_HUGE_PAGES = 1,
+};
+
+/**
+ * enum iommufd_option_ops - ioctl(IOMMU_OPTION_OP_SET) and
+ * ioctl(IOMMU_OPTION_OP_GET)
+ * @IOMMU_OPTION_OP_SET: Set the option's value
+ * @IOMMU_OPTION_OP_GET: Get the option's value
+ */
+enum iommufd_option_ops {
+ IOMMU_OPTION_OP_SET = 0,
+ IOMMU_OPTION_OP_GET = 1,
+};
+
+/**
+ * struct iommu_option - iommu option multiplexer
+ * @size: sizeof(struct iommu_option)
+ * @option_id: One of enum iommufd_option
+ * @op: One of enum iommufd_option_ops
+ * @__reserved: Must be 0
+ * @object_id: ID of the object if required
+ * @val64: Option value to set or value returned on get
+ *
+ * Change a simple option value. This multiplexor allows controlling options
+ * on objects. IOMMU_OPTION_OP_SET will load an option and IOMMU_OPTION_OP_GET
+ * will return the current value.
+ */
+struct iommu_option {
+ __u32 size;
+ __u32 option_id;
+ __u16 op;
+ __u16 __reserved;
+ __u32 object_id;
+ __aligned_u64 val64;
+};
+#define IOMMU_OPTION _IO(IOMMUFD_TYPE, IOMMUFD_CMD_OPTION)
+
+/**
+ * enum iommufd_vfio_ioas_op - IOMMU_VFIO_IOAS_* ioctls
+ * @IOMMU_VFIO_IOAS_GET: Get the current compatibility IOAS
+ * @IOMMU_VFIO_IOAS_SET: Change the current compatibility IOAS
+ * @IOMMU_VFIO_IOAS_CLEAR: Disable VFIO compatibility
+ */
+enum iommufd_vfio_ioas_op {
+ IOMMU_VFIO_IOAS_GET = 0,
+ IOMMU_VFIO_IOAS_SET = 1,
+ IOMMU_VFIO_IOAS_CLEAR = 2,
+};
+
+/**
+ * struct iommu_vfio_ioas - ioctl(IOMMU_VFIO_IOAS)
+ * @size: sizeof(struct iommu_vfio_ioas)
+ * @ioas_id: For IOMMU_VFIO_IOAS_SET the input IOAS ID to set
+ * For IOMMU_VFIO_IOAS_GET will output the IOAS ID
+ * @op: One of enum iommufd_vfio_ioas_op
+ * @__reserved: Must be 0
+ *
+ * The VFIO compatibility support uses a single ioas because VFIO APIs do not
+ * support the ID field. Set or Get the IOAS that VFIO compatibility will use.
+ * When VFIO_GROUP_SET_CONTAINER is used on an iommufd it will get the
+ * compatibility ioas, either by taking what is already set, or auto creating
+ * one. From then on VFIO will continue to use that ioas and is not affected by
+ * this ioctl. SET or CLEAR does not destroy any auto-created IOAS.
+ */
+struct iommu_vfio_ioas {
+ __u32 size;
+ __u32 ioas_id;
+ __u16 op;
+ __u16 __reserved;
+};
+#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
+
+/**
+ * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
+ * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
+ * the parent HWPT in a nesting configuration.
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
+ * enforced on device attachment
+ * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is
+ * valid.
+ * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
+ * domain can be attached to any PASID on the device.
+ * Any domain attached to the non-PASID part of the
+ * device must also be flagged, otherwise attaching a
+ * PASID will be blocked.
+ * If the IOMMU does not support PASID it will
+ * return an error (-EOPNOTSUPP).
+ */
+enum iommufd_hwpt_alloc_flags {
+ IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
+ IOMMU_HWPT_FAULT_ID_VALID = 1 << 2,
+ IOMMU_HWPT_ALLOC_PASID = 1 << 3,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
+ * entry attributes
+ * @IOMMU_VTD_S1_SRE: Supervisor request
+ * @IOMMU_VTD_S1_EAFE: Extended access enable
+ * @IOMMU_VTD_S1_WPE: Write protect enable
+ */
+enum iommu_hwpt_vtd_s1_flags {
+ IOMMU_VTD_S1_SRE = 1 << 0,
+ IOMMU_VTD_S1_EAFE = 1 << 1,
+ IOMMU_VTD_S1_WPE = 1 << 2,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
+ * info (IOMMU_HWPT_DATA_VTD_S1)
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_flags
+ * @pgtbl_addr: The base address of the stage-1 page table.
+ * @addr_width: The address width of the stage-1 page table
+ * @__reserved: Must be 0
+ */
+struct iommu_hwpt_vtd_s1 {
+ __aligned_u64 flags;
+ __aligned_u64 pgtbl_addr;
+ __u32 addr_width;
+ __u32 __reserved;
+};
+
+/**
+ * struct iommu_hwpt_arm_smmuv3 - ARM SMMUv3 nested STE
+ * (IOMMU_HWPT_DATA_ARM_SMMUV3)
+ *
+ * @ste: The first two double words of the user space Stream Table Entry for
+ * the translation. Must be little-endian.
+ * Allowed fields: (Refer to "5.2 Stream Table Entry" in SMMUv3 HW Spec)
+ * - word-0: V, Cfg, S1Fmt, S1ContextPtr, S1CDMax
+ * - word-1: EATS, S1DSS, S1CIR, S1COR, S1CSH, S1STALLD
+ *
+ * -EIO will be returned if @ste is not legal or contains any non-allowed field.
+ * Cfg can be used to select a S1, Bypass or Abort configuration. A Bypass
+ * nested domain will translate the same as the nesting parent. The S1 will
+ * install a Context Descriptor Table pointing at userspace memory translated
+ * by the nesting parent.
+ */
+struct iommu_hwpt_arm_smmuv3 {
+ __aligned_le64 ste[2];
+};
+
+/**
+ * enum iommu_hwpt_data_type - IOMMU HWPT Data Type
+ * @IOMMU_HWPT_DATA_NONE: no data
+ * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
+ * @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table
+ */
+enum iommu_hwpt_data_type {
+ IOMMU_HWPT_DATA_NONE = 0,
+ IOMMU_HWPT_DATA_VTD_S1 = 1,
+ IOMMU_HWPT_DATA_ARM_SMMUV3 = 2,
+};
+
+/**
+ * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
+ * @size: sizeof(struct iommu_hwpt_alloc)
+ * @flags: Combination of enum iommufd_hwpt_alloc_flags
+ * @dev_id: The device to allocate this HWPT for
+ * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to
+ * @out_hwpt_id: The ID of the new HWPT
+ * @__reserved: Must be 0
+ * @data_type: One of enum iommu_hwpt_data_type
+ * @data_len: Length of the type specific data
+ * @data_uptr: User pointer to the type specific data
+ * @fault_id: The ID of IOMMUFD_FAULT object. Valid only if flags field of
+ * IOMMU_HWPT_FAULT_ID_VALID is set.
+ * @__reserved2: Padding to 64-bit alignment. Must be 0.
+ *
+ * Explicitly allocate a hardware page table object. This is the same object
+ * type that is returned by iommufd_device_attach() and represents the
+ * underlying iommu driver's iommu_domain kernel object.
+ *
+ * A kernel-managed HWPT will be created with the mappings from the given
+ * IOAS via the @pt_id. The @data_type for this allocation must be set to
+ * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
+ * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
+ *
+ * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a
+ * parent HWPT) or a parent HWPT via @pt_id, in which the parent HWPT must be
+ * allocated previously via the same ioctl from a given IOAS (@pt_id). In this
+ * case, the @data_type must be set to a pre-defined type corresponding to an
+ * I/O page table type supported by the underlying IOMMU hardware. The device
+ * via @dev_id and the vIOMMU via @pt_id must be associated to the same IOMMU
+ * instance.
+ *
+ * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
+ * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
+ * must be given.
+ */
+struct iommu_hwpt_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 pt_id;
+ __u32 out_hwpt_id;
+ __u32 __reserved;
+ __u32 data_type;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+ __u32 fault_id;
+ __u32 __reserved2;
+};
+#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
+
+/**
+ * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
+ * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
+ * on a nested_parent domain.
+ * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
+ */
+enum iommu_hw_info_vtd_flags {
+ IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
+};
+
+/**
+ * struct iommu_hw_info_vtd - Intel VT-d hardware information
+ *
+ * @flags: Combination of enum iommu_hw_info_vtd_flags
+ * @__reserved: Must be 0
+ *
+ * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.2 Capability Register.
+ * @ecap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.3 Extended Capability Register.
+ *
+ * User needs to understand the Intel VT-d specification to decode the
+ * register value.
+ */
+struct iommu_hw_info_vtd {
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 cap_reg;
+ __aligned_u64 ecap_reg;
+};
+
+/**
+ * struct iommu_hw_info_arm_smmuv3 - ARM SMMUv3 hardware information
+ * (IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
+ *
+ * @flags: Must be set to 0
+ * @__reserved: Must be 0
+ * @idr: Implemented features for ARM SMMU Non-secure programming interface
+ * @iidr: Information about the implementation and implementer of ARM SMMU,
+ * and architecture version supported
+ * @aidr: ARM SMMU architecture version
+ *
+ * For the details of @idr, @iidr and @aidr, please refer to the chapters
+ * from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
+ *
+ * This reports the raw HW capability, and not all bits are meaningful to be
+ * read by userspace. Only the following fields should be used:
+ *
+ * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN, CD2L, ASID16, TTF
+ * idr[1]: SIDSIZE, SSIDSIZE
+ * idr[3]: BBML, RIL
+ * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
+ *
+ * - S1P should be assumed to be true if a NESTED HWPT can be created
+ * - VFIO/iommufd only support platforms with COHACC, it should be assumed to be
+ * true.
+ * - ATS is a per-device property. If the VMM describes any devices as ATS
+ * capable in ACPI/DT it should set the corresponding idr.
+ *
+ * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
+ * important that VMMs do not read bits outside the list to allow for
+ * compatibility with future kernels. Several features in the SMMUv3
+ * architecture are not currently supported by the kernel for nesting: HTTU,
+ * BTM, MPAM and others.
+ */
+struct iommu_hw_info_arm_smmuv3 {
+ __u32 flags;
+ __u32 __reserved;
+ __u32 idr[6];
+ __u32 iidr;
+ __u32 aidr;
+};
+
+/**
+ * enum iommu_hw_info_type - IOMMU Hardware Info Types
+ * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
+ * info
+ * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
+ * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
+ */
+enum iommu_hw_info_type {
+ IOMMU_HW_INFO_TYPE_NONE = 0,
+ IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
+ IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
+};
+
+/**
+ * enum iommufd_hw_capabilities
+ * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
+ * If available, it means the following APIs
+ * are supported:
+ *
+ * IOMMU_HWPT_GET_DIRTY_BITMAP
+ * IOMMU_HWPT_SET_DIRTY_TRACKING
+ *
+ */
+enum iommufd_hw_capabilities {
+ IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
+};
+
+/**
+ * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
+ * @size: sizeof(struct iommu_hw_info)
+ * @flags: Must be 0
+ * @dev_id: The device bound to the iommufd
+ * @data_len: Input the length of a user buffer in bytes. Output the length of
+ * data that kernel supports
+ * @data_uptr: User pointer to a user-space buffer used by the kernel to fill
+ * the iommu type specific hardware information data
+ * @out_data_type: Output the iommu hardware info type as defined in the enum
+ * iommu_hw_info_type.
+ * @out_capabilities: Output the generic iommu capability info type as defined
+ * in the enum iommu_hw_capabilities.
+ * @__reserved: Must be 0
+ *
+ * Query an iommu type specific hardware information data from an iommu behind
+ * a given device that has been bound to iommufd. This hardware info data will
+ * be used to sync capabilities between the virtual iommu and the physical
+ * iommu, e.g. a nested translation setup needs to check the hardware info, so
+ * a guest stage-1 page table can be compatible with the physical iommu.
+ *
+ * To capture an iommu type specific hardware information data, @data_uptr and
+ * its length @data_len must be provided. Trailing bytes will be zeroed if the
+ * user buffer is larger than the data that kernel has. Otherwise, kernel only
+ * fills the buffer using the given length in @data_len. If the ioctl succeeds,
+ * @data_len will be updated to the length that kernel actually supports,
+ * @out_data_type will be filled to decode the data filled in the buffer
+ * pointed by @data_uptr. Input @data_len == zero is allowed.
+ */
+struct iommu_hw_info {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+ __u32 out_data_type;
+ __u32 __reserved;
+ __aligned_u64 out_capabilities;
+};
+#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
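
A sketch of querying SMMUv3 information, assuming dev_id is the iommufd device
ID obtained when the VFIO device was bound to the iommufd (e.g. via
VFIO_DEVICE_BIND_IOMMUFD):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int query_smmuv3_info(int iommufd, __u32 dev_id,
			     struct iommu_hw_info_arm_smmuv3 *smmu)
{
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.data_len = sizeof(*smmu),
		.data_uptr = (uintptr_t)smmu,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;
	if (cmd.out_data_type != IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
		return -1;
	/* Consume only the documented fields, e.g. SIDSIZE from idr[1]. */
	return 0;
}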
+
+/*
+ * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
+ * tracking
+ * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
+ */
+enum iommufd_hwpt_set_dirty_tracking_flags {
+ IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
+};
+
+/**
+ * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
+ * @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
+ * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @__reserved: Must be 0
+ *
+ * Toggle dirty tracking on an HW pagetable.
+ */
+struct iommu_hwpt_set_dirty_tracking {
+ __u32 size;
+ __u32 flags;
+ __u32 hwpt_id;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
+
+/**
+ * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
+ * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
+ * any dirty bits metadata. This flag
+ * can be passed in the expectation
+ * where the next operation is an unmap
+ * of the same IOVA range.
+ *
+ */
+enum iommufd_hwpt_get_dirty_bitmap_flags {
+ IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
+};
+
+/**
+ * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
+ * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
+ * @__reserved: Must be 0
+ * @iova: base IOVA of the bitmap first bit
+ * @length: IOVA range size
+ * @page_size: page size granularity of each bit in the bitmap
+ * @data: bitmap where the dirty bits are set. Each bit in the bitmap
+ * represents one page_size unit, offset from the base @iova.
+ *
+ * Checking a given IOVA is dirty:
+ *
+ * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
+ *
+ * Walk the IOMMU pagetables for a given IOVA range to return a bitmap
+ * with the dirty IOVAs. In doing so it will also by default clear any
+ * dirty bit metadata set in the IOPTE.
+ */
+struct iommu_hwpt_get_dirty_bitmap {
+ __u32 size;
+ __u32 hwpt_id;
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 data;
+};
+#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
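
A sketch of the bitmap read-back, assuming hwpt_id names an HWPT allocated
with IOMMU_HWPT_ALLOC_DIRTY_TRACKING and tracking was enabled via
IOMMU_HWPT_SET_DIRTY_TRACKING; bitmap must provide length/page_size bits of
caller-allocated memory:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int hwpt_read_dirty(int iommufd, __u32 hwpt_id, __u64 iova,
			   __u64 length, __u64 page_size, __u64 *bitmap)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	/* Reads and, by default, clears the IOPTE dirty metadata. */
	return ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
}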
+
+/**
+ * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
+ * Data Type
+ * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
+ */
+enum iommu_hwpt_invalidate_data_type {
+ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
+ IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
+ * stage-1 cache invalidation
+ * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
+ * to all-levels page structure cache or just
+ * the leaf PTE cache.
+ */
+enum iommu_hwpt_vtd_s1_invalidate_flags {
+ IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
+ * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
+ * @addr: The start address of the range to be invalidated. It needs to
+ * be 4KB aligned.
+ * @npages: Number of contiguous 4K pages to be invalidated.
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
+ * @__reserved: Must be 0
+ *
+ * The Intel VT-d specific invalidation data for user-managed stage-1 cache
+ * invalidation in nested translation. Userspace uses this structure to
+ * tell the impacted cache scope after modifying the stage-1 page table.
+ *
+ * Invalidate all the caches related to the page table by setting @addr
+ * to 0 and @npages to U64_MAX.
+ *
+ * The device TLB will be invalidated automatically if ATS is enabled.
+ */
+struct iommu_hwpt_vtd_s1_invalidate {
+ __aligned_u64 addr;
+ __aligned_u64 npages;
+ __u32 flags;
+ __u32 __reserved;
+};
+
+/**
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
+ * Must be little-endian.
+ *
+ * Supported command list only when passing in a vIOMMU via @hwpt_id:
+ * CMDQ_OP_TLBI_NSNH_ALL
+ * CMDQ_OP_TLBI_NH_VA
+ * CMDQ_OP_TLBI_NH_VAA
+ * CMDQ_OP_TLBI_NH_ALL
+ * CMDQ_OP_TLBI_NH_ASID
+ * CMDQ_OP_ATC_INV
+ * CMDQ_OP_CFGI_CD
+ * CMDQ_OP_CFGI_CD_ALL
+ *
+ * -EIO will be returned if the command is not supported.
+ */
+struct iommu_viommu_arm_smmuv3_invalidate {
+ __aligned_le64 cmd[2];
+};
+
+/**
+ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
+ * @size: sizeof(struct iommu_hwpt_invalidate)
+ * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation
+ * @data_uptr: User pointer to an array of driver-specific cache invalidation
+ * data.
+ * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
+ * type of all the entries in the invalidation request array. It
+ * should be a type supported by the hwpt pointed to by @hwpt_id.
+ * @entry_len: Length (in bytes) of a request entry in the request array
+ * @entry_num: Input the number of cache invalidation requests in the array.
+ * Output the number of requests successfully handled by kernel.
+ * @__reserved: Must be 0.
+ *
+ * Invalidate iommu cache for user-managed page table or vIOMMU. Modifications
+ * on a user-managed page table should be followed by this operation, if a HWPT
+ * is passed in via @hwpt_id. Other caches, such as device cache or descriptor
+ * cache can be flushed if a vIOMMU is passed in via the @hwpt_id field.
+ *
+ * Each ioctl can support one or more cache invalidation requests in the array
+ * that has a total size of @entry_len * @entry_num.
+ *
+ * An empty invalidation request array by setting @entry_num==0 is allowed, and
+ * @entry_len and @data_uptr would be ignored in this case. This can be used to
+ * check if the given @data_type is supported or not by kernel.
+ */
+struct iommu_hwpt_invalidate {
+ __u32 size;
+ __u32 hwpt_id;
+ __aligned_u64 data_uptr;
+ __u32 data_type;
+ __u32 entry_len;
+ __u32 entry_num;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
+
+/**
+ * enum iommu_hwpt_pgfault_flags - flags for struct iommu_hwpt_pgfault
+ * @IOMMU_PGFAULT_FLAGS_PASID_VALID: The pasid field of the fault data is
+ * valid.
+ * @IOMMU_PGFAULT_FLAGS_LAST_PAGE: It's the last fault of a fault group.
+ */
+enum iommu_hwpt_pgfault_flags {
+ IOMMU_PGFAULT_FLAGS_PASID_VALID = (1 << 0),
+ IOMMU_PGFAULT_FLAGS_LAST_PAGE = (1 << 1),
+};
+
+/**
+ * enum iommu_hwpt_pgfault_perm - perm bits for struct iommu_hwpt_pgfault
+ * @IOMMU_PGFAULT_PERM_READ: request for read permission
+ * @IOMMU_PGFAULT_PERM_WRITE: request for write permission
+ * @IOMMU_PGFAULT_PERM_EXEC: (PCIE 10.4.1) request with a PASID that has the
+ * Execute Requested bit set in PASID TLP Prefix.
+ * @IOMMU_PGFAULT_PERM_PRIV: (PCIE 10.4.1) request with a PASID that has the
+ * Privileged Mode Requested bit set in PASID TLP
+ * Prefix.
+ */
+enum iommu_hwpt_pgfault_perm {
+ IOMMU_PGFAULT_PERM_READ = (1 << 0),
+ IOMMU_PGFAULT_PERM_WRITE = (1 << 1),
+ IOMMU_PGFAULT_PERM_EXEC = (1 << 2),
+ IOMMU_PGFAULT_PERM_PRIV = (1 << 3),
+};
+
+/**
+ * struct iommu_hwpt_pgfault - iommu page fault data
+ * @flags: Combination of enum iommu_hwpt_pgfault_flags
+ * @dev_id: id of the originated device
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: Combination of enum iommu_hwpt_pgfault_perm
+ * @__reserved: Must be 0.
+ * @addr: Fault address
+ * @length: a hint of how much data the requestor is expecting to fetch. For
+ * example, if the PRI initiator knows it is going to do a 10MB
+ * transfer, it could fill in 10MB and the OS could pre-fault in
+ * 10MB of IOVA. It defaults to 0 if there's no such hint.
+ * @cookie: kernel-managed cookie identifying a group of fault messages. The
+ * cookie number encoded in the last page fault of the group should
+ * be echoed back in the response message.
+ */
+struct iommu_hwpt_pgfault {
+ __u32 flags;
+ __u32 dev_id;
+ __u32 pasid;
+ __u32 grpid;
+ __u32 perm;
+ __u32 __reserved;
+ __aligned_u64 addr;
+ __u32 length;
+ __u32 cookie;
+};
+
+/**
+ * enum iommufd_page_response_code - Return status of fault handlers
+ * @IOMMUFD_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ * populated, retry the access. This is the
+ * "Success" defined in PCI 10.4.2.1.
+ * @IOMMUFD_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ * access. This is the "Invalid Request" in PCI
+ * 10.4.2.1.
+ */
+enum iommufd_page_response_code {
+ IOMMUFD_PAGE_RESP_SUCCESS = 0,
+ IOMMUFD_PAGE_RESP_INVALID = 1,
+};
+
+/**
+ * struct iommu_hwpt_page_response - IOMMU page fault response
+ * @cookie: The kernel-managed cookie reported in the fault message.
+ * @code: One of response code in enum iommufd_page_response_code.
+ */
+struct iommu_hwpt_page_response {
+ __u32 cookie;
+ __u32 code;
+};
+
+/**
+ * struct iommu_fault_alloc - ioctl(IOMMU_FAULT_QUEUE_ALLOC)
+ * @size: sizeof(struct iommu_fault_alloc)
+ * @flags: Must be 0
+ * @out_fault_id: The ID of the new FAULT
+ * @out_fault_fd: The fd of the new FAULT
+ *
+ * Explicitly allocate a fault handling object.
+ */
+struct iommu_fault_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 out_fault_id;
+ __u32 out_fault_fd;
+};
+#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC)
+
+/**
+ * enum iommu_viommu_type - Virtual IOMMU Type
+ * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
+ */
+enum iommu_viommu_type {
+ IOMMU_VIOMMU_TYPE_DEFAULT = 0,
+ IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+};
+
+/**
+ * struct iommu_viommu_alloc - ioctl(IOMMU_VIOMMU_ALLOC)
+ * @size: sizeof(struct iommu_viommu_alloc)
+ * @flags: Must be 0
+ * @type: Type of the virtual IOMMU. Must be defined in enum iommu_viommu_type
+ * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
+ * @hwpt_id: ID of a nesting parent HWPT to associate to
+ * @out_viommu_id: Output virtual IOMMU ID for the allocated object
+ *
+ * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
+ * virtualization support that is a security-isolated slice of the real IOMMU HW
+ * that is unique to a specific VM. Operations global to the IOMMU are connected
+ * to the vIOMMU, such as:
+ * - Security namespace for guest owned ID, e.g. guest-controlled cache tags
+ * - Non-device-affiliated event reporting, e.g. invalidation queue errors
+ * - Access to a sharable nesting parent pagetable across physical IOMMUs
+ * - Virtualization of various platform IDs, e.g. RIDs and others
+ * - Delivery of paravirtualized invalidation
+ * - Direct assigned invalidation queues
+ * - Direct assigned interrupts
+ */
+struct iommu_viommu_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 type;
+ __u32 dev_id;
+ __u32 hwpt_id;
+ __u32 out_viommu_id;
+};
+#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
+
+/**
+ * struct iommu_vdevice_alloc - ioctl(IOMMU_VDEVICE_ALLOC)
+ * @size: sizeof(struct iommu_vdevice_alloc)
+ * @viommu_id: vIOMMU ID to associate with the virtual device
+ * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
+ * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTROY
+ * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
+ * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table
+ *
+ * Allocate a virtual device instance (for a physical device) against a vIOMMU.
+ * This instance holds the device's information (related to its vIOMMU) in a VM.
+ */
+struct iommu_vdevice_alloc {
+ __u32 size;
+ __u32 viommu_id;
+ __u32 dev_id;
+ __u32 out_vdevice_id;
+ __aligned_u64 virt_id;
+};
+#define IOMMU_VDEVICE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VDEVICE_ALLOC)
+
+/**
+ * struct iommu_ioas_change_process - ioctl(VFIO_IOAS_CHANGE_PROCESS)
+ * @size: sizeof(struct iommu_ioas_change_process)
+ * @__reserved: Must be 0
+ *
+ * This transfers pinned memory counts for every memory map in every IOAS
+ * in the context to the current process. This only supports maps created
+ * with IOMMU_IOAS_MAP_FILE, and returns EINVAL if other maps are present.
+ * If the ioctl returns a failure status, then nothing is changed.
+ *
+ * This API is useful for transferring operation of a device from one process
+ * to another, such as during userland live update.
+ */
+struct iommu_ioas_change_process {
+ __u32 size;
+ __u32 __reserved;
+};
+
+#define IOMMU_IOAS_CHANGE_PROCESS \
+ _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS)
+
+#endif
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
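As an aside on the dirty-tracking uapi imported above: the bit-test formula in
the struct iommu_hwpt_get_dirty_bitmap comment translates to userspace code
roughly as follows. This is a minimal sketch, not part of the patch; the fd,
hwpt_id, range and helper names are hypothetical.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Fill @bitmap with one bit per page_size unit of [iova, iova + length). */
static int hwpt_read_dirty(int iommu_fd, uint32_t hwpt_id, uint64_t iova,
			   uint64_t length, uint64_t page_size,
			   uint64_t *bitmap)
{
	struct iommu_hwpt_get_dirty_bitmap get = {
		.size		= sizeof(get),
		.hwpt_id	= hwpt_id,
		.flags		= 0,	/* default also clears IOPTE dirty bits */
		.iova		= iova,
		.length		= length,
		.page_size	= page_size,
		.data		= (uintptr_t)bitmap,
	};

	return ioctl(iommu_fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get) ? -errno : 0;
}

/* Page n of the queried range is dirty when its bit is set. */
static bool page_is_dirty(const uint64_t *bitmap, uint64_t n)
{
	return bitmap[n / 64] & (1ULL << (n % 64));
}

Passing IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR in .flags keeps the IOPTE dirty
metadata intact, which the header suggests when the next operation is an unmap
of the same range.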
* [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (4 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 06/10] vfio/iommufd: Import iommufd header from kernel Aneesh Kumar K.V (Arm)
@ 2025-05-25 7:49 ` Aneesh Kumar K.V (Arm)
2025-07-27 18:31 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper Aneesh Kumar K.V (Arm)
` (3 subsequent siblings)
9 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC (permalink / raw)
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
This uses a stage-1 translate, stage-2 bypass IOMMU configuration.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
Makefile | 1 +
builtin-run.c | 1 +
include/kvm/kvm-config.h | 1 +
include/kvm/vfio.h | 2 +
vfio/core.c | 5 +
vfio/iommufd.c | 368 +++++++++++++++++++++++++++++++++++++++
6 files changed, 378 insertions(+)
create mode 100644 vfio/iommufd.c
diff --git a/Makefile b/Makefile
index 8b2720f73386..740b95c7c3c3 100644
--- a/Makefile
+++ b/Makefile
@@ -64,6 +64,7 @@ OBJS += mmio.o
OBJS += pci.o
OBJS += term.o
OBJS += vfio/core.o
+OBJS += vfio/iommufd.o
OBJS += vfio/pci.o
OBJS += vfio/legacy.o
OBJS += virtio/blk.o
diff --git a/builtin-run.c b/builtin-run.c
index 81f255f911b3..39198f9bc0d6 100644
--- a/builtin-run.c
+++ b/builtin-run.c
@@ -262,6 +262,7 @@ static int loglevel_parser(const struct option *opt, const char *arg, int unset)
OPT_CALLBACK('\0', "vfio-pci", NULL, "[domain:]bus:dev.fn", \
"Assign a PCI device to the virtual machine", \
vfio_device_parser, kvm), \
+ OPT_BOOLEAN('\0', "iommufd", &(cfg)->iommufd, "Use iommufd interface"), \
\
OPT_GROUP("Debug options:"), \
OPT_CALLBACK_NOOPT('\0', "debug", kvm, NULL, \
diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
index 592b035785c9..632eaf84b7eb 100644
--- a/include/kvm/kvm-config.h
+++ b/include/kvm/kvm-config.h
@@ -65,6 +65,7 @@ struct kvm_config {
bool ioport_debug;
bool mmio_debug;
int virtio_transport;
+ bool iommufd;
};
#endif
diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
index fed692b0f265..37a2b5ac3dad 100644
--- a/include/kvm/vfio.h
+++ b/include/kvm/vfio.h
@@ -128,6 +128,8 @@ void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
+int iommufd__init(struct kvm *kvm);
+int iommufd__exit(struct kvm *kvm);
struct kvm_mem_bank;
int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
diff --git a/vfio/core.c b/vfio/core.c
index 32a8e0fe67c0..0b1796c54ffd 100644
--- a/vfio/core.c
+++ b/vfio/core.c
@@ -373,6 +373,8 @@ static int vfio__init(struct kvm *kvm)
}
kvm_vfio_device = device.fd;
+ if (kvm->cfg.iommufd)
+ return iommufd__init(kvm);
return legacy_vfio__init(kvm);
}
dev_base_init(vfio__init);
@@ -393,6 +395,9 @@ static int vfio__exit(struct kvm *kvm)
free(kvm->cfg.vfio_devices);
+ if (kvm->cfg.iommufd)
+ return iommufd__exit(kvm);
+
return legacy_vfio__exit(kvm);
}
dev_base_exit(vfio__exit);
diff --git a/vfio/iommufd.c b/vfio/iommufd.c
new file mode 100644
index 000000000000..3728a06cb318
--- /dev/null
+++ b/vfio/iommufd.c
@@ -0,0 +1,368 @@
+#include <sys/types.h>
+#include <dirent.h>
+
+#include "kvm/kvm.h"
+#include <linux/iommufd.h>
+#include <linux/list.h>
+
+#define VFIO_DEV_DIR "/dev/vfio"
+#define VFIO_DEV_NODE VFIO_DEV_DIR "/devices/"
+#define IOMMU_DEV "/dev/iommu"
+
+static int iommu_fd;
+static int ioas_id;
+
+static int __iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
+{
+ int ret;
+
+ vdev->info.argsz = sizeof(vdev->info);
+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to get info");
+ goto err_close_device;
+ }
+
+ if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
+ ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
+ vfio_dev_warn(vdev, "failed to reset device");
+
+ vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
+ if (!vdev->regions) {
+ ret = -ENOMEM;
+ goto err_close_device;
+ }
+
+ /* Now for the bus-specific initialization... */
+ switch (vdev->params->type) {
+ case VFIO_DEVICE_PCI:
+ BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
+ ret = vfio_pci_setup_device(kvm, vdev);
+ break;
+ default:
+ BUG_ON(1);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto err_free_regions;
+
+ vfio_dev_info(vdev, "assigned to device number 0x%x ",
+ vdev->dev_hdr.dev_num) ;
+
+ return 0;
+
+err_free_regions:
+ free(vdev->regions);
+err_close_device:
+ close(vdev->fd);
+
+ return ret;
+}
+
+static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
+{
+ int ret;
+ DIR *dir = NULL;
+ struct dirent *dir_ent;
+ bool found_dev = false;
+ char pci_dev_path[PATH_MAX];
+ char vfio_dev_path[PATH_MAX];
+ struct iommu_hwpt_alloc alloc_hwpt;
+ struct vfio_device_bind_iommufd bind;
+ struct vfio_device_attach_iommufd_pt attach_data;
+
+ ret = snprintf(pci_dev_path, PATH_MAX, "%s/vfio-dev/", vdev->sysfs_path);
+ if (ret < 0 || ret == PATH_MAX)
+ return -EINVAL;
+
+ dir = opendir(pci_dev_path);
+ if (!dir)
+ return -EINVAL;
+
+ while ((dir_ent = readdir(dir))) {
+ if (!strncmp(dir_ent->d_name, "vfio", 4)) {
+ ret = snprintf(vfio_dev_path, PATH_MAX, VFIO_DEV_NODE "%s", dir_ent->d_name);
+ if (ret < 0 || ret == PATH_MAX) {
+ ret = -EINVAL;
+ goto err_close_dir;
+ }
+ found_dev = true;
+ break;
+ }
+ }
+ if (!found_dev) {
+ ret = -ENODEV;
+ goto err_close_dir;
+ }
+
+ vdev->fd = open(vfio_dev_path, O_RDWR);
+ if (vdev->fd == -1) {
+	ret = -errno;
+ pr_err("Failed to open %s", vfio_dev_path);
+ goto err_close_dir;
+ }
+
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_ADD,
+ .addr = (__u64)&vdev->fd,
+ };
+
+ if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
+ ret = -errno;
+ pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
+ goto err_close_device;
+ }
+
+ bind.argsz = sizeof(bind);
+ bind.flags = 0;
+ bind.iommufd = iommu_fd;
+
+ /* now bind the iommufd */
+ if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to get info");
+ goto err_close_device;
+ }
+
+ alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
+ alloc_hwpt.flags = 0;
+ alloc_hwpt.dev_id = bind.out_devid;
+ alloc_hwpt.pt_id = ioas_id;
+ alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
+ alloc_hwpt.data_len = 0;
+ alloc_hwpt.data_uptr = 0;
+
+ if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
+ ret = -errno;
+ pr_err("Failed to allocate HWPT");
+ goto err_close_device;
+ }
+
+ attach_data.argsz = sizeof(attach_data);
+ attach_data.flags = 0;
+ attach_data.pt_id = alloc_hwpt.out_hwpt_id;
+
+ if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to attach to IOAS ");
+ goto err_close_device;
+ }
+
+ closedir(dir);
+ return __iommufd_configure_device(kvm, vdev);
+
+err_close_device:
+ close(vdev->fd);
+err_close_dir:
+ closedir(dir);
+ return ret;
+}
+
+static int iommufd_configure_devices(struct kvm *kvm)
+{
+ int i, ret;
+
+ for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
+ ret = iommufd_configure_device(kvm, &vfio_devices[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iommufd_create_ioas(struct kvm *kvm)
+{
+ int ret;
+ struct iommu_ioas_alloc alloc_data;
+ iommu_fd = open(IOMMU_DEV, O_RDWR);
+ if (iommu_fd == -1) {
+		ret = -errno;
+ pr_err("Failed to open %s", IOMMU_DEV);
+ return ret;
+ }
+
+ alloc_data.size = sizeof(alloc_data);
+ alloc_data.flags = 0;
+
+ if (ioctl(iommu_fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
+		ret = -errno;
+		pr_err("Failed to alloc IOAS");
+ goto err_close_device;
+ }
+ ioas_id = alloc_data.out_ioas_id;
+ return 0;
+
+err_close_device:
+ close(iommu_fd);
+ return ret;
+}
+
+static int vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
+{
+ int ret, dirfd;
+ char *group_name;
+ unsigned long group_id;
+ char dev_path[PATH_MAX];
+ struct vfio_group *group = NULL;
+
+ ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
+ vdev->params->bus, vdev->params->name);
+ if (ret < 0 || ret == PATH_MAX)
+ return -EINVAL;
+
+ vdev->sysfs_path = strndup(dev_path, PATH_MAX);
+ if (!vdev->sysfs_path)
+ return -ENOMEM;
+
+ /* Find IOMMU group for this device */
+ dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
+ if (dirfd < 0) {
+ vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
+ return -errno;
+ }
+
+ ret = readlinkat(dirfd, "iommu_group", dev_path, PATH_MAX);
+ if (ret < 0) {
+ vfio_dev_err(vdev, "no iommu_group");
+ goto out_close;
+ }
+ if (ret == PATH_MAX) {
+ ret = -ENOMEM;
+ goto out_close;
+ }
+
+ dev_path[ret] = '\0';
+ group_name = basename(dev_path);
+ errno = 0;
+ group_id = strtoul(group_name, NULL, 10);
+ if (errno) {
+ ret = -errno;
+ goto out_close;
+ }
+
+ list_for_each_entry(group, &vfio_groups, list) {
+ if (group->id == group_id) {
+ group->refs++;
+ break;
+ }
+ }
+	if (&group->list == &vfio_groups) { /* no existing group matched */
+ group = calloc(1, sizeof(*group));
+ if (!group) {
+ ret = -ENOMEM;
+ goto out_close;
+ }
+ group->id = group_id;
+ group->refs = 1;
+ /* no group fd for iommufd */
+ group->fd = -1;
+ list_add(&group->list, &vfio_groups);
+ }
+ vdev->group = group;
+ ret = 0;
+
+out_close:
+ close(dirfd);
+ return ret;
+}
+
+static int iommufd_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
+{
+ int ret = 0;
+ struct iommu_ioas_map dma_map;
+
+ dma_map.size = sizeof(dma_map);
+ dma_map.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE |
+ IOMMU_IOAS_MAP_FIXED_IOVA;
+ dma_map.ioas_id = ioas_id;
+ dma_map.__reserved = 0;
+ dma_map.user_va = host_addr;
+ dma_map.iova = iova;
+ dma_map.length = size;
+
+ /* Map the guest memory for DMA (i.e. provide isolation) */
+ if (ioctl(iommu_fd, IOMMU_IOAS_MAP, &dma_map)) {
+ ret = -errno;
+ pr_err("Failed to map 0x%llx -> 0x%llx (%u) for DMA",
+ dma_map.iova, dma_map.user_va, dma_map.size);
+ }
+
+ return ret;
+}
+
+static int iommufd_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
+{
+ int ret = 0;
+ struct iommu_ioas_unmap dma_unmap;
+
+ dma_unmap.size = sizeof(dma_unmap);
+ dma_unmap.ioas_id = ioas_id;
+ dma_unmap.iova = iova;
+ dma_unmap.length = size;
+
+ if (ioctl(iommu_fd, IOMMU_IOAS_UNMAP, &dma_unmap)) {
+ ret = -errno;
+ if (ret != -ENOENT)
+ pr_err("Failed to unmap 0x%llx - size (%u) for DMA %d",
+ dma_unmap.iova, dma_unmap.size, ret);
+ }
+
+ return ret;
+}
+
+static int iommufd_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
+{
+ return iommufd_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
+}
+
+static int iommufd_configure_reserved_mem(struct kvm *kvm)
+{
+ int ret;
+ struct vfio_group *group;
+
+ list_for_each_entry(group, &vfio_groups, list) {
+ ret = vfio_configure_reserved_regions(kvm, group);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int iommufd__init(struct kvm *kvm)
+{
+ int ret, i;
+
+ for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
+ vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
+
+ ret = vfio_device_init(kvm, &vfio_devices[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = iommufd_create_ioas(kvm);
+ if (ret)
+ return ret;
+
+ ret = iommufd_configure_devices(kvm);
+ if (ret)
+ return ret;
+
+ ret = iommufd_configure_reserved_mem(kvm);
+ if (ret)
+ return ret;
+
+ dma_map_mem_range = iommufd_map_mem_range;
+ dma_unmap_mem_range = iommufd_unmap_mem_range;
+ /* Now map the full memory */
+ return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, iommufd_map_mem_bank,
+ NULL);
+}
+
+int iommufd__exit(struct kvm *kvm)
+{
+ return 0;
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
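For context, the new option would be exercised roughly like this — a
hypothetical invocation, assuming a host device at 0000:01:00.0 already bound
to vfio-pci, with placeholder kernel and disk images:

	lkvm run --kernel Image --disk rootfs.img \
		--vfio-pci 0000:01:00.0 --iommufd

With --iommufd set, vfio__init() takes the iommufd path (IOAS allocation,
VFIO_DEVICE_BIND_IOMMUFD, IOMMU_HWPT_ALLOC, VFIO_DEVICE_ATTACH_IOMMUFD_PT)
instead of the legacy container/group setup.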
* [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (5 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support Aneesh Kumar K.V (Arm)
@ 2025-05-25 7:49 ` Aneesh Kumar K.V (Arm)
2025-07-27 18:32 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects Aneesh Kumar K.V (Arm)
` (2 subsequent siblings)
9 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC (permalink / raw)
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
Setting alloc_hwpt.flags = 0 implies we prefer stage-1 translation with
stage-2 bypass. Hence name the helper iommufd_alloc_s2bypass_hwpt().
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
vfio/iommufd.c | 86 +++++++++++++++++++++++++++++---------------------
1 file changed, 50 insertions(+), 36 deletions(-)
diff --git a/vfio/iommufd.c b/vfio/iommufd.c
index 3728a06cb318..742550705746 100644
--- a/vfio/iommufd.c
+++ b/vfio/iommufd.c
@@ -60,6 +60,54 @@ err_close_device:
return ret;
}
+static int iommufd_alloc_s2bypass_hwpt(struct vfio_device *vdev)
+{
+ int ret;
+ struct iommu_hwpt_alloc alloc_hwpt;
+ struct vfio_device_bind_iommufd bind;
+ struct vfio_device_attach_iommufd_pt attach_data;
+
+ bind.argsz = sizeof(bind);
+ bind.flags = 0;
+ bind.iommufd = iommu_fd;
+
+ /* now bind the iommufd */
+ if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to get info");
+ goto err_out;
+ }
+
+ alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
+	/* stage-1 translate, stage-2 bypass table if stage-1 is supported */
+ alloc_hwpt.flags = 0;
+ alloc_hwpt.dev_id = bind.out_devid;
+ alloc_hwpt.pt_id = ioas_id;
+ alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
+ alloc_hwpt.data_len = 0;
+ alloc_hwpt.data_uptr = 0;
+
+ if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
+ ret = -errno;
+ pr_err("Failed to allocate HWPT");
+ goto err_out;
+ }
+
+ attach_data.argsz = sizeof(attach_data);
+ attach_data.flags = 0;
+ attach_data.pt_id = alloc_hwpt.out_hwpt_id;
+
+ if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to attach to IOAS ");
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ return ret;
+}
+
static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
{
int ret;
@@ -68,9 +116,6 @@ static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
bool found_dev = false;
char pci_dev_path[PATH_MAX];
char vfio_dev_path[PATH_MAX];
- struct iommu_hwpt_alloc alloc_hwpt;
- struct vfio_device_bind_iommufd bind;
- struct vfio_device_attach_iommufd_pt attach_data;
ret = snprintf(pci_dev_path, PATH_MAX, "%s/vfio-dev/", vdev->sysfs_path);
if (ret < 0 || ret == PATH_MAX)
@@ -115,40 +160,9 @@ static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
goto err_close_device;
}
- bind.argsz = sizeof(bind);
- bind.flags = 0;
- bind.iommufd = iommu_fd;
-
- /* now bind the iommufd */
- if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
- ret = -errno;
- vfio_dev_err(vdev, "failed to get info");
- goto err_close_device;
- }
-
- alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
- alloc_hwpt.flags = 0;
- alloc_hwpt.dev_id = bind.out_devid;
- alloc_hwpt.pt_id = ioas_id;
- alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
- alloc_hwpt.data_len = 0;
- alloc_hwpt.data_uptr = 0;
-
- if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
- ret = -errno;
- pr_err("Failed to allocate HWPT");
- goto err_close_device;
- }
-
- attach_data.argsz = sizeof(attach_data);
- attach_data.flags = 0;
- attach_data.pt_id = alloc_hwpt.out_hwpt_id;
-
- if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
- ret = -errno;
- vfio_dev_err(vdev, "failed to attach to IOAS ");
+ ret = iommufd_alloc_s2bypass_hwpt(vdev);
+ if (ret)
goto err_close_device;
- }
closedir(dir);
return __iommufd_configure_device(kvm, vdev);
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
* [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (6 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper Aneesh Kumar K.V (Arm)
@ 2025-05-25 7:49 ` Aneesh Kumar K.V (Arm)
2025-07-21 12:27 ` Will Deacon
2025-07-27 18:35 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 10/10] util/update_headers: Add vfio related header files to update list Aneesh Kumar K.V (Arm)
2025-07-27 18:19 ` [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Mostafa Saleh
9 siblings, 2 replies; 38+ messages in thread
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC (permalink / raw)
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
This also allocates a stage1 bypass and stage2 translate table.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
builtin-run.c | 2 +
include/kvm/kvm-config.h | 1 +
vfio/core.c | 4 +-
vfio/iommufd.c | 115 ++++++++++++++++++++++++++++++++++++++-
4 files changed, 119 insertions(+), 3 deletions(-)
diff --git a/builtin-run.c b/builtin-run.c
index 39198f9bc0d6..bfa3e8b09f82 100644
--- a/builtin-run.c
+++ b/builtin-run.c
@@ -263,6 +263,8 @@ static int loglevel_parser(const struct option *opt, const char *arg, int unset)
"Assign a PCI device to the virtual machine", \
vfio_device_parser, kvm), \
OPT_BOOLEAN('\0', "iommufd", &(cfg)->iommufd, "Use iommufd interface"), \
+ OPT_BOOLEAN('\0', "iommufd-vdevice", &(cfg)->iommufd_vdevice, \
+ "Use iommufd vdevice interface"), \
\
OPT_GROUP("Debug options:"), \
OPT_CALLBACK_NOOPT('\0', "debug", kvm, NULL, \
diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
index 632eaf84b7eb..d80be6826469 100644
--- a/include/kvm/kvm-config.h
+++ b/include/kvm/kvm-config.h
@@ -66,6 +66,7 @@ struct kvm_config {
bool mmio_debug;
int virtio_transport;
bool iommufd;
+ bool iommufd_vdevice;
};
#endif
diff --git a/vfio/core.c b/vfio/core.c
index 0b1796c54ffd..8dfcf3ca35c1 100644
--- a/vfio/core.c
+++ b/vfio/core.c
@@ -373,7 +373,7 @@ static int vfio__init(struct kvm *kvm)
}
kvm_vfio_device = device.fd;
- if (kvm->cfg.iommufd)
+ if (kvm->cfg.iommufd || kvm->cfg.iommufd_vdevice)
return iommufd__init(kvm);
return legacy_vfio__init(kvm);
}
@@ -395,7 +395,7 @@ static int vfio__exit(struct kvm *kvm)
free(kvm->cfg.vfio_devices);
- if (kvm->cfg.iommufd)
+ if (kvm->cfg.iommufd || kvm->cfg.iommufd_vdevice)
return iommufd__exit(kvm);
return legacy_vfio__exit(kvm);
diff --git a/vfio/iommufd.c b/vfio/iommufd.c
index 742550705746..39870320e4ac 100644
--- a/vfio/iommufd.c
+++ b/vfio/iommufd.c
@@ -108,6 +108,116 @@ err_out:
return ret;
}
+static int iommufd_alloc_s1bypass_hwpt(struct vfio_device *vdev)
+{
+ int ret;
+ unsigned long dev_num;
+ unsigned long guest_bdf;
+ struct vfio_device_bind_iommufd bind;
+ struct vfio_device_attach_iommufd_pt attach_data;
+ struct iommu_hwpt_alloc alloc_hwpt;
+ struct iommu_viommu_alloc alloc_viommu;
+ struct iommu_hwpt_arm_smmuv3 bypass_ste;
+ struct iommu_vdevice_alloc alloc_vdev;
+
+ bind.argsz = sizeof(bind);
+ bind.flags = 0;
+ bind.iommufd = iommu_fd;
+
+ /* now bind the iommufd */
+ if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to get info");
+ goto err_out;
+ }
+
+ alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
+ alloc_hwpt.flags = IOMMU_HWPT_ALLOC_NEST_PARENT;
+ alloc_hwpt.dev_id = bind.out_devid;
+ alloc_hwpt.pt_id = ioas_id;
+ alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
+ alloc_hwpt.data_len = 0;
+ alloc_hwpt.data_uptr = 0;
+
+ if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
+ ret = -errno;
+ pr_err("Failed to allocate HWPT");
+ goto err_out;
+ }
+
+ attach_data.argsz = sizeof(attach_data);
+ attach_data.flags = 0;
+ attach_data.pt_id = alloc_hwpt.out_hwpt_id;
+
+ if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to attach to IOAS ");
+ goto err_out;
+ }
+
+ alloc_viommu.size = sizeof(alloc_viommu);
+ alloc_viommu.flags = 0;
+ alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
+ alloc_viommu.dev_id = bind.out_devid;
+ alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
+
+ if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to allocate VIOMMU %d", ret);
+ goto err_out;
+ }
+#define STRTAB_STE_0_V (1UL << 0)
+#define STRTAB_STE_0_CFG_S2_TRANS 6
+#define STRTAB_STE_0_CFG_S1_TRANS 5
+#define STRTAB_STE_0_CFG_BYPASS 4
+
+ /* set up virtual ste as bypass ste */
+ bypass_ste.ste[0] = STRTAB_STE_0_V | (STRTAB_STE_0_CFG_BYPASS << 1);
+ bypass_ste.ste[1] = 0x0UL;
+
+ alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
+ alloc_hwpt.flags = 0;
+ alloc_hwpt.dev_id = bind.out_devid;
+ alloc_hwpt.pt_id = alloc_viommu.out_viommu_id;
+ alloc_hwpt.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;
+ alloc_hwpt.data_len = sizeof(bypass_ste);
+ alloc_hwpt.data_uptr = (unsigned long)&bypass_ste;
+
+ if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
+ ret = -errno;
+ pr_err("Failed to allocate S1 bypass HWPT %d", ret);
+ goto err_out;
+ }
+
+ alloc_vdev.size = sizeof(alloc_vdev),
+ alloc_vdev.viommu_id = alloc_viommu.out_viommu_id;
+ alloc_vdev.dev_id = bind.out_devid;
+
+ dev_num = vdev->dev_hdr.dev_num;
+ /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
+ guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
+ alloc_vdev.virt_id = guest_bdf;
+ if (ioctl(iommu_fd, IOMMU_VDEVICE_ALLOC, &alloc_vdev)) {
+ ret = -errno;
+ pr_err("Failed to allocate vdevice %d", ret);
+ goto err_out;
+ }
+
+ /* Now attach to the nested domain */
+ attach_data.argsz = sizeof(attach_data);
+ attach_data.flags = 0;
+ attach_data.pt_id = alloc_hwpt.out_hwpt_id;
+ if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
+ ret = -errno;
+ vfio_dev_err(vdev, "failed to attach Nested config to IOAS %d ", ret);
+ goto err_out;
+ }
+
+ return 0;
+err_out:
+ return ret;
+}
+
static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
{
int ret;
@@ -160,7 +270,10 @@ static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
goto err_close_device;
}
- ret = iommufd_alloc_s2bypass_hwpt(vdev);
+ if (kvm->cfg.iommufd_vdevice)
+ ret = iommufd_alloc_s1bypass_hwpt(vdev);
+ else
+ ret = iommufd_alloc_s2bypass_hwpt(vdev);
if (ret)
goto err_close_device;
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
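As a reference for the vSTE built in iommufd_alloc_s1bypass_hwpt(): in an
SMMUv3 stream table entry, V is bit 0 and Config occupies bits [3:1], which is
why the patch shifts the config value left by one. A sketch restating the
patch's constants (field positions per the Arm SMMUv3 architecture, stated
here as background rather than taken from the patch):

	/* STE word 0: V at bit 0, Config at bits [3:1] */
	uint64_t ste0 = STRTAB_STE_0_V |
			((uint64_t)STRTAB_STE_0_CFG_BYPASS << 1);
	/* ste0 == 0x9: valid, Config = 0b100 (bypass) */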
* [RFC PATCH kvmtool 10/10] util/update_headers: Add vfio related header files to update list
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (7 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects Aneesh Kumar K.V (Arm)
@ 2025-05-25 7:49 ` Aneesh Kumar K.V (Arm)
2025-07-27 18:35 ` Mostafa Saleh
2025-07-27 18:19 ` [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Mostafa Saleh
9 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V (Arm) @ 2025-05-25 7:49 UTC (permalink / raw)
To: kvm
Cc: Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry,
Aneesh Kumar K.V (Arm)
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
util/update_headers.sh | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/util/update_headers.sh b/util/update_headers.sh
index 789e2a42b280..8dd0dd7a9de0 100755
--- a/util/update_headers.sh
+++ b/util/update_headers.sh
@@ -35,6 +35,13 @@ do
cp -- "$LINUX_ROOT/include/uapi/linux/$header" include/linux
done
+
+VFIO_LIST="vfio.h iommufd.h"
+for header in $VFIO_LIST
+do
+ cp -- "$LINUX_ROOT/include/uapi/linux/$header" include/linux
+done
+
unset KVMTOOL_PATH
copy_optional_arch () {
--
2.43.0
^ permalink raw reply related [flat|nested] 38+ messages in thread
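For reference, the updated script would be run from the kvmtool tree roughly
as below — assuming, per the script's existing convention, that it locates the
kernel tree via its argument/LINUX_ROOT; paths are placeholders:

	cd kvmtool
	./util/update_headers.sh ~/src/linux
	git diff include/linux/vfio.h include/linux/iommufd.h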
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-05-25 7:49 ` [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects Aneesh Kumar K.V (Arm)
@ 2025-07-21 12:27 ` Will Deacon
2025-07-24 14:09 ` Aneesh Kumar K.V
2025-07-27 18:35 ` Mostafa Saleh
1 sibling, 1 reply; 38+ messages in thread
From: Will Deacon @ 2025-07-21 12:27 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Julien Thierry
On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
> This also allocates a stage1 bypass and stage2 translate table.
Please write your commit messages as per Linux kernel guidelines.
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> builtin-run.c | 2 +
> include/kvm/kvm-config.h | 1 +
> vfio/core.c | 4 +-
> vfio/iommufd.c | 115 ++++++++++++++++++++++++++++++++++++++-
[...]
> 4 files changed, 119 insertions(+), 3 deletions(-)
> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
> index 742550705746..39870320e4ac 100644
> --- a/vfio/iommufd.c
> +++ b/vfio/iommufd.c
> @@ -108,6 +108,116 @@ err_out:
> return ret;
> }
>
> +static int iommufd_alloc_s1bypass_hwpt(struct vfio_device *vdev)
> +{
> + int ret;
> + unsigned long dev_num;
> + unsigned long guest_bdf;
> + struct vfio_device_bind_iommufd bind;
> + struct vfio_device_attach_iommufd_pt attach_data;
> + struct iommu_hwpt_alloc alloc_hwpt;
> + struct iommu_viommu_alloc alloc_viommu;
> + struct iommu_hwpt_arm_smmuv3 bypass_ste;
> + struct iommu_vdevice_alloc alloc_vdev;
> +
> + bind.argsz = sizeof(bind);
> + bind.flags = 0;
> + bind.iommufd = iommu_fd;
> +
> + /* now bind the iommufd */
> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to get info");
> + goto err_out;
> + }
> +
> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> + alloc_hwpt.flags = IOMMU_HWPT_ALLOC_NEST_PARENT;
> + alloc_hwpt.dev_id = bind.out_devid;
> + alloc_hwpt.pt_id = ioas_id;
> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
> + alloc_hwpt.data_len = 0;
> + alloc_hwpt.data_uptr = 0;
> +
> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> + ret = -errno;
> + pr_err("Failed to allocate HWPT");
> + goto err_out;
> + }
> +
> + attach_data.argsz = sizeof(attach_data);
> + attach_data.flags = 0;
> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> +
> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to attach to IOAS ");
> + goto err_out;
> + }
> +
> + alloc_viommu.size = sizeof(alloc_viommu);
> + alloc_viommu.flags = 0;
> + alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
> + alloc_viommu.dev_id = bind.out_devid;
> + alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
> +
> + if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to allocate VIOMMU %d", ret);
> + goto err_out;
> + }
> +#define STRTAB_STE_0_V (1UL << 0)
> +#define STRTAB_STE_0_CFG_S2_TRANS 6
> +#define STRTAB_STE_0_CFG_S1_TRANS 5
> +#define STRTAB_STE_0_CFG_BYPASS 4
> +
> + /* set up virtual ste as bypass ste */
> + bypass_ste.ste[0] = STRTAB_STE_0_V | (STRTAB_STE_0_CFG_BYPASS << 1);
> + bypass_ste.ste[1] = 0x0UL;
> +
> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> + alloc_hwpt.flags = 0;
> + alloc_hwpt.dev_id = bind.out_devid;
> + alloc_hwpt.pt_id = alloc_viommu.out_viommu_id;
> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;
> + alloc_hwpt.data_len = sizeof(bypass_ste);
> + alloc_hwpt.data_uptr = (unsigned long)&bypass_ste;
> +
> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> + ret = -errno;
> + pr_err("Failed to allocate S1 bypass HWPT %d", ret);
> + goto err_out;
> + }
> +
> + alloc_vdev.size = sizeof(alloc_vdev),
> + alloc_vdev.viommu_id = alloc_viommu.out_viommu_id;
> + alloc_vdev.dev_id = bind.out_devid;
> +
> + dev_num = vdev->dev_hdr.dev_num;
> + /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
> + guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
I don't understand this. Shouldn't the BDF correspond to the virtual
configuration space? That's not allocated until later, but just going
with 0 isn't going to work.
What am I missing?
Will
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-21 12:27 ` Will Deacon
@ 2025-07-24 14:09 ` Aneesh Kumar K.V
2025-08-04 22:33 ` Suzuki K Poulose
0 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-24 14:09 UTC (permalink / raw)
To: Will Deacon; +Cc: kvm, Suzuki K Poulose, Steven Price, Julien Thierry
Will Deacon <will@kernel.org> writes:
> On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> This also allocates a stage1 bypass and stage2 translate table.
>
> Please write your commit messages as per Linux kernel guidelines.
>
>> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
>> ---
>> builtin-run.c | 2 +
>> include/kvm/kvm-config.h | 1 +
>> vfio/core.c | 4 +-
>> vfio/iommufd.c | 115 ++++++++++++++++++++++++++++++++++++++-
>
> [...]
>
>> 4 files changed, 119 insertions(+), 3 deletions(-)
>> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
>> index 742550705746..39870320e4ac 100644
>> --- a/vfio/iommufd.c
>> +++ b/vfio/iommufd.c
>> @@ -108,6 +108,116 @@ err_out:
>> return ret;
>> }
>>
>> +static int iommufd_alloc_s1bypass_hwpt(struct vfio_device *vdev)
>> +{
>> + int ret;
>> + unsigned long dev_num;
>> + unsigned long guest_bdf;
>> + struct vfio_device_bind_iommufd bind;
>> + struct vfio_device_attach_iommufd_pt attach_data;
>> + struct iommu_hwpt_alloc alloc_hwpt;
>> + struct iommu_viommu_alloc alloc_viommu;
>> + struct iommu_hwpt_arm_smmuv3 bypass_ste;
>> + struct iommu_vdevice_alloc alloc_vdev;
>> +
>> + bind.argsz = sizeof(bind);
>> + bind.flags = 0;
>> + bind.iommufd = iommu_fd;
>> +
>> + /* now bind the iommufd */
>> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
>> + ret = -errno;
>> + vfio_dev_err(vdev, "failed to get info");
>> + goto err_out;
>> + }
>> +
>> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
>> + alloc_hwpt.flags = IOMMU_HWPT_ALLOC_NEST_PARENT;
>> + alloc_hwpt.dev_id = bind.out_devid;
>> + alloc_hwpt.pt_id = ioas_id;
>> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
>> + alloc_hwpt.data_len = 0;
>> + alloc_hwpt.data_uptr = 0;
>> +
>> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
>> + ret = -errno;
>> + pr_err("Failed to allocate HWPT");
>> + goto err_out;
>> + }
>> +
>> + attach_data.argsz = sizeof(attach_data);
>> + attach_data.flags = 0;
>> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
>> +
>> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
>> + ret = -errno;
>> + vfio_dev_err(vdev, "failed to attach to IOAS ");
>> + goto err_out;
>> + }
>> +
>> + alloc_viommu.size = sizeof(alloc_viommu);
>> + alloc_viommu.flags = 0;
>> + alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
>> + alloc_viommu.dev_id = bind.out_devid;
>> + alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
>> +
>> + if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
>> + ret = -errno;
>> + vfio_dev_err(vdev, "failed to allocate VIOMMU %d", ret);
>> + goto err_out;
>> + }
>> +#define STRTAB_STE_0_V (1UL << 0)
>> +#define STRTAB_STE_0_CFG_S2_TRANS 6
>> +#define STRTAB_STE_0_CFG_S1_TRANS 5
>> +#define STRTAB_STE_0_CFG_BYPASS 4
>> +
>> + /* set up virtual ste as bypass ste */
>> + bypass_ste.ste[0] = STRTAB_STE_0_V | (STRTAB_STE_0_CFG_BYPASS << 1);
>> + bypass_ste.ste[1] = 0x0UL;
>> +
>> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
>> + alloc_hwpt.flags = 0;
>> + alloc_hwpt.dev_id = bind.out_devid;
>> + alloc_hwpt.pt_id = alloc_viommu.out_viommu_id;
>> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;
>> + alloc_hwpt.data_len = sizeof(bypass_ste);
>> + alloc_hwpt.data_uptr = (unsigned long)&bypass_ste;
>> +
>> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
>> + ret = -errno;
>> + pr_err("Failed to allocate S1 bypass HWPT %d", ret);
>> + goto err_out;
>> + }
>> +
>> + alloc_vdev.size = sizeof(alloc_vdev),
>> + alloc_vdev.viommu_id = alloc_viommu.out_viommu_id;
>> + alloc_vdev.dev_id = bind.out_devid;
>> +
>> + dev_num = vdev->dev_hdr.dev_num;
>> + /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
>> + guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
>
> I don't understand this. Shouldn't the BDF correspond to the virtual
> configuration space? That's not allocated until later, but just going
> with 0 isn't going to work.
>
> What am I missing?
>
As I understand it, kvmtool supports only bus 0 and does not allow
multifunction devices. Based on that, I derived the guest BDF as follows
(correcting what was wrong in the original patch):
guest_bdf = (0ULL << 16) | (0 << 8) | dev_num << 3 | (0 << 0);
Are you suggesting that this approach is incorrect, and that we can use
a bus number other than 0?
From what I see, device__register() places the device in configuration
space using dev_num, which matches what we see in dev_hdr.
Separately, I did find a bug w.r.t. iommufd while testing BDF values
other than 00:00.0: I was calling vfio_pci_setup_device() too late on
the iommufd side. I've fixed that now.
But just to clarify: is your feedback about that specific bug, or is it
about my assumption that kvmtool supports only domain 0, bus 0, and
function 0?
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
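For clarity, the corrected expression in the reply above is the standard PCI
BDF packing — bus in bits [15:8], device in bits [7:3], function in bits
[2:0]. A sketch with kvmtool's fixed bus 0 and function 0 (the helper name is
hypothetical):

	/* bus [15:8] | device [7:3] | function [2:0] */
	static inline unsigned long pci_bdf(unsigned int bus,
					    unsigned int dev,
					    unsigned int fn)
	{
		return (bus << 8) | (dev << 3) | fn;
	}

	unsigned long guest_bdf = pci_bdf(0, vdev->dev_hdr.dev_num, 0);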
* Re: [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
` (8 preceding siblings ...)
2025-05-25 7:49 ` [RFC PATCH kvmtool 10/10] util/update_headers: Add vfio related header files to update list Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:19 ` Mostafa Saleh
9 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:19 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Hi Aneesh,
On Sun, May 25, 2025 at 01:19:07PM +0530, Aneesh Kumar K.V (Arm) wrote:
> This is needed for followup patches
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> vfio/core.c | 24 +++++++++++++++++++++++-
> 1 file changed, 23 insertions(+), 1 deletion(-)
>
> diff --git a/vfio/core.c b/vfio/core.c
> index 3ff2c0b075df..c6b305c30cf7 100644
> --- a/vfio/core.c
> +++ b/vfio/core.c
> @@ -9,6 +9,7 @@
> #define IOMMU_GROUP_DIR "/sys/kernel/iommu_groups"
>
> static int vfio_container;
> +static int kvm_vfio_device;
> static LIST_HEAD(vfio_groups);
> static struct vfio_device *vfio_devices;
>
> @@ -437,8 +438,19 @@ static int vfio_configure_groups(struct kvm *kvm)
> ret = vfio_configure_reserved_regions(kvm, group);
> if (ret)
> return ret;
> - }
>
> + struct kvm_device_attr attr = {
> + .group = KVM_DEV_VFIO_FILE,
> + .attr = KVM_DEV_VFIO_FILE_ADD,
> + .addr = (__u64)&group->fd,
> + };
> +
> + if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
> + pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
> + return -ENODEV;
> + }
> +
I think it’s better if we get and print the error code of the ioctl in case it
fails instead of masking it as -ENODEV.
> + }
> return 0;
> }
>
> @@ -656,6 +668,16 @@ static int vfio__init(struct kvm *kvm)
> if (!vfio_devices)
> return -ENOMEM;
>
> + struct kvm_create_device device = {
> + .type = KVM_DEV_TYPE_VFIO,
> + };
> +
> + if (ioctl(kvm->vm_fd, KVM_CREATE_DEVICE, &device)) {
> + pr_err("Failed KVM_CREATE_DEVICE ioctl");
> + return -ENODEV;
> + }
> + kvm_vfio_device = device.fd;
Do we need to close this file in vfio_group_exit()?
Thanks,
Mostafa
> +
> ret = vfio_container_init(kvm);
> if (ret)
> return ret;
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
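The error handling suggested above would look roughly like this — a sketch of
the reviewer's point, not committed code:

	if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
		ret = -errno;
		pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE: %d",
		       ret);
		return ret;
	}

Capturing errno immediately after the ioctl preserves the real failure cause
instead of collapsing everything to -ENODEV.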
* Re: [RFC PATCH kvmtool 02/10] vfio: Rename some functions
2025-05-25 7:49 ` [RFC PATCH kvmtool 02/10] vfio: Rename some functions Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:20 ` Mostafa Saleh
2025-07-29 4:53 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:20 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:08PM +0530, Aneesh Kumar K.V (Arm) wrote:
> We will add iommufd support in later patches. Rename the old vfio
> method as legacy vfio.
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> vfio/core.c | 31 ++++++++++++++++---------------
> 1 file changed, 16 insertions(+), 15 deletions(-)
>
> diff --git a/vfio/core.c b/vfio/core.c
> index c6b305c30cf7..424dc4ed3aef 100644
> --- a/vfio/core.c
> +++ b/vfio/core.c
> @@ -282,7 +282,7 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
> }
> }
>
> -static int vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> +static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> {
> int ret;
> struct vfio_group *group = vdev->group;
> @@ -340,12 +340,12 @@ err_close_device:
> return ret;
> }
>
> -static int vfio_configure_devices(struct kvm *kvm)
> +static int legacy_vfio_configure_devices(struct kvm *kvm)
> {
> int i, ret;
>
> for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> - ret = vfio_configure_device(kvm, &vfio_devices[i]);
> + ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
> if (ret)
> return ret;
> }
> @@ -429,7 +429,7 @@ static int vfio_configure_reserved_regions(struct kvm *kvm,
> return ret;
> }
>
> -static int vfio_configure_groups(struct kvm *kvm)
> +static int legacy_vfio_configure_groups(struct kvm *kvm)
> {
> int ret;
> struct vfio_group *group;
> @@ -454,7 +454,7 @@ static int vfio_configure_groups(struct kvm *kvm)
> return 0;
> }
>
> -static struct vfio_group *vfio_group_create(struct kvm *kvm, unsigned long id)
> +static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
> {
> int ret;
> struct vfio_group *group;
> @@ -512,10 +512,11 @@ static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
> if (--group->refs != 0)
> return;
>
> - ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER);
> -
> list_del(&group->list);
> - close(group->fd);
> + if (group->fd != -1) {
> + ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER);
> + close(group->fd);
> + }
That seems unrelated to the rename; maybe it's better to move it to the
patch where IOMMUFD support is added, since it's related to that.
Thanks,
Mostafa
> free(group);
> }
>
> @@ -559,14 +560,14 @@ vfio_group_get_for_dev(struct kvm *kvm, struct vfio_device *vdev)
> }
> }
>
> - group = vfio_group_create(kvm, group_id);
> + group = legacy_vfio_group_create(kvm, group_id);
>
> out_close:
> close(dirfd);
> return group;
> }
>
> -static int vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
> +static int legacy_vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
> {
> int ret;
> char dev_path[PATH_MAX];
> @@ -610,7 +611,7 @@ static void vfio_device_exit(struct kvm *kvm, struct vfio_device *vdev)
> free(vdev->sysfs_path);
> }
>
> -static int vfio_container_init(struct kvm *kvm)
> +static int legacy_vfio_container_init(struct kvm *kvm)
> {
> int api, i, ret, iommu_type;;
>
> @@ -638,7 +639,7 @@ static int vfio_container_init(struct kvm *kvm)
> for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
>
> - ret = vfio_device_init(kvm, &vfio_devices[i]);
> + ret = legacy_vfio_device_init(kvm, &vfio_devices[i]);
> if (ret)
> return ret;
> }
> @@ -678,15 +679,15 @@ static int vfio__init(struct kvm *kvm)
> }
> kvm_vfio_device = device.fd;
>
> - ret = vfio_container_init(kvm);
> + ret = legacy_vfio_container_init(kvm);
> if (ret)
> return ret;
>
> - ret = vfio_configure_groups(kvm);
> + ret = legacy_vfio_configure_groups(kvm);
> if (ret)
> return ret;
>
> - ret = vfio_configure_devices(kvm);
> + ret = legacy_vfio_configure_devices(kvm);
> if (ret)
> return ret;
>
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 03/10] vfio: Create new file legacy.c
2025-05-25 7:49 ` [RFC PATCH kvmtool 03/10] vfio: Create new file legacy.c Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:23 ` Mostafa Saleh
2025-07-29 4:59 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:23 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:09PM +0530, Aneesh Kumar K.V (Arm) wrote:
> Move legacy vfio config methodology to legacy.c. Also add helper
> vfio_map/unmap_mem_range which will be switched to function pointers in
> the later patch.
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> Makefile | 1 +
> include/kvm/vfio.h | 14 ++
> vfio/core.c | 342 ++------------------------------------------
> vfio/legacy.c | 347 +++++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 372 insertions(+), 332 deletions(-)
> create mode 100644 vfio/legacy.c
>
> diff --git a/Makefile b/Makefile
> index 60e551fd0c2a..8b2720f73386 100644
> --- a/Makefile
> +++ b/Makefile
> @@ -65,6 +65,7 @@ OBJS += pci.o
> OBJS += term.o
> OBJS += vfio/core.o
> OBJS += vfio/pci.o
> +OBJS += vfio/legacy.o
> OBJS += virtio/blk.o
> OBJS += virtio/scsi.o
> OBJS += virtio/console.o
> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
> index ac7b6226239a..67a528f18d33 100644
> --- a/include/kvm/vfio.h
> +++ b/include/kvm/vfio.h
> @@ -126,4 +126,18 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region);
> int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *device);
> void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
>
> +int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
> +int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size);
> +
> +struct kvm_mem_bank;
> +int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
> +int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
> +int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group);
> +int legacy_vfio__init(struct kvm *kvm);
> +int legacy_vfio__exit(struct kvm *kvm);
> +
> +extern int kvm_vfio_device;
> +extern struct list_head vfio_groups;
> +extern struct vfio_device *vfio_devices;
> +
> #endif /* KVM__VFIO_H */
> diff --git a/vfio/core.c b/vfio/core.c
> index 424dc4ed3aef..2af30df3b2b9 100644
> --- a/vfio/core.c
> +++ b/vfio/core.c
> @@ -4,14 +4,11 @@
>
> #include <linux/list.h>
>
> -#define VFIO_DEV_DIR "/dev/vfio"
> -#define VFIO_DEV_NODE VFIO_DEV_DIR "/vfio"
> #define IOMMU_GROUP_DIR "/sys/kernel/iommu_groups"
>
> -static int vfio_container;
> -static int kvm_vfio_device;
> -static LIST_HEAD(vfio_groups);
> -static struct vfio_device *vfio_devices;
> +int kvm_vfio_device;
kvm_vfio_device shouldn’t be VFIO/IOMMUFD specific, so this leads to
duplication in both files. I suggest moving its management to vfio/core.c
(and not extern-ing the fd), and either exporting a function to add devices
or, maybe better, doing it once from vfio__init().
> +LIST_HEAD(vfio_groups);
“vfio_groups” doesn’t seem to be used by the core code; maybe it’s better to
have a static version in each file?
Also, as groups are not really used for IOMMUFD, it seems better to move the
group logic into the legacy file, instead of having iommufd populate groups
just so the core code can handle the group exit.
> +struct vfio_device *vfio_devices;
>
Similarly for “vfio_devices”: it’s only allocated/freed in the core code, but never used there.
But no strong opinion about that.
Thanks,
Mostafa
> static int vfio_device_pci_parser(const struct option *opt, char *arg,
> struct vfio_device_params *dev)
> @@ -282,124 +279,17 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
> }
> }
>
> -static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> +int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> {
> - int ret;
> - struct vfio_group *group = vdev->group;
> -
> - vdev->fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD,
> - vdev->params->name);
> - if (vdev->fd < 0) {
> - vfio_dev_warn(vdev, "failed to get fd");
> -
> - /* The device might be a bridge without an fd */
> - return 0;
> - }
> -
> - vdev->info.argsz = sizeof(vdev->info);
> - if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
> - ret = -errno;
> - vfio_dev_err(vdev, "failed to get info");
> - goto err_close_device;
> - }
> -
> - if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
> - ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
> - vfio_dev_warn(vdev, "failed to reset device");
> -
> - vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
> - if (!vdev->regions) {
> - ret = -ENOMEM;
> - goto err_close_device;
> - }
> -
> - /* Now for the bus-specific initialization... */
> - switch (vdev->params->type) {
> - case VFIO_DEVICE_PCI:
> - BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
> - ret = vfio_pci_setup_device(kvm, vdev);
> - break;
> - default:
> - BUG_ON(1);
> - ret = -EINVAL;
> - }
> -
> - if (ret)
> - goto err_free_regions;
> -
> - vfio_dev_info(vdev, "assigned to device number 0x%x in group %lu",
> - vdev->dev_hdr.dev_num, group->id);
> -
> - return 0;
> -
> -err_free_regions:
> - free(vdev->regions);
> -err_close_device:
> - close(vdev->fd);
> -
> - return ret;
> -}
> -
> -static int legacy_vfio_configure_devices(struct kvm *kvm)
> -{
> - int i, ret;
> -
> - for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> - ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
> - if (ret)
> - return ret;
> - }
> -
> - return 0;
> + return vfio_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
> }
>
> -static int vfio_get_iommu_type(void)
> +int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> {
> - if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
> - return VFIO_TYPE1v2_IOMMU;
> -
> - if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
> - return VFIO_TYPE1_IOMMU;
> -
> - return -ENODEV;
> -}
> -
> -static int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> -{
> - int ret = 0;
> - struct vfio_iommu_type1_dma_map dma_map = {
> - .argsz = sizeof(dma_map),
> - .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
> - .vaddr = (unsigned long)bank->host_addr,
> - .iova = (u64)bank->guest_phys_addr,
> - .size = bank->size,
> - };
> -
> - /* Map the guest memory for DMA (i.e. provide isolation) */
> - if (ioctl(vfio_container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
> - ret = -errno;
> - pr_err("Failed to map 0x%llx -> 0x%llx (%llu) for DMA",
> - dma_map.iova, dma_map.vaddr, dma_map.size);
> - }
> -
> - return ret;
> -}
> -
> -static int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> -{
> - struct vfio_iommu_type1_dma_unmap dma_unmap = {
> - .argsz = sizeof(dma_unmap),
> - .size = bank->size,
> - .iova = bank->guest_phys_addr,
> - };
> -
> - ioctl(vfio_container, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
> -
> - return 0;
> + return vfio_unmap_mem_range(kvm, bank->guest_phys_addr, bank->size);
> }
>
> -static int vfio_configure_reserved_regions(struct kvm *kvm,
> - struct vfio_group *group)
> +int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group)
> {
> FILE *file;
> int ret = 0;
> @@ -429,84 +319,6 @@ static int vfio_configure_reserved_regions(struct kvm *kvm,
> return ret;
> }
>
> -static int legacy_vfio_configure_groups(struct kvm *kvm)
> -{
> - int ret;
> - struct vfio_group *group;
> -
> - list_for_each_entry(group, &vfio_groups, list) {
> - ret = vfio_configure_reserved_regions(kvm, group);
> - if (ret)
> - return ret;
> -
> - struct kvm_device_attr attr = {
> - .group = KVM_DEV_VFIO_FILE,
> - .attr = KVM_DEV_VFIO_FILE_ADD,
> - .addr = (__u64)&group->fd,
> - };
> -
> - if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
> - pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
> - return -ENODEV;
> - }
> -
> - }
> - return 0;
> -}
> -
> -static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
> -{
> - int ret;
> - struct vfio_group *group;
> - char group_node[PATH_MAX];
> - struct vfio_group_status group_status = {
> - .argsz = sizeof(group_status),
> - };
> -
> - group = calloc(1, sizeof(*group));
> - if (!group)
> - return NULL;
> -
> - group->id = id;
> - group->refs = 1;
> -
> - ret = snprintf(group_node, PATH_MAX, VFIO_DEV_DIR "/%lu", id);
> - if (ret < 0 || ret == PATH_MAX)
> - return NULL;
> -
> - group->fd = open(group_node, O_RDWR);
> - if (group->fd < 0) {
> - pr_err("Failed to open IOMMU group %s", group_node);
> - goto err_free_group;
> - }
> -
> - if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &group_status)) {
> - pr_err("Failed to determine status of IOMMU group %lu", id);
> - goto err_close_group;
> - }
> -
> - if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
> - pr_err("IOMMU group %lu is not viable", id);
> - goto err_close_group;
> - }
> -
> - if (ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &vfio_container)) {
> - pr_err("Failed to add IOMMU group %lu to VFIO container", id);
> - goto err_close_group;
> - }
> -
> - list_add(&group->list, &vfio_groups);
> -
> - return group;
> -
> -err_close_group:
> - close(group->fd);
> -err_free_group:
> - free(group);
> -
> - return NULL;
> -}
> -
> static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
> {
> if (--group->refs != 0)
> @@ -520,78 +332,6 @@ static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
> free(group);
> }
>
> -static struct vfio_group *
> -vfio_group_get_for_dev(struct kvm *kvm, struct vfio_device *vdev)
> -{
> - int dirfd;
> - ssize_t ret;
> - char *group_name;
> - unsigned long group_id;
> - char group_path[PATH_MAX];
> - struct vfio_group *group = NULL;
> -
> - /* Find IOMMU group for this device */
> - dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
> - if (dirfd < 0) {
> - vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
> - return NULL;
> - }
> -
> - ret = readlinkat(dirfd, "iommu_group", group_path, PATH_MAX);
> - if (ret < 0) {
> - vfio_dev_err(vdev, "no iommu_group");
> - goto out_close;
> - }
> - if (ret == PATH_MAX)
> - goto out_close;
> -
> - group_path[ret] = '\0';
> -
> - group_name = basename(group_path);
> - errno = 0;
> - group_id = strtoul(group_name, NULL, 10);
> - if (errno)
> - goto out_close;
> -
> - list_for_each_entry(group, &vfio_groups, list) {
> - if (group->id == group_id) {
> - group->refs++;
> - return group;
> - }
> - }
> -
> - group = legacy_vfio_group_create(kvm, group_id);
> -
> -out_close:
> - close(dirfd);
> - return group;
> -}
> -
> -static int legacy_vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
> -{
> - int ret;
> - char dev_path[PATH_MAX];
> - struct vfio_group *group;
> -
> - ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
> - vdev->params->bus, vdev->params->name);
> - if (ret < 0 || ret == PATH_MAX)
> - return -EINVAL;
> -
> - vdev->sysfs_path = strndup(dev_path, PATH_MAX);
> - if (!vdev->sysfs_path)
> - return -errno;
> -
> - group = vfio_group_get_for_dev(kvm, vdev);
> - if (!group) {
> - free(vdev->sysfs_path);
> - return -EINVAL;
> - }
> -
> - vdev->group = group;
> -
> - return 0;
> -}
>
> static void vfio_device_exit(struct kvm *kvm, struct vfio_device *vdev)
> {
> @@ -611,57 +351,8 @@ static void vfio_device_exit(struct kvm *kvm, struct vfio_device *vdev)
> free(vdev->sysfs_path);
> }
>
> -static int legacy_vfio_container_init(struct kvm *kvm)
> -{
> - int api, i, ret, iommu_type;;
> -
> - /* Create a container for our IOMMU groups */
> - vfio_container = open(VFIO_DEV_NODE, O_RDWR);
> - if (vfio_container == -1) {
> - ret = errno;
> - pr_err("Failed to open %s", VFIO_DEV_NODE);
> - return ret;
> - }
> -
> - api = ioctl(vfio_container, VFIO_GET_API_VERSION);
> - if (api != VFIO_API_VERSION) {
> - pr_err("Unknown VFIO API version %d", api);
> - return -ENODEV;
> - }
> -
> - iommu_type = vfio_get_iommu_type();
> - if (iommu_type < 0) {
> - pr_err("VFIO type-1 IOMMU not supported on this platform");
> - return iommu_type;
> - }
> -
> - /* Create groups for our devices and add them to the container */
> - for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> - vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
> -
> - ret = legacy_vfio_device_init(kvm, &vfio_devices[i]);
> - if (ret)
> - return ret;
> - }
> -
> - /* Finalise the container */
> - if (ioctl(vfio_container, VFIO_SET_IOMMU, iommu_type)) {
> - ret = -errno;
> - pr_err("Failed to set IOMMU type %d for VFIO container",
> - iommu_type);
> - return ret;
> - } else {
> - pr_info("Using IOMMU type %d for VFIO container", iommu_type);
> - }
> -
> - return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, vfio_map_mem_bank,
> - NULL);
> -}
> -
> static int vfio__init(struct kvm *kvm)
> {
> - int ret;
> -
> if (!kvm->cfg.num_vfio_devices)
> return 0;
>
> @@ -679,19 +370,7 @@ static int vfio__init(struct kvm *kvm)
> }
> kvm_vfio_device = device.fd;
>
> - ret = legacy_vfio_container_init(kvm);
> - if (ret)
> - return ret;
> -
> - ret = legacy_vfio_configure_groups(kvm);
> - if (ret)
> - return ret;
> -
> - ret = legacy_vfio_configure_devices(kvm);
> - if (ret)
> - return ret;
> -
> - return 0;
> + return legacy_vfio__init(kvm);
> }
> dev_base_init(vfio__init);
>
> @@ -708,10 +387,9 @@ static int vfio__exit(struct kvm *kvm)
> free(vfio_devices);
>
> kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, vfio_unmap_mem_bank, NULL);
> - close(vfio_container);
>
> free(kvm->cfg.vfio_devices);
>
> - return 0;
> + return legacy_vfio__exit(kvm);
> }
> dev_base_exit(vfio__exit);
> diff --git a/vfio/legacy.c b/vfio/legacy.c
> new file mode 100644
> index 000000000000..92d6d0bd5c80
> --- /dev/null
> +++ b/vfio/legacy.c
> @@ -0,0 +1,347 @@
> +#include "kvm/kvm.h"
> +#include "kvm/vfio.h"
> +
> +#include <linux/list.h>
> +
> +#define VFIO_DEV_DIR "/dev/vfio"
> +#define VFIO_DEV_NODE VFIO_DEV_DIR "/vfio"
> +static int vfio_container;
> +
> +static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> +{
> + int ret;
> + struct vfio_group *group = vdev->group;
> +
> + vdev->fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD,
> + vdev->params->name);
> + if (vdev->fd < 0) {
> + vfio_dev_warn(vdev, "failed to get fd");
> +
> + /* The device might be a bridge without an fd */
> + return 0;
> + }
> +
> + vdev->info.argsz = sizeof(vdev->info);
> + if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to get info");
> + goto err_close_device;
> + }
> +
> + if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
> + ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
> + vfio_dev_warn(vdev, "failed to reset device");
> +
> + vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
> + if (!vdev->regions) {
> + ret = -ENOMEM;
> + goto err_close_device;
> + }
> +
> + /* Now for the bus-specific initialization... */
> + switch (vdev->params->type) {
> + case VFIO_DEVICE_PCI:
> + BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
> + ret = vfio_pci_setup_device(kvm, vdev);
> + break;
> + default:
> + BUG_ON(1);
> + ret = -EINVAL;
> + }
> +
> + if (ret)
> + goto err_free_regions;
> +
> + vfio_dev_info(vdev, "assigned to device number 0x%x in group %lu",
> + vdev->dev_hdr.dev_num, group->id);
> +
> + return 0;
> +
> +err_free_regions:
> + free(vdev->regions);
> +err_close_device:
> + close(vdev->fd);
> +
> + return ret;
> +}
> +
> +static int legacy_vfio_configure_devices(struct kvm *kvm)
> +{
> + int i, ret;
> +
> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> + ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +static int vfio_get_iommu_type(void)
> +{
> + if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
> + return VFIO_TYPE1v2_IOMMU;
> +
> + if (ioctl(vfio_container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
> + return VFIO_TYPE1_IOMMU;
> +
> + return -ENODEV;
> +}
> +
> +int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
> +{
> + int ret = 0;
> + struct vfio_iommu_type1_dma_map dma_map = {
> + .argsz = sizeof(dma_map),
> + .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
> + .vaddr = host_addr,
> + .iova = iova,
> + .size = size,
> + };
> +
> + /* Map the guest memory for DMA (i.e. provide isolation) */
> + if (ioctl(vfio_container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
> + ret = -errno;
> + pr_err("Failed to map 0x%llx -> 0x%llx (%llu) for DMA",
> + dma_map.iova, dma_map.vaddr, dma_map.size);
> + }
> +
> + return ret;
> +}
> +
> +int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
> +{
> + struct vfio_iommu_type1_dma_unmap dma_unmap = {
> + .argsz = sizeof(dma_unmap),
> + .size = size,
> + .iova = iova,
> + };
> +
> + ioctl(vfio_container, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
> +
> + return 0;
> +}
> +
> +static int legacy_vfio_configure_groups(struct kvm *kvm)
> +{
> + int ret;
> + struct vfio_group *group;
> +
> + list_for_each_entry(group, &vfio_groups, list) {
> + ret = vfio_configure_reserved_regions(kvm, group);
> + if (ret)
> + return ret;
> +
> + struct kvm_device_attr attr = {
> + .group = KVM_DEV_VFIO_FILE,
> + .attr = KVM_DEV_VFIO_FILE_ADD,
> + .addr = (__u64)&group->fd,
> + };
> +
> + if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
> + pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
> + return -ENODEV;
> + }
> +
> + }
> + return 0;
> +}
> +
> +static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
> +{
> + int ret;
> + struct vfio_group *group;
> + char group_node[PATH_MAX];
> + struct vfio_group_status group_status = {
> + .argsz = sizeof(group_status),
> + };
> +
> + group = calloc(1, sizeof(*group));
> + if (!group)
> + return NULL;
> +
> + group->id = id;
> + group->refs = 1;
> +
> + ret = snprintf(group_node, PATH_MAX, VFIO_DEV_DIR "/%lu", id);
> + if (ret < 0 || ret >= PATH_MAX)
> + return NULL;
> +
> + group->fd = open(group_node, O_RDWR);
> + if (group->fd < 0) {
> + pr_err("Failed to open IOMMU group %s", group_node);
> + goto err_free_group;
> + }
> +
> + if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &group_status)) {
> + pr_err("Failed to determine status of IOMMU group %lu", id);
> + goto err_close_group;
> + }
> +
> + if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
> + pr_err("IOMMU group %lu is not viable", id);
> + goto err_close_group;
> + }
> +
> + if (ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &vfio_container)) {
> + pr_err("Failed to add IOMMU group %lu to VFIO container", id);
> + goto err_close_group;
> + }
> +
> + list_add(&group->list, &vfio_groups);
> +
> + return group;
> +
> +err_close_group:
> + close(group->fd);
> +err_free_group:
> + free(group);
> +
> + return NULL;
> +}
> +
> +static struct vfio_group *
> +vfio_group_get_for_dev(struct kvm *kvm, struct vfio_device *vdev)
> +{
> + int dirfd;
> + ssize_t ret;
> + char *group_name;
> + unsigned long group_id;
> + char group_path[PATH_MAX];
> + struct vfio_group *group = NULL;
> +
> + /* Find IOMMU group for this device */
> + dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
> + if (dirfd < 0) {
> + vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
> + return NULL;
> + }
> +
> + ret = readlinkat(dirfd, "iommu_group", group_path, PATH_MAX);
> + if (ret < 0) {
> + vfio_dev_err(vdev, "no iommu_group");
> + goto out_close;
> + }
> + if (ret == PATH_MAX)
> + goto out_close;
> +
> + group_path[ret] = '\0';
> +
> + group_name = basename(group_path);
> + errno = 0;
> + group_id = strtoul(group_name, NULL, 10);
> + if (errno)
> + goto out_close;
> +
> + list_for_each_entry(group, &vfio_groups, list) {
> + if (group->id == group_id) {
> + group->refs++;
> + return group;
> + }
> + }
> +
> + group = legacy_vfio_group_create(kvm, group_id);
> +
> +out_close:
> + close(dirfd);
> + return group;
> +}
> +
> +static int legacy_vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
> +{
> + int ret;
> + char dev_path[PATH_MAX];
> + struct vfio_group *group;
> +
> + ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
> + vdev->params->bus, vdev->params->name);
> + if (ret < 0 || ret >= PATH_MAX)
> + return -EINVAL;
> +
> + vdev->sysfs_path = strndup(dev_path, PATH_MAX);
> + if (!vdev->sysfs_path)
> + return -errno;
> +
> + group = vfio_group_get_for_dev(kvm, vdev);
> + if (!group) {
> + free(vdev->sysfs_path);
> + return -EINVAL;
> + }
> +
> + vdev->group = group;
> +
> + return 0;
> +}
> +
> +static int legacy_vfio_container_init(struct kvm *kvm)
> +{
> + int api, i, ret, iommu_type;
> +
> + /* Create a container for our IOMMU groups */
> + vfio_container = open(VFIO_DEV_NODE, O_RDWR);
> + if (vfio_container == -1) {
> + ret = errno;
> + pr_err("Failed to open %s", VFIO_DEV_NODE);
> + return ret;
> + }
> +
> + api = ioctl(vfio_container, VFIO_GET_API_VERSION);
> + if (api != VFIO_API_VERSION) {
> + pr_err("Unknown VFIO API version %d", api);
> + return -ENODEV;
> + }
> +
> + iommu_type = vfio_get_iommu_type();
> + if (iommu_type < 0) {
> + pr_err("VFIO type-1 IOMMU not supported on this platform");
> + return iommu_type;
> + }
> +
> + /* Create groups for our devices and add them to the container */
> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> + vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
> +
> + ret = legacy_vfio_device_init(kvm, &vfio_devices[i]);
> + if (ret)
> + return ret;
> + }
> +
> + /* Finalise the container */
> + if (ioctl(vfio_container, VFIO_SET_IOMMU, iommu_type)) {
> + ret = -errno;
> + pr_err("Failed to set IOMMU type %d for VFIO container",
> + iommu_type);
> + return ret;
> + } else {
> + pr_info("Using IOMMU type %d for VFIO container", iommu_type);
> + }
> +
> + return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, vfio_map_mem_bank,
> + NULL);
> +}
> +
> +int legacy_vfio__init(struct kvm *kvm)
> +{
> + int ret;
> +
> + ret = legacy_vfio_container_init(kvm);
> + if (ret)
> + return ret;
> +
> + ret = legacy_vfio_configure_groups(kvm);
> + if (ret)
> + return ret;
> +
> + ret = legacy_vfio_configure_devices(kvm);
> + if (ret)
> + return ret;
> +
> + return 0;
> +}
> +
> +int legacy_vfio__exit(struct kvm *kvm)
> +{
> + close(vfio_container);
> + return 0;
> +}
> --
> 2.43.0
>
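
Aside: the vfio_map_mem_range()/vfio_unmap_mem_range() seam introduced here
looks like the right abstraction point for the iommufd backend. Purely as an
untested sketch of how that seam could be filled in (the iommufd and ioas_id
globals are placeholders, not the implementation from the later patches):

    /* iommufd-backed variant of the map helper, assuming an already-open
     * /dev/iommu fd and an allocated IOAS (see <linux/iommufd.h>) */
    static int iommufd;
    static __u32 ioas_id;

    int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
    {
        struct iommu_ioas_map map = {
            .size = sizeof(map),
            .flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE |
                     IOMMU_IOAS_MAP_FIXED_IOVA,
            .ioas_id = ioas_id,
            .user_va = host_addr,
            .length = size,
            .iova = iova,
        };

        if (ioctl(iommufd, IOMMU_IOAS_MAP, &map))
            return -errno;
        return 0;
    }

(the unmap side would be the analogous IOMMU_IOAS_UNMAP call)
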
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 04/10] vfio: Update vfio header from linux kernel
2025-05-25 7:49 ` [RFC PATCH kvmtool 04/10] vfio: Update vfio header from linux kernel Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:23 ` Mostafa Saleh
0 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:23 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:10PM +0530, Aneesh Kumar K.V (Arm) wrote:
> sync with include/uapi/linux/vfio.h from v6.14
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
> ---
> include/linux/types.h | 13 +
> include/linux/vfio.h | 1131 ++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 1132 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/types.h b/include/linux/types.h
> index 5e20f10f8830..652c33bf5c87 100644
> --- a/include/linux/types.h
> +++ b/include/linux/types.h
> @@ -36,6 +36,19 @@ typedef __u32 __bitwise __be32;
> typedef __u64 __bitwise __le64;
> typedef __u64 __bitwise __be64;
>
> +/*
> + * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
> + * common 32/64-bit compat problems.
> + * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
> + * architectures) and to 8-byte boundaries on 64-bit architectures. The new
> + * aligned_u64 type enforces 8-byte alignment so that structs containing
> + * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures.
> + * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
> + */
> +#define __aligned_u64 __u64 __attribute__((aligned(8)))
> +#define __aligned_be64 __be64 __attribute__((aligned(8)))
> +#define __aligned_le64 __le64 __attribute__((aligned(8)))
> +
> struct list_head {
> struct list_head *next, *prev;
> };
> diff --git a/include/linux/vfio.h b/include/linux/vfio.h
> index 4e7ab4c52a4a..c8dbf8219c4f 100644
> --- a/include/linux/vfio.h
> +++ b/include/linux/vfio.h
> @@ -1,3 +1,4 @@
> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> /*
> * VFIO API definition
> *
> @@ -8,8 +9,8 @@
> * it under the terms of the GNU General Public License version 2 as
> * published by the Free Software Foundation.
> */
> -#ifndef VFIO_H
> -#define VFIO_H
> +#ifndef _UAPIVFIO_H
> +#define _UAPIVFIO_H
>
> #include <linux/types.h>
> #include <linux/ioctl.h>
> @@ -34,7 +35,7 @@
> #define VFIO_EEH 5
>
> /* Two-stage IOMMU */
> -#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
> +#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
>
> #define VFIO_SPAPR_TCE_v2_IOMMU 7
>
> @@ -45,6 +46,16 @@
> */
> #define VFIO_NOIOMMU_IOMMU 8
>
> +/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
> +#define VFIO_UNMAP_ALL 9
> +
> +/*
> + * Supports the vaddr flag for DMA map and unmap. Not supported for mediated
> + * devices, so this capability is subject to change as groups are added or
> + * removed.
> + */
> +#define VFIO_UPDATE_VADDR 10
> +
> /*
> * The IOCTL interface is designed for extensibility by embedding the
> * structure length (argsz) and flags into structures passed between
> @@ -199,8 +210,14 @@ struct vfio_device_info {
> #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
> #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
> #define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
> +#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
> +#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */
> +#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */
> +#define VFIO_DEVICE_FLAGS_CDX (1 << 8) /* vfio-cdx device */
> __u32 num_regions; /* Max region index + 1 */
> __u32 num_irqs; /* Max IRQ index + 1 */
> + __u32 cap_offset; /* Offset within info struct of first cap */
> + __u32 pad;
> };
> #define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
>
> @@ -214,6 +231,30 @@ struct vfio_device_info {
> #define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
> #define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
> #define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
> +#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
> +
> +/*
> + * The following capabilities are unique to s390 zPCI devices. Their contents
> + * are further-defined in vfio_zdev.h
> + */
> +#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE 1
> +#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP 2
> +#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3
> +#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4
> +
> +/*
> + * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
> + * completion to the root bus with supported widths provided via flags.
> + */
> +#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP 5
> +struct vfio_device_info_cap_pci_atomic_comp {
> + struct vfio_info_cap_header header;
> + __u32 flags;
> +#define VFIO_PCI_ATOMIC_COMP32 (1 << 0)
> +#define VFIO_PCI_ATOMIC_COMP64 (1 << 1)
> +#define VFIO_PCI_ATOMIC_COMP128 (1 << 2)
> + __u32 reserved;
> +};
>
> /**
> * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
> @@ -236,8 +277,8 @@ struct vfio_region_info {
> #define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
> __u32 index; /* Region index */
> __u32 cap_offset; /* Offset within info struct of first cap */
> - __u64 size; /* Region size (bytes) */
> - __u64 offset; /* Region offset from start of device fd */
> + __aligned_u64 size; /* Region size (bytes) */
> + __aligned_u64 offset; /* Region offset from start of device fd */
> };
> #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
>
> @@ -253,8 +294,8 @@ struct vfio_region_info {
> #define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
>
> struct vfio_region_sparse_mmap_area {
> - __u64 offset; /* Offset of mmap'able area within region */
> - __u64 size; /* Size of mmap'able area */
> + __aligned_u64 offset; /* Offset of mmap'able area within region */
> + __aligned_u64 size; /* Size of mmap'able area */
> };
>
> struct vfio_region_info_cap_sparse_mmap {
> @@ -292,14 +333,169 @@ struct vfio_region_info_cap_type {
> __u32 subtype; /* type specific */
> };
>
> +/*
> + * List of region types, global per bus driver.
> + * If you introduce a new type, please add it here.
> + */
> +
> +/* PCI region type containing a PCI vendor part */
> #define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31)
> #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
> +#define VFIO_REGION_TYPE_GFX (1)
> +#define VFIO_REGION_TYPE_CCW (2)
> +#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3)
> +
> +/* sub-types for VFIO_REGION_TYPE_PCI_* */
>
> -/* 8086 Vendor sub-types */
> +/* 8086 vendor PCI sub-types */
> #define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1)
> #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
> #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
>
> +/* 10de vendor PCI sub-types */
> +/*
> + * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
> + *
> + * Deprecated, region no longer provided
> + */
> +#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
> +
> +/* 1014 vendor PCI sub-types */
> +/*
> + * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
> + * to do TLB invalidation on a GPU.
> + *
> + * Deprecated, region no longer provided
> + */
> +#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
> +
> +/* sub-types for VFIO_REGION_TYPE_GFX */
> +#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
> +
> +/**
> + * struct vfio_region_gfx_edid - EDID region layout.
> + *
> + * Set display link state and EDID blob.
> + *
> + * The EDID blob has monitor information such as brand, name, serial
> + * number, physical size, supported video modes and more.
> + *
> + * This special region allows userspace (typically qemu) set a virtual
> + * EDID for the virtual monitor, which allows a flexible display
> + * configuration.
> + *
> + * For the edid blob spec look here:
> + * https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
> + *
> + * On linux systems you can find the EDID blob in sysfs:
> + * /sys/class/drm/${card}/${connector}/edid
> + *
> + * You can use the edid-decode utility (comes with xorg-x11-utils) to
> + * decode the EDID blob.
> + *
> + * @edid_offset: location of the edid blob, relative to the
> + * start of the region (readonly).
> + * @edid_max_size: max size of the edid blob (readonly).
> + * @edid_size: actual edid size (read/write).
> + * @link_state: display link state (read/write).
> + * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
> + * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
> + * @max_xres: max display width (0 == no limitation, readonly).
> + * @max_yres: max display height (0 == no limitation, readonly).
> + *
> + * EDID update protocol:
> + * (1) set link-state to down.
> + * (2) update edid blob and size.
> + * (3) set link-state to up.
> + */
> +struct vfio_region_gfx_edid {
> + __u32 edid_offset;
> + __u32 edid_max_size;
> + __u32 edid_size;
> + __u32 max_xres;
> + __u32 max_yres;
> + __u32 link_state;
> +#define VFIO_DEVICE_GFX_LINK_STATE_UP 1
> +#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
> +};
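
The three-step update protocol above is worth spelling out; a rough userspace
sequence (region_info from a prior VFIO_DEVICE_GET_REGION_INFO on the EDID
region, edid_blob/edid_len supplied by the caller) might be:

    struct vfio_region_gfx_edid hdr;
    __u32 state, edid_size = edid_len;

    /* Learn edid_offset/edid_max_size from the region header */
    pread(device_fd, &hdr, sizeof(hdr), region_info.offset);

    state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;        /* (1) link down */
    pwrite(device_fd, &state, sizeof(state), region_info.offset +
           offsetof(struct vfio_region_gfx_edid, link_state));

    pwrite(device_fd, edid_blob, edid_size,         /* (2) blob and size */
           region_info.offset + hdr.edid_offset);
    pwrite(device_fd, &edid_size, sizeof(edid_size), region_info.offset +
           offsetof(struct vfio_region_gfx_edid, edid_size));

    state = VFIO_DEVICE_GFX_LINK_STATE_UP;          /* (3) link up */
    pwrite(device_fd, &state, sizeof(state), region_info.offset +
           offsetof(struct vfio_region_gfx_edid, link_state));
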
> +
> +/* sub-types for VFIO_REGION_TYPE_CCW */
> +#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
> +#define VFIO_REGION_SUBTYPE_CCW_SCHIB (2)
> +#define VFIO_REGION_SUBTYPE_CCW_CRW (3)
> +
> +/* sub-types for VFIO_REGION_TYPE_MIGRATION */
> +#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
> +
> +struct vfio_device_migration_info {
> + __u32 device_state; /* VFIO device state */
> +#define VFIO_DEVICE_STATE_V1_STOP (0)
> +#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0)
> +#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1)
> +#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2)
> +#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \
> + VFIO_DEVICE_STATE_V1_SAVING | \
> + VFIO_DEVICE_STATE_V1_RESUMING)
> +
> +#define VFIO_DEVICE_STATE_VALID(state) \
> + (state & VFIO_DEVICE_STATE_V1_RESUMING ? \
> + (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
> +
> +#define VFIO_DEVICE_STATE_IS_ERROR(state) \
> + ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
> + VFIO_DEVICE_STATE_V1_RESUMING))
> +
> +#define VFIO_DEVICE_STATE_SET_ERROR(state) \
> + ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
> + VFIO_DEVICE_STATE_V1_RESUMING)
> +
> + __u32 reserved;
> + __aligned_u64 pending_bytes;
> + __aligned_u64 data_offset;
> + __aligned_u64 data_size;
> +};
> +
> +/*
> + * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
> + * which allows direct access to non-MSIX registers which happened to be within
> + * the same system page.
> + *
> + * Even though the userspace gets direct access to the MSIX data, the existing
> + * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
> + */
> +#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
> +
> +/*
> + * Capability with compressed real address (aka SSA - small system address)
> + * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
> + * and by the userspace to associate a NVLink bridge with a GPU.
> + *
> + * Deprecated, capability no longer provided
> + */
> +#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
> +
> +struct vfio_region_info_cap_nvlink2_ssatgt {
> + struct vfio_info_cap_header header;
> + __aligned_u64 tgt;
> +};
> +
> +/*
> + * Capability with an NVLink link speed. The value is read by
> + * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
> + * property in the device tree. The value is fixed in the hardware
> + * and failing to provide the correct value results in the link
> + * not working with no indication from the driver why.
> + *
> + * Deprecated, capability no longer provided
> + */
> +#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
> +
> +struct vfio_region_info_cap_nvlink2_lnkspd {
> + struct vfio_info_cap_header header;
> + __u32 link_speed;
> + __u32 __pad;
> +};
> +
> /**
> * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
> * struct vfio_irq_info)
> @@ -331,6 +527,9 @@ struct vfio_region_info_cap_type {
> * then add and unmask vectors, it's up to userspace to make the decision
> * whether to allocate the maximum supported number of vectors or tear
> * down setup and incrementally increase the vectors as each is enabled.
> + * Absence of the NORESIZE flag indicates that vectors can be enabled
> + * and disabled dynamically without impacting other vectors within the
> + * index.
> */
> struct vfio_irq_info {
> __u32 argsz;
> @@ -461,18 +660,78 @@ enum {
>
> enum {
> VFIO_CCW_IO_IRQ_INDEX,
> + VFIO_CCW_CRW_IRQ_INDEX,
> + VFIO_CCW_REQ_IRQ_INDEX,
> VFIO_CCW_NUM_IRQS
> };
>
> +/*
> + * The vfio-ap bus driver makes use of the following IRQ index mapping.
> + * Unimplemented IRQ types return a count of zero.
> + */
> +enum {
> + VFIO_AP_REQ_IRQ_INDEX,
> + VFIO_AP_NUM_IRQS
> +};
> +
> /**
> - * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
> + * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
> * struct vfio_pci_hot_reset_info)
> *
> + * This command is used to query the affected devices in the hot reset for
> + * a given device.
> + *
> + * This command always reports the segment, bus, and devfn information for
> + * each affected device, and selectively reports either the group_id or the
> + * devid, depending on how the calling device was opened.
> + *
> + * - If the calling device is opened via the traditional group/container
> + * API, group_id is reported. User should check if it has owned all
> + * the affected devices and provides a set of group fds to prove the
> + * ownership in VFIO_DEVICE_PCI_HOT_RESET ioctl.
> + *
> + * - If the calling device is opened as a cdev, devid is reported.
> + * Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
> + * data type. All the affected devices should be represented in
> + * the dev_set, ex. bound to a vfio driver, and also be owned by
> + * this interface which is determined by the following conditions:
> + * 1) Has a valid devid within the iommufd_ctx of the calling device.
> + * Ownership cannot be determined across separate iommufd_ctx and
> + * the cdev calling conventions do not support a proof-of-ownership
> + * model as provided in the legacy group interface. In this case
> + * valid devid with value greater than zero is provided in the return
> + * structure.
> + * 2) Does not have a valid devid within the iommufd_ctx of the calling
> + * device, but belongs to the same IOMMU group as the calling device
> + * or another opened device that has a valid devid within the
> + * iommufd_ctx of the calling device. This provides implicit ownership
> + * for devices within the same DMA isolation context. In this case
> + * the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
> + * structure.
> + *
> + * A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
> + * structure for affected devices where device is NOT represented in the
> + * dev_set or ownership is not available. Such devices prevent the use
> + * of VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the proof-of-ownership
> + * calling conventions (ie. via legacy group accessed devices). Flag
> + * VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED would be set when all the
> + * affected devices are represented in the dev_set and also owned by
> + * the user. This flag is available only when
> + * flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set, otherwise reserved.
> + * When set, user could invoke VFIO_DEVICE_PCI_HOT_RESET with a zero
> + * length fd array on the calling device as the ownership is validated
> + * by iommufd_ctx.
> + *
> * Return: 0 on success, -errno on failure:
> * -enospc = insufficient buffer, -enodev = unsupported for device.
> */
> struct vfio_pci_dependent_device {
> - __u32 group_id;
> + union {
> + __u32 group_id;
> + __u32 devid;
> +#define VFIO_PCI_DEVID_OWNED 0
> +#define VFIO_PCI_DEVID_NOT_OWNED -1
> + };
> __u16 segment;
> __u8 bus;
> __u8 devfn; /* Use PCI_SLOT/PCI_FUNC */
> @@ -481,6 +740,8 @@ struct vfio_pci_dependent_device {
> struct vfio_pci_hot_reset_info {
> __u32 argsz;
> __u32 flags;
> +#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID (1 << 0)
> +#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED (1 << 1)
> __u32 count;
> struct vfio_pci_dependent_device devices[];
> };
> @@ -491,6 +752,24 @@ struct vfio_pci_hot_reset_info {
> * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
> * struct vfio_pci_hot_reset)
> *
> + * A PCI hot reset results in either a bus or slot reset which may affect
> + * other devices sharing the bus/slot. The calling user must have
> + * ownership of the full set of affected devices as determined by the
> + * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
> + *
> + * When called on a device file descriptor acquired through the vfio
> + * group interface, the user is required to provide proof of ownership
> + * of those affected devices via the group_fds array in struct
> + * vfio_pci_hot_reset.
> + *
> + * When called on a direct cdev opened vfio device, the flags field of
> + * struct vfio_pci_hot_reset_info reports the ownership status of the
> + * affected devices and this ioctl must be called with an empty group_fds
> + * array. See above INFO ioctl definition for ownership requirements.
> + *
> + * Mixed usage of legacy groups and cdevs across the set of affected
> + * devices is not supported.
> + *
> * Return: 0 on success, -errno on failure.
> */
> struct vfio_pci_hot_reset {
> @@ -502,6 +781,683 @@ struct vfio_pci_hot_reset {
>
> #define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
>
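
Since the INFO ioctl above uses the -ENOSPC convention, fetching the affected
device list is the usual two-call pattern; roughly (error handling elided):

    struct vfio_pci_hot_reset_info hdr = { .argsz = sizeof(hdr) };
    struct vfio_pci_hot_reset_info *info;
    size_t sz;

    /* First call returns -ENOSPC but fills in the required count */
    ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &hdr);

    sz = sizeof(*info) + hdr.count * sizeof(struct vfio_pci_dependent_device);
    info = calloc(1, sz);
    info->argsz = sz;
    if (!ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info))
        /* walk info->devices[0..count) and check group_id/devid ownership */;
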
> +/**
> + * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
> + * struct vfio_device_query_gfx_plane)
> + *
> + * Set the drm_plane_type and flags, then retrieve the gfx plane info.
> + *
> + * flags supported:
> + * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
> + * to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
> + * support for dma-buf.
> + * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
> + * to ask if the mdev supports region. 0 on support, -EINVAL on no
> + * support for region.
> + * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
> + * with each call to query the plane info.
> + * - Others are invalid and return -EINVAL.
> + *
> + * Note:
> + * 1. Plane could be disabled by guest. In that case, success will be
> + * returned with zero-initialized drm_format, size, width and height
> + * fields.
> + * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information available
> + *
> + * Return: 0 on success, -errno on other failure.
> + */
> +struct vfio_device_gfx_plane_info {
> + __u32 argsz;
> + __u32 flags;
> +#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
> +#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
> +#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
> + /* in */
> + __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
> + /* out */
> + __u32 drm_format; /* drm format of plane */
> + __aligned_u64 drm_format_mod; /* tiled mode */
> + __u32 width; /* width of plane */
> + __u32 height; /* height of plane */
> + __u32 stride; /* stride of plane */
> + __u32 size; /* size of plane in bytes, align on page */
> + __u32 x_pos; /* horizontal position of cursor plane */
> + __u32 y_pos; /* vertical position of cursor plane */
> + __u32 x_hot; /* horizontal position of cursor hotspot */
> + __u32 y_hot; /* vertical position of cursor hotspot */
> + union {
> + __u32 region_index; /* region index */
> + __u32 dmabuf_id; /* dma-buf id */
> + };
> + __u32 reserved;
> +};
> +
> +#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
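
To make the PROBE handshake above concrete, checking whether an mdev supports
dma-buf planes would look something like:

    struct vfio_device_gfx_plane_info plane = {
        .argsz = sizeof(plane),
        .flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF,
    };

    if (!ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
        /* dma-buf supported; query planes with flags =
         * VFIO_GFX_PLANE_TYPE_DMABUF and drm_plane_type set */;
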
> +
> +/**
> + * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
> + *
> + * Return a new dma-buf file descriptor for an exposed guest framebuffer
> + * described by the provided dmabuf_id. The dmabuf_id is returned from
> + * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
> + */
> +
> +#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
> +
> +/**
> + * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
> + * struct vfio_device_ioeventfd)
> + *
> + * Perform a write to the device at the specified device fd offset, with
> + * the specified data and width when the provided eventfd is triggered.
> + * vfio bus drivers may not support this for all regions, for all widths,
> + * or at all. vfio-pci currently only enables support for BAR regions,
> + * excluding the MSI-X vector table.
> + *
> + * Return: 0 on success, -errno on failure.
> + */
> +struct vfio_device_ioeventfd {
> + __u32 argsz;
> + __u32 flags;
> +#define VFIO_DEVICE_IOEVENTFD_8 (1 << 0) /* 1-byte write */
> +#define VFIO_DEVICE_IOEVENTFD_16 (1 << 1) /* 2-byte write */
> +#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
> +#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
> +#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
> + __aligned_u64 offset; /* device fd offset of write */
> + __aligned_u64 data; /* data to be written */
> + __s32 fd; /* -1 for de-assignment */
> + __u32 reserved;
> +};
> +
> +#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
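
For reference, arming one of these (the BAR offset 0x40 "doorbell" is purely
an invented example; bar_info would come from VFIO_DEVICE_GET_REGION_INFO):

    #include <sys/eventfd.h>

    struct vfio_device_ioeventfd ioeventfd = {
        .argsz  = sizeof(ioeventfd),
        .flags  = VFIO_DEVICE_IOEVENTFD_32,
        .offset = bar_info.offset + 0x40,   /* hypothetical doorbell */
        .data   = 1,
        .fd     = eventfd(0, EFD_CLOEXEC),
    };

    ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
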
> +
> +/**
> + * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
> + * struct vfio_device_feature)
> + *
> + * Get, set, or probe feature data of the device. The feature is selected
> + * using the FEATURE_MASK portion of the flags field. Support for a feature
> + * can be probed by setting both the FEATURE_MASK and PROBE bits. A probe
> + * may optionally include the GET and/or SET bits to determine read vs write
> + * access of the feature respectively. Probing a feature will return success
> + * if the feature is supported and all of the optionally indicated GET/SET
> + * methods are supported. The format of the data portion of the structure is
> + * specific to the given feature. The data portion is not required for
> + * probing. GET and SET are mutually exclusive, except for use with PROBE.
> + *
> + * Return 0 on success, -errno on failure.
> + */
> +struct vfio_device_feature {
> + __u32 argsz;
> + __u32 flags;
> +#define VFIO_DEVICE_FEATURE_MASK (0xffff) /* 16-bit feature index */
> +#define VFIO_DEVICE_FEATURE_GET (1 << 16) /* Get feature into data[] */
> +#define VFIO_DEVICE_FEATURE_SET (1 << 17) /* Set feature from data[] */
> +#define VFIO_DEVICE_FEATURE_PROBE (1 << 18) /* Probe feature support */
> + __u8 data[];
> +};
> +
> +#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
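
The GET/SET/PROBE encoding above is a little dense; concretely, probing
whether a device supports the migration feature with GET access is just:

    struct vfio_device_feature feature = {
        .argsz = sizeof(feature),
        .flags = VFIO_DEVICE_FEATURE_MIGRATION |
                 VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_GET,
    };

    /* 0 on success: feature supported and GET-able; no data[] for a probe */
    ioctl(device_fd, VFIO_DEVICE_FEATURE, &feature);
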
> +
> +/*
> + * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
> + * struct vfio_device_bind_iommufd)
> + * @argsz: User filled size of this data.
> + * @flags: Must be 0.
> + * @iommufd: iommufd to bind.
> + * @out_devid: The device id generated by this bind. devid is a handle for
> + * this device/iommufd bond and can be used in IOMMUFD commands.
> + *
> + * Bind a vfio_device to the specified iommufd.
> + *
> + * User is restricted from accessing the device before the binding operation
> + * is completed. Only allowed on cdev fds.
> + *
> + * Unbind is automatically conducted when device fd is closed.
> + *
> + * Return: 0 on success, -errno on failure.
> + */
> +struct vfio_device_bind_iommufd {
> + __u32 argsz;
> + __u32 flags;
> + __s32 iommufd;
> + __u32 out_devid;
> +};
> +
> +#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
> +
> +/*
> + * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
> + * struct vfio_device_attach_iommufd_pt)
> + * @argsz: User filled size of this data.
> + * @flags: Must be 0.
> + * @pt_id: Input the target id which can represent an ioas or a hwpt
> + * allocated via iommufd subsystem.
> + * Output the input ioas id or the attached hwpt id which could
> + * be the specified hwpt itself or a hwpt automatically created
> + * for the specified ioas by kernel during the attachment.
> + *
> + * Associate the device with an address space within the bound iommufd.
> + * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
> + * allowed on cdev fds.
> + *
> + * If a vfio device is currently attached to a valid hw_pagetable, without doing
> + * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
> + * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
> + * as a hw_pagetable replacement, will replace the device's currently attached
> + * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
> + *
> + * Return: 0 on success, -errno on failure.
> + */
> +struct vfio_device_attach_iommufd_pt {
> + __u32 argsz;
> + __u32 flags;
> + __u32 pt_id;
> +};
> +
> +#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
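
These two ioctls (plus BIND above) replace the group/container dance for cdev
users; the minimal sequence would be roughly as follows, with the device node
path only illustrative:

    int devfd = open("/dev/vfio/devices/vfio0", O_RDWR);  /* example node */
    int iommufd = open("/dev/iommu", O_RDWR);

    struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };
    ioctl(iommufd, IOMMU_IOAS_ALLOC, &alloc);             /* get an IOAS */

    struct vfio_device_bind_iommufd bind = {
        .argsz = sizeof(bind),
        .iommufd = iommufd,
    };
    ioctl(devfd, VFIO_DEVICE_BIND_IOMMUFD, &bind);        /* bind first */

    struct vfio_device_attach_iommufd_pt attach = {
        .argsz = sizeof(attach),
        .pt_id = alloc.out_ioas_id,
    };
    ioctl(devfd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach); /* then attach */
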
> +
> +/*
> + * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
> + * struct vfio_device_detach_iommufd_pt)
> + * @argsz: User filled size of this data.
> + * @flags: Must be 0.
> + *
> + * Remove the association of the device and its current associated address
> + * space. After it, the device should be in a blocking DMA state. This is only
> + * allowed on cdev fds.
> + *
> + * Return: 0 on success, -errno on failure.
> + */
> +struct vfio_device_detach_iommufd_pt {
> + __u32 argsz;
> + __u32 flags;
> +};
> +
> +#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
> +
> +/*
> + * Provide support for setting a PCI VF Token, which is used as a shared
> + * secret between PF and VF drivers. This feature may only be set on a
> + * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
> + * open VFs. Data provided when setting this feature is a 16-byte array
> + * (__u8 b[16]), representing a UUID.
> + */
> +#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0)
> +
> +/*
> + * Indicates the device can support the migration API through
> + * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
> + * ERROR states are always supported. Support for additional states is
> + * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
> + * set.
> + *
> + * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
> + * RESUMING are supported.
> + *
> + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
> + * is supported in addition to the STOP_COPY states.
> + *
> + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
> + * PRE_COPY is supported in addition to the STOP_COPY states.
> + *
> + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
> + * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
> + * in addition to the STOP_COPY states.
> + *
> + * Other combinations of flags have behavior to be defined in the future.
> + */
> +struct vfio_device_feature_migration {
> + __aligned_u64 flags;
> +#define VFIO_MIGRATION_STOP_COPY (1 << 0)
> +#define VFIO_MIGRATION_P2P (1 << 1)
> +#define VFIO_MIGRATION_PRE_COPY (1 << 2)
> +};
> +#define VFIO_DEVICE_FEATURE_MIGRATION 1
> +
> +/*
> + * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
> + * device. The new state is supplied in device_state, see enum
> + * vfio_device_mig_state for details
> + *
> + * The kernel migration driver must fully transition the device to the new state
> + * value before the operation returns to the user.
> + *
> + * The kernel migration driver must not generate asynchronous device state
> + * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
> + * ioctl as described above.
> + *
> + * If this function fails then current device_state may be the original
> + * operating state or some other state along the combination transition path.
> + * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
> + * to return to the original state, or attempt to return to some other state
> + * such as RUNNING or STOP.
> + *
> + * If the new_state starts a new data transfer session then the FD associated
> + * with that session is returned in data_fd. The user is responsible to close
> + * this FD when it is finished. The user must consider the migration data stream
> + * carried over the FD to be opaque and must preserve the byte order of the
> + * stream. The user is not required to preserve buffer segmentation when writing
> + * the data stream during the RESUMING operation.
> + *
> + * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
> + * device, data_fd will be -1.
> + */
> +struct vfio_device_feature_mig_state {
> + __u32 device_state; /* From enum vfio_device_mig_state */
> + __s32 data_fd;
> +};
> +#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
> +
> +/*
> + * The device migration Finite State Machine is described by the enum
> + * vfio_device_mig_state. Some of the FSM arcs will create a migration data
> + * transfer session by returning a FD, in this case the migration data will
> + * flow over the FD using read() and write() as discussed below.
> + *
> + * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
> + * RUNNING - The device is running normally
> + * STOP - The device does not change the internal or external state
> + * STOP_COPY - The device internal state can be read out
> + * RESUMING - The device is stopped and is loading a new internal state
> + * ERROR - The device has failed and must be reset
> + *
> + * And optional states to support VFIO_MIGRATION_P2P:
> + * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
> + * And VFIO_MIGRATION_PRE_COPY:
> + * PRE_COPY - The device is running normally but tracking internal state
> + * changes
> + * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
> + * PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
> + *
> + * The FSM takes actions on the arcs between FSM states. The driver implements
> + * the following behavior for the FSM arcs:
> + *
> + * RUNNING_P2P -> STOP
> + * STOP_COPY -> STOP
> + * While in STOP the device must stop the operation of the device. The device
> + * must not generate interrupts, DMA, or any other change to external state.
> + * It must not change its internal state. When stopped the device and kernel
> + * migration driver must accept and respond to interaction to support external
> + * subsystems in the STOP state, for example PCI MSI-X and PCI config space.
> + * Failure by the user to restrict device access while in STOP must not result
> + * in error conditions outside the user context (ex. host system faults).
> + *
> + * The STOP_COPY arc will terminate a data transfer session.
> + *
> + * RESUMING -> STOP
> + * Leaving RESUMING terminates a data transfer session and indicates the
> + * device should complete processing of the data delivered by write(). The
> + * kernel migration driver should complete the incorporation of data written
> + * to the data transfer FD into the device internal state and perform
> + * final validity and consistency checking of the new device state. If the
> + * user provided data is found to be incomplete, inconsistent, or otherwise
> + * invalid, the migration driver must fail the SET_STATE ioctl and
> + * optionally go to the ERROR state as described below.
> + *
> + * While in STOP the device has the same behavior as other STOP states
> + * described above.
> + *
> + * To abort a RESUMING session the device must be reset.
> + *
> + * PRE_COPY -> RUNNING
> + * RUNNING_P2P -> RUNNING
> + * While in RUNNING the device is fully operational, the device may generate
> + * interrupts, DMA, respond to MMIO, all vfio device regions are functional,
> + * and the device may advance its internal state.
> + *
> + * The PRE_COPY arc will terminate a data transfer session.
> + *
> + * PRE_COPY_P2P -> RUNNING_P2P
> + * RUNNING -> RUNNING_P2P
> + * STOP -> RUNNING_P2P
> + * While in RUNNING_P2P the device is partially running in the P2P quiescent
> + * state defined below.
> + *
> + * The PRE_COPY_P2P arc will terminate a data transfer session.
> + *
> + * RUNNING -> PRE_COPY
> + * RUNNING_P2P -> PRE_COPY_P2P
> + * STOP -> STOP_COPY
> + * PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
> + * which share a data transfer session. Moving between these states alters
> + * what is streamed in session, but does not terminate or otherwise affect
> + * the associated fd.
> + *
> + * These arcs begin the process of saving the device state and will return a
> + * new data_fd. The migration driver may perform actions such as enabling
> + * dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
> + *
> + * Each arc does not change the device operation, the device remains
> + * RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
> + * in PRE_COPY_P2P -> STOP_COPY.
> + *
> + * PRE_COPY -> PRE_COPY_P2P
> + * Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
> + * However, while in the PRE_COPY_P2P state, the device is partially running
> + * in the P2P quiescent state defined below, like RUNNING_P2P.
> + *
> + * PRE_COPY_P2P -> PRE_COPY
> + * This arc allows returning the device to a full RUNNING behavior while
> + * continuing all the behaviors of PRE_COPY.
> + *
> + * PRE_COPY_P2P -> STOP_COPY
> + * While in the STOP_COPY state the device has the same behavior as STOP
> + * with the addition that the data transfers session continues to stream the
> + * migration state. End of stream on the FD indicates the entire device
> + * state has been transferred.
> + *
> + * The user should take steps to restrict access to vfio device regions while
> + * the device is in STOP_COPY or risk corruption of the device migration data
> + * stream.
> + *
> + * STOP -> RESUMING
> + * Entering the RESUMING state starts a process of restoring the device state
> + * and will return a new data_fd. The data stream fed into the data_fd should
> + * be taken from the data transfer output of a single FD during saving from
> + * a compatible device. The migration driver may alter/reset the internal
> + * device state for this arc if required to prepare the device to receive the
> + * migration data.
> + *
> + * STOP_COPY -> PRE_COPY
> + * STOP_COPY -> PRE_COPY_P2P
> + * These arcs are not permitted and return error if requested. Future
> + * revisions of this API may define behaviors for these arcs, in this case
> + * support will be discoverable by a new flag in
> + * VFIO_DEVICE_FEATURE_MIGRATION.
> + *
> + * any -> ERROR
> + * ERROR cannot be specified as a device state, however any transition request
> + * can be failed with an errno return and may then move the device_state into
> + * ERROR. In this case the device was unable to execute the requested arc and
> + * was also unable to restore the device to any valid device_state.
> + * To recover from ERROR VFIO_DEVICE_RESET must be used to return the
> + * device_state back to RUNNING.
> + *
> + * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
> + * state for the device for the purposes of managing multiple devices within a
> + * user context where peer-to-peer DMA between devices may be active. The
> + * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
> + * any new P2P DMA transactions. If the device can identify P2P transactions
> + * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
> + * driver must complete any such outstanding operations prior to completing the
> + * FSM arc into a P2P state. For the purpose of specification the states
> + * behave as though the device was fully running if not supported. As in
> + * STOP or STOP_COPY, the user must not touch the device, otherwise the state
> + * can be exited.
> + *
> + * The remaining possible transitions are interpreted as combinations of the
> + * above FSM arcs. As there are multiple paths through the FSM arcs the path
> + * should be selected based on the following rules:
> + * - Select the shortest path.
> + * - The path cannot have saving group states as interior arcs, only
> + * starting/end states.
> + * Refer to vfio_mig_get_next_state() for the result of the algorithm.
> + *
> + * The automatic transit through the FSM arcs that make up the combination
> + * transition is invisible to the user. When working with combination arcs the
> + * user may see any step along the path in the device_state if SET_STATE
> + * fails. When handling these types of errors users should anticipate future
> + * revisions of this protocol using new states and those states becoming
> + * visible in this case.
> + *
> + * The optional states cannot be used with SET_STATE if the device does not
> + * support them. The user can discover if these states are supported by using
> + * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
> + * avoid knowing about these optional states if the kernel driver supports them.
> + *
> + * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
> + * is not present.
> + */
> +enum vfio_device_mig_state {
> + VFIO_DEVICE_STATE_ERROR = 0,
> + VFIO_DEVICE_STATE_STOP = 1,
> + VFIO_DEVICE_STATE_RUNNING = 2,
> + VFIO_DEVICE_STATE_STOP_COPY = 3,
> + VFIO_DEVICE_STATE_RESUMING = 4,
> + VFIO_DEVICE_STATE_RUNNING_P2P = 5,
> + VFIO_DEVICE_STATE_PRE_COPY = 6,
> + VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
> + VFIO_DEVICE_STATE_NR,
> +};
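
Tying the FSM back to the feature ioctl: since the state structure travels in
the variable-length data[], a SET ends up as below (untested sketch, moving to
STOP_COPY and then draining the returned stream):

    __u8 buf[sizeof(struct vfio_device_feature) +
             sizeof(struct vfio_device_feature_mig_state)] = { 0 };
    struct vfio_device_feature *feature = (void *)buf;
    struct vfio_device_feature_mig_state *mig = (void *)feature->data;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_SET |
                     VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
    mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;

    if (!ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
        /* read() the migration stream from mig->data_fd until EOF */;
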
> +
> +/**
> + * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
> + *
> + * This ioctl is used on the migration data FD in the precopy phase of the
> + * migration data transfer. It returns an estimate of the current data sizes
> + * remaining to be transferred. It allows the user to judge when it is
> + * appropriate to leave PRE_COPY for STOP_COPY.
> + *
> + * This ioctl is valid only in PRE_COPY states and kernel driver should
> + * return -EINVAL from any other migration state.
> + *
> + * The vfio_precopy_info data structure returned by this ioctl provides
> + * estimates of data available from the device during the PRE_COPY states.
> + * This estimate is split into two categories, initial_bytes and
> + * dirty_bytes.
> + *
> + * The initial_bytes field indicates the amount of initial precopy
> + * data available from the device. This field should have a non-zero initial
> + * value and decrease as migration data is read from the device.
> + * It is recommended to leave PRE_COPY for STOP_COPY only after this field
> + * reaches zero. Leaving PRE_COPY earlier might make things slower.
> + *
> + * The dirty_bytes field tracks device state changes relative to data
> + * previously retrieved. This field starts at zero and may increase as
> + * the internal device state is modified or decrease as that modified
> + * state is read from the device.
> + *
> + * Userspace may use the combination of these fields to estimate the
> + * potential data size available during the PRE_COPY phases, as well as
> + * trends relative to the rate the device is dirtying its internal
> + * state, but these fields are not required to have any bearing relative
> + * to the data size available during the STOP_COPY phase.
> + *
> + * Drivers have a lot of flexibility in when and what they transfer during the
> + * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
> + *
> + * During pre-copy the migration data FD has a temporary "end of stream" that is
> + * reached when both initial_bytes and dirty_byte are zero. For instance, this
> + * may indicate that the device is idle and not currently dirtying any internal
> + * state. When read() is done on this temporary end of stream the kernel driver
> + * should return ENOMSG from read(). Userspace can wait for more data (which may
> + * never come) by using poll.
> + *
> + * Once in STOP_COPY the migration data FD has a permanent end of stream
> + * signaled in the usual way by read() always returning 0 and poll always
> + * returning readable. ENOMSG may not be returned in STOP_COPY.
> + * Support for this ioctl is mandatory if a driver claims to support
> + * VFIO_MIGRATION_PRE_COPY.
> + *
> + * Return: 0 on success, -1 and errno set on failure.
> + */
> +struct vfio_precopy_info {
> + __u32 argsz;
> + __u32 flags;
> + __aligned_u64 initial_bytes;
> + __aligned_u64 dirty_bytes;
> +};
> +
> +#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)
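
So the PRE_COPY phase driven by this ioctl boils down to a loop along these
lines (data_fd is the saving-session fd returned by the state change):

    struct vfio_precopy_info info = { .argsz = sizeof(info) };

    for (;;) {
        if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
            break;                      /* e.g. no longer in PRE_COPY */
        if (!info.initial_bytes && !info.dirty_bytes)
            break;                      /* good point to move to STOP_COPY */
        /* read() precopy data from data_fd; ENOMSG marks the temporary
         * end of stream, poll() to wait for more */
    }
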
> +
> +/*
> + * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
> + * state with the platform-based power management. Device use of lower power
> + * states depends on factors managed by the runtime power management core,
> + * including system level support and coordinating support among dependent
> + * devices. Enabling device low power entry does not guarantee lower power
> + * usage by the device, nor is a mechanism provided through this feature to
> + * know the current power state of the device. If any device access happens
> + * (either from the host or through the vfio uAPI) when the device is in the
> + * low power state, then the host will move the device out of the low power
> + * state as necessary prior to the access. Once the access is completed, the
> + * device may re-enter the low power state. For single shot low power support
> + * with wake-up notification, see
> + * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below. Access to mmap'd
> + * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
> + * calling LOW_POWER_EXIT.
> + */
> +#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
> +
> +/*
> + * This device feature has the same behavior as
> + * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
> + * provides an eventfd for wake-up notification. When the device moves out of
> + * the low power state for the wake-up, the host will not allow the device to
> + * re-enter a low power state without a subsequent user call to one of the low
> + * power entry device feature IOCTLs. Access to mmap'd device regions is
> + * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
> + * low power exit. The low power exit can happen either through LOW_POWER_EXIT
> + * or through any other access (where the wake-up notification has been
> + * generated). The access to mmap'd device regions will not trigger low power
> + * exit.
> + *
> + * The notification through the provided eventfd will be generated only when
> + * the device has entered and is resumed from a low power state after
> + * calling this device feature IOCTL. A device that has not entered low power
> + * state, as managed through the runtime power management core, will not
> + * generate a notification through the provided eventfd on access. Calling the
> + * LOW_POWER_EXIT feature is optional in the case where notification has been
> + * signaled on the provided eventfd that a resume from low power has occurred.
> + */
> +struct vfio_device_low_power_entry_with_wakeup {
> + __s32 wakeup_eventfd;
> + __u32 reserved;
> +};
> +
> +#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
> +
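
As an illustration, arming the wake-up eventfd might look like the sketch
below. struct vfio_device_feature and VFIO_DEVICE_FEATURE come from earlier
in this header; device_fd and enter_low_power are illustrative names:

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Arm a wake-up eventfd and request low power entry. */
static int enter_low_power(int device_fd, int *wakeup_fd)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_low_power_entry_with_wakeup)) /
		  sizeof(__u64)];
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_low_power_entry_with_wakeup *entry =
		(void *)feature->data;

	*wakeup_fd = eventfd(0, 0);
	if (*wakeup_fd < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
	entry->wakeup_eventfd = *wakeup_fd;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
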
> +/*
> + * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
> + * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
> + * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
> + * This device feature IOCTL may itself generate a wakeup eventfd notification
> + * in the latter case if the device had previously entered a low power state.
> + */
> +#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
> +
> +/*
> + * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
> + * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
> + * DMA logging.
> + *
> + * DMA logging allows a device to internally record what DMAs the device is
> + * initiating and report them back to userspace. It is part of the VFIO
> + * migration infrastructure that allows implementing dirty page tracking
> + * during the pre-copy phase of live migration. Only DMA WRITEs are logged,
> + * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
> + *
> + * When DMA logging is started a range of IOVAs to monitor is provided and the
> + * device can optimize its logging to cover only the IOVA range given. Each
> + * DMA that the device initiates inside the range will be logged by the device
> + * for later retrieval.
> + *
> + * page_size is an input that hints what tracking granularity the device
> + * should try to achieve. If the device cannot do the hinted page size then
> + * it is the driver's choice which page size to pick, based on what it
> + * supports. On output the device will return the page size it selected.
> + *
> + * ranges is a pointer to an array of
> + * struct vfio_device_feature_dma_logging_range.
> + *
> + * The core kernel code guarantees support for at least as many num_ranges
> + * as fit into a single kernel page. User space can try higher values but
> + * should fall back to that minimum if a driver cannot accommodate more.
> + *
> + * A single call to start device DMA logging can be issued and a matching stop
> + * should follow at the end. Another start is not allowed in the meantime.
> + */
> +struct vfio_device_feature_dma_logging_control {
> + __aligned_u64 page_size;
> + __u32 num_ranges;
> + __u32 __reserved;
> + __aligned_u64 ranges;
> +};
> +
> +struct vfio_device_feature_dma_logging_range {
> + __aligned_u64 iova;
> + __aligned_u64 length;
> +};
> +
> +#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
> +
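
A sketch of starting the logging as described, assuming the caller tracks a
single range and accepts whatever page size the driver picks (not part of
the patch; function and parameter names are illustrative):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Start device dirty tracking over a single IOVA range. */
static int dma_logging_start(int device_fd, __u64 iova, __u64 length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_dma_logging_control)) /
		  sizeof(__u64)];
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_dma_logging_control *ctrl =
		(void *)feature->data;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	ctrl->page_size = 4096;	/* a hint; the driver may pick another */
	ctrl->num_ranges = 1;
	ctrl->ranges = (__u64)(uintptr_t)&range;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
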
> +/*
> + * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
> + * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
> + */
> +#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
> +
> +/*
> + * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
> + *
> + * Query the device's DMA log for written pages within the given IOVA range.
> + * During the query, the log is cleared for the IOVA range.
> + *
> + * bitmap is a pointer to an array of u64s that will hold the output bitmap
> + * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
> + * is given by:
> + * bitmap[(addr - iova)/page_size] & (1ULL << (addr % 64))
> + *
> + * The input page_size can be any power of two value and does not have to
> + * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
> + * will format its internal logging to match the reporting page size, possibly
> + * by replicating bits if the internal page size is lower than requested.
> + *
> + * The LOGGING_REPORT will only set bits in the bitmap and never clear or
> + * perform any initialization of the user provided bitmap.
> + *
> + * If any error is returned userspace should assume that the dirty log is
> + * corrupted. Error recovery is to consider all memory dirty and try to
> + * restart the dirty tracking, or to abort/restart the whole migration.
> + *
> + * If DMA logging is not enabled, an error will be returned.
> + *
> + */
> +struct vfio_device_feature_dma_logging_report {
> + __aligned_u64 iova;
> + __aligned_u64 length;
> + __aligned_u64 page_size;
> + __aligned_u64 bitmap;
> +};
> +
> +#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
> +
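
And the matching report query might look like this sketch (the caller owns
the zeroed bitmap allocation; names are illustrative):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Read (and clear) the device dirty log for [iova, iova + length).
 * The caller passes a zeroed bitmap, one bit per page_size unit of IOVA. */
static int dma_logging_report(int device_fd, __u64 iova, __u64 length,
			      __u64 page_size, __u64 *bitmap)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_dma_logging_report)) /
		  sizeof(__u64)];
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_dma_logging_report *report =
		(void *)feature->data;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
	report->iova = iova;
	report->length = length;
	report->page_size = page_size;
	report->bitmap = (__u64)(uintptr_t)bitmap;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
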
> +/*
> + * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
> + * be required to complete stop copy.
> + *
> + * Note: Can be called in any device state.
> + */
> +
> +struct vfio_device_feature_mig_data_size {
> + __aligned_u64 stop_copy_length;
> +};
> +
> +#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
> +
> +/**
> + * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
> + * based on the operation specified in op flag.
> + *
> + * The functionality is incorporated for devices that need bus master control,
> + * but whose in-band device interface lacks such support. Consequently, it is not
> + * applicable to PCI devices, as bus master control for PCI devices is managed
> + * in-band through the configuration space. At present, this feature is supported
> + * only for CDX devices.
> + * When the device's BUS MASTER setting is configured as CLEAR, it will result in
> + * blocking all incoming DMA requests from the device. On the other hand, configuring
> + * the device's BUS MASTER setting as SET (enable) will grant the device the
> + * capability to perform DMA to the host memory.
> + */
> +struct vfio_device_feature_bus_master {
> + __u32 op;
> +#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */
> +#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */
> +};
> +#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
> +
> /* -------- API for Type1 VFIO IOMMU -------- */
>
> /**
> @@ -516,7 +1472,70 @@ struct vfio_iommu_type1_info {
> __u32 argsz;
> __u32 flags;
> #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
> - __u64 iova_pgsizes; /* Bitmap of supported page sizes */
> +#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
> + __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */
> + __u32 cap_offset; /* Offset within info struct of first cap */
> + __u32 pad;
> +};
> +
> +/*
> + * The IOVA capability allows reporting the valid IOVA range(s)
> + * excluding any non-relaxable reserved regions exposed by
> + * devices attached to the container. Any DMA map attempt
> + * outside the valid iova range will return error.
> + *
> + * The structures below define version 1 of this capability.
> + */
> +#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE 1
> +
> +struct vfio_iova_range {
> + __u64 start;
> + __u64 end;
> +};
> +
> +struct vfio_iommu_type1_info_cap_iova_range {
> + struct vfio_info_cap_header header;
> + __u32 nr_iovas;
> + __u32 reserved;
> + struct vfio_iova_range iova_ranges[];
> +};
> +
> +/*
> + * The migration capability allows reporting the supported features for migration.
> + *
> + * The structures below define version 1 of this capability.
> + *
> + * The existence of this capability indicates that IOMMU kernel driver supports
> + * dirty page logging.
> + *
> + * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
> + * page logging.
> + * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
> + * size in bytes that can be used by user applications when getting the dirty
> + * bitmap.
> + */
> +#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 2
> +
> +struct vfio_iommu_type1_info_cap_migration {
> + struct vfio_info_cap_header header;
> + __u32 flags;
> + __u64 pgsize_bitmap;
> + __u64 max_dirty_bitmap_size; /* in bytes */
> +};
> +
> +/*
> + * The DMA available capability allows reporting the current number of
> + * simultaneously outstanding DMA mappings that are allowed.
> + *
> + * The structure below defines version 1 of this capability.
> + *
> + * avail: specifies the current number of outstanding DMA mappings allowed.
> + */
> +#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
> +
> +struct vfio_iommu_type1_info_dma_avail {
> + struct vfio_info_cap_header header;
> + __u32 avail;
> };
>
> #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
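
The cap_offset chaining is easier to see in code. A sketch of locating a
capability, assuming the caller has already done the usual two-call dance
(query argsz with a small buffer, reallocate, query again); this is not from
the series, just an illustration:

#include <linux/vfio.h>

/* Walk the VFIO_IOMMU_GET_INFO capability chain for a capability id.
 * vfio_info_cap_header is declared earlier in this header. */
static struct vfio_info_cap_header *
iommu_info_cap(struct vfio_iommu_type1_info *info, __u16 id)
{
	struct vfio_info_cap_header *hdr;
	__u32 off;

	if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
		return NULL;

	for (off = info->cap_offset; off; off = hdr->next) {
		hdr = (struct vfio_info_cap_header *)((char *)info + off);
		if (hdr->id == id)
			return hdr;
	}
	return NULL;
}
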
> @@ -526,12 +1545,21 @@ struct vfio_iommu_type1_info {
> *
> * Map process virtual addresses to IO virtual addresses using the
> * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
> + *
> + * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
> + * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To
> + * maintain memory consistency within the user application, the updated vaddr
> + * must address the same memory object as originally mapped. Failure to do so
> + * will result in user memory corruption and/or device misbehavior. iova and
> + * size must match those in the original MAP_DMA call. Protection is not
> + * changed, and the READ & WRITE flags must be 0.
> */
> struct vfio_iommu_type1_dma_map {
> __u32 argsz;
> __u32 flags;
> #define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
> #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
> +#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
> __u64 vaddr; /* Process virtual address */
> __u64 iova; /* IO virtual address */
> __u64 size; /* Size of mapping (bytes) */
> @@ -539,6 +1567,12 @@ struct vfio_iommu_type1_dma_map {
>
> #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
>
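
For the VADDR update flow described above, a sketch (illustrative function
name; container_fd is an open VFIO container fd):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Re-point an invalidated mapping at a new vaddr for the same memory
 * object. iova/size must match the original MAP_DMA call and the old
 * vaddr must already have been invalidated via VFIO_DMA_UNMAP_FLAG_VADDR. */
static int remap_vaddr(int container_fd, void *new_vaddr,
		       __u64 iova, __u64 size)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_VADDR, /* READ/WRITE must be 0 */
		.vaddr = (__u64)(uintptr_t)new_vaddr,
		.iova = iova,
		.size = size,
	};

	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}
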
> +struct vfio_bitmap {
> + __u64 pgsize; /* page size for bitmap in bytes */
> + __u64 size; /* in bytes */
> + __u64 __user *data; /* one bit per page */
> +};
> +
> /**
> * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
> * struct vfio_dma_unmap)
> @@ -548,12 +1582,34 @@ struct vfio_iommu_type1_dma_map {
> * field. No guarantee is made to the user that arbitrary unmaps of iova
> * or size different from those used in the original mapping call will
> * succeed.
> + *
> + * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
> + * before unmapping IO virtual addresses. When this flag is set, the user must
> + * provide a struct vfio_bitmap in data[]. The user must provide zeroed
> + * memory via vfio_bitmap.data and its size in the vfio_bitmap.size field.
> + * A bit in the bitmap represents one page, of the user-provided page size in
> + * the vfio_bitmap.pgsize field, consecutively starting from the iova offset.
> + * A set bit indicates that the page at that offset from iova is dirty. A
> + * bitmap of the pages in the range of the unmapped size is returned in the
> + * user-provided vfio_bitmap.data.
> + *
> + * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses. iova and size
> + * must be 0. This cannot be combined with the get-dirty-bitmap flag.
> + *
> + * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
> + * virtual addresses in the iova range. DMA to already-mapped pages continues.
> + * Groups may not be added to the container while any addresses are invalid.
> + * This cannot be combined with the get-dirty-bitmap flag.
> */
> struct vfio_iommu_type1_dma_unmap {
> __u32 argsz;
> __u32 flags;
> +#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
> +#define VFIO_DMA_UNMAP_FLAG_ALL (1 << 1)
> +#define VFIO_DMA_UNMAP_FLAG_VADDR (1 << 2)
> __u64 iova; /* IO virtual address */
> __u64 size; /* Size of mapping (bytes) */
> + __u8 data[];
> };
>
> #define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
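
A sketch of an unmap that also harvests the dirty bitmap, following the
argsz/data[] layout described above (illustrative; error handling trimmed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Unmap [iova, iova + size) and collect the dirty bitmap in one call.
 * The caller passes a zeroed, u64-aligned bitmap, one bit per pgsize. */
static int unmap_get_dirty(int container_fd, __u64 iova, __u64 size,
			   __u64 pgsize, __u64 *bitmap)
{
	__u64 buf[(sizeof(struct vfio_iommu_type1_dma_unmap) +
		   sizeof(struct vfio_bitmap)) / sizeof(__u64)];
	struct vfio_iommu_type1_dma_unmap *unmap = (void *)buf;
	struct vfio_bitmap *vbmp = (void *)unmap->data;

	memset(buf, 0, sizeof(buf));
	unmap->argsz = sizeof(buf);
	unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
	unmap->iova = iova;
	unmap->size = size;
	vbmp->pgsize = pgsize;
	vbmp->size = (((size / pgsize) + 63) / 64) * 8; /* bytes, u64-aligned */
	vbmp->data = bitmap;

	return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap);
}
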
> @@ -565,6 +1621,57 @@ struct vfio_iommu_type1_dma_unmap {
> #define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
> #define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)
>
> +/**
> + * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
> + * struct vfio_iommu_type1_dirty_bitmap)
> + * This IOCTL is used for dirty page logging.
> + * The caller should set a flag depending on which operation to perform, as
> + * detailed below:
> + *
> + * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set, instructs
> + * the IOMMU driver to log pages that are dirtied or potentially dirtied by
> + * the device; designed to be used when a migration is in progress. Dirty pages
> + * are logged until logging is disabled by user application by calling the IOCTL
> + * with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
> + *
> + * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set, instructs
> + * the IOMMU driver to stop logging dirtied pages.
> + *
> + * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
> + * returns the dirty pages bitmap for IOMMU container for a given IOVA range.
> + * The user must specify the IOVA range and the pgsize through the structure
> + * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
> + * supports getting a bitmap of the smallest supported pgsize only and can be
> + * modified in future to get a bitmap of any specified supported pgsize. The
> + * user must provide a zeroed memory area for the bitmap memory and specify its
> + * size in bitmap.size. One bit is used to represent one page consecutively
> + * starting from iova offset. The user should provide page size in bitmap.pgsize
> + * field. A bit set in the bitmap indicates that the page at that offset from
> + * iova is dirty. The caller must set argsz to a value including the size of
> + * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the
> + * actual bitmap. If dirty pages logging is not enabled, an error will be
> + * returned.
> + *
> + * Only one of the flags _START, _STOP and _GET may be specified at a time.
> + *
> + */
> +struct vfio_iommu_type1_dirty_bitmap {
> + __u32 argsz;
> + __u32 flags;
> +#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
> +#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
> +#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
> + __u8 data[];
> +};
> +
> +struct vfio_iommu_type1_dirty_bitmap_get {
> + __u64 iova; /* IO virtual address */
> + __u64 size; /* Size of iova range */
> + struct vfio_bitmap bitmap;
> +};
> +
> +#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17)
> +
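
Putting the three flags together, one tracking round might look like this
sketch (illustrative names; error handling trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* One round of type1 dirty tracking: start, query one range, stop. */
static int dirty_pages_round(int container_fd, __u64 iova, __u64 size,
			     __u64 pgsize, __u64 *bitmap /* zeroed */)
{
	struct vfio_iommu_type1_dirty_bitmap ctl = {
		.argsz = sizeof(ctl),
		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
	};
	__u64 buf[(sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		   sizeof(struct vfio_iommu_type1_dirty_bitmap_get)) /
		  sizeof(__u64)];
	struct vfio_iommu_type1_dirty_bitmap *get = (void *)buf;
	struct vfio_iommu_type1_dirty_bitmap_get *range = (void *)get->data;

	if (ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &ctl))
		return -1;

	memset(buf, 0, sizeof(buf));
	get->argsz = sizeof(buf);	/* excludes the bitmap itself */
	get->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	range->iova = iova;
	range->size = size;
	range->bitmap.pgsize = pgsize;
	range->bitmap.size = (((size / pgsize) + 63) / 64) * 8;
	range->bitmap.data = bitmap;

	if (ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, get))
		return -1;

	ctl.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
	return ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &ctl);
}
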
> /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
>
> /*
> @@ -716,4 +1823,4 @@ struct vfio_iommu_spapr_tce_remove {
>
> /* ***************************************************************** */
>
> -#endif /* VFIO_H */
> +#endif /* _UAPIVFIO_H */
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 05/10] vfio: Add dma map/unmap handlers
2025-05-25 7:49 ` [RFC PATCH kvmtool 05/10] vfio: Add dma map/unmap handlers Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:25 ` Mostafa Saleh
2025-07-29 5:03 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:25 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:11PM +0530, Aneesh Kumar K.V (Arm) wrote:
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> include/kvm/vfio.h | 4 ++--
> vfio/core.c | 7 +++++--
> vfio/legacy.c | 7 +++++--
> 3 files changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
> index 67a528f18d33..fed692b0f265 100644
> --- a/include/kvm/vfio.h
> +++ b/include/kvm/vfio.h
> @@ -126,8 +126,8 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region);
> int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *device);
> void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
>
> -int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
> -int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size);
> +extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
> +extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
>
> struct kvm_mem_bank;
> int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
> diff --git a/vfio/core.c b/vfio/core.c
> index 2af30df3b2b9..32a8e0fe67c0 100644
> --- a/vfio/core.c
> +++ b/vfio/core.c
> @@ -10,6 +10,9 @@ int kvm_vfio_device;
> LIST_HEAD(vfio_groups);
> struct vfio_device *vfio_devices;
>
> +int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
> +int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
I think it's better to wrap those in an ops struct; it can be set once, and
in the next patches it can be used for init/exit instead of having checks like:
“if (kvm->cfg.iommufd || kvm->cfg.iommufd_vdevice)”
Thanks,
Mostafa
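
For concreteness, the suggested ops struct might look roughly like this (a
sketch of the reviewer's idea, not code from the series; the names
iommufd_dma_ops and legacy_vfio_dma_ops are illustrative):

struct vfio_dma_ops {
	int (*init)(struct kvm *kvm);
	int (*map_mem_range)(struct kvm *kvm, __u64 host_addr,
			     __u64 iova, __u64 size);
	int (*unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
	void (*exit)(struct kvm *kvm);
};

/* picked once during vfio__init(), e.g.:
 *	ops = kvm->cfg.iommufd ? &iommufd_dma_ops : &legacy_vfio_dma_ops;
 * after which callers use ops->map_mem_range() with no mode checks. */
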
> +
> static int vfio_device_pci_parser(const struct option *opt, char *arg,
> struct vfio_device_params *dev)
> {
> @@ -281,12 +284,12 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
>
> int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> {
> - return vfio_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
> + return dma_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
> }
>
> int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> {
> - return vfio_unmap_mem_range(kvm, bank->guest_phys_addr, bank->size);
> + return dma_unmap_mem_range(kvm, bank->guest_phys_addr, bank->size);
> }
>
> int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group)
> diff --git a/vfio/legacy.c b/vfio/legacy.c
> index 92d6d0bd5c80..5b35d6ebff69 100644
> --- a/vfio/legacy.c
> +++ b/vfio/legacy.c
> @@ -89,7 +89,7 @@ static int vfio_get_iommu_type(void)
> return -ENODEV;
> }
>
> -int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
> +static int legacy_vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
> {
> int ret = 0;
> struct vfio_iommu_type1_dma_map dma_map = {
> @@ -110,7 +110,7 @@ int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
> return ret;
> }
>
> -int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
> +static int legacy_vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
> {
> struct vfio_iommu_type1_dma_unmap dma_unmap = {
> .argsz = sizeof(dma_unmap),
> @@ -325,6 +325,9 @@ int legacy_vfio__init(struct kvm *kvm)
> {
> int ret;
>
> + dma_map_mem_range = legacy_vfio_map_mem_range;
> + dma_unmap_mem_range = legacy_vfio_unmap_mem_range;
> +
> ret = legacy_vfio_container_init(kvm);
> if (ret)
> return ret;
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 06/10] vfio/iommufd: Import iommufd header from kernel
2025-05-25 7:49 ` [RFC PATCH kvmtool 06/10] vfio/iommufd: Import iommufd header from kernel Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:25 ` Mostafa Saleh
0 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:25 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:12PM +0530, Aneesh Kumar K.V (Arm) wrote:
> sync with include/uapi/linux/iommufd.h from v6.14
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
> ---
> include/linux/iommufd.h | 1017 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 1017 insertions(+)
> create mode 100644 include/linux/iommufd.h
>
> diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
> new file mode 100644
> index 000000000000..78747b24bd0f
> --- /dev/null
> +++ b/include/linux/iommufd.h
> @@ -0,0 +1,1017 @@
> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
> + */
> +#ifndef _UAPI_IOMMUFD_H
> +#define _UAPI_IOMMUFD_H
> +
> +#include <linux/ioctl.h>
> +#include <linux/types.h>
> +
> +#define IOMMUFD_TYPE (';')
> +
> +/**
> + * DOC: General ioctl format
> + *
> + * The ioctl interface follows a general format to allow for extensibility. Each
> + * ioctl is passed in a structure pointer as the argument providing the size of
> + * the structure in the first u32. The kernel checks that any structure space
> + * beyond what it understands is 0. This allows userspace to use the backward
> + * compatible portion while consistently using the newer, larger, structures.
> + *
> + * ioctls use a standard meaning for common errnos:
> + *
> + * - ENOTTY: The IOCTL number itself is not supported at all
> + * - E2BIG: The IOCTL number is supported, but the provided structure has
> + * a non-zero value in a part the kernel does not understand.
> + * - EOPNOTSUPP: The IOCTL number is supported, and the structure is
> + * understood, however a known field has a value the kernel does not
> + * understand or support.
> + * - EINVAL: Everything about the IOCTL was understood, but a field is not
> + * correct.
> + * - ENOENT: An ID or IOVA provided does not exist.
> + * - ENOMEM: Out of memory.
> + * - EOVERFLOW: Mathematics overflowed.
> + *
> + * As well as additional errnos, within specific ioctls.
> + */
> +enum {
> + IOMMUFD_CMD_BASE = 0x80,
> + IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE,
> + IOMMUFD_CMD_IOAS_ALLOC = 0x81,
> + IOMMUFD_CMD_IOAS_ALLOW_IOVAS = 0x82,
> + IOMMUFD_CMD_IOAS_COPY = 0x83,
> + IOMMUFD_CMD_IOAS_IOVA_RANGES = 0x84,
> + IOMMUFD_CMD_IOAS_MAP = 0x85,
> + IOMMUFD_CMD_IOAS_UNMAP = 0x86,
> + IOMMUFD_CMD_OPTION = 0x87,
> + IOMMUFD_CMD_VFIO_IOAS = 0x88,
> + IOMMUFD_CMD_HWPT_ALLOC = 0x89,
> + IOMMUFD_CMD_GET_HW_INFO = 0x8a,
> + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING = 0x8b,
> + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP = 0x8c,
> + IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d,
> + IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e,
> + IOMMUFD_CMD_IOAS_MAP_FILE = 0x8f,
> + IOMMUFD_CMD_VIOMMU_ALLOC = 0x90,
> + IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
> + IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
> +};
> +
> +/**
> + * struct iommu_destroy - ioctl(IOMMU_DESTROY)
> + * @size: sizeof(struct iommu_destroy)
> + * @id: iommufd object ID to destroy. Can be any destroyable object type.
> + *
> + * Destroy any object held within iommufd.
> + */
> +struct iommu_destroy {
> + __u32 size;
> + __u32 id;
> +};
> +#define IOMMU_DESTROY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_DESTROY)
> +
> +/**
> + * struct iommu_ioas_alloc - ioctl(IOMMU_IOAS_ALLOC)
> + * @size: sizeof(struct iommu_ioas_alloc)
> + * @flags: Must be 0
> + * @out_ioas_id: Output IOAS ID for the allocated object
> + *
> + * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA)
> + * to memory mapping.
> + */
> +struct iommu_ioas_alloc {
> + __u32 size;
> + __u32 flags;
> + __u32 out_ioas_id;
> +};
> +#define IOMMU_IOAS_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOC)
> +
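
For illustration, the minimal open-and-allocate sequence on /dev/iommu might
be (a sketch; error paths leave the fd for the caller to close):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Open iommufd and allocate an empty IOAS; returns the IOAS ID or -1. */
static int ioas_alloc(int *iommufd)
{
	struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };

	*iommufd = open("/dev/iommu", O_RDWR);
	if (*iommufd < 0)
		return -1;

	if (ioctl(*iommufd, IOMMU_IOAS_ALLOC, &alloc))
		return -1;

	return alloc.out_ioas_id;
}
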
> +/**
> + * struct iommu_iova_range - ioctl(IOMMU_IOVA_RANGE)
> + * @start: First IOVA
> + * @last: Inclusive last IOVA
> + *
> + * An interval in IOVA space.
> + */
> +struct iommu_iova_range {
> + __aligned_u64 start;
> + __aligned_u64 last;
> +};
> +
> +/**
> + * struct iommu_ioas_iova_ranges - ioctl(IOMMU_IOAS_IOVA_RANGES)
> + * @size: sizeof(struct iommu_ioas_iova_ranges)
> + * @ioas_id: IOAS ID to read ranges from
> + * @num_iovas: Input/Output total number of ranges in the IOAS
> + * @__reserved: Must be 0
> + * @allowed_iovas: Pointer to the output array of struct iommu_iova_range
> + * @out_iova_alignment: Minimum alignment required for mapping IOVA
> + *
> + * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
> + * is not allowed. num_iovas will be set to the total number of iovas and
> + * the allowed_iovas[] will be filled in as space permits.
> + *
> + * The allowed ranges are dependent on the HW path the DMA operation takes, and
> + * can change during the lifetime of the IOAS. A fresh empty IOAS will have a
> + * full range, and each attached device will narrow the ranges based on that
> + * device's HW restrictions. Detaching a device can widen the ranges. Userspace
> + * should query ranges after every attach/detach to know what IOVAs are valid
> + * for mapping.
> + *
> + * On input num_iovas is the length of the allowed_iovas array. On output it is
> + * the total number of iovas filled in. The ioctl will return -EMSGSIZE and set
> + * num_iovas to the required value if num_iovas is too small. In this case the
> + * caller should allocate a larger output array and re-issue the ioctl.
> + *
> + * out_iova_alignment returns the minimum IOVA alignment that can be given
> + * to IOMMU_IOAS_MAP/COPY. IOVA's must satisfy::
> + *
> + * starting_iova % out_iova_alignment == 0
> + * (starting_iova + length) % out_iova_alignment == 0
> + *
> + * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot
> + * be higher than the system PAGE_SIZE.
> + */
> +struct iommu_ioas_iova_ranges {
> + __u32 size;
> + __u32 ioas_id;
> + __u32 num_iovas;
> + __u32 __reserved;
> + __aligned_u64 allowed_iovas;
> + __aligned_u64 out_iova_alignment;
> +};
> +#define IOMMU_IOAS_IOVA_RANGES _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_IOVA_RANGES)
> +
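
The -EMSGSIZE contract above leads naturally to a two-pass query, sketched
here (a retry loop for the case where the ranges change between the two
calls is omitted; names are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Query allowed IOVA ranges; on success the caller frees *out. */
static int get_iova_ranges(int iommufd, __u32 ioas_id,
			   struct iommu_iova_range **out, __u32 *nr)
{
	struct iommu_ioas_iova_ranges cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
	};
	struct iommu_iova_range *ranges;

	/* First pass with num_iovas == 0 just learns the required count. */
	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd) && errno != EMSGSIZE)
		return -1;

	ranges = calloc(cmd.num_iovas, sizeof(*ranges));
	if (cmd.num_iovas && !ranges)
		return -1;

	cmd.allowed_iovas = (__u64)(uintptr_t)ranges;
	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd)) {
		free(ranges);
		return -1;
	}

	*out = ranges;
	*nr = cmd.num_iovas;
	return 0;
}
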
> +/**
> + * struct iommu_ioas_allow_iovas - ioctl(IOMMU_IOAS_ALLOW_IOVAS)
> + * @size: sizeof(struct iommu_ioas_allow_iovas)
> + * @ioas_id: IOAS ID to allow IOVAs from
> + * @num_iovas: Input/Output total number of ranges in the IOAS
> + * @__reserved: Must be 0
> + * @allowed_iovas: Pointer to array of struct iommu_iova_range
> + *
> + * Ensure a range of IOVAs are always available for allocation. If this call
> + * succeeds then IOMMU_IOAS_IOVA_RANGES will never return a list of IOVA ranges
> + * that are narrower than the ranges provided here. This call will fail if
> + * IOMMU_IOAS_IOVA_RANGES is currently narrower than the given ranges.
> + *
> + * When an IOAS is first created the IOVA_RANGES will be maximally sized, and as
> + * devices are attached the IOVA will narrow based on the device restrictions.
> + * When an allowed range is specified any narrowing will be refused, i.e. device
> + * attachment can fail if the device requires limiting within the allowed range.
> + *
> + * Automatic IOVA allocation is also impacted by this call. MAP will only
> + * allocate within the allowed IOVAs if they are present.
> + *
> + * This call replaces the entire allowed list with the given list.
> + */
> +struct iommu_ioas_allow_iovas {
> + __u32 size;
> + __u32 ioas_id;
> + __u32 num_iovas;
> + __u32 __reserved;
> + __aligned_u64 allowed_iovas;
> +};
> +#define IOMMU_IOAS_ALLOW_IOVAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOW_IOVAS)
> +
> +/**
> + * enum iommufd_ioas_map_flags - Flags for map and copy
> + * @IOMMU_IOAS_MAP_FIXED_IOVA: If clear the kernel will compute an appropriate
> + * IOVA to place the mapping at
> + * @IOMMU_IOAS_MAP_WRITEABLE: DMA is allowed to write to this mapping
> + * @IOMMU_IOAS_MAP_READABLE: DMA is allowed to read from this mapping
> + */
> +enum iommufd_ioas_map_flags {
> + IOMMU_IOAS_MAP_FIXED_IOVA = 1 << 0,
> + IOMMU_IOAS_MAP_WRITEABLE = 1 << 1,
> + IOMMU_IOAS_MAP_READABLE = 1 << 2,
> +};
> +
> +/**
> + * struct iommu_ioas_map - ioctl(IOMMU_IOAS_MAP)
> + * @size: sizeof(struct iommu_ioas_map)
> + * @flags: Combination of enum iommufd_ioas_map_flags
> + * @ioas_id: IOAS ID to change the mapping of
> + * @__reserved: Must be 0
> + * @user_va: Userspace pointer to start mapping from
> + * @length: Number of bytes to map
> + * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set
> + * then this must be provided as input.
> + *
> + * Set an IOVA mapping from a user pointer. If FIXED_IOVA is specified then the
> + * mapping will be established at iova, otherwise a suitable location based on
> + * the reserved and allowed lists will be automatically selected and returned in
> + * iova.
> + *
> + * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must currently
> + * be unused, existing IOVA cannot be replaced.
> + */
> +struct iommu_ioas_map {
> + __u32 size;
> + __u32 flags;
> + __u32 ioas_id;
> + __u32 __reserved;
> + __aligned_u64 user_va;
> + __aligned_u64 length;
> + __aligned_u64 iova;
> +};
> +#define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP)
> +
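
A sketch of a fixed-IOVA map call, roughly what a VMM would do for each
guest memory bank (illustrative names):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Map length bytes of process memory at a fixed IOVA. */
static int ioas_map_fixed(int iommufd, __u32 ioas_id, void *va,
			  __u64 iova, __u64 length)
{
	struct iommu_ioas_map map = {
		.size = sizeof(map),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA |
			 IOMMU_IOAS_MAP_READABLE |
			 IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.user_va = (__u64)(uintptr_t)va,
		.length = length,
		.iova = iova,
	};

	return ioctl(iommufd, IOMMU_IOAS_MAP, &map);
}
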
> +/**
> + * struct iommu_ioas_map_file - ioctl(IOMMU_IOAS_MAP_FILE)
> + * @size: sizeof(struct iommu_ioas_map_file)
> + * @flags: same as for iommu_ioas_map
> + * @ioas_id: same as for iommu_ioas_map
> + * @fd: the memfd to map
> + * @start: byte offset from start of file to map from
> + * @length: same as for iommu_ioas_map
> + * @iova: same as for iommu_ioas_map
> + *
> + * Set an IOVA mapping from a memfd file. All other arguments and semantics
> + * match those of IOMMU_IOAS_MAP.
> + */
> +struct iommu_ioas_map_file {
> + __u32 size;
> + __u32 flags;
> + __u32 ioas_id;
> + __s32 fd;
> + __aligned_u64 start;
> + __aligned_u64 length;
> + __aligned_u64 iova;
> +};
> +#define IOMMU_IOAS_MAP_FILE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP_FILE)
> +
> +/**
> + * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY)
> + * @size: sizeof(struct iommu_ioas_copy)
> + * @flags: Combination of enum iommufd_ioas_map_flags
> + * @dst_ioas_id: IOAS ID to change the mapping of
> + * @src_ioas_id: IOAS ID to copy from
> + * @length: Number of bytes to copy and map
> + * @dst_iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is
> + * set then this must be provided as input.
> + * @src_iova: IOVA to start the copy
> + *
> + * Copy an already existing mapping from src_ioas_id and establish it in
> + * dst_ioas_id. The src iova/length must exactly match a range used with
> + * IOMMU_IOAS_MAP.
> + *
> + * This may be used to efficiently clone a subset of an IOAS to another, or as a
> + * kind of 'cache' to speed up mapping. Copy has an efficiency advantage over
> + * establishing equivalent new mappings, as internal resources are shared, and
> + * the kernel will pin the user memory only once.
> + */
> +struct iommu_ioas_copy {
> + __u32 size;
> + __u32 flags;
> + __u32 dst_ioas_id;
> + __u32 src_ioas_id;
> + __aligned_u64 length;
> + __aligned_u64 dst_iova;
> + __aligned_u64 src_iova;
> +};
> +#define IOMMU_IOAS_COPY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_COPY)
> +
> +/**
> + * struct iommu_ioas_unmap - ioctl(IOMMU_IOAS_UNMAP)
> + * @size: sizeof(struct iommu_ioas_unmap)
> + * @ioas_id: IOAS ID to change the mapping of
> + * @iova: IOVA to start the unmapping at
> + * @length: Number of bytes to unmap, and return back the bytes unmapped
> + *
> + * Unmap an IOVA range. The iova/length must be a superset of a previously
> + * mapped range used with IOMMU_IOAS_MAP or IOMMU_IOAS_COPY. Splitting or
> + * truncating ranges is not allowed. The values 0 to U64_MAX will unmap
> + * everything.
> + */
> +struct iommu_ioas_unmap {
> + __u32 size;
> + __u32 ioas_id;
> + __aligned_u64 iova;
> + __aligned_u64 length;
> +};
> +#define IOMMU_IOAS_UNMAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_UNMAP)
> +
> +/**
> + * enum iommufd_option - ioctl(IOMMU_OPTION_RLIMIT_MODE) and
> + * ioctl(IOMMU_OPTION_HUGE_PAGES)
> + * @IOMMU_OPTION_RLIMIT_MODE:
> + * Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
> + * to invoke this. Value 0 (default) is user based accounting, 1 uses process
> + * based accounting. Global option, object_id must be 0
> + * @IOMMU_OPTION_HUGE_PAGES:
> + * Value 1 (default) allows contiguous pages to be combined when generating
> + * iommu mappings. Value 0 disables combining, everything is mapped to
> + * PAGE_SIZE. This can be useful for benchmarking. This is a per-IOAS
> + * option, the object_id must be the IOAS ID.
> + */
> +enum iommufd_option {
> + IOMMU_OPTION_RLIMIT_MODE = 0,
> + IOMMU_OPTION_HUGE_PAGES = 1,
> +};
> +
> +/**
> + * enum iommufd_option_ops - ioctl(IOMMU_OPTION_OP_SET) and
> + * ioctl(IOMMU_OPTION_OP_GET)
> + * @IOMMU_OPTION_OP_SET: Set the option's value
> + * @IOMMU_OPTION_OP_GET: Get the option's value
> + */
> +enum iommufd_option_ops {
> + IOMMU_OPTION_OP_SET = 0,
> + IOMMU_OPTION_OP_GET = 1,
> +};
> +
> +/**
> + * struct iommu_option - iommu option multiplexer
> + * @size: sizeof(struct iommu_option)
> + * @option_id: One of enum iommufd_option
> + * @op: One of enum iommufd_option_ops
> + * @__reserved: Must be 0
> + * @object_id: ID of the object if required
> + * @val64: Option value to set or value returned on get
> + *
> + * Change a simple option value. This multiplexor allows controlling options
> + * on objects. IOMMU_OPTION_OP_SET will load an option and IOMMU_OPTION_OP_GET
> + * will return the current value.
> + */
> +struct iommu_option {
> + __u32 size;
> + __u32 option_id;
> + __u16 op;
> + __u16 __reserved;
> + __u32 object_id;
> + __aligned_u64 val64;
> +};
> +#define IOMMU_OPTION _IO(IOMMUFD_TYPE, IOMMUFD_CMD_OPTION)
> +
> +/**
> + * enum iommufd_vfio_ioas_op - IOMMU_VFIO_IOAS_* ioctls
> + * @IOMMU_VFIO_IOAS_GET: Get the current compatibility IOAS
> + * @IOMMU_VFIO_IOAS_SET: Change the current compatibility IOAS
> + * @IOMMU_VFIO_IOAS_CLEAR: Disable VFIO compatibility
> + */
> +enum iommufd_vfio_ioas_op {
> + IOMMU_VFIO_IOAS_GET = 0,
> + IOMMU_VFIO_IOAS_SET = 1,
> + IOMMU_VFIO_IOAS_CLEAR = 2,
> +};
> +
> +/**
> + * struct iommu_vfio_ioas - ioctl(IOMMU_VFIO_IOAS)
> + * @size: sizeof(struct iommu_vfio_ioas)
> + * @ioas_id: For IOMMU_VFIO_IOAS_SET the input IOAS ID to set
> + * For IOMMU_VFIO_IOAS_GET will output the IOAS ID
> + * @op: One of enum iommufd_vfio_ioas_op
> + * @__reserved: Must be 0
> + *
> + * The VFIO compatibility support uses a single ioas because VFIO APIs do not
> + * support the ID field. Set or Get the IOAS that VFIO compatibility will use.
> + * When VFIO_GROUP_SET_CONTAINER is used on an iommufd it will get the
> + * compatibility ioas, either by taking what is already set, or auto creating
> + * one. From then on VFIO will continue to use that ioas and is not affected by
> + * this ioctl. SET or CLEAR does not destroy any auto-created IOAS.
> + */
> +struct iommu_vfio_ioas {
> + __u32 size;
> + __u32 ioas_id;
> + __u16 op;
> + __u16 __reserved;
> +};
> +#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
> +
> +/**
> + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
> + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
> + * the parent HWPT in a nesting configuration.
> + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
> + * enforced on device attachment
> + * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is
> + * valid.
> + * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
> + * domain can be attached to any PASID on the device.
> + * Any domain attached to the non-PASID part of the
> + * device must also be flagged, otherwise attaching a
> + * PASID will be blocked.
> + * If IOMMU does not support PASID it will return
> + * error (-EOPNOTSUPP).
> + */
> +enum iommufd_hwpt_alloc_flags {
> + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
> + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
> + IOMMU_HWPT_FAULT_ID_VALID = 1 << 2,
> + IOMMU_HWPT_ALLOC_PASID = 1 << 3,
> +};
> +
> +/**
> + * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
> + * entry attributes
> + * @IOMMU_VTD_S1_SRE: Supervisor request
> + * @IOMMU_VTD_S1_EAFE: Extended access enable
> + * @IOMMU_VTD_S1_WPE: Write protect enable
> + */
> +enum iommu_hwpt_vtd_s1_flags {
> + IOMMU_VTD_S1_SRE = 1 << 0,
> + IOMMU_VTD_S1_EAFE = 1 << 1,
> + IOMMU_VTD_S1_WPE = 1 << 2,
> +};
> +
> +/**
> + * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
> + * info (IOMMU_HWPT_DATA_VTD_S1)
> + * @flags: Combination of enum iommu_hwpt_vtd_s1_flags
> + * @pgtbl_addr: The base address of the stage-1 page table.
> + * @addr_width: The address width of the stage-1 page table
> + * @__reserved: Must be 0
> + */
> +struct iommu_hwpt_vtd_s1 {
> + __aligned_u64 flags;
> + __aligned_u64 pgtbl_addr;
> + __u32 addr_width;
> + __u32 __reserved;
> +};
> +
> +/**
> + * struct iommu_hwpt_arm_smmuv3 - ARM SMMUv3 nested STE
> + * (IOMMU_HWPT_DATA_ARM_SMMUV3)
> + *
> + * @ste: The first two double words of the user space Stream Table Entry for
> + * the translation. Must be little-endian.
> + * Allowed fields: (Refer to "5.2 Stream Table Entry" in SMMUv3 HW Spec)
> + * - word-0: V, Cfg, S1Fmt, S1ContextPtr, S1CDMax
> + * - word-1: EATS, S1DSS, S1CIR, S1COR, S1CSH, S1STALLD
> + *
> + * -EIO will be returned if @ste is not legal or contains any non-allowed field.
> + * Cfg can be used to select a S1, Bypass or Abort configuration. A Bypass
> + * nested domain will translate the same as the nesting parent. The S1 will
> + * install a Context Descriptor Table pointing at userspace memory translated
> + * by the nesting parent.
> + */
> +struct iommu_hwpt_arm_smmuv3 {
> + __aligned_le64 ste[2];
> +};
> +
> +/**
> + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type
> + * @IOMMU_HWPT_DATA_NONE: no data
> + * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
> + * @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table
> + */
> +enum iommu_hwpt_data_type {
> + IOMMU_HWPT_DATA_NONE = 0,
> + IOMMU_HWPT_DATA_VTD_S1 = 1,
> + IOMMU_HWPT_DATA_ARM_SMMUV3 = 2,
> +};
> +
> +/**
> + * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
> + * @size: sizeof(struct iommu_hwpt_alloc)
> + * @flags: Combination of enum iommufd_hwpt_alloc_flags
> + * @dev_id: The device to allocate this HWPT for
> + * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to
> + * @out_hwpt_id: The ID of the new HWPT
> + * @__reserved: Must be 0
> + * @data_type: One of enum iommu_hwpt_data_type
> + * @data_len: Length of the type specific data
> + * @data_uptr: User pointer to the type specific data
> + * @fault_id: The ID of IOMMUFD_FAULT object. Valid only if flags field of
> + * IOMMU_HWPT_FAULT_ID_VALID is set.
> + * @__reserved2: Padding to 64-bit alignment. Must be 0.
> + *
> + * Explicitly allocate a hardware page table object. This is the same object
> + * type that is returned by iommufd_device_attach() and represents the
> + * underlying iommu driver's iommu_domain kernel object.
> + *
> + * A kernel-managed HWPT will be created with the mappings from the given
> + * IOAS via the @pt_id. The @data_type for this allocation must be set to
> + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
> + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
> + *
> + * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a
> + * parent HWPT) or a parent HWPT via @pt_id, in which the parent HWPT must be
> + * allocated previously via the same ioctl from a given IOAS (@pt_id). In this
> + * case, the @data_type must be set to a pre-defined type corresponding to an
> + * I/O page table type supported by the underlying IOMMU hardware. The device
> + * via @dev_id and the vIOMMU via @pt_id must be associated to the same IOMMU
> + * instance.
> + *
> + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
> + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
> + * must be given.
> + */
> +struct iommu_hwpt_alloc {
> + __u32 size;
> + __u32 flags;
> + __u32 dev_id;
> + __u32 pt_id;
> + __u32 out_hwpt_id;
> + __u32 __reserved;
> + __u32 data_type;
> + __u32 data_len;
> + __aligned_u64 data_uptr;
> + __u32 fault_id;
> + __u32 __reserved2;
> +};
> +#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
> +
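
For illustration, allocating a kernel-managed HWPT from an IOAS might look
like this sketch; dev_id is assumed to have been obtained when binding the
device cdev to the iommufd beforehand (e.g. via VFIO_DEVICE_BIND_IOMMUFD):

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Allocate a kernel-managed HWPT that maps the given IOAS. */
static int hwpt_alloc(int iommufd, __u32 dev_id, __u32 ioas_id)
{
	struct iommu_hwpt_alloc alloc = {
		.size = sizeof(alloc),
		.dev_id = dev_id,
		.pt_id = ioas_id,
		.data_type = IOMMU_HWPT_DATA_NONE,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &alloc))
		return -1;

	return alloc.out_hwpt_id;
}
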
> +/**
> + * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
> + * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
> + * on a nested_parent domain.
> + * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
> + */
> +enum iommu_hw_info_vtd_flags {
> + IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
> +};
> +
> +/**
> + * struct iommu_hw_info_vtd - Intel VT-d hardware information
> + *
> + * @flags: Combination of enum iommu_hw_info_vtd_flags
> + * @__reserved: Must be 0
> + *
> + * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
> + * section 11.4.2 Capability Register.
> + * @ecap_reg: Value of Intel VT-d capability register defined in VT-d spec
> + * section 11.4.3 Extended Capability Register.
> + *
> + * User needs to understand the Intel VT-d specification to decode the
> + * register value.
> + */
> +struct iommu_hw_info_vtd {
> + __u32 flags;
> + __u32 __reserved;
> + __aligned_u64 cap_reg;
> + __aligned_u64 ecap_reg;
> +};
> +
> +/**
> + * struct iommu_hw_info_arm_smmuv3 - ARM SMMUv3 hardware information
> + * (IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
> + *
> + * @flags: Must be set to 0
> + * @__reserved: Must be 0
> + * @idr: Implemented features for ARM SMMU Non-secure programming interface
> + * @iidr: Information about the implementation and implementer of ARM SMMU,
> + * and architecture version supported
> + * @aidr: ARM SMMU architecture version
> + *
> + * For the details of @idr, @iidr and @aidr, please refer to the chapters
> + * from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
> + *
> + * This reports the raw HW capability, and not all bits are meaningful to be
> + * read by userspace. Only the following fields should be used:
> + *
> + * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN, CD2L, ASID16, TTF
> + * idr[1]: SIDSIZE, SSIDSIZE
> + * idr[3]: BBML, RIL
> + * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
> + *
> + * - S1P should be assumed to be true if a NESTED HWPT can be created
> + * - VFIO/iommufd only support platforms with COHACC, it should be assumed to be
> + * true.
> + * - ATS is a per-device property. If the VMM describes any devices as ATS
> + * capable in ACPI/DT it should set the corresponding idr.
> + *
> + * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
> + * important that VMMs do not read bits outside the list to allow for
> + * compatibility with future kernels. Several features in the SMMUv3
> + * architecture are not currently supported by the kernel for nesting: HTTU,
> + * BTM, MPAM and others.
> + */
> +struct iommu_hw_info_arm_smmuv3 {
> + __u32 flags;
> + __u32 __reserved;
> + __u32 idr[6];
> + __u32 iidr;
> + __u32 aidr;
> +};
> +
> +/**
> + * enum iommu_hw_info_type - IOMMU Hardware Info Types
> + * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
> + * info
> + * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
> + * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
> + */
> +enum iommu_hw_info_type {
> + IOMMU_HW_INFO_TYPE_NONE = 0,
> + IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
> + IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
> +};
> +
> +/**
> + * enum iommufd_hw_capabilities
> + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
> + * If available, it means the following APIs
> + * are supported:
> + *
> + * IOMMU_HWPT_GET_DIRTY_BITMAP
> + * IOMMU_HWPT_SET_DIRTY_TRACKING
> + *
> + */
> +enum iommufd_hw_capabilities {
> + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
> +};
> +
> +/**
> + * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
> + * @size: sizeof(struct iommu_hw_info)
> + * @flags: Must be 0
> + * @dev_id: The device bound to the iommufd
> + * @data_len: Input the length of a user buffer in bytes. Output the length of
> + * data that kernel supports
> + * @data_uptr: User pointer to a user-space buffer used by the kernel to fill
> + * the iommu type specific hardware information data
> + * @out_data_type: Output the iommu hardware info type as defined in the enum
> + * iommu_hw_info_type.
> + * @out_capabilities: Output the generic iommu capability info type as defined
> + * in the enum iommu_hw_capabilities.
> + * @__reserved: Must be 0
> + *
> + * Query an iommu type specific hardware information data from an iommu behind
> + * a given device that has been bound to iommufd. This hardware info data will
> + * be used to sync capabilities between the virtual iommu and the physical
> + * iommu, e.g. a nested translation setup needs to check the hardware info, so
> + * a guest stage-1 page table can be compatible with the physical iommu.
> + *
> + * To capture an iommu type specific hardware information data, @data_uptr and
> + * its length @data_len must be provided. Trailing bytes will be zeroed if the
> + * user buffer is larger than the data that kernel has. Otherwise, kernel only
> + * fills the buffer using the given length in @data_len. If the ioctl succeeds,
> + * @data_len will be updated to the length that kernel actually supports,
> + * @out_data_type will be filled to decode the data filled in the buffer
> + * pointed by @data_uptr. Input @data_len == zero is allowed.
> + */
> +struct iommu_hw_info {
> + __u32 size;
> + __u32 flags;
> + __u32 dev_id;
> + __u32 data_len;
> + __aligned_u64 data_uptr;
> + __u32 out_data_type;
> + __u32 __reserved;
> + __aligned_u64 out_capabilities;
> +};
> +#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
> +
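
A sketch of querying SMMUv3 hardware info for a bound device, assuming the
caller only cares about the ARM layout (illustrative names):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Fetch ARM SMMUv3 info for dev_id; returns -1 on error or other types. */
static int get_smmuv3_info(int iommufd, __u32 dev_id,
			   struct iommu_hw_info_arm_smmuv3 *smmu)
{
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.data_len = sizeof(*smmu),
		.data_uptr = (__u64)(uintptr_t)smmu,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;

	return cmd.out_data_type == IOMMU_HW_INFO_TYPE_ARM_SMMUV3 ? 0 : -1;
}
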
> +/*
> + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
> + * tracking
> + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
> + */
> +enum iommufd_hwpt_set_dirty_tracking_flags {
> + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
> +};
> +
> +/**
> + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
> + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
> + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
> + * @hwpt_id: HW pagetable ID that represents the IOMMU domain
> + * @__reserved: Must be 0
> + *
> + * Toggle dirty tracking on an HW pagetable.
> + */
> +struct iommu_hwpt_set_dirty_tracking {
> + __u32 size;
> + __u32 flags;
> + __u32 hwpt_id;
> + __u32 __reserved;
> +};
> +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
> + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
> +
> +/**
> + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
> + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
> + * any dirty bits metadata. This flag
> + * can be passed in the expectation
> + * where the next operation is an unmap
> + * of the same IOVA range.
> + *
> + */
> +enum iommufd_hwpt_get_dirty_bitmap_flags {
> + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
> +};
> +
> +/**
> + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
> + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
> + * @hwpt_id: HW pagetable ID that represents the IOMMU domain
> + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
> + * @__reserved: Must be 0
> + * @iova: base IOVA of the bitmap first bit
> + * @length: IOVA range size
> + * @page_size: page size granularity of each bit in the bitmap
> + * @data: bitmap where the dirty bits are set. Each bitmap bit
> + * represents one page_size unit of IOVA, starting from @iova.
> + *
> + * Checking a given IOVA is dirty:
> + *
> + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
> + *
> + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap
> + * with the dirty IOVAs. In doing so it will also by default clear any
> + * dirty bit metadata set in the IOPTE.
> + */
> +struct iommu_hwpt_get_dirty_bitmap {
> + __u32 size;
> + __u32 hwpt_id;
> + __u32 flags;
> + __u32 __reserved;
> + __aligned_u64 iova;
> + __aligned_u64 length;
> + __aligned_u64 page_size;
> + __aligned_u64 data;
> +};
> +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
> + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
> +
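
And reading the dirty log for an HWPT, as a sketch (the caller supplies a
zeroed bitmap, one bit per page_size unit; names are illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Read (and, by default, clear) dirty bits for [iova, iova + length). */
static int hwpt_get_dirty(int iommufd, __u32 hwpt_id, __u64 iova,
			  __u64 length, __u64 page_size, __u64 *bitmap)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (__u64)(uintptr_t)bitmap,
	};

	return ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
}
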
> +/**
> + * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
> + * Data Type
> + * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
> + * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
> + */
> +enum iommu_hwpt_invalidate_data_type {
> + IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
> + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1,
> +};
> +
> +/**
> + * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
> + * stage-1 cache invalidation
> + * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
> + * to all-levels page structure cache or just
> + * the leaf PTE cache.
> + */
> +enum iommu_hwpt_vtd_s1_invalidate_flags {
> + IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
> +};
> +
> +/**
> + * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
> + * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
> + * @addr: The start address of the range to be invalidated. It needs to
> + * be 4KB aligned.
> + * @npages: Number of contiguous 4K pages to be invalidated.
> + * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
> + * @__reserved: Must be 0
> + *
> + * The Intel VT-d specific invalidation data for user-managed stage-1 cache
> + * invalidation in nested translation. Userspace uses this structure to
> + * tell the impacted cache scope after modifying the stage-1 page table.
> + *
> + * Invalidating all the caches related to the page table by setting @addr
> + * to be 0 and @npages to be U64_MAX.
> + *
> + * The device TLB will be invalidated automatically if ATS is enabled.
> + */
> +struct iommu_hwpt_vtd_s1_invalidate {
> + __aligned_u64 addr;
> + __aligned_u64 npages;
> + __u32 flags;
> + __u32 __reserved;
> +};
> +
> +/**
> + * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
> + * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
> + * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
> + * Must be little-endian.
> + *
> + * Supported command list only when passing in a vIOMMU via @hwpt_id:
> + * CMDQ_OP_TLBI_NSNH_ALL
> + * CMDQ_OP_TLBI_NH_VA
> + * CMDQ_OP_TLBI_NH_VAA
> + * CMDQ_OP_TLBI_NH_ALL
> + * CMDQ_OP_TLBI_NH_ASID
> + * CMDQ_OP_ATC_INV
> + * CMDQ_OP_CFGI_CD
> + * CMDQ_OP_CFGI_CD_ALL
> + *
> + * -EIO will be returned if the command is not supported.
> + */
> +struct iommu_viommu_arm_smmuv3_invalidate {
> + __aligned_le64 cmd[2];
> +};
> +
> +/**
> + * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
> + * @size: sizeof(struct iommu_hwpt_invalidate)
> + * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation
> + * @data_uptr: User pointer to an array of driver-specific cache invalidation
> + * data.
> + * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
> + * type of all the entries in the invalidation request array. It
> + * should be a type supported by the hwpt pointed by @hwpt_id.
> + * @entry_len: Length (in bytes) of a request entry in the request array
> + * @entry_num: Input the number of cache invalidation requests in the array.
> + * Output the number of requests successfully handled by kernel.
> + * @__reserved: Must be 0.
> + *
> + * Invalidate iommu cache for user-managed page table or vIOMMU. Modifications
> + * on a user-managed page table should be followed by this operation, if a HWPT
> + * is passed in via @hwpt_id. Other caches, such as device cache or descriptor
> + * cache can be flushed if a vIOMMU is passed in via the @hwpt_id field.
> + *
> + * Each ioctl can support one or more cache invalidation requests in the array
> + * that has a total size of @entry_len * @entry_num.
> + *
> + * An empty invalidation request array by setting @entry_num==0 is allowed, and
> + * @entry_len and @data_uptr would be ignored in this case. This can be used to
> + * check if the given @data_type is supported or not by kernel.
> + */
> +struct iommu_hwpt_invalidate {
> + __u32 size;
> + __u32 hwpt_id;
> + __aligned_u64 data_uptr;
> + __u32 data_type;
> + __u32 entry_len;
> + __u32 entry_num;
> + __u32 __reserved;
> +};
> +#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
> +
> +/**
> + * enum iommu_hwpt_pgfault_flags - flags for struct iommu_hwpt_pgfault
> + * @IOMMU_PGFAULT_FLAGS_PASID_VALID: The pasid field of the fault data is
> + * valid.
> + * @IOMMU_PGFAULT_FLAGS_LAST_PAGE: It's the last fault of a fault group.
> + */
> +enum iommu_hwpt_pgfault_flags {
> + IOMMU_PGFAULT_FLAGS_PASID_VALID = (1 << 0),
> + IOMMU_PGFAULT_FLAGS_LAST_PAGE = (1 << 1),
> +};
> +
> +/**
> + * enum iommu_hwpt_pgfault_perm - perm bits for struct iommu_hwpt_pgfault
> + * @IOMMU_PGFAULT_PERM_READ: request for read permission
> + * @IOMMU_PGFAULT_PERM_WRITE: request for write permission
> + * @IOMMU_PGFAULT_PERM_EXEC: (PCIE 10.4.1) request with a PASID that has the
> + * Execute Requested bit set in PASID TLP Prefix.
> + * @IOMMU_PGFAULT_PERM_PRIV: (PCIE 10.4.1) request with a PASID that has the
> + * Privileged Mode Requested bit set in PASID TLP
> + * Prefix.
> + */
> +enum iommu_hwpt_pgfault_perm {
> + IOMMU_PGFAULT_PERM_READ = (1 << 0),
> + IOMMU_PGFAULT_PERM_WRITE = (1 << 1),
> + IOMMU_PGFAULT_PERM_EXEC = (1 << 2),
> + IOMMU_PGFAULT_PERM_PRIV = (1 << 3),
> +};
> +
> +/**
> + * struct iommu_hwpt_pgfault - iommu page fault data
> + * @flags: Combination of enum iommu_hwpt_pgfault_flags
> + * @dev_id: id of the originated device
> + * @pasid: Process Address Space ID
> + * @grpid: Page Request Group Index
> + * @perm: Combination of enum iommu_hwpt_pgfault_perm
> + * @__reserved: Must be 0.
> + * @addr: Fault address
> + * @length: a hint of how much data the requestor is expecting to fetch. For
> + * example, if the PRI initiator knows it is going to do a 10MB
> + * transfer, it could fill in 10MB and the OS could pre-fault in
> + * 10MB of IOVA. It defaults to 0 if there's no such hint.
> + * @cookie: kernel-managed cookie identifying a group of fault messages. The
> + * cookie number encoded in the last page fault of the group should
> + * be echoed back in the response message.
> + */
> +struct iommu_hwpt_pgfault {
> + __u32 flags;
> + __u32 dev_id;
> + __u32 pasid;
> + __u32 grpid;
> + __u32 perm;
> + __u32 __reserved;
> + __aligned_u64 addr;
> + __u32 length;
> + __u32 cookie;
> +};
> +
> +/**
> + * enum iommufd_page_response_code - Return status of fault handlers
> + * @IOMMUFD_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
> + * populated, retry the access. This is the
> + * "Success" defined in PCI 10.4.2.1.
> + * @IOMMUFD_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
> + * access. This is the "Invalid Request" in PCI
> + * 10.4.2.1.
> + */
> +enum iommufd_page_response_code {
> + IOMMUFD_PAGE_RESP_SUCCESS = 0,
> + IOMMUFD_PAGE_RESP_INVALID = 1,
> +};
> +
> +/**
> + * struct iommu_hwpt_page_response - IOMMU page fault response
> + * @cookie: The kernel-managed cookie reported in the fault message.
> + * @code: One of response code in enum iommufd_page_response_code.
> + */
> +struct iommu_hwpt_page_response {
> + __u32 cookie;
> + __u32 code;
> +};
> +
> +/**
> + * struct iommu_fault_alloc - ioctl(IOMMU_FAULT_QUEUE_ALLOC)
> + * @size: sizeof(struct iommu_fault_alloc)
> + * @flags: Must be 0
> + * @out_fault_id: The ID of the new FAULT
> + * @out_fault_fd: The fd of the new FAULT
> + *
> + * Explicitly allocate a fault handling object.
> + */
> +struct iommu_fault_alloc {
> + __u32 size;
> + __u32 flags;
> + __u32 out_fault_id;
> + __u32 out_fault_fd;
> +};
> +#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC)
> +
> +/**
> + * enum iommu_viommu_type - Virtual IOMMU Type
> + * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
> + * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
> + */
> +enum iommu_viommu_type {
> + IOMMU_VIOMMU_TYPE_DEFAULT = 0,
> + IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
> +};
> +
> +/**
> + * struct iommu_viommu_alloc - ioctl(IOMMU_VIOMMU_ALLOC)
> + * @size: sizeof(struct iommu_viommu_alloc)
> + * @flags: Must be 0
> + * @type: Type of the virtual IOMMU. Must be defined in enum iommu_viommu_type
> + * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
> + * @hwpt_id: ID of a nesting parent HWPT to associate to
> + * @out_viommu_id: Output virtual IOMMU ID for the allocated object
> + *
> + * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
> + * virtualization support that is a security-isolated slice of the real IOMMU HW
> + * that is unique to a specific VM. Operations global to the IOMMU are connected
> + * to the vIOMMU, such as:
> + * - Security namespace for guest owned ID, e.g. guest-controlled cache tags
> + * - Non-device-affiliated event reporting, e.g. invalidation queue errors
> + * - Access to a sharable nesting parent pagetable across physical IOMMUs
> + * - Virtualization of various platforms IDs, e.g. RIDs and others
> + * - Delivery of paravirtualized invalidation
> + * - Direct assigned invalidation queues
> + * - Direct assigned interrupts
> + */
> +struct iommu_viommu_alloc {
> + __u32 size;
> + __u32 flags;
> + __u32 type;
> + __u32 dev_id;
> + __u32 hwpt_id;
> + __u32 out_viommu_id;
> +};
> +#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
> +
> +/**
> + * struct iommu_vdevice_alloc - ioctl(IOMMU_VDEVICE_ALLOC)
> + * @size: sizeof(struct iommu_vdevice_alloc)
> + * @viommu_id: vIOMMU ID to associate with the virtual device
> + * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
> + * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTROY
> + * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
> + * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table
> + *
> + * Allocate a virtual device instance (for a physical device) against a vIOMMU.
> + * This instance holds the device's information (related to its vIOMMU) in a VM.
> + */
> +struct iommu_vdevice_alloc {
> + __u32 size;
> + __u32 viommu_id;
> + __u32 dev_id;
> + __u32 out_vdevice_id;
> + __aligned_u64 virt_id;
> +};
> +#define IOMMU_VDEVICE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VDEVICE_ALLOC)
> +
> +/**
> + * struct iommu_ioas_change_process - ioctl(IOMMU_IOAS_CHANGE_PROCESS)
> + * @size: sizeof(struct iommu_ioas_change_process)
> + * @__reserved: Must be 0
> + *
> + * This transfers pinned memory counts for every memory map in every IOAS
> + * in the context to the current process. This only supports maps created
> + * with IOMMU_IOAS_MAP_FILE, and returns EINVAL if other maps are present.
> + * If the ioctl returns a failure status, then nothing is changed.
> + *
> + * This API is useful for transferring operation of a device from one process
> + * to another, such as during userland live update.
> + */
> +struct iommu_ioas_change_process {
> + __u32 size;
> + __u32 __reserved;
> +};
> +
> +#define IOMMU_IOAS_CHANGE_PROCESS \
> + _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS)
> +
> +#endif
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support
2025-05-25 7:49 ` [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:31 ` Mostafa Saleh
2025-07-29 5:12 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:31 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:13PM +0530, Aneesh Kumar K.V (Arm) wrote:
> This uses a stage1 translate, stage2 bypass IOMMU config.
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> Makefile | 1 +
> builtin-run.c | 1 +
> include/kvm/kvm-config.h | 1 +
> include/kvm/vfio.h | 2 +
> vfio/core.c | 5 +
> vfio/iommufd.c | 368 +++++++++++++++++++++++++++++++++++++++
> 6 files changed, 378 insertions(+)
> create mode 100644 vfio/iommufd.c
>
> diff --git a/Makefile b/Makefile
> index 8b2720f73386..740b95c7c3c3 100644
> --- a/Makefile
> +++ b/Makefile
> @@ -64,6 +64,7 @@ OBJS += mmio.o
> OBJS += pci.o
> OBJS += term.o
> OBJS += vfio/core.o
> +OBJS += vfio/iommufd.o
> OBJS += vfio/pci.o
> OBJS += vfio/legacy.o
> OBJS += virtio/blk.o
> diff --git a/builtin-run.c b/builtin-run.c
> index 81f255f911b3..39198f9bc0d6 100644
> --- a/builtin-run.c
> +++ b/builtin-run.c
> @@ -262,6 +262,7 @@ static int loglevel_parser(const struct option *opt, const char *arg, int unset)
> OPT_CALLBACK('\0', "vfio-pci", NULL, "[domain:]bus:dev.fn", \
> "Assign a PCI device to the virtual machine", \
> vfio_device_parser, kvm), \
> + OPT_BOOLEAN('\0', "iommufd", &(cfg)->iommufd, "Use iommufd interface"), \
> \
> OPT_GROUP("Debug options:"), \
> OPT_CALLBACK_NOOPT('\0', "debug", kvm, NULL, \
> diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
> index 592b035785c9..632eaf84b7eb 100644
> --- a/include/kvm/kvm-config.h
> +++ b/include/kvm/kvm-config.h
> @@ -65,6 +65,7 @@ struct kvm_config {
> bool ioport_debug;
> bool mmio_debug;
> int virtio_transport;
> + bool iommufd;
> };
>
> #endif
> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
> index fed692b0f265..37a2b5ac3dad 100644
> --- a/include/kvm/vfio.h
> +++ b/include/kvm/vfio.h
> @@ -128,6 +128,8 @@ void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
>
> extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
> extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
> +int iommufd__init(struct kvm *kvm);
> +int iommufd__exit(struct kvm *kvm);
>
> struct kvm_mem_bank;
> int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
> diff --git a/vfio/core.c b/vfio/core.c
> index 32a8e0fe67c0..0b1796c54ffd 100644
> --- a/vfio/core.c
> +++ b/vfio/core.c
> @@ -373,6 +373,8 @@ static int vfio__init(struct kvm *kvm)
> }
> kvm_vfio_device = device.fd;
>
> + if (kvm->cfg.iommufd)
> + return iommufd__init(kvm);
> return legacy_vfio__init(kvm);
> }
> dev_base_init(vfio__init);
> @@ -393,6 +395,9 @@ static int vfio__exit(struct kvm *kvm)
>
> free(kvm->cfg.vfio_devices);
>
> + if (kvm->cfg.iommufd)
> + return iommufd__exit(kvm);
> +
> return legacy_vfio__exit(kvm);
> }
> dev_base_exit(vfio__exit);
> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
> new file mode 100644
> index 000000000000..3728a06cb318
> --- /dev/null
> +++ b/vfio/iommufd.c
> @@ -0,0 +1,368 @@
> +#include <sys/types.h>
> +#include <dirent.h>
> +
> +#include "kvm/kvm.h"
> +#include <linux/iommufd.h>
> +#include <linux/list.h>
> +
> +#define VFIO_DEV_DIR "/dev/vfio"
This duplicates the define in the legacy file, so maybe move it to the header?
> +#define VFIO_DEV_NODE VFIO_DEV_DIR "/devices/"
> +#define IOMMU_DEV "/dev/iommu"
> +
> +static int iommu_fd;
> +static int ioas_id;
> +
> +static int __iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> +{
> + int ret;
> +
> + vdev->info.argsz = sizeof(vdev->info);
> + if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to get info");
> + goto err_close_device;
> + }
> +
> + if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
> + ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
> + vfio_dev_warn(vdev, "failed to reset device");
> +
> + vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
> + if (!vdev->regions) {
> + ret = -ENOMEM;
> + goto err_close_device;
> + }
> +
> + /* Now for the bus-specific initialization... */
> + switch (vdev->params->type) {
> + case VFIO_DEVICE_PCI:
> + BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
> + ret = vfio_pci_setup_device(kvm, vdev);
> + break;
> + default:
> + BUG_ON(1);
> + ret = -EINVAL;
> + }
> +
> + if (ret)
> + goto err_free_regions;
> +
> + vfio_dev_info(vdev, "assigned to device number 0x%x ",
> + vdev->dev_hdr.dev_num) ;
> +
> + return 0;
> +
> +err_free_regions:
> + free(vdev->regions);
> +err_close_device:
> + close(vdev->fd);
> +
> + return ret;
> +}
> +
> +static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> +{
> + int ret;
> + DIR *dir = NULL;
> + struct dirent *dir_ent;
> + bool found_dev = false;
> + char pci_dev_path[PATH_MAX];
> + char vfio_dev_path[PATH_MAX];
> + struct iommu_hwpt_alloc alloc_hwpt;
> + struct vfio_device_bind_iommufd bind;
> + struct vfio_device_attach_iommufd_pt attach_data;
> +
> + ret = snprintf(pci_dev_path, PATH_MAX, "%s/vfio-dev/", vdev->sysfs_path);
> + if (ret < 0 || ret == PATH_MAX)
> + return -EINVAL;
> +
> + dir = opendir(pci_dev_path);
> + if (!dir)
> + return -EINVAL;
> +
> + while ((dir_ent = readdir(dir))) {
> + if (!strncmp(dir_ent->d_name, "vfio", 4)) {
> + ret = snprintf(vfio_dev_path, PATH_MAX, VFIO_DEV_NODE "%s", dir_ent->d_name);
> + if (ret < 0 || ret == PATH_MAX) {
> + ret = -EINVAL;
> + goto err_close_dir;
> + }
> + found_dev = true;
> + break;
> + }
> + }
> + if (!found_dev) {
> + ret = -ENODEV;
> + goto err_close_dir;
> + }
By this point we have already found the device (on error we jump to
"err_close_dir"), so there is no need for the extra flag.
> +
> + vdev->fd = open(vfio_dev_path, O_RDWR);
> + if (vdev->fd == -1) {
> + ret = errno;
> + pr_err("Failed to open %s", vfio_dev_path);
> + goto err_close_dir;
> + }
> +
> + struct kvm_device_attr attr = {
> + .group = KVM_DEV_VFIO_FILE,
> + .attr = KVM_DEV_VFIO_FILE_ADD,
> + .addr = (__u64)&vdev->fd,
> + };
> +
> + if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
> + ret = -errno;
> + pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
> + goto err_close_device;
> + }
> +
> + bind.argsz = sizeof(bind);
> + bind.flags = 0;
> + bind.iommufd = iommu_fd;
> +
> + /* now bind the iommufd */
> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to get info");
> + goto err_close_device;
> + }
> +
> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> + alloc_hwpt.flags = 0;
> + alloc_hwpt.dev_id = bind.out_devid;
> + alloc_hwpt.pt_id = ioas_id;
> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
> + alloc_hwpt.data_len = 0;
> + alloc_hwpt.data_uptr = 0;
> +
> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> + ret = -errno;
> + pr_err("Failed to allocate HWPT");
> + goto err_close_device;
> + }
> +
> + attach_data.argsz = sizeof(attach_data);
> + attach_data.flags = 0;
> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> +
> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to attach to IOAS ");
Extra space.
> + goto err_close_device;
> + }
> +
> + closedir(dir);
> + return __iommufd_configure_device(kvm, vdev);
> +
> +err_close_device:
> + close(vdev->fd);
> +err_close_dir:
> + closedir(dir);
> + return ret;
> +}
> +
> +static int iommufd_configure_devices(struct kvm *kvm)
> +{
> + int i, ret;
> +
> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> + ret = iommufd_configure_device(kvm, &vfio_devices[i]);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +static int iommufd_create_ioas(struct kvm *kvm)
> +{
> + int ret;
> + struct iommu_ioas_alloc alloc_data;
> + iommu_fd = open(IOMMU_DEV, O_RDWR);
> + if (iommu_fd == -1) {
> + ret = errno;
> + pr_err("Failed to open %s", IOMMU_DEV);
> + return ret;
> + }
> +
> + alloc_data.size = sizeof(alloc_data);
> + alloc_data.flags = 0;
> +
> + if (ioctl(iommu_fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
> + ret = errno;
For all other ioctls we return -errno, except here; is there a reason
for that?
> + pr_err("Failed to alloc IOAS ");
Also, there is an extra space at the end, and it might be more consistent
with the rest of the code to use “vfio_dev_err”.
> + goto err_close_device;
> + }
> + ioas_id = alloc_data.out_ioas_id;
> + return 0;
> +
> +err_close_device:
> + close(iommu_fd);
> + return ret;
> +}
> +
> +static int vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
> +{
> + int ret, dirfd;
> + char *group_name;
> + unsigned long group_id;
> + char dev_path[PATH_MAX];
> + struct vfio_group *group = NULL;
> +
> + ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
> + vdev->params->bus, vdev->params->name);
> + if (ret < 0 || ret == PATH_MAX)
> + return -EINVAL;
> +
> + vdev->sysfs_path = strndup(dev_path, PATH_MAX);
> + if (!vdev->sysfs_path)
> + return -ENOMEM;
> +
> + /* Find IOMMU group for this device */
> + dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
> + if (dirfd < 0) {
> + vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
> + return -errno;
> + }
> +
> + ret = readlinkat(dirfd, "iommu_group", dev_path, PATH_MAX);
> + if (ret < 0) {
> + vfio_dev_err(vdev, "no iommu_group");
> + goto out_close;
> + }
> + if (ret == PATH_MAX) {
> + ret = -ENOMEM;
> + goto out_close;
> + }
> +
> + dev_path[ret] = '\0';
> + group_name = basename(dev_path);
> + errno = 0;
> + group_id = strtoul(group_name, NULL, 10);
> + if (errno) {
> + ret = -errno;
> + goto out_close;
> + }
> +
> + list_for_each_entry(group, &vfio_groups, list) {
> + if (group->id == group_id) {
> + group->refs++;
> + break;
> + }
> + }
> + if (group->id != group_id) {
> + group = calloc(1, sizeof(*group));
> + if (!group) {
> + ret = -ENOMEM;
> + goto out_close;
> + }
> + group->id = group_id;
> + group->refs = 1;
> + /* no group fd for iommufd */
> + group->fd = -1;
> + list_add(&group->list, &vfio_groups);
> + }
> + vdev->group = group;
> + ret = 0;
> +
There is some duplication with “vfio_group_get_for_dev”; I wonder if we could
re-use some of this code in a helper.
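A rough sketch of such a helper (name made up), shareable between the legacy
and iommufd paths:

	static long vfio_device_iommu_group_id(struct vfio_device *vdev)
	{
		char link[PATH_MAX];
		ssize_t len;
		long id;
		int dirfd;

		dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
		if (dirfd < 0)
			return -errno;

		/* iommu_group is a symlink whose basename is the group id */
		len = readlinkat(dirfd, "iommu_group", link, sizeof(link) - 1);
		id = len < 0 ? -errno : 0;
		close(dirfd);
		if (id < 0)
			return id;

		link[len] = '\0';
		errno = 0;
		id = strtoul(basename(link), NULL, 10);
		return errno ? -errno : id;
	}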
> +out_close:
> + close(dirfd);
> + return ret;
> +}
> +
> +static int iommufd_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
> +{
> + int ret = 0;
> + struct iommu_ioas_map dma_map;
> +
> + dma_map.size = sizeof(dma_map);
> + dma_map.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE |
> + IOMMU_IOAS_MAP_FIXED_IOVA;
> + dma_map.ioas_id = ioas_id;
> + dma_map.__reserved = 0;
> + dma_map.user_va = host_addr;
> + dma_map.iova = iova;
> + dma_map.length = size;
> +
> + /* Map the guest memory for DMA (i.e. provide isolation) */
> + if (ioctl(iommu_fd, IOMMU_IOAS_MAP, &dma_map)) {
> + ret = -errno;
> + pr_err("Failed to map 0x%llx -> 0x%llx (%u) for DMA",
> + dma_map.iova, dma_map.user_va, dma_map.size);
> + }
> +
> + return ret;
> +}
> +
> +static int iommufd_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
> +{
> + int ret = 0;
> + struct iommu_ioas_unmap dma_unmap;
> +
> + dma_unmap.size = sizeof(dma_unmap);
> + dma_unmap.ioas_id = ioas_id;
> + dma_unmap.iova = iova;
> + dma_unmap.length = size;
> +
> + if (ioctl(iommu_fd, IOMMU_IOAS_UNMAP, &dma_unmap)) {
> + ret = -errno;
> + if (ret != -ENOENT)
> + pr_err("Failed to unmap 0x%llx - size (%u) for DMA %d",
> + dma_unmap.iova, dma_unmap.size, ret);
> + }
> +
> + return ret;
> +}
> +
> +static int iommufd_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> +{
> + return iommufd_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
> +}
> +
> +static int iommufd_configure_reserved_mem(struct kvm *kvm)
> +{
> + int ret;
> + struct vfio_group *group;
> +
> + list_for_each_entry(group, &vfio_groups, list) {
> + ret = vfio_configure_reserved_regions(kvm, group);
> + if (ret)
> + return ret;
> + }
> + return 0;
> +}
> +
> +int iommufd__init(struct kvm *kvm)
> +{
> + int ret, i;
> +
> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> + vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
> +
> + ret = vfio_device_init(kvm, &vfio_devices[i]);
> + if (ret)
> + return ret;
> + }
> +
> + ret = iommufd_create_ioas(kvm);
> + if (ret)
> + return ret;
> +
> + ret = iommufd_configure_devices(kvm);
> + if (ret)
> + return ret;
> +
Any failure after this point just returns and leaks iommu_fd, even though
iommufd_create_ioas() does “close(iommu_fd)” on failure.
Also, don’t we want to close “iommu_fd” at exit, similarly to the VFIO
container?
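A minimal sketch of what that could look like, reusing the static iommu_fd
from this patch:

	int iommufd__exit(struct kvm *kvm)
	{
		/* release the iommufd handle, mirroring the container teardown */
		close(iommu_fd);
		return 0;
	}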
Thanks,
Mostafa
> + ret = iommufd_configure_reserved_mem(kvm);
> + if (ret)
> + return ret;
> +
> + dma_map_mem_range = iommufd_map_mem_range;
> + dma_unmap_mem_range = iommufd_unmap_mem_range;
> + /* Now map the full memory */
> + return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, iommufd_map_mem_bank,
> + NULL);
> +}
> +
> +int iommufd__exit(struct kvm *kvm)
> +{
> + return 0;
> +}
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper
2025-05-25 7:49 ` [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:32 ` Mostafa Saleh
2025-07-29 5:14 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:32 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:14PM +0530, Aneesh Kumar K.V (Arm) wrote:
> alloc_hwpt.flags = 0; implies we prefer stage1 translation. Hence name
> the helper iommufd_alloc_s2bypass_hwpt().
This patch moves the recently added code into a new function;
can't this be squashed?
Also, I believe that with “IOMMU_HWPT_DATA_NONE”, we shouldn’t make
any assumptions in userspace about which stage is used.
The only guarantee is that IOMMU_IOAS_MAP/IOMMU_IOAS_UNMAP works.
So, I believe the naming for "s2bypass" is not accurate.
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> vfio/iommufd.c | 86 +++++++++++++++++++++++++++++---------------------
> 1 file changed, 50 insertions(+), 36 deletions(-)
>
> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
> index 3728a06cb318..742550705746 100644
> --- a/vfio/iommufd.c
> +++ b/vfio/iommufd.c
> @@ -60,6 +60,54 @@ err_close_device:
> return ret;
> }
>
> +static int iommufd_alloc_s2bypass_hwpt(struct vfio_device *vdev)
> +{
> + int ret;
> + struct iommu_hwpt_alloc alloc_hwpt;
> + struct vfio_device_bind_iommufd bind;
> + struct vfio_device_attach_iommufd_pt attach_data;
> +
> + bind.argsz = sizeof(bind);
> + bind.flags = 0;
> + bind.iommufd = iommu_fd;
> +
> + /* now bind the iommufd */
> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to get info");
> + goto err_out;
> + }
> +
> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> + /* stage1 translate stage 2 bypass table if stage1 is supported */
> + alloc_hwpt.flags = 0;
> + alloc_hwpt.dev_id = bind.out_devid;
> + alloc_hwpt.pt_id = ioas_id;
> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
> + alloc_hwpt.data_len = 0;
> + alloc_hwpt.data_uptr = 0;
> +
> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> + ret = -errno;
> + pr_err("Failed to allocate HWPT");
> + goto err_out;
> + }
> +
> + attach_data.argsz = sizeof(attach_data);
> + attach_data.flags = 0;
> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> +
> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to attach to IOAS ");
> + goto err_out;
> + }
> + return 0;
> +
> +err_out:
> + return ret;
> +}
> +
> static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> {
> int ret;
> @@ -68,9 +116,6 @@ static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> bool found_dev = false;
> char pci_dev_path[PATH_MAX];
> char vfio_dev_path[PATH_MAX];
> - struct iommu_hwpt_alloc alloc_hwpt;
> - struct vfio_device_bind_iommufd bind;
> - struct vfio_device_attach_iommufd_pt attach_data;
>
> ret = snprintf(pci_dev_path, PATH_MAX, "%s/vfio-dev/", vdev->sysfs_path);
> if (ret < 0 || ret == PATH_MAX)
> @@ -115,40 +160,9 @@ static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> goto err_close_device;
> }
>
> - bind.argsz = sizeof(bind);
> - bind.flags = 0;
> - bind.iommufd = iommu_fd;
> -
> - /* now bind the iommufd */
> - if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
> - ret = -errno;
> - vfio_dev_err(vdev, "failed to get info");
> - goto err_close_device;
> - }
> -
> - alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> - alloc_hwpt.flags = 0;
> - alloc_hwpt.dev_id = bind.out_devid;
> - alloc_hwpt.pt_id = ioas_id;
> - alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
> - alloc_hwpt.data_len = 0;
> - alloc_hwpt.data_uptr = 0;
> -
> - if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> - ret = -errno;
> - pr_err("Failed to allocate HWPT");
> - goto err_close_device;
> - }
> -
> - attach_data.argsz = sizeof(attach_data);
> - attach_data.flags = 0;
> - attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> -
> - if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> - ret = -errno;
> - vfio_dev_err(vdev, "failed to attach to IOAS ");
> + ret = iommufd_alloc_s2bypass_hwpt(vdev);
> + if (ret)
> goto err_close_device;
> - }
>
> closedir(dir);
> return __iommufd_configure_device(kvm, vdev);
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-05-25 7:49 ` [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects Aneesh Kumar K.V (Arm)
2025-07-21 12:27 ` Will Deacon
@ 2025-07-27 18:35 ` Mostafa Saleh
2025-07-29 5:19 ` Aneesh Kumar K.V
1 sibling, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:35 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
> This also allocates a stage1 bypass and stage2 translate table.
So this makes IOMMUFD work only with SMMUv3?
I don’t understand the point of this configuration: it seems to add
extra complexity and extra HW constraints with no extra value.
Not related to this patch: do you have plans to add some of the other iommufd
features? I think things such as page faults might be useful.
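For reference, a rough and untested sketch of how the fault queue uapi from
the header updates could be driven (assuming the HWPT is also allocated with
the fault id attached):

	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};

	if (ioctl(iommu_fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd))
		return -errno;

	for (;;) {
		struct iommu_hwpt_pgfault fault;
		struct iommu_hwpt_page_response resp = {};

		/* each read returns one fault record from the queue fd */
		if (read(cmd.out_fault_fd, &fault, sizeof(fault)) != sizeof(fault))
			break;

		/* fault.addr/fault.length describe the faulting range */

		resp.cookie = fault.cookie;
		resp.code = IOMMUFD_PAGE_RESP_SUCCESS;
		if (write(cmd.out_fault_fd, &resp, sizeof(resp)) != sizeof(resp))
			break;
	}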
Thanks,
Mostafa
>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> ---
> builtin-run.c | 2 +
> include/kvm/kvm-config.h | 1 +
> vfio/core.c | 4 +-
> vfio/iommufd.c | 115 ++++++++++++++++++++++++++++++++++++++-
> 4 files changed, 119 insertions(+), 3 deletions(-)
>
> diff --git a/builtin-run.c b/builtin-run.c
> index 39198f9bc0d6..bfa3e8b09f82 100644
> --- a/builtin-run.c
> +++ b/builtin-run.c
> @@ -263,6 +263,8 @@ static int loglevel_parser(const struct option *opt, const char *arg, int unset)
> "Assign a PCI device to the virtual machine", \
> vfio_device_parser, kvm), \
> OPT_BOOLEAN('\0', "iommufd", &(cfg)->iommufd, "Use iommufd interface"), \
> + OPT_BOOLEAN('\0', "iommufd-vdevice", &(cfg)->iommufd_vdevice, \
> + "Use iommufd vdevice interface"), \
> \
> OPT_GROUP("Debug options:"), \
> OPT_CALLBACK_NOOPT('\0', "debug", kvm, NULL, \
> diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
> index 632eaf84b7eb..d80be6826469 100644
> --- a/include/kvm/kvm-config.h
> +++ b/include/kvm/kvm-config.h
> @@ -66,6 +66,7 @@ struct kvm_config {
> bool mmio_debug;
> int virtio_transport;
> bool iommufd;
> + bool iommufd_vdevice;
> };
>
> #endif
> diff --git a/vfio/core.c b/vfio/core.c
> index 0b1796c54ffd..8dfcf3ca35c1 100644
> --- a/vfio/core.c
> +++ b/vfio/core.c
> @@ -373,7 +373,7 @@ static int vfio__init(struct kvm *kvm)
> }
> kvm_vfio_device = device.fd;
>
> - if (kvm->cfg.iommufd)
> + if (kvm->cfg.iommufd || kvm->cfg.iommufd_vdevice)
> return iommufd__init(kvm);
> return legacy_vfio__init(kvm);
> }
> @@ -395,7 +395,7 @@ static int vfio__exit(struct kvm *kvm)
>
> free(kvm->cfg.vfio_devices);
>
> - if (kvm->cfg.iommufd)
> + if (kvm->cfg.iommufd || kvm->cfg.iommufd_vdevice)
> return iommufd__exit(kvm);
>
> return legacy_vfio__exit(kvm);
> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
> index 742550705746..39870320e4ac 100644
> --- a/vfio/iommufd.c
> +++ b/vfio/iommufd.c
> @@ -108,6 +108,116 @@ err_out:
> return ret;
> }
>
> +static int iommufd_alloc_s1bypass_hwpt(struct vfio_device *vdev)
> +{
> + int ret;
> + unsigned long dev_num;
> + unsigned long guest_bdf;
> + struct vfio_device_bind_iommufd bind;
> + struct vfio_device_attach_iommufd_pt attach_data;
> + struct iommu_hwpt_alloc alloc_hwpt;
> + struct iommu_viommu_alloc alloc_viommu;
> + struct iommu_hwpt_arm_smmuv3 bypass_ste;
> + struct iommu_vdevice_alloc alloc_vdev;
> +
> + bind.argsz = sizeof(bind);
> + bind.flags = 0;
> + bind.iommufd = iommu_fd;
> +
> + /* now bind the iommufd */
> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to get info");
> + goto err_out;
> + }
> +
> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> + alloc_hwpt.flags = IOMMU_HWPT_ALLOC_NEST_PARENT;
> + alloc_hwpt.dev_id = bind.out_devid;
> + alloc_hwpt.pt_id = ioas_id;
> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
> + alloc_hwpt.data_len = 0;
> + alloc_hwpt.data_uptr = 0;
> +
> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> + ret = -errno;
> + pr_err("Failed to allocate HWPT");
> + goto err_out;
> + }
> +
> + attach_data.argsz = sizeof(attach_data);
> + attach_data.flags = 0;
> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> +
> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to attach to IOAS ");
> + goto err_out;
> + }
> +
> + alloc_viommu.size = sizeof(alloc_viommu);
> + alloc_viommu.flags = 0;
> + alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
> + alloc_viommu.dev_id = bind.out_devid;
> + alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
> +
> + if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to allocate VIOMMU %d", ret);
> + goto err_out;
> + }
> +#define STRTAB_STE_0_V (1UL << 0)
> +#define STRTAB_STE_0_CFG_S2_TRANS 6
> +#define STRTAB_STE_0_CFG_S1_TRANS 5
> +#define STRTAB_STE_0_CFG_BYPASS 4
> +
> + /* set up virtual ste as bypass ste */
> + bypass_ste.ste[0] = STRTAB_STE_0_V | (STRTAB_STE_0_CFG_BYPASS << 1);
> + bypass_ste.ste[1] = 0x0UL;
> +
> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> + alloc_hwpt.flags = 0;
> + alloc_hwpt.dev_id = bind.out_devid;
> + alloc_hwpt.pt_id = alloc_viommu.out_viommu_id;
> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;
> + alloc_hwpt.data_len = sizeof(bypass_ste);
> + alloc_hwpt.data_uptr = (unsigned long)&bypass_ste;
> +
> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> + ret = -errno;
> + pr_err("Failed to allocate S1 bypass HWPT %d", ret);
> + goto err_out;
> + }
> +
> + alloc_vdev.size = sizeof(alloc_vdev),
> + alloc_vdev.viommu_id = alloc_viommu.out_viommu_id;
> + alloc_vdev.dev_id = bind.out_devid;
> +
> + dev_num = vdev->dev_hdr.dev_num;
> + /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
> + guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
> + alloc_vdev.virt_id = guest_bdf;
> + if (ioctl(iommu_fd, IOMMU_VDEVICE_ALLOC, &alloc_vdev)) {
> + ret = -errno;
> + pr_err("Failed to allocate vdevice %d", ret);
> + goto err_out;
> + }
> +
> + /* Now attach to the nested domain */
> + attach_data.argsz = sizeof(attach_data);
> + attach_data.flags = 0;
> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> + ret = -errno;
> + vfio_dev_err(vdev, "failed to attach Nested config to IOAS %d ", ret);
> + goto err_out;
> + }
> +
> + return 0;
> +err_out:
> + return ret;
> +}
> +
> static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> {
> int ret;
> @@ -160,7 +270,10 @@ static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> goto err_close_device;
> }
>
> - ret = iommufd_alloc_s2bypass_hwpt(vdev);
> + if (kvm->cfg.iommufd_vdevice)
> + ret = iommufd_alloc_s1bypass_hwpt(vdev);
> + else
> + ret = iommufd_alloc_s2bypass_hwpt(vdev);
> if (ret)
> goto err_close_device;
>
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 10/10] util/update_headers: Add vfio related header files to update list
2025-05-25 7:49 ` [RFC PATCH kvmtool 10/10] util/update_headers: Add vfio related header files to update list Aneesh Kumar K.V (Arm)
@ 2025-07-27 18:35 ` Mostafa Saleh
0 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-27 18:35 UTC (permalink / raw)
To: Aneesh Kumar K.V (Arm)
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Sun, May 25, 2025 at 01:19:16PM +0530, Aneesh Kumar K.V (Arm) wrote:
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
> ---
> util/update_headers.sh | 7 +++++++
> 1 file changed, 7 insertions(+)
>
> diff --git a/util/update_headers.sh b/util/update_headers.sh
> index 789e2a42b280..8dd0dd7a9de0 100755
> --- a/util/update_headers.sh
> +++ b/util/update_headers.sh
> @@ -35,6 +35,13 @@ do
> cp -- "$LINUX_ROOT/include/uapi/linux/$header" include/linux
> done
>
> +
> +VFIO_LIST="vfio.h iommufd.h"
> +for header in $VFIO_LIST
> +do
> + cp -- "$LINUX_ROOT/include/uapi/linux/$header" include/linux
> +done
> +
> unset KVMTOOL_PATH
>
> copy_optional_arch () {
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 02/10] vfio: Rename some functions
2025-07-27 18:20 ` Mostafa Saleh
@ 2025-07-29 4:53 ` Aneesh Kumar K.V
0 siblings, 0 replies; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-29 4:53 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Sun, May 25, 2025 at 01:19:08PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> We will add iommufd support in later patches. Rename the old vfio
>> method as legacy vfio.
>>
>> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
>> ---
>> vfio/core.c | 31 ++++++++++++++++---------------
>> 1 file changed, 16 insertions(+), 15 deletions(-)
>>
>> diff --git a/vfio/core.c b/vfio/core.c
>> index c6b305c30cf7..424dc4ed3aef 100644
>> --- a/vfio/core.c
>> +++ b/vfio/core.c
>> @@ -282,7 +282,7 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region)
>> }
>> }
>>
>> -static int vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
>> +static int legacy_vfio_configure_device(struct kvm *kvm, struct vfio_device *vdev)
>> {
>> int ret;
>> struct vfio_group *group = vdev->group;
>> @@ -340,12 +340,12 @@ err_close_device:
>> return ret;
>> }
>>
>> -static int vfio_configure_devices(struct kvm *kvm)
>> +static int legacy_vfio_configure_devices(struct kvm *kvm)
>> {
>> int i, ret;
>>
>> for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
>> - ret = vfio_configure_device(kvm, &vfio_devices[i]);
>> + ret = legacy_vfio_configure_device(kvm, &vfio_devices[i]);
>> if (ret)
>> return ret;
>> }
>> @@ -429,7 +429,7 @@ static int vfio_configure_reserved_regions(struct kvm *kvm,
>> return ret;
>> }
>>
>> -static int vfio_configure_groups(struct kvm *kvm)
>> +static int legacy_vfio_configure_groups(struct kvm *kvm)
>> {
>> int ret;
>> struct vfio_group *group;
>> @@ -454,7 +454,7 @@ static int vfio_configure_groups(struct kvm *kvm)
>> return 0;
>> }
>>
>> -static struct vfio_group *vfio_group_create(struct kvm *kvm, unsigned long id)
>> +static struct vfio_group *legacy_vfio_group_create(struct kvm *kvm, unsigned long id)
>> {
>> int ret;
>> struct vfio_group *group;
>> @@ -512,10 +512,11 @@ static void vfio_group_exit(struct kvm *kvm, struct vfio_group *group)
>> if (--group->refs != 0)
>> return;
>>
>> - ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER);
>> -
>> list_del(&group->list);
>> - close(group->fd);
>> + if (group->fd != -1) {
>> + ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER);
>> + close(group->fd);
>> + }
>
> That seems unrelated to the rename; maybe it's better to move it to the
> patch where IOMMUFD is supported, since it's related to that.
>
Sure. Will make the change as part of the iommufd patch.
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 03/10] vfio: Create new file legacy.c
2025-07-27 18:23 ` Mostafa Saleh
@ 2025-07-29 4:59 ` Aneesh Kumar K.V
0 siblings, 0 replies; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-29 4:59 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Sun, May 25, 2025 at 01:19:09PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> Move legacy vfio config methodology to legacy.c. Also add helper
>> vfio_map/unmap_mem_range which will be switched to function pointers in
>> the later patch.
>>
>> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
>> ---
>> Makefile | 1 +
>> include/kvm/vfio.h | 14 ++
>> vfio/core.c | 342 ++------------------------------------------
>> vfio/legacy.c | 347 +++++++++++++++++++++++++++++++++++++++++++++
>> 4 files changed, 372 insertions(+), 332 deletions(-)
>> create mode 100644 vfio/legacy.c
>>
>> diff --git a/Makefile b/Makefile
>> index 60e551fd0c2a..8b2720f73386 100644
>> --- a/Makefile
>> +++ b/Makefile
>> @@ -65,6 +65,7 @@ OBJS += pci.o
>> OBJS += term.o
>> OBJS += vfio/core.o
>> OBJS += vfio/pci.o
>> +OBJS += vfio/legacy.o
>> OBJS += virtio/blk.o
>> OBJS += virtio/scsi.o
>> OBJS += virtio/console.o
>> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
>> index ac7b6226239a..67a528f18d33 100644
>> --- a/include/kvm/vfio.h
>> +++ b/include/kvm/vfio.h
>> @@ -126,4 +126,18 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region);
>> int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *device);
>> void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
>>
>> +int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
>> +int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size);
>> +
>> +struct kvm_mem_bank;
>> +int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
>> +int vfio_unmap_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
>> +int vfio_configure_reserved_regions(struct kvm *kvm, struct vfio_group *group);
>> +int legacy_vfio__init(struct kvm *kvm);
>> +int legacy_vfio__exit(struct kvm *kvm);
>> +
>> +extern int kvm_vfio_device;
>> +extern struct list_head vfio_groups;
>> +extern struct vfio_device *vfio_devices;
>> +
>> #endif /* KVM__VFIO_H */
>> diff --git a/vfio/core.c b/vfio/core.c
>> index 424dc4ed3aef..2af30df3b2b9 100644
>> --- a/vfio/core.c
>> +++ b/vfio/core.c
>> @@ -4,14 +4,11 @@
>>
>> #include <linux/list.h>
>>
>> -#define VFIO_DEV_DIR "/dev/vfio"
>> -#define VFIO_DEV_NODE VFIO_DEV_DIR "/vfio"
>> #define IOMMU_GROUP_DIR "/sys/kernel/iommu_groups"
>>
>> -static int vfio_container;
>> -static int kvm_vfio_device;
>> -static LIST_HEAD(vfio_groups);
>> -static struct vfio_device *vfio_devices;
>> +int kvm_vfio_device;
>
> kvm_vfio_device shouldn’t be VFIO/IOMMUFD specific, so this leads to
> duplication in both files. I suggest moving its management to vfio/core.c
> (and not externing the fd), and either exporting a function to add devices
> or, maybe better, doing it once from vfio__init().
>
>> +LIST_HEAD(vfio_groups);
> “vfio_groups” seems not to be used by the core code; maybe it’s better to
> have a static version in each file?
> Also, as it is not really used for IOMMUFD, it seems better to move the group
> logic into the legacy file, instead of making iommufd populate groups so that
> the core code handles the group exit.
>
I am also using the groups for reserved region configuration.
static int iommufd_configure_reserved_mem(struct kvm *kvm)
{
	int ret;
	struct vfio_group *group;

	list_for_each_entry(group, &vfio_groups, list) {
		ret = vfio_configure_reserved_regions(kvm, group);
		if (ret)
			return ret;
	}
	return 0;
}
An updated version of these patches can be found at
https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
>
>> +struct vfio_device *vfio_devices;
>>
>
> Similarly for “vfio_devices”, it’s only allocated/freed in core code, but never used.
> But no strong opinion about that.
>
> Thanks,
> Mostafa
>
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 05/10] vfio: Add dma map/unmap handlers
2025-07-27 18:25 ` Mostafa Saleh
@ 2025-07-29 5:03 ` Aneesh Kumar K.V
0 siblings, 0 replies; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-29 5:03 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Sun, May 25, 2025 at 01:19:11PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
>> ---
>> include/kvm/vfio.h | 4 ++--
>> vfio/core.c | 7 +++++--
>> vfio/legacy.c | 7 +++++--
>> 3 files changed, 12 insertions(+), 6 deletions(-)
>>
>> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
>> index 67a528f18d33..fed692b0f265 100644
>> --- a/include/kvm/vfio.h
>> +++ b/include/kvm/vfio.h
>> @@ -126,8 +126,8 @@ void vfio_unmap_region(struct kvm *kvm, struct vfio_region *region);
>> int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *device);
>> void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
>>
>> -int vfio_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
>> -int vfio_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size);
>> +extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
>> +extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
>>
>> struct kvm_mem_bank;
>> int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
>> diff --git a/vfio/core.c b/vfio/core.c
>> index 2af30df3b2b9..32a8e0fe67c0 100644
>> --- a/vfio/core.c
>> +++ b/vfio/core.c
>> @@ -10,6 +10,9 @@ int kvm_vfio_device;
>> LIST_HEAD(vfio_groups);
>> struct vfio_device *vfio_devices;
>>
>> +int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
>> +int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
>
> I think it's better to wrap those in an ops struct; it can be set once, and
> in the next patches it can be used for init/exit instead of having checks
> like “if (kvm->cfg.iommufd || kvm->cfg.iommufd_vdevice)”.
>
Sure. I’ll revise the patch to introduce the ops struct.
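Something along these lines, as an illustrative sketch (names made up, and
iommufd_ops would be defined next to the iommufd code):

	struct iommu_ops {
		int (*init)(struct kvm *kvm);
		int (*exit)(struct kvm *kvm);
		int (*map_mem_range)(struct kvm *kvm, __u64 host_addr,
				     __u64 iova, __u64 size);
		int (*unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
	};

	static const struct iommu_ops legacy_vfio_ops = {
		.init		 = legacy_vfio__init,
		.exit		 = legacy_vfio__exit,
		.map_mem_range	 = vfio_map_mem_range,
		.unmap_mem_range = vfio_unmap_mem_range,
	};

	static const struct iommu_ops *iommu_ops;

	/* chosen once in vfio__init() instead of checking kvm->cfg everywhere */
	iommu_ops = kvm->cfg.iommufd ? &iommufd_ops : &legacy_vfio_ops;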
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support
2025-07-27 18:31 ` Mostafa Saleh
@ 2025-07-29 5:12 ` Aneesh Kumar K.V
2025-07-29 9:38 ` Mostafa Saleh
0 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-29 5:12 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Sun, May 25, 2025 at 01:19:13PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> This uses a stage1 translate, stage2 bypass IOMMU config.
>>
>> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
>> ---
>> Makefile | 1 +
>> builtin-run.c | 1 +
>> include/kvm/kvm-config.h | 1 +
>> include/kvm/vfio.h | 2 +
>> vfio/core.c | 5 +
>> vfio/iommufd.c | 368 +++++++++++++++++++++++++++++++++++++++
>> 6 files changed, 378 insertions(+)
>> create mode 100644 vfio/iommufd.c
>>
>> diff --git a/Makefile b/Makefile
>> index 8b2720f73386..740b95c7c3c3 100644
>> --- a/Makefile
>> +++ b/Makefile
>> @@ -64,6 +64,7 @@ OBJS += mmio.o
>> OBJS += pci.o
>> OBJS += term.o
>> OBJS += vfio/core.o
>> +OBJS += vfio/iommufd.o
>> OBJS += vfio/pci.o
>> OBJS += vfio/legacy.o
>> OBJS += virtio/blk.o
>> diff --git a/builtin-run.c b/builtin-run.c
>> index 81f255f911b3..39198f9bc0d6 100644
>> --- a/builtin-run.c
>> +++ b/builtin-run.c
>> @@ -262,6 +262,7 @@ static int loglevel_parser(const struct option *opt, const char *arg, int unset)
>> OPT_CALLBACK('\0', "vfio-pci", NULL, "[domain:]bus:dev.fn", \
>> "Assign a PCI device to the virtual machine", \
>> vfio_device_parser, kvm), \
>> + OPT_BOOLEAN('\0', "iommufd", &(cfg)->iommufd, "Use iommufd interface"), \
>> \
>> OPT_GROUP("Debug options:"), \
>> OPT_CALLBACK_NOOPT('\0', "debug", kvm, NULL, \
>> diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
>> index 592b035785c9..632eaf84b7eb 100644
>> --- a/include/kvm/kvm-config.h
>> +++ b/include/kvm/kvm-config.h
>> @@ -65,6 +65,7 @@ struct kvm_config {
>> bool ioport_debug;
>> bool mmio_debug;
>> int virtio_transport;
>> + bool iommufd;
>> };
>>
>> #endif
>> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
>> index fed692b0f265..37a2b5ac3dad 100644
>> --- a/include/kvm/vfio.h
>> +++ b/include/kvm/vfio.h
>> @@ -128,6 +128,8 @@ void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
>>
>> extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
>> extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
>> +int iommufd__init(struct kvm *kvm);
>> +int iommufd__exit(struct kvm *kvm);
>>
>> struct kvm_mem_bank;
>> int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
>> diff --git a/vfio/core.c b/vfio/core.c
>> index 32a8e0fe67c0..0b1796c54ffd 100644
>> --- a/vfio/core.c
>> +++ b/vfio/core.c
>> @@ -373,6 +373,8 @@ static int vfio__init(struct kvm *kvm)
>> }
>> kvm_vfio_device = device.fd;
>>
>> + if (kvm->cfg.iommufd)
>> + return iommufd__init(kvm);
>> return legacy_vfio__init(kvm);
>> }
>> dev_base_init(vfio__init);
>> @@ -393,6 +395,9 @@ static int vfio__exit(struct kvm *kvm)
>>
>> free(kvm->cfg.vfio_devices);
>>
>> + if (kvm->cfg.iommufd)
>> + return iommufd__exit(kvm);
>> +
>> return legacy_vfio__exit(kvm);
>> }
>> dev_base_exit(vfio__exit);
>> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
>> new file mode 100644
>> index 000000000000..3728a06cb318
>> --- /dev/null
>> +++ b/vfio/iommufd.c
>> @@ -0,0 +1,368 @@
>> +#include <sys/types.h>
>> +#include <dirent.h>
>> +
>> +#include "kvm/kvm.h"
>> +#include <linux/iommufd.h>
>> +#include <linux/list.h>
>> +
>> +#define VFIO_DEV_DIR "/dev/vfio"
> This duplicates the define in the legacy file, so maybe move it to the header?
>
>> +#define VFIO_DEV_NODE VFIO_DEV_DIR "/devices/"
>> +#define IOMMU_DEV "/dev/iommu"
>> +
>> +static int iommu_fd;
>> +static int ioas_id;
>> +
>> +static int __iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
>> +{
>> + int ret;
>> +
>> + vdev->info.argsz = sizeof(vdev->info);
>> + if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
>> + ret = -errno;
>> + vfio_dev_err(vdev, "failed to get info");
>> + goto err_close_device;
>> + }
>> +
>> + if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
>> + ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
>> + vfio_dev_warn(vdev, "failed to reset device");
>> +
>> + vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
>> + if (!vdev->regions) {
>> + ret = -ENOMEM;
>> + goto err_close_device;
>> + }
>> +
>> + /* Now for the bus-specific initialization... */
>> + switch (vdev->params->type) {
>> + case VFIO_DEVICE_PCI:
>> + BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
>> + ret = vfio_pci_setup_device(kvm, vdev);
>> + break;
>> + default:
>> + BUG_ON(1);
>> + ret = -EINVAL;
>> + }
>> +
>> + if (ret)
>> + goto err_free_regions;
>> +
>> + vfio_dev_info(vdev, "assigned to device number 0x%x ",
>> + vdev->dev_hdr.dev_num) ;
>> +
>> + return 0;
>> +
>> +err_free_regions:
>> + free(vdev->regions);
>> +err_close_device:
>> + close(vdev->fd);
>> +
>> + return ret;
>> +}
>> +
>> +static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
>> +{
>> + int ret;
>> + DIR *dir = NULL;
>> + struct dirent *dir_ent;
>> + bool found_dev = false;
>> + char pci_dev_path[PATH_MAX];
>> + char vfio_dev_path[PATH_MAX];
>> + struct iommu_hwpt_alloc alloc_hwpt;
>> + struct vfio_device_bind_iommufd bind;
>> + struct vfio_device_attach_iommufd_pt attach_data;
>> +
>> + ret = snprintf(pci_dev_path, PATH_MAX, "%s/vfio-dev/", vdev->sysfs_path);
>> + if (ret < 0 || ret == PATH_MAX)
>> + return -EINVAL;
>> +
>> + dir = opendir(pci_dev_path);
>> + if (!dir)
>> + return -EINVAL;
>> +
>> + while ((dir_ent = readdir(dir))) {
>> + if (!strncmp(dir_ent->d_name, "vfio", 4)) {
>> + ret = snprintf(vfio_dev_path, PATH_MAX, VFIO_DEV_NODE "%s", dir_ent->d_name);
>> + if (ret < 0 || ret == PATH_MAX) {
>> + ret = -EINVAL;
>> + goto err_close_dir;
>> + }
>> + found_dev = true;
>> + break;
>> + }
>> + }
>> + if (!found_dev) {
>> + ret = -ENODEV;
>> + goto err_close_dir;
>> + }
>
> By this point we have already found the device (on error we jump to
> "err_close_dir"), so there is no need for the extra flag.
>
I didn't follow this. If we don't find the "vfio<x>" directory under the PCI
device's sysfs path in vfio-dev/, we need to error out.
>
>> +
>> + vdev->fd = open(vfio_dev_path, O_RDWR);
>> + if (vdev->fd == -1) {
>> + ret = errno;
>> + pr_err("Failed to open %s", vfio_dev_path);
>> + goto err_close_dir;
>> + }
>> +
>> + struct kvm_device_attr attr = {
>> + .group = KVM_DEV_VFIO_FILE,
>> + .attr = KVM_DEV_VFIO_FILE_ADD,
>> + .addr = (__u64)&vdev->fd,
>> + };
>> +
>> + if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
>> + ret = -errno;
>> + pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
>> + goto err_close_device;
>> + }
>> +
>> + bind.argsz = sizeof(bind);
>> + bind.flags = 0;
>> + bind.iommufd = iommu_fd;
>> +
>> + /* now bind the iommufd */
>> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
>> + ret = -errno;
>> + vfio_dev_err(vdev, "failed to get info");
>> + goto err_close_device;
>> + }
>> +
>> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
>> + alloc_hwpt.flags = 0;
>> + alloc_hwpt.dev_id = bind.out_devid;
>> + alloc_hwpt.pt_id = ioas_id;
>> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
>> + alloc_hwpt.data_len = 0;
>> + alloc_hwpt.data_uptr = 0;
>> +
>> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
>> + ret = -errno;
>> + pr_err("Failed to allocate HWPT");
>> + goto err_close_device;
>> + }
>> +
>> + attach_data.argsz = sizeof(attach_data);
>> + attach_data.flags = 0;
>> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
>> +
>> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
>> + ret = -errno;
>> + vfio_dev_err(vdev, "failed to attach to IOAS ");
>
> Extra space.
>
>> + goto err_close_device;
>> + }
>> +
>> + closedir(dir);
>> + return __iommufd_configure_device(kvm, vdev);
>> +
>> +err_close_device:
>> + close(vdev->fd);
>> +err_close_dir:
>> + closedir(dir);
>> + return ret;
>> +}
>> +
>> +static int iommufd_configure_devices(struct kvm *kvm)
>> +{
>> + int i, ret;
>> +
>> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
>> + ret = iommufd_configure_device(kvm, &vfio_devices[i]);
>> + if (ret)
>> + return ret;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static int iommufd_create_ioas(struct kvm *kvm)
>> +{
>> + int ret;
>> + struct iommu_ioas_alloc alloc_data;
>> + iommu_fd = open(IOMMU_DEV, O_RDWR);
>> + if (iommu_fd == -1) {
>> + ret = errno;
>> + pr_err("Failed to open %s", IOMMU_DEV);
>> + return ret;
>> + }
>> +
>> + alloc_data.size = sizeof(alloc_data);
>> + alloc_data.flags = 0;
>> +
>> + if (ioctl(iommu_fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
>> + ret = errno;
>
> For all other ioctls we return -errno, except here; is there a reason
> for that?
>
No. Will update the patch.
>> + pr_err("Failed to alloc IOAS ");
> Also, there is an extra space at the end, and it might be more consistent
> with the rest of the code to use “vfio_dev_err”.
>
>> + goto err_close_device;
>> + }
>> + ioas_id = alloc_data.out_ioas_id;
>> + return 0;
>> +
>> +err_close_device:
>> + close(iommu_fd);
>> + return ret;
>> +}
>> +
>> +static int vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
>> +{
>> + int ret, dirfd;
>> + char *group_name;
>> + unsigned long group_id;
>> + char dev_path[PATH_MAX];
>> + struct vfio_group *group = NULL;
>> +
>> + ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
>> + vdev->params->bus, vdev->params->name);
>> + if (ret < 0 || ret == PATH_MAX)
>> + return -EINVAL;
>> +
>> + vdev->sysfs_path = strndup(dev_path, PATH_MAX);
>> + if (!vdev->sysfs_path)
>> + return -ENOMEM;
>> +
>> + /* Find IOMMU group for this device */
>> + dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
>> + if (dirfd < 0) {
>> + vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
>> + return -errno;
>> + }
>> +
>> + ret = readlinkat(dirfd, "iommu_group", dev_path, PATH_MAX);
>> + if (ret < 0) {
>> + vfio_dev_err(vdev, "no iommu_group");
>> + goto out_close;
>> + }
>> + if (ret == PATH_MAX) {
>> + ret = -ENOMEM;
>> + goto out_close;
>> + }
>> +
>> + dev_path[ret] = '\0';
>> + group_name = basename(dev_path);
>> + errno = 0;
>> + group_id = strtoul(group_name, NULL, 10);
>> + if (errno) {
>> + ret = -errno;
>> + goto out_close;
>> + }
>> +
>> + list_for_each_entry(group, &vfio_groups, list) {
>> + if (group->id == group_id) {
>> + group->refs++;
>> + break;
>> + }
>> + }
>> + if (group->id != group_id) {
>> + group = calloc(1, sizeof(*group));
>> + if (!group) {
>> + ret = -ENOMEM;
>> + goto out_close;
>> + }
>> + group->id = group_id;
>> + group->refs = 1;
>> + /* no group fd for iommufd */
>> + group->fd = -1;
>> + list_add(&group->list, &vfio_groups);
>> + }
>> + vdev->group = group;
>> + ret = 0;
>> +
>
> There is some duplication with “vfio_group_get_for_dev”; I wonder if we could
> re-use some of this code in a helper.
>
>> +out_close:
>> + close(dirfd);
>> + return ret;
>> +}
>> +
>> +static int iommufd_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
>> +{
>> + int ret = 0;
>> + struct iommu_ioas_map dma_map;
>> +
>> + dma_map.size = sizeof(dma_map);
>> + dma_map.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE |
>> + IOMMU_IOAS_MAP_FIXED_IOVA;
>> + dma_map.ioas_id = ioas_id;
>> + dma_map.__reserved = 0;
>> + dma_map.user_va = host_addr;
>> + dma_map.iova = iova;
>> + dma_map.length = size;
>> +
>> + /* Map the guest memory for DMA (i.e. provide isolation) */
>> + if (ioctl(iommu_fd, IOMMU_IOAS_MAP, &dma_map)) {
>> + ret = -errno;
>> + pr_err("Failed to map 0x%llx -> 0x%llx (%u) for DMA",
>> + dma_map.iova, dma_map.user_va, dma_map.size);
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +static int iommufd_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
>> +{
>> + int ret = 0;
>> + struct iommu_ioas_unmap dma_unmap;
>> +
>> + dma_unmap.size = sizeof(dma_unmap);
>> + dma_unmap.ioas_id = ioas_id;
>> + dma_unmap.iova = iova;
>> + dma_unmap.length = size;
>> +
>> + if (ioctl(iommu_fd, IOMMU_IOAS_UNMAP, &dma_unmap)) {
>> + ret = -errno;
>> + if (ret != -ENOENT)
>> + pr_err("Failed to unmap 0x%llx - size (%u) for DMA %d",
>> + dma_unmap.iova, dma_unmap.size, ret);
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +static int iommufd_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
>> +{
>> + return iommufd_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
>> +}
>> +
>> +static int iommufd_configure_reserved_mem(struct kvm *kvm)
>> +{
>> + int ret;
>> + struct vfio_group *group;
>> +
>> + list_for_each_entry(group, &vfio_groups, list) {
>> + ret = vfio_configure_reserved_regions(kvm, group);
>> + if (ret)
>> + return ret;
>> + }
>> + return 0;
>> +}
>> +
>> +int iommufd__init(struct kvm *kvm)
>> +{
>> + int ret, i;
>> +
>> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
>> + vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
>> +
>> + ret = vfio_device_init(kvm, &vfio_devices[i]);
>> + if (ret)
>> + return ret;
>> + }
>> +
>> + ret = iommufd_create_ioas(kvm);
>> + if (ret)
>> + return ret;
>> +
>> + ret = iommufd_configure_devices(kvm);
>> + if (ret)
>> + return ret;
>> +
>
> Any failure after this point just returns and leaks iommu_fd, even though
> iommufd_create_ioas() does “close(iommu_fd)” on failure.
> Also, don’t we want to close “iommu_fd” at exit, similarly to the VFIO
> container?
>
That is already fixed in the latest version.
> Thanks,
> Mostafa
>
>> + ret = iommufd_configure_reserved_mem(kvm);
>> + if (ret)
>> + return ret;
>> +
>> + dma_map_mem_range = iommufd_map_mem_range;
>> + dma_unmap_mem_range = iommufd_unmap_mem_range;
>> + /* Now map the full memory */
>> + return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, iommufd_map_mem_bank,
>> + NULL);
>> +}
>> +
>> +int iommufd__exit(struct kvm *kvm)
>> +{
>> + return 0;
>> +}
>> --
>> 2.43.0
>>
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper
2025-07-27 18:32 ` Mostafa Saleh
@ 2025-07-29 5:14 ` Aneesh Kumar K.V
2025-07-29 9:43 ` Mostafa Saleh
0 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-29 5:14 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Sun, May 25, 2025 at 01:19:14PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> alloc_hwpt.flags = 0; implies we prefer stage1 translation. Hence name
>> the helper iommufd_alloc_s2bypass_hwpt().
>
> This patch moves the recently added code into a new function;
> can't this be squashed?
>
Yes. Will update the patch.
> Also, I believe that with “IOMMU_HWPT_DATA_NONE”, we shouldn’t make
> any assumptions in userspace about which stage is used.
>
> The only guarantee is that IOMMU_IOAS_MAP/IOMMU_IOAS_UNMAP works.
>
> So, I believe the naming for "s2bypass" is not accurate.
>
Any suggestions w.r.t. the helper function name?
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-27 18:35 ` Mostafa Saleh
@ 2025-07-29 5:19 ` Aneesh Kumar K.V
2025-07-29 9:41 ` Mostafa Saleh
0 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-29 5:19 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> This also allocates a stage1 bypass and stage2 translate table.
>
> So this makes IOMMUFD work only with SMMUv3?
>
> I don’t understand the point of this configuration: it seems to add
> extra complexity and extra HW constraints with no extra value.
>
> Not related to this patch: do you have plans to add some of the other iommufd
> features? I think things such as page faults might be useful.
>
The primary goal of adding viommu/vdevice support is to enable kvmtool
to serve as the VMM for ARM CCA secure device development. This requires
a viommu implementation so that a KVM file descriptor can be associated
with the corresponding viommu.
The full set of related patches is available here:
https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support
2025-07-29 5:12 ` Aneesh Kumar K.V
@ 2025-07-29 9:38 ` Mostafa Saleh
0 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-29 9:38 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Tue, Jul 29, 2025 at 10:42:42AM +0530, Aneesh Kumar K.V wrote:
> Mostafa Saleh <smostafa@google.com> writes:
>
> > On Sun, May 25, 2025 at 01:19:13PM +0530, Aneesh Kumar K.V (Arm) wrote:
> >> This uses a stage1 translate, stage2 bypass IOMMU config.
> >>
> >> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
> >> ---
> >> Makefile | 1 +
> >> builtin-run.c | 1 +
> >> include/kvm/kvm-config.h | 1 +
> >> include/kvm/vfio.h | 2 +
> >> vfio/core.c | 5 +
> >> vfio/iommufd.c | 368 +++++++++++++++++++++++++++++++++++++++
> >> 6 files changed, 378 insertions(+)
> >> create mode 100644 vfio/iommufd.c
> >>
> >> diff --git a/Makefile b/Makefile
> >> index 8b2720f73386..740b95c7c3c3 100644
> >> --- a/Makefile
> >> +++ b/Makefile
> >> @@ -64,6 +64,7 @@ OBJS += mmio.o
> >> OBJS += pci.o
> >> OBJS += term.o
> >> OBJS += vfio/core.o
> >> +OBJS += vfio/iommufd.o
> >> OBJS += vfio/pci.o
> >> OBJS += vfio/legacy.o
> >> OBJS += virtio/blk.o
> >> diff --git a/builtin-run.c b/builtin-run.c
> >> index 81f255f911b3..39198f9bc0d6 100644
> >> --- a/builtin-run.c
> >> +++ b/builtin-run.c
> >> @@ -262,6 +262,7 @@ static int loglevel_parser(const struct option *opt, const char *arg, int unset)
> >> OPT_CALLBACK('\0', "vfio-pci", NULL, "[domain:]bus:dev.fn", \
> >> "Assign a PCI device to the virtual machine", \
> >> vfio_device_parser, kvm), \
> >> + OPT_BOOLEAN('\0', "iommufd", &(cfg)->iommufd, "Use iommufd interface"), \
> >> \
> >> OPT_GROUP("Debug options:"), \
> >> OPT_CALLBACK_NOOPT('\0', "debug", kvm, NULL, \
> >> diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
> >> index 592b035785c9..632eaf84b7eb 100644
> >> --- a/include/kvm/kvm-config.h
> >> +++ b/include/kvm/kvm-config.h
> >> @@ -65,6 +65,7 @@ struct kvm_config {
> >> bool ioport_debug;
> >> bool mmio_debug;
> >> int virtio_transport;
> >> + bool iommufd;
> >> };
> >>
> >> #endif
> >> diff --git a/include/kvm/vfio.h b/include/kvm/vfio.h
> >> index fed692b0f265..37a2b5ac3dad 100644
> >> --- a/include/kvm/vfio.h
> >> +++ b/include/kvm/vfio.h
> >> @@ -128,6 +128,8 @@ void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev);
> >>
> >> extern int (*dma_map_mem_range)(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size);
> >> extern int (*dma_unmap_mem_range)(struct kvm *kvm, __u64 iova, __u64 size);
> >> +int iommufd__init(struct kvm *kvm);
> >> +int iommufd__exit(struct kvm *kvm);
> >>
> >> struct kvm_mem_bank;
> >> int vfio_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data);
> >> diff --git a/vfio/core.c b/vfio/core.c
> >> index 32a8e0fe67c0..0b1796c54ffd 100644
> >> --- a/vfio/core.c
> >> +++ b/vfio/core.c
> >> @@ -373,6 +373,8 @@ static int vfio__init(struct kvm *kvm)
> >> }
> >> kvm_vfio_device = device.fd;
> >>
> >> + if (kvm->cfg.iommufd)
> >> + return iommufd__init(kvm);
> >> return legacy_vfio__init(kvm);
> >> }
> >> dev_base_init(vfio__init);
> >> @@ -393,6 +395,9 @@ static int vfio__exit(struct kvm *kvm)
> >>
> >> free(kvm->cfg.vfio_devices);
> >>
> >> + if (kvm->cfg.iommufd)
> >> + return iommufd__exit(kvm);
> >> +
> >> return legacy_vfio__exit(kvm);
> >> }
> >> dev_base_exit(vfio__exit);
> >> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
> >> new file mode 100644
> >> index 000000000000..3728a06cb318
> >> --- /dev/null
> >> +++ b/vfio/iommufd.c
> >> @@ -0,0 +1,368 @@
> >> +#include <sys/types.h>
> >> +#include <dirent.h>
> >> +
> >> +#include "kvm/kvm.h"
> >> +#include <linux/iommufd.h>
> >> +#include <linux/list.h>
> >> +
> >> +#define VFIO_DEV_DIR "/dev/vfio"
> > This is duplicated in the legacy file, so maybe move it to the header?
> >
> >> +#define VFIO_DEV_NODE VFIO_DEV_DIR "/devices/"
> >> +#define IOMMU_DEV "/dev/iommu"
> >> +
> >> +static int iommu_fd;
> >> +static int ioas_id;
> >> +
> >> +static int __iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> >> +{
> >> + int ret;
> >> +
> >> + vdev->info.argsz = sizeof(vdev->info);
> >> + if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &vdev->info)) {
> >> + ret = -errno;
> >> + vfio_dev_err(vdev, "failed to get info");
> >> + goto err_close_device;
> >> + }
> >> +
> >> + if (vdev->info.flags & VFIO_DEVICE_FLAGS_RESET &&
> >> + ioctl(vdev->fd, VFIO_DEVICE_RESET) < 0)
> >> + vfio_dev_warn(vdev, "failed to reset device");
> >> +
> >> + vdev->regions = calloc(vdev->info.num_regions, sizeof(*vdev->regions));
> >> + if (!vdev->regions) {
> >> + ret = -ENOMEM;
> >> + goto err_close_device;
> >> + }
> >> +
> >> + /* Now for the bus-specific initialization... */
> >> + switch (vdev->params->type) {
> >> + case VFIO_DEVICE_PCI:
> >> + BUG_ON(!(vdev->info.flags & VFIO_DEVICE_FLAGS_PCI));
> >> + ret = vfio_pci_setup_device(kvm, vdev);
> >> + break;
> >> + default:
> >> + BUG_ON(1);
> >> + ret = -EINVAL;
> >> + }
> >> +
> >> + if (ret)
> >> + goto err_free_regions;
> >> +
> >> + vfio_dev_info(vdev, "assigned to device number 0x%x ",
> >> + vdev->dev_hdr.dev_num) ;
> >> +
> >> + return 0;
> >> +
> >> +err_free_regions:
> >> + free(vdev->regions);
> >> +err_close_device:
> >> + close(vdev->fd);
> >> +
> >> + return ret;
> >> +}
> >> +
> >> +static int iommufd_configure_device(struct kvm *kvm, struct vfio_device *vdev)
> >> +{
> >> + int ret;
> >> + DIR *dir = NULL;
> >> + struct dirent *dir_ent;
> >> + bool found_dev = false;
> >> + char pci_dev_path[PATH_MAX];
> >> + char vfio_dev_path[PATH_MAX];
> >> + struct iommu_hwpt_alloc alloc_hwpt;
> >> + struct vfio_device_bind_iommufd bind;
> >> + struct vfio_device_attach_iommufd_pt attach_data;
> >> +
> >> + ret = snprintf(pci_dev_path, PATH_MAX, "%s/vfio-dev/", vdev->sysfs_path);
> >> + if (ret < 0 || ret == PATH_MAX)
> >> + return -EINVAL;
> >> +
> >> + dir = opendir(pci_dev_path);
> >> + if (!dir)
> >> + return -EINVAL;
> >> +
> >> + while ((dir_ent = readdir(dir))) {
> >> + if (!strncmp(dir_ent->d_name, "vfio", 4)) {
> >> + ret = snprintf(vfio_dev_path, PATH_MAX, VFIO_DEV_NODE "%s", dir_ent->d_name);
> >> + if (ret < 0 || ret == PATH_MAX) {
> >> + ret = -EINVAL;
> >> + goto err_close_dir;
> >> + }
> >> + found_dev = true;
> >> + break;
> >> + }
> >> + }
> >> + if (!found_dev) {
> >> + ret = -ENODEV;
> >> + goto err_close_dir;
> >> + }
> >
> > At this point we have already found the device, as on error there is
> > "err_close_dir", so there is no need for the extra flag.
> >
>
> I didn't follow this. If we don't find the "vfio<x>" directory under
> the PCI device's sysfs path in vfio-dev/, we need to error out.
My bad, I misread the code.
Thanks,
Mostafa
>
> >
> >> +
> >> + vdev->fd = open(vfio_dev_path, O_RDWR);
> >> + if (vdev->fd == -1) {
> >> + ret = errno;
> >> + pr_err("Failed to open %s", vfio_dev_path);
> >> + goto err_close_dir;
> >> + }
> >> +
> >> + struct kvm_device_attr attr = {
> >> + .group = KVM_DEV_VFIO_FILE,
> >> + .attr = KVM_DEV_VFIO_FILE_ADD,
> >> + .addr = (__u64)&vdev->fd,
> >> + };
> >> +
> >> + if (ioctl(kvm_vfio_device, KVM_SET_DEVICE_ATTR, &attr)) {
> >> + ret = -errno;
> >> + pr_err("Failed KVM_SET_DEVICE_ATTR for KVM_DEV_VFIO_FILE");
> >> + goto err_close_device;
> >> + }
> >> +
> >> + bind.argsz = sizeof(bind);
> >> + bind.flags = 0;
> >> + bind.iommufd = iommu_fd;
> >> +
> >> + /* now bind the iommufd */
> >> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
> >> + ret = -errno;
> >> + vfio_dev_err(vdev, "failed to get info");
> >> + goto err_close_device;
> >> + }
> >> +
> >> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
> >> + alloc_hwpt.flags = 0;
> >> + alloc_hwpt.dev_id = bind.out_devid;
> >> + alloc_hwpt.pt_id = ioas_id;
> >> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
> >> + alloc_hwpt.data_len = 0;
> >> + alloc_hwpt.data_uptr = 0;
> >> +
> >> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
> >> + ret = -errno;
> >> + pr_err("Failed to allocate HWPT");
> >> + goto err_close_device;
> >> + }
> >> +
> >> + attach_data.argsz = sizeof(attach_data);
> >> + attach_data.flags = 0;
> >> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
> >> +
> >> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
> >> + ret = -errno;
> >> + vfio_dev_err(vdev, "failed to attach to IOAS ");
> >
> > Extra space.
> >
> >> + goto err_close_device;
> >> + }
> >> +
> >> + closedir(dir);
> >> + return __iommufd_configure_device(kvm, vdev);
> >> +
> >> +err_close_device:
> >> + close(vdev->fd);
> >> +err_close_dir:
> >> + closedir(dir);
> >> + return ret;
> >> +}
> >> +
> >> +static int iommufd_configure_devices(struct kvm *kvm)
> >> +{
> >> + int i, ret;
> >> +
> >> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> >> + ret = iommufd_configure_device(kvm, &vfio_devices[i]);
> >> + if (ret)
> >> + return ret;
> >> + }
> >> +
> >> + return 0;
> >> +}
> >> +
> >> +static int iommufd_create_ioas(struct kvm *kvm)
> >> +{
> >> + int ret;
> >> + struct iommu_ioas_alloc alloc_data;
> >> + iommu_fd = open(IOMMU_DEV, O_RDWR);
> >> + if (iommu_fd == -1) {
> >> + ret = errno;
> >> + pr_err("Failed to open %s", IOMMU_DEV);
> >> + return ret;
> >> + }
> >> +
> >> + alloc_data.size = sizeof(alloc_data);
> >> + alloc_data.flags = 0;
> >> +
> >> + if (ioctl(iommu_fd, IOMMU_IOAS_ALLOC, &alloc_data)) {
> >> + ret = errno;
> >
> > For all other ioctls, we return -errno, except here; is there a reason
> > for that?
> >
>
> No. Will update the patch.
>
>
> >> + pr_err("Failed to alloc IOAS ");
> > Also, extra space at the end; it would also be more consistent with the
> > rest of the code to use “vfio_dev_err”.
> >
> >> + goto err_close_device;
> >> + }
> >> + ioas_id = alloc_data.out_ioas_id;
> >> + return 0;
> >> +
> >> +err_close_device:
> >> + close(iommu_fd);
> >> + return ret;
> >> +}
> >> +
> >> +static int vfio_device_init(struct kvm *kvm, struct vfio_device *vdev)
> >> +{
> >> + int ret, dirfd;
> >> + char *group_name;
> >> + unsigned long group_id;
> >> + char dev_path[PATH_MAX];
> >> + struct vfio_group *group = NULL;
> >> +
> >> + ret = snprintf(dev_path, PATH_MAX, "/sys/bus/%s/devices/%s",
> >> + vdev->params->bus, vdev->params->name);
> >> + if (ret < 0 || ret == PATH_MAX)
> >> + return -EINVAL;
> >> +
> >> + vdev->sysfs_path = strndup(dev_path, PATH_MAX);
> >> + if (!vdev->sysfs_path)
> >> + return -ENOMEM;
> >> +
> >> + /* Find IOMMU group for this device */
> >> + dirfd = open(vdev->sysfs_path, O_DIRECTORY | O_PATH | O_RDONLY);
> >> + if (dirfd < 0) {
> >> + vfio_dev_err(vdev, "failed to open '%s'", vdev->sysfs_path);
> >> + return -errno;
> >> + }
> >> +
> >> + ret = readlinkat(dirfd, "iommu_group", dev_path, PATH_MAX);
> >> + if (ret < 0) {
> >> + vfio_dev_err(vdev, "no iommu_group");
> >> + goto out_close;
> >> + }
> >> + if (ret == PATH_MAX) {
> >> + ret = -ENOMEM;
> >> + goto out_close;
> >> + }
> >> +
> >> + dev_path[ret] = '\0';
> >> + group_name = basename(dev_path);
> >> + errno = 0;
> >> + group_id = strtoul(group_name, NULL, 10);
> >> + if (errno) {
> >> + ret = -errno;
> >> + goto out_close;
> >> + }
> >> +
> >> + list_for_each_entry(group, &vfio_groups, list) {
> >> + if (group->id == group_id) {
> >> + group->refs++;
> >> + break;
> >> + }
> >> + }
> >> + if (group->id != group_id) {
> >> + group = calloc(1, sizeof(*group));
> >> + if (!group) {
> >> + ret = -ENOMEM;
> >> + goto out_close;
> >> + }
> >> + group->id = group_id;
> >> + group->refs = 1;
> >> + /* no group fd for iommufd */
> >> + group->fd = -1;
> >> + list_add(&group->list, &vfio_groups);
> >> + }
> >> + vdev->group = group;
> >> + ret = 0;
> >> +
> >
> > There is some duplication with “vfio_group_get_for_dev”; I wonder if we
> > could reuse some of this code in a helper.
> >
> >> +out_close:
> >> + close(dirfd);
> >> + return ret;
> >> +}
> >> +
> >> +static int iommufd_map_mem_range(struct kvm *kvm, __u64 host_addr, __u64 iova, __u64 size)
> >> +{
> >> + int ret = 0;
> >> + struct iommu_ioas_map dma_map;
> >> +
> >> + dma_map.size = sizeof(dma_map);
> >> + dma_map.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE |
> >> + IOMMU_IOAS_MAP_FIXED_IOVA;
> >> + dma_map.ioas_id = ioas_id;
> >> + dma_map.__reserved = 0;
> >> + dma_map.user_va = host_addr;
> >> + dma_map.iova = iova;
> >> + dma_map.length = size;
> >> +
> >> + /* Map the guest memory for DMA (i.e. provide isolation) */
> >> + if (ioctl(iommu_fd, IOMMU_IOAS_MAP, &dma_map)) {
> >> + ret = -errno;
> >> + pr_err("Failed to map 0x%llx -> 0x%llx (%u) for DMA",
> >> + dma_map.iova, dma_map.user_va, dma_map.size);
> >> + }
> >> +
> >> + return ret;
> >> +}
> >> +
> >> +static int iommufd_unmap_mem_range(struct kvm *kvm, __u64 iova, __u64 size)
> >> +{
> >> + int ret = 0;
> >> + struct iommu_ioas_unmap dma_unmap;
> >> +
> >> + dma_unmap.size = sizeof(dma_unmap);
> >> + dma_unmap.ioas_id = ioas_id;
> >> + dma_unmap.iova = iova;
> >> + dma_unmap.length = size;
> >> +
> >> + if (ioctl(iommu_fd, IOMMU_IOAS_UNMAP, &dma_unmap)) {
> >> + ret = -errno;
> >> + if (ret != -ENOENT)
> >> + pr_err("Failed to unmap 0x%llx - size (%u) for DMA %d",
> >> + dma_unmap.iova, dma_unmap.size, ret);
> >> + }
> >> +
> >> + return ret;
> >> +}
> >> +
> >> +static int iommufd_map_mem_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
> >> +{
> >> + return iommufd_map_mem_range(kvm, (u64)bank->host_addr, bank->guest_phys_addr, bank->size);
> >> +}
> >> +
> >> +static int iommufd_configure_reserved_mem(struct kvm *kvm)
> >> +{
> >> + int ret;
> >> + struct vfio_group *group;
> >> +
> >> + list_for_each_entry(group, &vfio_groups, list) {
> >> + ret = vfio_configure_reserved_regions(kvm, group);
> >> + if (ret)
> >> + return ret;
> >> + }
> >> + return 0;
> >> +}
> >> +
> >> +int iommufd__init(struct kvm *kvm)
> >> +{
> >> + int ret, i;
> >> +
> >> + for (i = 0; i < kvm->cfg.num_vfio_devices; ++i) {
> >> + vfio_devices[i].params = &kvm->cfg.vfio_devices[i];
> >> +
> >> + ret = vfio_device_init(kvm, &vfio_devices[i]);
> >> + if (ret)
> >> + return ret;
> >> + }
> >> +
> >> + ret = iommufd_create_ioas(kvm);
> >> + if (ret)
> >> + return ret;
> >> +
> >> + ret = iommufd_configure_devices(kvm);
> >> + if (ret)
> >> + return ret;
> >> +
> >
> > Any failure after this point will just return, although iommufd_create_ioas()
> > would “close(iommu_fd)” on failure.
> > Also, don’t we want to close “iommu_fd” at exit similar to the VFIO container?
> >
>
> That is already fixed in the latest version.
>
> > Thanks,
> > Mostafa
> >
> >> + ret = iommufd_configure_reserved_mem(kvm);
> >> + if (ret)
> >> + return ret;
> >> +
> >> + dma_map_mem_range = iommufd_map_mem_range;
> >> + dma_unmap_mem_range = iommufd_unmap_mem_range;
> >> + /* Now map the full memory */
> >> + return kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, iommufd_map_mem_bank,
> >> + NULL);
> >> +}
> >> +
> >> +int iommufd__exit(struct kvm *kvm)
> >> +{
> >> + return 0;
> >> +}
> >> --
> >> 2.43.0
> >>
>
> -aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-29 5:19 ` Aneesh Kumar K.V
@ 2025-07-29 9:41 ` Mostafa Saleh
2025-07-30 8:13 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-29 9:41 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Tue, Jul 29, 2025 at 10:49:31AM +0530, Aneesh Kumar K.V wrote:
> Mostafa Saleh <smostafa@google.com> writes:
>
> > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
> >> This also allocates a stage1 bypass and stage2 translate table.
> >
> > So this makes IOMMUFD only working with SMMUv3?
> >
> > I don’t understand what is the point of this configuration? It seems to add
> > extra complexity and extra hw constraints and no extra value.
> >
> > Not related to this patch, do you have plans to add some of the other iommufd
> > features, I think things such as page faults might be useful?
> >
>
> The primary goal of adding viommu/vdevice support is to enable kvmtool
> to serve as the VMM for ARM CCA secure device development. This requires
> a viommu implementation so that a KVM file descriptor can be associated
> with the corresponding viommu.
>
> The full set of related patches is available here:
> https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
I see, but I don't understand why we need a nested setup in that case.
How would a bypassed stage-1 change things?
Also, in case we do something like this, I'd suggest making it clear
in the command line that this is SMMUv3/CCA only, and maybe moving
some of the code to arm64/.
Thanks,
Mostafa
>
> -aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper
2025-07-29 5:14 ` Aneesh Kumar K.V
@ 2025-07-29 9:43 ` Mostafa Saleh
0 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-29 9:43 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Tue, Jul 29, 2025 at 10:44:14AM +0530, Aneesh Kumar K.V wrote:
> Mostafa Saleh <smostafa@google.com> writes:
>
> > On Sun, May 25, 2025 at 01:19:14PM +0530, Aneesh Kumar K.V (Arm) wrote:
> >> alloc_hwpt.flags = 0; implies we prefer stage1 translation. Hence name
> >> the helper iommufd_alloc_s2bypass_hwpt().
> >
> > This patch moves the recently added code into a new function,
> > can't this be squashed?
> >
>
> Yes. Will update the patch.
>
> > Also, I believe that with “IOMMU_HWPT_DATA_NONE”, we shouldn’t make
> > any assumptions in userspace about which stage is used.
> >
> > The only guarantee is that IOMMU_IOAS_MAP/IOMMU_IOAS_UNMAP works.
> >
> > So, I believe the naming for "s2bypass" is not accurate.
> >
>
> Any suggestion w.r.t helper function name?
Maybe just "iommufd_alloc_hwpt"?
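Something like this, perhaps (a minimal sketch: the body is just the hwpt
allocation already in the patch; the signature and the out-parameter are
my invention):

	static int iommufd_alloc_hwpt(struct vfio_device *vdev, __u32 dev_id,
				      __u32 *out_hwpt_id)
	{
		struct iommu_hwpt_alloc alloc_hwpt = {
			.size		= sizeof(alloc_hwpt),
			.flags		= 0,
			.dev_id		= dev_id,
			.pt_id		= ioas_id,
			.data_type	= IOMMU_HWPT_DATA_NONE,
		};

		if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
			int ret = -errno;

			vfio_dev_err(vdev, "failed to allocate HWPT");
			return ret;
		}

		*out_hwpt_id = alloc_hwpt.out_hwpt_id;
		return 0;
	}

That way the name makes no claim about which stage the kernel picks for
IOMMU_HWPT_DATA_NONE.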
Thanks,
Mostafa
>
> -aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-29 9:41 ` Mostafa Saleh
@ 2025-07-30 8:13 ` Aneesh Kumar K.V
2025-07-30 14:15 ` Mostafa Saleh
0 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-30 8:13 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Tue, Jul 29, 2025 at 10:49:31AM +0530, Aneesh Kumar K.V wrote:
>> Mostafa Saleh <smostafa@google.com> writes:
>>
>> > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> >> This also allocates a stage1 bypass and stage2 translate table.
>> >
>> > So this makes IOMMUFD only working with SMMUv3?
>> >
>> > I don’t understand what is the point of this configuration? It seems to add
>> > extra complexity and extra hw constraints and no extra value.
>> >
>> > Not related to this patch, do you have plans to add some of the other iommufd
>> > features, I think things such as page faults might be useful?
>> >
>>
>> The primary goal of adding viommu/vdevice support is to enable kvmtool
>> to serve as the VMM for ARM CCA secure device development. This requires
>> a viommu implementation so that a KVM file descriptor can be associated
>> with the corresponding viommu.
>>
>> The full set of related patches is available here:
>> https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
>
> I see, but I don't understand why we need a nested setup in that case.
> How would a bypassed stage-1 change things?
>
I might be misunderstanding the viommu/vdevice setup, but I was under
the impression that it requires an `IOMMU_HWPT_ALLOC_NEST_PARENT`-type
HWPT allocation.
Based on that, I expected the viommu allocation to look something like this:
	alloc_viommu.size = sizeof(alloc_viommu);
	alloc_viommu.flags = IOMMU_VIOMMU_KVM_FD;
	alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
	alloc_viommu.dev_id = vdev->bound_devid;
	alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
	alloc_viommu.kvm_vm_fd = kvm->vm_fd;

	if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
		ret = -errno;
		vfio_dev_err(vdev, "failed to allocate VIOMMU %d", ret);
		goto err_out;
	}
Could you clarify if this is the correct usage pattern, or whether a
different HWPT setup is expected here?
>
> Also, in case we do something like this, I'd suggest making it clear
> in the command line that this is SMMUv3/CCA only, and maybe moving
> some of the code to arm64/.
>
My intent wasn't to make this SMMUv3-specific. Ideally, we could make
the IOMMU type a runtime option in `lkvm`.
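Something along these lines, maybe (a sketch; the option name and the
config field are made up for illustration):

	OPT_STRING('\0', "viommu-type", &(cfg)->viommu_type, "type",
		   "vIOMMU type to expose via iommufd (e.g. smmuv3)"),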
The main requirement here is the ability to create a `vdevice` and
use that in the VFIO setup flow.
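For reference, the vdevice creation itself is small (a sketch following
patch 09; passing the guest BDF through virt_id is my assumption here):

	struct iommu_vdevice_alloc alloc_vdev = {
		.size		= sizeof(alloc_vdev),
		.viommu_id	= alloc_viommu.out_viommu_id,
		.dev_id		= bind.out_devid,
		.virt_id	= guest_bdf,
	};

	if (ioctl(iommu_fd, IOMMU_VDEVICE_ALLOC, &alloc_vdev)) {
		ret = -errno;
		vfio_dev_err(vdev, "failed to allocate vdevice %d", ret);
	}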
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-30 8:13 ` Aneesh Kumar K.V
@ 2025-07-30 14:15 ` Mostafa Saleh
2025-07-31 4:39 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Mostafa Saleh @ 2025-07-30 14:15 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Wed, Jul 30, 2025 at 01:43:21PM +0530, Aneesh Kumar K.V wrote:
> Mostafa Saleh <smostafa@google.com> writes:
>
> > On Tue, Jul 29, 2025 at 10:49:31AM +0530, Aneesh Kumar K.V wrote:
> >> Mostafa Saleh <smostafa@google.com> writes:
> >>
> >> > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
> >> >> This also allocates a stage1 bypass and stage2 translate table.
> >> >
> >> > So this makes IOMMUFD only working with SMMUv3?
> >> >
> >> > I don’t understand what is the point of this configuration? It seems to add
> >> > extra complexity and extra hw constraints and no extra value.
> >> >
> >> > Not related to this patch, do you have plans to add some of the other iommufd
> >> > features, I think things such as page faults might be useful?
> >> >
> >>
> >> The primary goal of adding viommu/vdevice support is to enable kvmtool
> >> to serve as the VMM for ARM CCA secure device development. This requires
> >> a viommu implementation so that a KVM file descriptor can be associated
> >> with the corresponding viommu.
> >>
> >> The full set of related patches is available here:
> >> https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
> >
> > I see, but I don't understand why we need a nested setup in that case.
> > How would a bypassed stage-1 change things?
> >
>
> I might be misunderstanding the viommu/vdevice setup, but I was under
> the impression that it requires an `IOMMU_HWPT_ALLOC_NEST_PARENT`-type
> HWPT allocation.
>
> Based on that, I expected the viommu allocation to look something like this:
>
> alloc_viommu.size = sizeof(alloc_viommu);
> alloc_viommu.flags = IOMMU_VIOMMU_KVM_FD;
> alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
> alloc_viommu.dev_id = vdev->bound_devid;
> alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
> alloc_viommu.kvm_vm_fd = kvm->vm_fd;
>
> if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
>
> Could you clarify if this is the correct usage pattern, or whether a
> different HWPT setup is expected here?
I believe that's correct; my question was why it matters whether the
config is S1 bypass + S2 IPA -> PA, as opposed to before this patch,
where it would be S1 IPA -> PA and S2 bypass.
As in this patch we manage the STE but set it to bypass, so we don't
actually use nesting.
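To spell out the comparison (a sketch, reusing the field values from
patches 07 and 09):

	/* Patch 07: one HWPT on the IOAS; userspace asserts nothing
	 * about which stage backs it. */
	alloc_hwpt.flags     = 0;
	alloc_hwpt.pt_id     = ioas_id;
	alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;

	/* Patch 09: S2 nest parent on the IOAS, then a vSMMU plus a
	 * nested S1-bypass vSTE on top of it. */
	alloc_hwpt.flags     = IOMMU_HWPT_ALLOC_NEST_PARENT;
	alloc_hwpt.pt_id     = ioas_id;
	alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
	/* ... IOMMU_VIOMMU_ALLOC, then: */
	alloc_hwpt.pt_id     = alloc_viommu.out_viommu_id;
	alloc_hwpt.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;	/* bypass vSTE */

Either way the device ends up with the same IPA -> PA mapping for DMA;
only the second variant pins it to stage-2 and takes ownership of the STE.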
>
> >
> > Also, in case we do something like this, I'd suggest making it clear
> > in the command line that this is SMMUv3/CCA only, and maybe moving
> > some of the code to arm64/.
> >
>
> My intent wasn't to make this SMMUv3-specific. Ideally, we could make
> the IOMMU type a runtime option in `lkvm`.
Makes sense.
Thanks,
Mostafa
>
> The main requirement here is the ability to create a `vdevice` and
> use that in the VFIO setup flow.
>
> -aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-30 14:15 ` Mostafa Saleh
@ 2025-07-31 4:39 ` Aneesh Kumar K.V
2025-08-04 15:07 ` Mostafa Saleh
0 siblings, 1 reply; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-07-31 4:39 UTC (permalink / raw)
To: Mostafa Saleh
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
Mostafa Saleh <smostafa@google.com> writes:
> On Wed, Jul 30, 2025 at 01:43:21PM +0530, Aneesh Kumar K.V wrote:
>> Mostafa Saleh <smostafa@google.com> writes:
>>
>> > On Tue, Jul 29, 2025 at 10:49:31AM +0530, Aneesh Kumar K.V wrote:
>> >> Mostafa Saleh <smostafa@google.com> writes:
>> >>
>> >> > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> >> >> This also allocates a stage1 bypass and stage2 translate table.
>> >> >
>> >> > So this makes IOMMUFD only working with SMMUv3?
>> >> >
>> >> > I don’t understand what is the point of this configuration? It seems to add
>> >> > extra complexity and extra hw constraints and no extra value.
>> >> >
>> >> > Not related to this patch, do you have plans to add some of the other iommufd
>> >> > features, I think things such as page faults might be useful?
>> >> >
>> >>
>> >> The primary goal of adding viommu/vdevice support is to enable kvmtool
>> >> to serve as the VMM for ARM CCA secure device development. This requires
>> >> a viommu implementation so that a KVM file descriptor can be associated
>> >> with the corresponding viommu.
>> >>
>> >> The full set of related patches is available here:
>> >> https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
>> >
>> > I see, but I don't understand why we need a nested setup in that case.
>> > How would a bypassed stage-1 change things?
>> >
>>
>> I might be misunderstanding the viommu/vdevice setup, but I was under
>> the impression that it requires an `IOMMU_HWPT_ALLOC_NEST_PARENT`-type
>> HWPT allocation.
>>
>> Based on that, I expected the viommu allocation to look something like this:
>>
>> alloc_viommu.size = sizeof(alloc_viommu);
>> alloc_viommu.flags = IOMMU_VIOMMU_KVM_FD;
>> alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
>> alloc_viommu.dev_id = vdev->bound_devid;
>> alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
>> alloc_viommu.kvm_vm_fd = kvm->vm_fd;
>>
>> if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
>>
>> Could you clarify if this is the correct usage pattern, or whether a
>> different HWPT setup is expected here?
>
> I believe that's correct; my question was why it matters whether the
> config is S1 bypass + S2 IPA -> PA, as opposed to before this patch,
> where it would be S1 IPA -> PA and S2 bypass.
>
Can we do S1 IPA -> PA and S2 bypass with viommu and vdevice?
>
> As in this patch we manage the STE but set it to bypass, so we don't
> actually use nesting.
>
>>
>> >
>> > Also, in case we do something like this, I'd suggest making it clear
>> > in the command line that this is SMMUv3/CCA only, and maybe moving
>> > some of the code to arm64/.
>> >
>>
>> My intent wasn't to make this SMMUv3-specific. Ideally, we could make
>> the IOMMU type a runtime option in `lkvm`.
>
> Makes sense.
>
> Thanks,
> Mostafa
>
>>
>> The main requirement here is the ability to create a `vdevice` and
>> use that in the VFIO setup flow.
>>
>> -aneesh
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-31 4:39 ` Aneesh Kumar K.V
@ 2025-08-04 15:07 ` Mostafa Saleh
0 siblings, 0 replies; 38+ messages in thread
From: Mostafa Saleh @ 2025-08-04 15:07 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: kvm, Suzuki K Poulose, Steven Price, Will Deacon, Julien Thierry
On Thu, Jul 31, 2025 at 10:09:36AM +0530, Aneesh Kumar K.V wrote:
> Mostafa Saleh <smostafa@google.com> writes:
>
> > On Wed, Jul 30, 2025 at 01:43:21PM +0530, Aneesh Kumar K.V wrote:
> >> Mostafa Saleh <smostafa@google.com> writes:
> >>
> >> > On Tue, Jul 29, 2025 at 10:49:31AM +0530, Aneesh Kumar K.V wrote:
> >> >> Mostafa Saleh <smostafa@google.com> writes:
> >> >>
> >> >> > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
> >> >> >> This also allocates a stage1 bypass and stage2 translate table.
> >> >> >
> >> >> > So this makes IOMMUFD only working with SMMUv3?
> >> >> >
> >> >> > I don’t understand what is the point of this configuration? It seems to add
> >> >> > extra complexity and extra hw constraints and no extra value.
> >> >> >
> >> >> > Not related to this patch, do you have plans to add some of the other iommufd
> >> >> > features, I think things such as page faults might be useful?
> >> >> >
> >> >>
> >> >> The primary goal of adding viommu/vdevice support is to enable kvmtool
> >> >> to serve as the VMM for ARM CCA secure device development. This requires
> >> >> a viommu implementation so that a KVM file descriptor can be associated
> >> >> with the corresponding viommu.
> >> >>
> >> >> The full set of related patches is available here:
> >> >> https://gitlab.arm.com/linux-arm/kvmtool-cca/-/tree/cca/tdisp-upstream-post-v1
> >> >
> >> > I see, but I don't understand why we need a nested setup in that case.
> >> > How would a bypassed stage-1 change things?
> >> >
> >>
> >> I might be misunderstanding the viommu/vdevice setup, but I was under
> >> the impression that it requires an `IOMMU_HWPT_ALLOC_NEST_PARENT`-type
> >> HWPT allocation.
> >>
> >> Based on that, I expected the viommu allocation to look something like this:
> >>
> >> alloc_viommu.size = sizeof(alloc_viommu);
> >> alloc_viommu.flags = IOMMU_VIOMMU_KVM_FD;
> >> alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
> >> alloc_viommu.dev_id = vdev->bound_devid;
> >> alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
> >> alloc_viommu.kvm_vm_fd = kvm->vm_fd;
> >>
> >> if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
> >>
> >> Could you clarify if this is the correct usage pattern, or whether a
> >> different HWPT setup is expected here?
> >
> > I believe that's correct; my question was why it matters whether the
> > config is S1 bypass + S2 IPA -> PA, as opposed to before this patch,
> > where it would be S1 IPA -> PA and S2 bypass.
> >
>
> Can we do S1 IPA -> PA and S2 bypass with viommu and vdevice?
Sorry, I was not clear. My point is that this patch adds vdevice support
only to set the STE to bypass, which has the same effect on the device SID,
so why add such complexity if the assignment would still work without it?
AFAIK, the use of such a feature would be to present an emulated SMMUv3 to
the guest.
Thanks,
Mostafa
>
> >
> > As in this patch we manage the STE but set it to bypass, so we don't
> > actually use nesting.
> >
> >>
> >> >
> >> > Also, in case we do something like this, I'd suggest making it clear
> >> > in the command line that this is SMMUv3/CCA only, and maybe moving
> >> > some of the code to arm64/.
> >> >
> >>
> >> My intent wasn't to make this SMMUv3-specific. Ideally, we could make
> >> the IOMMU type a runtime option in `lkvm`.
> >
> > Makes sense.
> >
> > Thanks,
> > Mostafa
> >
> >>
> >> The main requirement here is the ability to create a `vdevice` and
> >> use that in the VFIO setup flow.
> >>
> >> -aneesh
>
> -aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-07-24 14:09 ` Aneesh Kumar K.V
@ 2025-08-04 22:33 ` Suzuki K Poulose
2025-08-08 13:00 ` Will Deacon
0 siblings, 1 reply; 38+ messages in thread
From: Suzuki K Poulose @ 2025-08-04 22:33 UTC (permalink / raw)
To: Aneesh Kumar K.V, Will Deacon; +Cc: kvm, Steven Price, Julien Thierry
On 24/07/2025 15:09, Aneesh Kumar K.V wrote:
> Will Deacon <will@kernel.org> writes:
>
>> On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
>>> This also allocates a stage1 bypass and stage2 translate table.
>>
>> Please write your commit messages as per Linux kernel guidelines.
>>
>>> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
>>> ---
>>> builtin-run.c | 2 +
>>> include/kvm/kvm-config.h | 1 +
>>> vfio/core.c | 4 +-
>>> vfio/iommufd.c | 115 ++++++++++++++++++++++++++++++++++++++-
>>
>> [...]
>>
>>> 4 files changed, 119 insertions(+), 3 deletions(-)
>>> diff --git a/vfio/iommufd.c b/vfio/iommufd.c
>>> index 742550705746..39870320e4ac 100644
>>> --- a/vfio/iommufd.c
>>> +++ b/vfio/iommufd.c
>>> @@ -108,6 +108,116 @@ err_out:
>>> return ret;
>>> }
>>>
>>> +static int iommufd_alloc_s1bypass_hwpt(struct vfio_device *vdev)
>>> +{
>>> + int ret;
>>> + unsigned long dev_num;
>>> + unsigned long guest_bdf;
>>> + struct vfio_device_bind_iommufd bind;
>>> + struct vfio_device_attach_iommufd_pt attach_data;
>>> + struct iommu_hwpt_alloc alloc_hwpt;
>>> + struct iommu_viommu_alloc alloc_viommu;
>>> + struct iommu_hwpt_arm_smmuv3 bypass_ste;
>>> + struct iommu_vdevice_alloc alloc_vdev;
>>> +
>>> + bind.argsz = sizeof(bind);
>>> + bind.flags = 0;
>>> + bind.iommufd = iommu_fd;
>>> +
>>> + /* now bind the iommufd */
>>> + if (ioctl(vdev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
>>> + ret = -errno;
>>> + vfio_dev_err(vdev, "failed to get info");
>>> + goto err_out;
>>> + }
>>> +
>>> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
>>> + alloc_hwpt.flags = IOMMU_HWPT_ALLOC_NEST_PARENT;
>>> + alloc_hwpt.dev_id = bind.out_devid;
>>> + alloc_hwpt.pt_id = ioas_id;
>>> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_NONE;
>>> + alloc_hwpt.data_len = 0;
>>> + alloc_hwpt.data_uptr = 0;
>>> +
>>> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
>>> + ret = -errno;
>>> + pr_err("Failed to allocate HWPT");
>>> + goto err_out;
>>> + }
>>> +
>>> + attach_data.argsz = sizeof(attach_data);
>>> + attach_data.flags = 0;
>>> + attach_data.pt_id = alloc_hwpt.out_hwpt_id;
>>> +
>>> + if (ioctl(vdev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data)) {
>>> + ret = -errno;
>>> + vfio_dev_err(vdev, "failed to attach to IOAS ");
>>> + goto err_out;
>>> + }
>>> +
>>> + alloc_viommu.size = sizeof(alloc_viommu);
>>> + alloc_viommu.flags = 0;
>>> + alloc_viommu.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
>>> + alloc_viommu.dev_id = bind.out_devid;
>>> + alloc_viommu.hwpt_id = alloc_hwpt.out_hwpt_id;
>>> +
>>> + if (ioctl(iommu_fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu)) {
>>> + ret = -errno;
>>> + vfio_dev_err(vdev, "failed to allocate VIOMMU %d", ret);
>>> + goto err_out;
>>> + }
>>> +#define STRTAB_STE_0_V (1UL << 0)
>>> +#define STRTAB_STE_0_CFG_S2_TRANS 6
>>> +#define STRTAB_STE_0_CFG_S1_TRANS 5
>>> +#define STRTAB_STE_0_CFG_BYPASS 4
>>> +
>>> + /* set up virtual ste as bypass ste */
>>> + bypass_ste.ste[0] = STRTAB_STE_0_V | (STRTAB_STE_0_CFG_BYPASS << 1);
>>> + bypass_ste.ste[1] = 0x0UL;
>>> +
>>> + alloc_hwpt.size = sizeof(struct iommu_hwpt_alloc);
>>> + alloc_hwpt.flags = 0;
>>> + alloc_hwpt.dev_id = bind.out_devid;
>>> + alloc_hwpt.pt_id = alloc_viommu.out_viommu_id;
>>> + alloc_hwpt.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3;
>>> + alloc_hwpt.data_len = sizeof(bypass_ste);
>>> + alloc_hwpt.data_uptr = (unsigned long)&bypass_ste;
>>> +
>>> + if (ioctl(iommu_fd, IOMMU_HWPT_ALLOC, &alloc_hwpt)) {
>>> + ret = -errno;
>>> + pr_err("Failed to allocate S1 bypass HWPT %d", ret);
>>> + goto err_out;
>>> + }
>>> +
>>> + alloc_vdev.size = sizeof(alloc_vdev),
>>> + alloc_vdev.viommu_id = alloc_viommu.out_viommu_id;
>>> + alloc_vdev.dev_id = bind.out_devid;
>>> +
>>> + dev_num = vdev->dev_hdr.dev_num;
>>> + /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
>>> + guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
>>
>> I don't understand this. Shouldn't the BDF correspond to the virtual
>> configuration space? That's not allocated until later, but just going
>> with 0 isn't going to work.
>>
>> What am I missing?
>>
>
> As I understand it, kvmtool supports only bus 0 and does not allow
> multifunction devices. Based on that, I derived the guest BDF as follows
> (correcting what was wrong in the original patch):
>
> guest_bdf = (0ULL << 16) | (0 << 8) | dev_num << 3 | (0 << 0);
>
> Are you suggesting that this approach is incorrect, and that we can use
> a bus number other than 0?
To put this another way, the emulation of the configuration space is based
on "dev_num", i.e., the CFG address is converted to an offset and mapped
back to "dev_num". So I think what we have here is correct.
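For completeness, the standard 16-bit BDF layout is bus[15:8] dev[7:3]
fn[2:0], so with bus 0 and function 0 only the device number contributes:

	/* guest_bdf = bus << 8 | dev << 3 | fn, with bus = fn = 0 */
	guest_bdf = vdev->dev_hdr.dev_num << 3;

which matches the corrected formula above; the original patch's
"dev_num << 11" looks like the CONFIG_ADDRESS-register placement of the
device number rather than a plain BDF.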
Suzuki
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-08-04 22:33 ` Suzuki K Poulose
@ 2025-08-08 13:00 ` Will Deacon
2025-08-11 6:16 ` Aneesh Kumar K.V
0 siblings, 1 reply; 38+ messages in thread
From: Will Deacon @ 2025-08-08 13:00 UTC (permalink / raw)
To: Suzuki K Poulose; +Cc: Aneesh Kumar K.V, kvm, Steven Price, Julien Thierry
On Mon, Aug 04, 2025 at 11:33:27PM +0100, Suzuki K Poulose wrote:
> On 24/07/2025 15:09, Aneesh Kumar K.V wrote:
> > Will Deacon <will@kernel.org> writes:
> > > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
> > > > + dev_num = vdev->dev_hdr.dev_num;
> > > > + /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
> > > > + guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
> > >
> > > I don't understand this. Shouldn't the BDF correspond to the virtual
> > > configuration space? That's not allocated until later, but just going
> > > with 0 isn't going to work.
> > >
> > > What am I missing?
> > >
> >
> > As I understand it, kvmtool supports only bus 0 and does not allow
> > multifunction devices. Based on that, I derived the guest BDF as follows
> > (correcting what was wrong in the original patch):
> >
> > guest_bdf = (0ULL << 16) | (0 << 8) | dev_num << 3 | (0 << 0);
> >
> > Are you suggesting that this approach is incorrect, and that we can use
> > a bus number other than 0?
>
> To put this another way, the emulation of the configuration space is based
> on "dev_num", i.e., the CFG address is converted to an offset and mapped
> back to "dev_num". So I think what we have here is correct.
My point is that 'dev_num' isn't allocated until vfio_pci_setup_device(),
which is called from __iommufd_configure_device() _after_ we've called
iommufd_alloc_s1bypass_hwpt().
So I don't see how this works. You have to allocate the virtual config
space before you can allocate the virtual device with iommufd.
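To make the ordering concrete (as I read the series):

	iommufd_configure_device()
	  -> iommufd_alloc_s1bypass_hwpt()   /* reads vdev->dev_hdr.dev_num */
	  -> __iommufd_configure_device()
	       -> vfio_pci_setup_device()    /* dev_num is only allocated here */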
Will
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects
2025-08-08 13:00 ` Will Deacon
@ 2025-08-11 6:16 ` Aneesh Kumar K.V
0 siblings, 0 replies; 38+ messages in thread
From: Aneesh Kumar K.V @ 2025-08-11 6:16 UTC (permalink / raw)
To: Will Deacon, Suzuki K Poulose; +Cc: kvm, Steven Price, Julien Thierry
Will Deacon <will@kernel.org> writes:
> On Mon, Aug 04, 2025 at 11:33:27PM +0100, Suzuki K Poulose wrote:
>> On 24/07/2025 15:09, Aneesh Kumar K.V wrote:
>> > Will Deacon <will@kernel.org> writes:
>> > > On Sun, May 25, 2025 at 01:19:15PM +0530, Aneesh Kumar K.V (Arm) wrote:
>> > > > + dev_num = vdev->dev_hdr.dev_num;
>> > > > + /* kvmtool only do 0 domain, 0 bus and 0 function devices. */
>> > > > + guest_bdf = (0ULL << 32) | (0 << 16) | dev_num << 11 | (0 << 8);
>> > >
>> > > I don't understand this. Shouldn't the BDF correspond to the virtual
>> > > configuration space? That's not allocated until later, but just going
>> > > with 0 isn't going to work.
>> > >
>> > > What am I missing?
>> > >
>> >
>> > As I understand it, kvmtool supports only bus 0 and does not allow
>> > multifunction devices. Based on that, I derived the guest BDF as follows
>> > (correcting what was wrong in the original patch):
>> >
>> > guest_bdf = (0ULL << 16) | (0 << 8) | dev_num << 3 | (0 << 0);
>> >
>> > Are you suggesting that this approach is incorrect, and that we can use
>> > a bus number other than 0?
>>
>> To put this another way, the emulation of the configuration space is based
>> on "dev_num", i.e., the CFG address is converted to an offset and mapped
>> back to "dev_num". So I think what we have here is correct.
>
> My point is that 'dev_num' isn't allocated until vfio_pci_setup_device(),
> which is called from __iommufd_configure_device() _after_ we've called
> iommufd_alloc_s1bypass_hwpt().
>
> So I don't see how this works. You have to allocate the virtual config
> space before you can allocate the virtual device with iommufd.
>
I did fix that in https://lore.kernel.org/all/yq5att31brz2.fsf@kernel.org/
-aneesh
^ permalink raw reply [flat|nested] 38+ messages in thread
end of thread, other threads: [~2025-08-11 6:16 UTC | newest]
Thread overview: 38+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-05-25 7:49 [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Aneesh Kumar K.V (Arm)
2025-05-25 7:49 ` [RFC PATCH kvmtool 02/10] vfio: Rename some functions Aneesh Kumar K.V (Arm)
2025-07-27 18:20 ` Mostafa Saleh
2025-07-29 4:53 ` Aneesh Kumar K.V
2025-05-25 7:49 ` [RFC PATCH kvmtool 03/10] vfio: Create new file legacy.c Aneesh Kumar K.V (Arm)
2025-07-27 18:23 ` Mostafa Saleh
2025-07-29 4:59 ` Aneesh Kumar K.V
2025-05-25 7:49 ` [RFC PATCH kvmtool 04/10] vfio: Update vfio header from linux kernel Aneesh Kumar K.V (Arm)
2025-07-27 18:23 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 05/10] vfio: Add dma map/unmap handlers Aneesh Kumar K.V (Arm)
2025-07-27 18:25 ` Mostafa Saleh
2025-07-29 5:03 ` Aneesh Kumar K.V
2025-05-25 7:49 ` [RFC PATCH kvmtool 06/10] vfio/iommufd: Import iommufd header from kernel Aneesh Kumar K.V (Arm)
2025-07-27 18:25 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 07/10] vfio/iommufd: Add basic iommufd support Aneesh Kumar K.V (Arm)
2025-07-27 18:31 ` Mostafa Saleh
2025-07-29 5:12 ` Aneesh Kumar K.V
2025-07-29 9:38 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 08/10] vfio/iommufd: Move the hwpt allocation to helper Aneesh Kumar K.V (Arm)
2025-07-27 18:32 ` Mostafa Saleh
2025-07-29 5:14 ` Aneesh Kumar K.V
2025-07-29 9:43 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 09/10] vfio/iommufd: Add viommu and vdevice objects Aneesh Kumar K.V (Arm)
2025-07-21 12:27 ` Will Deacon
2025-07-24 14:09 ` Aneesh Kumar K.V
2025-08-04 22:33 ` Suzuki K Poulose
2025-08-08 13:00 ` Will Deacon
2025-08-11 6:16 ` Aneesh Kumar K.V
2025-07-27 18:35 ` Mostafa Saleh
2025-07-29 5:19 ` Aneesh Kumar K.V
2025-07-29 9:41 ` Mostafa Saleh
2025-07-30 8:13 ` Aneesh Kumar K.V
2025-07-30 14:15 ` Mostafa Saleh
2025-07-31 4:39 ` Aneesh Kumar K.V
2025-08-04 15:07 ` Mostafa Saleh
2025-05-25 7:49 ` [RFC PATCH kvmtool 10/10] util/update_headers: Add vfio related header files to update list Aneesh Kumar K.V (Arm)
2025-07-27 18:35 ` Mostafa Saleh
2025-07-27 18:19 ` [RFC PATCH kvmtool 01/10] vfio: Associate vm instance with vfio fd Mostafa Saleh