From: Yi Liu <yi.l.liu@intel.com>
To: alex.williamson@redhat.com, cohuck@redhat.com, qemu-devel@nongnu.org
Cc: david@gibson.dropbear.id.au, thuth@redhat.com,
farman@linux.ibm.com, mjrosato@linux.ibm.com,
akrowiak@linux.ibm.com, pasic@linux.ibm.com,
jjherne@linux.ibm.com, jasowang@redhat.com, kvm@vger.kernel.org,
jgg@nvidia.com, nicolinc@nvidia.com, eric.auger@redhat.com,
eric.auger.pro@gmail.com, kevin.tian@intel.com,
yi.l.liu@intel.com, chao.p.peng@intel.com, yi.y.sun@intel.com,
peterx@redhat.com, shameerali.kolothum.thodi@huawei.com,
zhangfei.gao@linaro.org, berrange@redhat.com
Subject: [RFC v2 05/15] vfio/container: Introduce vfio_[attach/detach]_device
Date: Wed, 8 Jun 2022 05:31:29 -0700
Message-ID: <20220608123139.19356-6-yi.l.liu@intel.com>
In-Reply-To: <20220608123139.19356-1-yi.l.liu@intel.com>
From: Eric Auger <eric.auger@redhat.com>
We want VFIO devices to be able to use two different IOMMU
backends: the legacy VFIO one and the new iommufd one. Introduce
vfio_[attach/detach]_device, which aim at hiding the underlying
IOMMU backend (ioctls, data types, ...) from the device code.

Once vfio_attach_device() completes, the device is attached to a
security context and its fd can be used. Conversely, when
vfio_detach_device() completes, the device has been detached from
the security context.
In this patch, only the vfio-pci device gets converted to use
the new API. Subsequent patches will handle other devices.
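
For illustration only (not part of this patch), a minimal sketch of
how a device backend is expected to consume the new API. The "foo"
device type, VFIOFooDevice, VFIO_FOO() and the use of
&address_space_memory are made-up placeholders; the real callers use
their own types and address spaces, as the vfio-pci conversion below
shows:

    static void vfio_foo_realize(DeviceState *dev, Error **errp)
    {
        VFIOFooDevice *vfoo = VFIO_FOO(dev);
        VFIODevice *vbasedev = &vfoo->vbasedev;

        /* Resolve the iommu group, attach the device, open its fd. */
        if (vfio_attach_device(vbasedev, &address_space_memory, errp)) {
            return;
        }

        /* vbasedev->fd is now valid; regions and IRQs can be set up. */
    }

    static void vfio_foo_unrealize(DeviceState *dev)
    {
        VFIOFooDevice *vfoo = VFIO_FOO(dev);

        /* Release the device fd and drop the group reference. */
        vfio_detach_device(&vfoo->vbasedev);
    }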
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
---
hw/vfio/container.c | 65 +++++++++++++++++++++++++++++++++++
hw/vfio/pci.c | 50 +++------------------------
include/hw/vfio/vfio-common.h | 2 ++
3 files changed, 72 insertions(+), 45 deletions(-)
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index dfc5183d5d..74e6eeba74 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -1218,6 +1218,71 @@ int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
return vfio_eeh_container_op(container, op);
}
+static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp)
+{
+ char *tmp, group_path[PATH_MAX], *group_name;
+ int ret, groupid;
+ ssize_t len;
+
+ tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
+ len = readlink(tmp, group_path, sizeof(group_path));
+ g_free(tmp);
+
+ if (len <= 0 || len >= sizeof(group_path)) {
+ ret = len < 0 ? -errno : -ENAMETOOLONG;
+ error_setg_errno(errp, -ret, "no iommu_group found");
+ return ret;
+ }
+
+ group_path[len] = 0;
+
+ group_name = basename(group_path);
+ if (sscanf(group_name, "%d", &groupid) != 1) {
+ error_setg_errno(errp, errno, "failed to read %s", group_path);
+ return -errno;
+ }
+ return groupid;
+}
+
+int vfio_attach_device(VFIODevice *vbasedev, AddressSpace *as, Error **errp)
+{
+ int groupid = vfio_device_groupid(vbasedev, errp);
+ VFIODevice *vbasedev_iter;
+ VFIOGroup *group;
+ int ret;
+
+ if (groupid < 0) {
+ return groupid;
+ }
+
+ trace_vfio_realize(vbasedev->name, groupid);
+ group = vfio_get_group(groupid, as, errp);
+ if (!group) {
+ return -1;
+ }
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
+ error_setg(errp, "device is already attached");
+ vfio_put_group(group);
+ return -1;
+ }
+ }
+ ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
+ if (ret) {
+ vfio_put_group(group);
+ return -1;
+ }
+
+ return 0;
+}
+
+void vfio_detach_device(VFIODevice *vbasedev)
+{
+ vfio_put_base_device(vbasedev);
+ vfio_put_group(vbasedev->group);
+}
+
const VFIOContainerOps legacy_container_ops = {
.dma_map = vfio_dma_map,
.dma_unmap = vfio_dma_unmap,
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index a9973a6d6a..9856d81819 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2697,10 +2697,9 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
static void vfio_put_device(VFIOPCIDevice *vdev)
{
- g_free(vdev->vbasedev.name);
g_free(vdev->msix);
- vfio_put_base_device(&vdev->vbasedev);
+ vfio_detach_device(&vdev->vbasedev);
}
static void vfio_err_notifier_handler(void *opaque)
@@ -2847,13 +2846,9 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
{
VFIOPCIDevice *vdev = VFIO_PCI(pdev);
VFIODevice *vbasedev = &vdev->vbasedev;
- VFIODevice *vbasedev_iter;
- VFIOGroup *group;
- char *tmp, *subsys, group_path[PATH_MAX], *group_name;
+ char *tmp, *subsys;
Error *err = NULL;
- ssize_t len;
struct stat st;
- int groupid;
int i, ret;
bool is_mdev;
@@ -2882,39 +2877,6 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
vbasedev->type = VFIO_DEVICE_TYPE_PCI;
vbasedev->dev = DEVICE(vdev);
- tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
- len = readlink(tmp, group_path, sizeof(group_path));
- g_free(tmp);
-
- if (len <= 0 || len >= sizeof(group_path)) {
- error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
- "no iommu_group found");
- goto error;
- }
-
- group_path[len] = 0;
-
- group_name = basename(group_path);
- if (sscanf(group_name, "%d", &groupid) != 1) {
- error_setg_errno(errp, errno, "failed to read %s", group_path);
- goto error;
- }
-
- trace_vfio_realize(vbasedev->name, groupid);
-
- group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
- if (!group) {
- goto error;
- }
-
- QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
- error_setg(errp, "device is already attached");
- vfio_put_group(group);
- goto error;
- }
- }
-
/*
* Mediated devices *might* operate compatibly with discarding of RAM, but
* we cannot know for certain, it depends on whether the mdev vendor driver
@@ -2932,13 +2894,12 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vbasedev->ram_block_discard_allowed && !is_mdev) {
error_setg(errp, "x-balloon-allowed only potentially compatible "
"with mdev devices");
- vfio_put_group(group);
goto error;
}
- ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
+ ret = vfio_attach_device(vbasedev,
+ pci_device_iommu_address_space(pdev), errp);
if (ret) {
- vfio_put_group(group);
goto error;
}
@@ -3167,12 +3128,12 @@ out_teardown:
vfio_bars_exit(vdev);
error:
error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
+ vfio_detach_device(vbasedev);
}
static void vfio_instance_finalize(Object *obj)
{
VFIOPCIDevice *vdev = VFIO_PCI(obj);
- VFIOGroup *group = vdev->vbasedev.group;
vfio_display_finalize(vdev);
vfio_bars_finalize(vdev);
@@ -3186,7 +3147,6 @@ static void vfio_instance_finalize(Object *obj)
* g_free(vdev->igd_opregion);
*/
vfio_put_device(vdev);
- vfio_put_group(group);
}
static void vfio_exitfn(PCIDevice *pdev)
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 5cc0413b5c..1b68cd8ece 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -181,6 +181,8 @@ VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp);
void vfio_put_group(VFIOGroup *group);
int vfio_get_device(VFIOGroup *group, const char *name,
VFIODevice *vbasedev, Error **errp);
+int vfio_attach_device(VFIODevice *vbasedev, AddressSpace *as, Error **errp);
+void vfio_detach_device(VFIODevice *vbasedev);
extern const MemoryRegionOps vfio_region_ops;
typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
--
2.27.0