From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, jasowang@redhat.com, mst@redhat.com,
yi.l.liu@intel.com, jgg@nvidia.com, linux-kernel@vger.kernel.org,
virtualization@lists.linux-foundation.org,
netdev@vger.kernel.org
Subject: [RFC 1/7] vhost/iommufd: Add the functions support iommufd
Date: Sun, 24 Sep 2023 01:05:34 +0800 [thread overview]
Message-ID: <20230923170540.1447301-2-lulu@redhat.com> (raw)
In-Reply-To: <20230923170540.1447301-1-lulu@redhat.com>
Add a new file, vhost/iommufd.c, to support iommufd. This file
contains the iommufd functions for both the emulated device and
the physical device.
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/iommufd.c | 151 ++++++++++++++++++++++++++++++++++++++++
drivers/vhost/vhost.h | 21 ++++++
2 files changed, 172 insertions(+)
create mode 100644 drivers/vhost/iommufd.c
diff --git a/drivers/vhost/iommufd.c b/drivers/vhost/iommufd.c
new file mode 100644
index 000000000000..080858f76fd5
--- /dev/null
+++ b/drivers/vhost/iommufd.c
@@ -0,0 +1,151 @@
+#include <linux/vdpa.h>
+#include <linux/iommufd.h>
+
+#include "vhost.h"
+
+MODULE_IMPORT_NS(IOMMUFD);
+
+/**
+ * vdpa_iommufd_bind - bind a vdpa device to an iommufd context
+ * @vdpa: vdpa device to bind
+ * @ictx: iommufd context to bind to
+ * @ioas_id: IOAS to attach once the device is bound
+ * @device_id: on success, filled with the iommufd device id
+ *
+ * Returns 0 on success or a negative errno. A device whose driver does
+ * not implement bind_iommufd does no DMA and trivially succeeds.
+ */
+int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
+ u32 *ioas_id, u32 *device_id)
+{
+ int ret;
+
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ /*
+ * If the driver doesn't provide this op then it means the device does
+ * not do DMA at all. So nothing to do.
+ */
+ if (!vdpa->config->bind_iommufd)
+ return 0;
+
+ ret = vdpa->config->bind_iommufd(vdpa, ictx, device_id);
+ if (ret)
+ return ret;
+
+ /*
+ * bind_iommufd and attach_ioas must be provided as a pair; don't
+ * dereference a missing attach_ioas op.
+ */
+ if (!vdpa->config->attach_ioas) {
+ ret = -EOPNOTSUPP;
+ goto err_unbind;
+ }
+
+ ret = vdpa->config->attach_ioas(vdpa, ioas_id);
+ if (ret)
+ goto err_unbind;
+ vdpa->iommufd_attached = true;
+
+ return 0;
+
+err_unbind:
+ if (vdpa->config->unbind_iommufd)
+ vdpa->config->unbind_iommufd(vdpa);
+ return ret;
+}
+
+/*
+ * Undo vdpa_iommufd_bind(). Safe to call for a device whose driver has
+ * no unbind_iommufd op (i.e. a device that does no DMA).
+ */
+void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
+{
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ if (vdpa->config->unbind_iommufd)
+ vdpa->config->unbind_iommufd(vdpa);
+}
+
+/*
+ * bind_iommufd op for physical vdpa devices: bind the device's DMA
+ * device to the iommufd context. On success the iommufd_device handle
+ * is stored in @vdpa for later attach/unbind.
+ */
+int vdpa_iommufd_physical_bind(struct vdpa_device *vdpa,
+ struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+ struct iommufd_device *idev;
+
+ /* Hold the same lock as the other bind/unbind paths */
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ idev = iommufd_device_bind(ictx, dma_dev, out_device_id);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+ vdpa->iommufd_device = idev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_bind);
+
+/*
+ * unbind_iommufd op for physical vdpa devices: detach from the IOAS if
+ * still attached, then unbind and drop the iommufd_device handle.
+ */
+void vdpa_iommufd_physical_unbind(struct vdpa_device *vdpa)
+{
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ if (vdpa->iommufd_attached) {
+ iommufd_device_detach(vdpa->iommufd_device);
+ vdpa->iommufd_attached = false;
+ }
+ iommufd_device_unbind(vdpa->iommufd_device);
+ vdpa->iommufd_device = NULL;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_unbind);
+
+/*
+ * attach_ioas op for physical vdpa devices: attach the bound
+ * iommufd_device to the IOAS identified by @pt_id.
+ */
+int vdpa_iommufd_physical_attach_ioas(struct vdpa_device *vdpa, u32 *pt_id)
+{
+ /* Hold the same lock as the other bind/attach paths */
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ return iommufd_device_attach(vdpa->iommufd_device, pt_id);
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_attach_ioas);
+
+/*
+ * iommufd unmap callback for emulated devices: forward the range to the
+ * driver's dma_unmap op.
+ * NOTE(review): the second argument looks like an ASID and is hard-coded
+ * to 0 here — confirm emulated devices only ever use ASID 0.
+ */
+static void vdpa_emulated_unmap(void *data, unsigned long iova,
+ unsigned long length)
+{
+ struct vdpa_device *vdpa = data;
+
+ vdpa->config->dma_unmap(vdpa, 0, iova, length);
+}
+
+/* Access ops handed to iommufd_access_create() for emulated devices */
+static const struct iommufd_access_ops vdpa_user_ops = {
+ .needs_pin_pages = 1,
+ .unmap = vdpa_emulated_unmap,
+};
+
+/*
+ * bind_iommufd op for emulated vdpa devices: bind the DMA device and
+ * take a reference on the iommufd context for the lifetime of the bind.
+ */
+int vdpa_iommufd_emulated_bind(struct vdpa_device *vdpa,
+ struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+ struct iommufd_device *idev;
+
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ idev = iommufd_device_bind(ictx, vdpa->dma_dev, out_device_id);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+
+ /*
+ * Only take the ctx reference and publish it once nothing can fail
+ * anymore; the original code leaked the reference (and left
+ * iommufd_ictx set) when iommufd_device_bind() failed.
+ */
+ iommufd_ctx_get(ictx);
+ vdpa->iommufd_ictx = ictx;
+ vdpa->iommufd_device = idev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_bind);
+
+/*
+ * unbind_iommufd op for emulated vdpa devices: tear down the access (if
+ * any), unbind the iommufd_device created at bind time, and drop the
+ * context reference.
+ */
+void vdpa_iommufd_emulated_unbind(struct vdpa_device *vdpa)
+{
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ if (vdpa->iommufd_access) {
+ iommufd_access_destroy(vdpa->iommufd_access);
+ vdpa->iommufd_access = NULL;
+ }
+ /*
+ * vdpa_iommufd_emulated_bind() created an iommufd_device; the
+ * original code never unbound it, leaking it on every unbind.
+ */
+ if (vdpa->iommufd_device) {
+ iommufd_device_unbind(vdpa->iommufd_device);
+ vdpa->iommufd_device = NULL;
+ }
+ iommufd_ctx_put(vdpa->iommufd_ictx);
+ vdpa->iommufd_ictx = NULL;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_unbind);
+
+/*
+ * attach_ioas op for emulated vdpa devices: create an iommufd access
+ * against the IOAS identified by @pt_id, using vdpa_user_ops for
+ * unmap notifications.
+ */
+int vdpa_iommufd_emulated_attach_ioas(struct vdpa_device *vdpa, u32 *pt_id)
+{
+ struct iommufd_access *user;
+
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ /* Attach without a prior successful bind is a caller bug */
+ if (!vdpa->iommufd_ictx)
+ return -EINVAL;
+
+ user = iommufd_access_create(vdpa->iommufd_ictx, *pt_id, &vdpa_user_ops,
+ vdpa);
+ if (IS_ERR(user))
+ return PTR_ERR(user);
+ vdpa->iommufd_access = user;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_attach_ioas);
+
+/*
+ * detach_ioas op for emulated vdpa devices: destroy the iommufd access
+ * created by vdpa_iommufd_emulated_attach_ioas().
+ */
+int vdpa_iommufd_emulated_detach_ioas(struct vdpa_device *vdpa)
+{
+ vhost_vdpa_lockdep_assert_held(vdpa);
+
+ /* Return a proper errno, not -1 (-EPERM), per kernel convention */
+ if (!vdpa->iommufd_ictx || !vdpa->iommufd_access)
+ return -EINVAL;
+
+ iommufd_access_destroy(vdpa->iommufd_access);
+ vdpa->iommufd_access = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_detach_ioas);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 790b296271f1..c470a5596d9c 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -291,6 +291,27 @@ static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
}
#endif
+/* Forward declarations; a pointer is enough for these prototypes */
+struct iommufd_ctx;
+struct vdpa_device;
+/* NOTE(review): defined elsewhere in the series — asserts the vhost-vdpa lock is held */
+void vhost_vdpa_lockdep_assert_held(struct vdpa_device *vdpa);
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
+ u32 *ioas_id, u32 *device_id);
+void vdpa_iommufd_unbind(struct vdpa_device *vdpa);
+#else
+/* Stubs when iommufd is not configured: bind fails, unbind is a no-op */
+static inline int vdpa_iommufd_bind(struct vdpa_device *vdpa,
+ struct iommufd_ctx *ictx, u32 *ioas_id,
+ u32 *device_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
+{
+}
+#endif
+
+
/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
--
2.34.3
next prev parent reply other threads:[~2023-09-23 17:05 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-23 17:05 [RFC 0/7] vdpa: Add support for iommufd Cindy Lu
2023-09-23 17:05 ` Cindy Lu [this message]
2023-09-26 2:47 ` [RFC 1/7] vhost/iommufd: Add the functions support iommufd Jason Wang
2023-09-23 17:05 ` [RFC 2/7] Kconfig: Add the new file vhost/iommufd Cindy Lu
2023-09-23 17:05 ` [RFC 3/7] vhost: Add 3 new uapi to support iommufd Cindy Lu
2023-09-25 13:49 ` Jason Gunthorpe
2023-09-26 2:47 ` Jason Wang
2023-09-23 17:05 ` [RFC 4/7] vdpa: change the map/unmap process " Cindy Lu
2023-09-25 13:45 ` Jason Gunthorpe
2023-09-26 8:09 ` Cindy Lu
2023-09-23 17:05 ` [RFC 5/7] vdpa: Add new vdpa_config_ops Cindy Lu
2023-09-26 2:47 ` Jason Wang
2023-09-23 17:05 ` [RFC 6/7] vdpa_sim :Add support for iommufd Cindy Lu
2023-09-25 13:46 ` Jason Gunthorpe
2023-09-23 17:05 ` [RFC 7/7] iommufd: Skip the CACHE_COHERENCY and iommu group check Cindy Lu
2023-09-25 13:50 ` Jason Gunthorpe
2023-09-26 8:02 ` Cindy Lu
2023-09-26 2:46 ` [RFC 0/7] vdpa: Add support for iommufd Jason Wang
2023-10-26 6:42 ` Michael S. Tsirkin
2023-10-26 6:48 ` Cindy Lu
2023-10-26 6:49 ` Michael S. Tsirkin
2023-10-26 7:03 ` Cindy Lu
2023-11-02 10:02 ` Michael S. Tsirkin
2023-11-02 12:09 ` Cindy Lu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230923170540.1447301-2-lulu@redhat.com \
--to=lulu@redhat.com \
--cc=jasowang@redhat.com \
--cc=jgg@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mst@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=virtualization@lists.linux-foundation.org \
--cc=yi.l.liu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).