netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, jasowang@redhat.com, mst@redhat.com,
	yi.l.liu@intel.com, jgg@nvidia.com, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	netdev@vger.kernel.org
Subject: [RFC v1 1/8] vhost/iommufd: Add the functions support iommufd
Date: Sat,  4 Nov 2023 01:16:34 +0800	[thread overview]
Message-ID: <20231103171641.1703146-2-lulu@redhat.com> (raw)
In-Reply-To: <20231103171641.1703146-1-lulu@redhat.com>

Add a new file, vhost/iommufd.c, to support iommufd. This file
contains the iommufd functions for both emulated devices and
physical devices.

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 drivers/vhost/iommufd.c | 178 ++++++++++++++++++++++++++++++++++++++++
 drivers/vhost/vhost.h   |  21 +++++
 2 files changed, 199 insertions(+)
 create mode 100644 drivers/vhost/iommufd.c

diff --git a/drivers/vhost/iommufd.c b/drivers/vhost/iommufd.c
new file mode 100644
index 000000000000..113dda50a9b6
--- /dev/null
+++ b/drivers/vhost/iommufd.c
@@ -0,0 +1,178 @@
+#include <linux/vdpa.h>
+#include <linux/iommufd.h>
+
+#include "vhost.h"
+
+MODULE_IMPORT_NS(IOMMUFD);
+
+/*
+ * Bind a vdpa device to an iommufd context via the driver op.
+ *
+ * @ioas_id is accepted for interface symmetry but not consumed here;
+ * the IOAS attach happens through the attach_ioas path.
+ * Returns 0 on success (including "device does no DMA") or a negative
+ * errno from the driver.
+ */
+int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
+		      u32 *ioas_id, u32 *device_id)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	/*
+	 * If the driver doesn't provide this op then it means the device does
+	 * not do DMA at all. So nothing to do.
+	 */
+	if (!vdpa->config->bind_iommufd)
+		return 0;
+
+	/* Propagate the driver's result directly; no local bookkeeping. */
+	return vdpa->config->bind_iommufd(vdpa, ictx, device_id);
+}
+
+/* Undo vdpa_iommufd_bind(); the driver op is optional and skipped if absent. */
+void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (!vdpa->config->unbind_iommufd)
+		return;
+
+	vdpa->config->unbind_iommufd(vdpa);
+}
+
+/*
+ * bind_iommufd implementation for devices backed by a real DMA device:
+ * register the DMA device with iommufd and remember the handle in
+ * vdpa->iommufd_device.  Returns 0 or a negative errno from iommufd.
+ */
+int vdpa_iommufd_physical_bind(struct vdpa_device *vdpa,
+			       struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+	struct iommufd_device *idev;
+
+	/* Every other iommufd entry point asserts this lock; be consistent. */
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	idev = iommufd_device_bind(ictx, dma_dev, out_device_id);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
+	vdpa->iommufd_device = idev;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_bind);
+
+/*
+ * Tear down the iommufd binding of a physical vdpa device.  A live IOAS
+ * attachment must be detached before the device handle is unbound, so
+ * the statement order below is load-bearing.  The handle is cleared so
+ * a later bind starts from a clean state.
+ */
+void vdpa_iommufd_physical_unbind(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (vdpa->iommufd_attached) {
+		iommufd_device_detach(vdpa->iommufd_device);
+		vdpa->iommufd_attached = false;
+	}
+	iommufd_device_unbind(vdpa->iommufd_device);
+	vdpa->iommufd_device = NULL;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_unbind);
+
+/*
+ * Attach the physical device to an IOAS, or replace the current
+ * attachment if one exists.  Returns 0 on success or a negative errno;
+ * on failure the previous attachment state is left unchanged.
+ */
+int vdpa_iommufd_physical_attach_ioas(struct vdpa_device *vdpa,
+				      u32 *iommufd_ioasid)
+{
+	int rc;
+
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (WARN_ON(!vdpa->iommufd_device))
+		return -EINVAL;
+
+	/* replace keeps the existing attachment intact if it fails */
+	if (vdpa->iommufd_attached)
+		rc = iommufd_device_replace(vdpa->iommufd_device,
+					    iommufd_ioasid);
+	else
+		rc = iommufd_device_attach(vdpa->iommufd_device,
+					   iommufd_ioasid);
+	if (rc)
+		return rc;
+	vdpa->iommufd_attached = true;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_attach_ioas);
+
+/*
+ * Detach the physical device from its current IOAS.
+ * Returns 0 on success, -EINVAL when the device is not bound or not
+ * attached (a bare -1 would be misread by callers as -EPERM).
+ */
+int vdpa_iommufd_physical_detach_ioas(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (WARN_ON(!vdpa->iommufd_device) || !vdpa->iommufd_attached)
+		return -EINVAL;
+
+	iommufd_device_detach(vdpa->iommufd_device);
+	vdpa->iommufd_attached = false;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_detach_ioas);
+
+/*
+ * iommufd unmap callback for emulated devices.
+ *
+ * TODO: unmap the iova/length range in all ASIDs (e.g. via the
+ * driver's dma_unmap op).  Until that lands this is a deliberate
+ * no-op; the signature must match struct iommufd_access_ops.
+ * (The previous version assigned @data to an unused local, which
+ * triggered an unused-variable warning.)
+ */
+static void vdpa_emulated_unmap(void *data, unsigned long iova,
+				unsigned long length)
+{
+}
+
+static const struct iommufd_access_ops vdpa_user_ops = {
+	.needs_pin_pages = 1,
+	.unmap = vdpa_emulated_unmap,
+};
+
+/*
+ * bind_iommufd implementation for emulated devices (no backing DMA
+ * device): create an iommufd access handle instead of a device bind
+ * and stash it in vdpa->iommufd_access.
+ * Returns 0 on success or a negative errno.
+ *
+ * Note: declarations must precede statements; the kernel builds with
+ * -Wdeclaration-after-statement.
+ */
+int vdpa_iommufd_emulated_bind(struct vdpa_device *vdpa,
+			       struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+	struct iommufd_access *user;
+
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	user = iommufd_access_create(ictx, &vdpa_user_ops, vdpa, out_device_id);
+	if (IS_ERR(user))
+		return PTR_ERR(user);
+	vdpa->iommufd_access = user;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_bind);
+
+/* Undo vdpa_iommufd_emulated_bind(): destroy the access handle and reset state. */
+void vdpa_iommufd_emulated_unbind(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (!vdpa->iommufd_access)
+		return;
+
+	iommufd_access_destroy(vdpa->iommufd_access);
+	vdpa->iommufd_attached = false;
+	vdpa->iommufd_access = NULL;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_unbind);
+
+/*
+ * Attach the emulated device's access handle to an IOAS, or replace the
+ * current attachment.  Returns 0 on success or a negative errno.
+ * (The previous version kept an unused local `user`, which triggered a
+ * set-but-unused warning, and had no NULL guard on iommufd_access.)
+ */
+int vdpa_iommufd_emulated_attach_ioas(struct vdpa_device *vdpa,
+				      u32 *iommufd_ioasid)
+{
+	int rc;
+
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	/* Mirror the detach path: attach requires a prior emulated bind. */
+	if (WARN_ON(!vdpa->iommufd_access))
+		return -EINVAL;
+
+	if (vdpa->iommufd_attached)
+		rc = iommufd_access_replace(vdpa->iommufd_access,
+					    *iommufd_ioasid);
+	else
+		rc = iommufd_access_attach(vdpa->iommufd_access,
+					   *iommufd_ioasid);
+	if (rc)
+		return rc;
+	vdpa->iommufd_attached = true;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_attach_ioas);
+
+/*
+ * Detach the emulated device's access handle from its current IOAS.
+ * Returns 0 on success, -EINVAL when not bound or not attached
+ * (a bare -1 would be misread by callers as -EPERM).
+ */
+int vdpa_iommufd_emulated_detach_ioas(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (WARN_ON(!vdpa->iommufd_access) || !vdpa->iommufd_attached)
+		return -EINVAL;
+
+	iommufd_access_detach(vdpa->iommufd_access);
+	vdpa->iommufd_attached = false;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_detach_ioas);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f60d5f7bef94..179012e350f9 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -310,6 +310,27 @@ static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
 }
 #endif
 
+struct iommufd_ctx;
+struct vdpa_device;
+/* NOTE(review): presumably asserts the vhost-vdpa device lock is held;
+ * definition not visible in this patch — confirm against vhost/vdpa.c. */
+void vhost_vdpa_lockdep_assert_held(struct vdpa_device *vdpa);
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
+		      u32 *ioas_id, u32 *device_id);
+void vdpa_iommufd_unbind(struct vdpa_device *vdpa);
+#else
+/* Stubs for !CONFIG_IOMMUFD builds: binding is reported as unsupported,
+ * unbind is a harmless no-op. */
+static inline int vdpa_iommufd_bind(struct vdpa_device *vdpa,
+				    struct iommufd_ctx *ictx, u32 *ioas_id,
+				    u32 *device_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
+{
+}
+#endif
+
 /* Memory accessors */
 static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
 {
-- 
2.34.3


  reply	other threads:[~2023-11-03 17:16 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-11-03 17:16 [RFC v1 0/8] vhost-vdpa: add support for iommufd Cindy Lu
2023-11-03 17:16 ` Cindy Lu [this message]
2023-11-03 17:16 ` [RFC v1 2/8] Kconfig: Add the new file vhost/iommufd Cindy Lu
2023-11-06  8:53   ` Yi Liu
2023-11-07  6:15     ` Cindy Lu
2023-11-03 17:16 ` [RFC v1 3/8] vhost: Add 3 new uapi to support iommufd Cindy Lu
2023-11-06  7:27   ` Jason Wang
2023-11-06  7:30   ` Jason Wang
2023-11-07  6:57     ` Cindy Lu
2023-11-08  3:03       ` Jason Wang
2023-11-08  6:38         ` Cindy Lu
2023-11-08  7:09           ` Jason Wang
2023-11-10  2:31             ` Jason Wang
2023-11-10  6:49               ` Cindy Lu
2023-11-03 17:16 ` [RFC v1 4/8] vdpa: Add new vdpa_config_ops " Cindy Lu
2023-11-06  8:52   ` Yi Liu
2023-11-03 17:16 ` [RFC v1 5/8] vdpa_sim :Add support for iommufd Cindy Lu
2023-11-03 17:16 ` [RFC v1 6/8] vdpa: change the map/unmap process to support iommufd Cindy Lu
2023-11-06  8:54   ` Yi Liu
2023-11-07  6:14     ` Cindy Lu
2023-11-03 17:16 ` [RFC v1 7/8] vp_vdpa::Add support for iommufd Cindy Lu
2023-11-06  7:25   ` Jason Wang
2023-11-03 17:16 ` [RFC v1 8/8] iommu: expose the function iommu_device_use_default_domain Cindy Lu
2023-11-03 17:37   ` Jason Gunthorpe
2023-11-06  7:26   ` Jason Wang
2023-11-07  6:10     ` Cindy Lu
2023-11-08  3:03       ` Jason Wang
2023-11-08  7:05         ` Cindy Lu
2023-11-06  4:11 ` [RFC v1 0/8] vhost-vdpa: add support for iommufd Jason Wang
2023-11-06  8:05   ` Yi Liu
2023-11-07  7:30 ` Michael S. Tsirkin
2023-11-07 12:49   ` Jason Gunthorpe
2023-11-07 13:28     ` Michael S. Tsirkin
2023-11-07 14:12       ` Jason Gunthorpe
2023-11-07 14:30         ` Michael S. Tsirkin
2023-11-07 15:52           ` Jason Gunthorpe
2023-11-09 23:48             ` Michael S. Tsirkin
2023-11-10 14:00               ` Jason Gunthorpe
2023-11-07 17:02       ` Jakub Kicinski
2023-11-07 14:55     ` Michael S. Tsirkin
2023-11-07 15:48       ` Jason Gunthorpe
2023-11-07 16:11         ` Michael S. Tsirkin
2023-11-07 13:23 ` Michael S. Tsirkin
2024-01-10 22:25 ` Michael S. Tsirkin
2024-01-11  9:02   ` Cindy Lu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231103171641.1703146-2-lulu@redhat.com \
    --to=lulu@redhat.com \
    --cc=jasowang@redhat.com \
    --cc=jgg@nvidia.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mst@redhat.com \
    --cc=netdev@vger.kernel.org \
    --cc=virtualization@lists.linux-foundation.org \
    --cc=yi.l.liu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).