From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, mst@redhat.com, jasowang@redhat.com,
qemu-devel@nongnu.org
Subject: [RFC 7/7] vhost-vdpa-iommufd: Add iommufd support for vdpa
Date: Wed, 3 May 2023 17:13:37 +0800
Message-ID: <20230503091337.2130631-8-lulu@redhat.com>
In-Reply-To: <20230503091337.2130631-1-lulu@redhat.com>
This file adds iommufd support for vdpa, providing the following functions:
1> iommufd device bind/unbind:
bind the vdpa device to iommufd and attach the default ASID 0 to it.
2> iommufd map/unmap. The map function works as follows (see the caller
sketch after this list):
a. Check whether the asid has been used before.
b. If this is a new asid, allocate a new ioas_id and attach it to iommufd,
then save this information in vdpa_iommufd.
c. Use that ioas_id for the mapping.
The unmap path does the same lookup, but never allocates a new ioas.
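
For illustration only, here is a minimal caller sketch of the map path
described above. example_map_region() is a hypothetical helper, and the
v->ops / dma_map wiring is assumed to be in place as introduced earlier
in this series:

    /*
     * Hypothetical caller sketch (not part of this patch): map one
     * host-virtual region through the iommufd backend ops. It assumes
     * vdpa_backend_iommufd_ops_class_init(v) has installed iommufd_ops
     * into v->ops.
     */
    static int example_map_region(struct vhost_vdpa *v, hwaddr iova,
                                  hwaddr size, void *vaddr)
    {
        /* VHOST_VDPA_GUEST_PA_ASID is the default address space */
        return v->ops->dma_map(v->dev, VHOST_VDPA_GUEST_PA_ASID,
                               iova, size, vaddr, false);
    }
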
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
hw/virtio/meson.build | 2 +-
hw/virtio/vhost-vdpa-iommufd.c | 240 +++++++++++++++++++++++++++++++++
2 files changed, 241 insertions(+), 1 deletion(-)
create mode 100644 hw/virtio/vhost-vdpa-iommufd.c
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index f93be2e137..848fdb18eb 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -13,7 +13,7 @@ if have_vhost
specific_virtio_ss.add(files('vhost-user.c'))
endif
if have_vhost_vdpa
- specific_virtio_ss.add(files('vhost-vdpa.c', 'vhost-shadow-virtqueue.c'))
+ specific_virtio_ss.add(files('vhost-vdpa.c', 'vhost-shadow-virtqueue.c','vhost-vdpa-iommufd.c'))
endif
else
softmmu_virtio_ss.add(files('vhost-stub.c'))
diff --git a/hw/virtio/vhost-vdpa-iommufd.c b/hw/virtio/vhost-vdpa-iommufd.c
new file mode 100644
index 0000000000..6a0875c0a4
--- /dev/null
+++ b/hw/virtio/vhost-vdpa-iommufd.c
@@ -0,0 +1,240 @@
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#include <linux/vhost.h>
+#include <linux/vfio.h>
+#include <linux/iommufd.h>
+#include "sysemu/iommufd.h"
+#include "hw/virtio/vhost.h"
+
+#include "hw/virtio/vhost-vdpa.h"
+
+static int vdpa_device_attach_ioas(struct vhost_vdpa *dev,
+ VDPAIOMMUFDState *vdpa_iommufd)
+{
+ int ret;
+
+ struct vdpa_device_attach_iommufd_as attach_data = {
+ .argsz = sizeof(attach_data),
+ .flags = 0,
+ .ioas_id = vdpa_iommufd->ioas_id,
+ };
+ /* Attach device to an ioas within iommufd */
+ ret = ioctl(dev->device_fd, VDPA_DEVICE_ATTACH_IOMMUFD_AS, &attach_data);
+ if (ret) {
+ error_report("fail to bind device fd=%d to ioas_id=%d", dev->device_fd,
+ vdpa_iommufd->ioas_id);
+ return ret;
+ }
+
+ return 0;
+}
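+/*
+ * Walk the per-device VDPAIOMMUFDState list and return the entry that
+ * matches @asid, or NULL if this asid has not been attached yet.
+ */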
+static VDPAIOMMUFDState *vdpa_get_ioas_by_asid(struct vhost_dev *hdev,
+ uint32_t asid)
+{
+ VDPAIOMMUFDState *vdpa_iommufd_ptr = hdev->vdev->iommufd_ptr;
+ while (vdpa_iommufd_ptr != NULL) {
+ if (asid == vdpa_iommufd_ptr->asid) {
+ return vdpa_iommufd_ptr;
+ }
+
+ vdpa_iommufd_ptr = vdpa_iommufd_ptr->next;
+ }
+
+ return NULL;
+}
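+/*
+ * Allocate a new ioas for @asid, attach the device to it and append the
+ * new entry to the per-device VDPAIOMMUFDState list.
+ */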
+static VDPAIOMMUFDState *vdpa_add_new_ioas_id(struct vhost_dev *hdev,
+ uint32_t asid)
+{
+ int ret;
+ uint32_t ioas_id;
+
+ struct vhost_vdpa *v = hdev->opaque;
+ VDPAIOMMUFDState *vdpa_iommufd_ptr = hdev->vdev->iommufd_ptr;
+ VDPAIOMMUFDState *vdpa_iommufd_new = g_malloc(sizeof(VDPAIOMMUFDState));
+
+ vdpa_iommufd_new->dev = hdev;
+ vdpa_iommufd_new->asid = asid;
+ vdpa_iommufd_new->iommufd = vdpa_iommufd_ptr->iommufd;
+
+ ret = iommufd_backend_get_ioas(vdpa_iommufd_new->iommufd, &ioas_id);
+ if (ret < 0) {
+ error_report("Failed to alloc ioas (%s)", strerror(errno));
+ return NULL;
+ }
+
+ vdpa_iommufd_new->ioas_id = ioas_id;
+ /* this is a new asid, attach it to iommufd */
+ ret = vdpa_device_attach_ioas(v, vdpa_iommufd_new);
+ if (ret < 0) {
+ error_report("Failed to attach ioas (%s)", strerror(errno));
+ return NULL;
+ }
+ while (vdpa_iommufd_ptr->next != NULL) {
+ vdpa_iommufd_ptr = vdpa_iommufd_ptr->next;
+ }
+ /* append this vdpa_iommufd to the end of the list */
+ vdpa_iommufd_ptr->next = vdpa_iommufd_new;
+ vdpa_iommufd_new->next = NULL;
+ return vdpa_iommufd_new;
+}
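+/*
+ * Map a host-virtual range into the ioas bound to @asid, allocating and
+ * attaching a new ioas on first use of that asid.
+ */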
+static int vdpa_iommufd_map(struct vhost_dev *hdev, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly)
+{
+ VDPAIOMMUFDState *vdpa_iommufd;
+
+ if (hdev->vdev == NULL) {
+ error_report("Failed to get vdev (%s)", strerror(errno));
+ return 0;
+ }
+ /* check whether this asid has been attached to iommufd before */
+ vdpa_iommufd = vdpa_get_ioas_by_asid(hdev, asid);
+ if (vdpa_iommufd == NULL) {
+ /* first use of this asid: allocate a new ioas and attach it to iommufd */
+ vdpa_iommufd = vdpa_add_new_ioas_id(hdev, asid);
+ if (vdpa_iommufd == NULL) {
+ return -EINVAL;
+ }
+ }
+ return iommufd_backend_map_dma(vdpa_iommufd->iommufd, vdpa_iommufd->ioas_id,
+ iova, size, vaddr, readonly);
+}
+
+
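+/* Unmap an IOVA range from the ioas that is bound to @asid. */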
+static int vdpa_iommufd_unmap(struct vhost_dev *hdev, uint32_t asid,
+ hwaddr iova, hwaddr size)
+{
+ VDPAIOMMUFDState *vdpa_iommufd;
+ if (hdev->vdev == NULL) {
+ error_report("Failed to get vdev (%s)", strerror(errno));
+ return 0;
+ }
+ /* check whether this asid has been attached to iommufd before */
+
+ vdpa_iommufd = vdpa_get_ioas_by_asid(hdev, asid);
+ if (vdpa_iommufd == NULL) {
+ error_report("Failed to get ioas (%s)", strerror(errno));
+ return 0;
+ }
+ return iommufd_backend_unmap_dma(vdpa_iommufd->iommufd,
+ vdpa_iommufd->ioas_id, iova, size);
+}
+
+
+static void vdpa_device_detach_iommufd(struct vhost_vdpa *v,
+ VDPAIOMMUFDState *vdpa_iommufd,
+ Error **errp)
+{
+ struct vdpa_device_detach_iommufd_as detach_data = {
+ .argsz = sizeof(detach_data),
+ .flags = 0,
+ };
+
+ if (ioctl(v->device_fd, VDPA_DEVICE_DETACH_IOMMUFD_AS, &detach_data)) {
+ error_report("error bind device fd=%d ", v->device_fd);
+ return;
+ }
+}
+
+
+static int vdpa_device_bind_iommufd(struct vhost_vdpa *dev,
+ VDPAIOMMUFDState *vdpa_iommufd,
+ Error **errp)
+{
+ struct vhost_vdpa_set_iommufd bind = {
+ .iommufd = vdpa_iommufd->iommufd->fd,
+ .ioas_id = vdpa_iommufd->ioas_id,
+ };
+
+ int ret;
+ /* Bind device to iommufd */
+ ret = ioctl(dev->device_fd, VHOST_VDPA_SET_IOMMU_FD, &bind);
+ if (ret) {
+ error_report("error bind device fd=%d to iommufd=%d", dev->device_fd,
+ bind.iommufd);
+ return ret;
+ }
+
+ vdpa_iommufd->devid = bind.out_devid;
+ vdpa_iommufd->hwptid = bind.out_hwptid;
+
+ return vdpa_device_attach_ioas(dev, vdpa_iommufd);
+}
+
+static void vdpa_iommufd_destroy(VDPAIOMMUFDState *vdpa_iommufd)
+{
+ g_free(vdpa_iommufd);
+}
+
+/*attach the device to iommufd */
+static int vdpa_iommufd_attach_device(struct vhost_vdpa *v, AddressSpace *as,
+ Error **errp)
+{
+ VDPAIOMMUFDState *vdpa_iommufd;
+ int ret;
+ uint32_t ioas_id;
+ Error *err = NULL;
+ struct vhost_dev *dev = v->dev;
+ vdpa_iommufd = dev->vdev->iommufd_ptr;
+
+ /*allocate a new IOAS */
+ ret = iommufd_backend_get_ioas(vdpa_iommufd->iommufd, &ioas_id);
+ if (ret < 0) {
+ close(v->device_fd);
+ error_report("Failed to alloc ioas (%s)", strerror(errno));
+ return ret;
+ }
+
+ vdpa_iommufd->ioas_id = ioas_id;
+ vdpa_iommufd->dev = dev;
+ /* use the default ASID*/
+ vdpa_iommufd->asid = VHOST_VDPA_GUEST_PA_ASID;
+ vdpa_iommufd->next = NULL;
+
+ vdpa_iommufd->as = as;
+ /*bind the default ASID to iommufd*/
+ ret = vdpa_device_bind_iommufd(v, vdpa_iommufd, &err);
+ if (ret) {
+ /* todo check if fail */
+ error_report("Failed to vdpa_device_bind_iommufd (%s)",
+ strerror(errno));
+ iommufd_backend_put_ioas(vdpa_iommufd->iommufd, ioas_id);
+
+ vdpa_iommufd_destroy(vdpa_iommufd);
+ return ret;
+ }
+
+ return ret;
+}
+
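+/* Detach the device from iommufd and release every ioas allocated for it. */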
+static void vdpa_iommufd_detach_device(struct vhost_vdpa *v)
+{
+ VDPAIOMMUFDState *vdpa_iommufd;
+
+ VDPAIOMMUFDState *vdpa_iommufd_tmp;
+ Error *err = NULL;
+
+ struct vhost_dev *dev = v->dev;
+ if (!dev->vdev) {
+ return;
+ }
+ vdpa_iommufd = dev->vdev->iommufd_ptr;
+ vdpa_device_detach_iommufd(v, vdpa_iommufd, &err);
+
+ while (vdpa_iommufd != NULL) {
+ iommufd_backend_put_ioas(vdpa_iommufd->iommufd, vdpa_iommufd->ioas_id);
+ vdpa_iommufd_tmp = vdpa_iommufd;
+ vdpa_iommufd = vdpa_iommufd->next;
+
+ vdpa_iommufd_destroy(vdpa_iommufd_tmp);
+ }
+}
+
+struct vdpa_iommu_backend_ops iommufd_ops = {
+ .dma_map = vdpa_iommufd_map,
+ .dma_unmap = vdpa_iommufd_unmap,
+ .attach_device = vdpa_iommufd_attach_device,
+ .detach_device = vdpa_iommufd_detach_device,
+};
+
+void vdpa_backend_iommufd_ops_class_init(struct vhost_vdpa *v)
+{
+ v->ops = &iommufd_ops;
+}
--
2.34.3