From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, alex.williamson@redhat.com, jasowang@redhat.com,
mst@redhat.com, pbonzini@redhat.com, peterx@redhat.com,
david@redhat.com, f4bug@amsat.org, sgarzare@redhat.com
Cc: qemu-devel@nongnu.org
Subject: [PATCH v3 2/2] vhost-vdpa: add support for vIOMMU
Date: Wed, 26 Oct 2022 00:37:34 +0800
Message-ID: <20221025163734.965367-3-lulu@redhat.com>
In-Reply-To: <20221025163734.965367-1-lulu@redhat.com>
Add support for vIOMMU. Add new functions to handle IOMMU memory
regions:

- During iommu_region_add, register a dedicated IOMMU notifier and
  store it in a per-device list of notifiers.
- During iommu_region_del, look up the matching notifier in the list,
  unregister it and remove it from the list.

The memory listener is now registered against the device's DMA address
space (dev->vdev->dma_as) rather than address_space_memory, and is
unregistered on device stop instead of at cleanup, so mappings are only
tracked while the device is started.

Verified with the vp_vdpa and vdpa_sim_net drivers; a sketch of the
resulting notifier flow follows the diffstat.
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
hw/virtio/vhost-vdpa.c | 134 ++++++++++++++++++++++++++++++---
include/hw/virtio/vhost-vdpa.h | 10 +++
2 files changed, 133 insertions(+), 11 deletions(-)
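
A quick sketch of the notifier lifecycle this patch introduces, for
reviewers (an illustrative summary of the code below, not part of the
change itself; all names are from the diff):

    /* region_add(iommu MR)
     *   -> iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify, ...)
     *   -> memory_region_register_iommu_notifier(section->mr, &iommu->n, ...)
     *   -> QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next)
     *   -> memory_region_iommu_replay(iommu->iommu_mr, &iommu->n)
     *
     * guest IOTLB update
     *   -> vhost_vdpa_iommu_map_notify()
     *      -> vhost_vdpa_dma_map() / vhost_vdpa_dma_unmap()
     *
     * region_del(iommu MR)
     *   -> memory_region_unregister_iommu_notifier()
     *   -> QLIST_REMOVE() + g_free()
     */
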
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3ff9ce3501..229b8bdfd5 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -26,6 +26,7 @@
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"
+#include "hw/virtio/virtio-access.h"
/*
* Return one past the end of the end of section. Be careful with uint64_t
@@ -44,7 +45,6 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
uint64_t iova_min,
uint64_t iova_max)
{
- Int128 llend;
if ((!memory_region_is_ram(section->mr) &&
!memory_region_is_iommu(section->mr)) ||
@@ -61,14 +61,6 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
return true;
}
- llend = vhost_vdpa_section_end(section);
- if (int128_gt(llend, int128_make64(iova_max))) {
- error_report("RAM section out of device range (max=0x%" PRIx64
- ", end addr=0x%" PRIx64 ")",
- iova_max, int128_get64(llend));
- return true;
- }
-
return false;
}
@@ -173,6 +165,116 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
v->iotlb_batch_begin_sent = false;
}
+
+static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
+
+ hwaddr iova = iotlb->iova + iommu->iommu_offset;
+ struct vhost_vdpa *v = iommu->dev;
+ void *vaddr;
+ int ret;
+
+ if (iotlb->target_as != &address_space_memory) {
+ error_report("Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+ return;
+ }
+ RCU_READ_LOCK_GUARD();
+ vhost_vdpa_iotlb_batch_begin_once(v);
+
+ if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
+ bool read_only;
+
+ if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only,
+ &address_space_memory)) {
+ return;
+ }
+ ret =
+ vhost_vdpa_dma_map(v, iova, iotlb->addr_mask + 1, vaddr, read_only);
+ if (ret) {
+ error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
+ "0x%" HWADDR_PRIx ", %p) = %d (%m)",
+ v, iova, iotlb->addr_mask + 1, vaddr, ret);
+ }
+ } else {
+ ret = vhost_vdpa_dma_unmap(v, iova, iotlb->addr_mask + 1);
+ if (ret) {
+ error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
+ "0x%" HWADDR_PRIx ") = %d (%m)",
+ v, iova, iotlb->addr_mask + 1, ret);
+ }
+ }
+}
+
+static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+
+ struct vdpa_iommu *iommu;
+ Int128 end;
+ int iommu_idx;
+ IOMMUMemoryRegion *iommu_mr;
+ int ret;
+
+ if (!memory_region_is_iommu(section->mr)) {
+ return;
+ }
+
+ iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+
+ iommu = g_malloc0(sizeof(*iommu));
+ end = int128_add(int128_make64(section->offset_within_region),
+ section->size);
+ end = int128_sub(end, int128_one());
+ iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+ MEMTXATTRS_UNSPECIFIED);
+
+ iommu->iommu_mr = iommu_mr;
+
+ iommu_notifier_init(
+ &iommu->n, vhost_vdpa_iommu_map_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
+ section->offset_within_region, int128_get64(end), iommu_idx);
+ iommu->iommu_offset =
+ section->offset_within_address_space - section->offset_within_region;
+ iommu->dev = v;
+
+ ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
+ if (ret) {
+ g_free(iommu);
+ return;
+ }
+
+ QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
+ memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
+
+ return;
+}
+
+static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+
+ struct vdpa_iommu *iommu;
+
+ if (!memory_region_is_iommu(section->mr)) {
+ return;
+ }
+
+ QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
+ {
+ if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
+ iommu->n.start == section->offset_within_region) {
+ memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
+ QLIST_REMOVE(iommu, iommu_next);
+ g_free(iommu);
+ break;
+ }
+ }
+}
+
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
@@ -186,6 +288,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
v->iova_range.last)) {
return;
}
+ if (memory_region_is_iommu(section->mr)) {
+ vhost_vdpa_iommu_region_add(listener, section);
+ return;
+ }
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
@@ -260,6 +366,10 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
v->iova_range.last)) {
return;
}
+ if (memory_region_is_iommu(section->mr)) {
+ vhost_vdpa_iommu_region_del(listener, section);
+ return;
+ }
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
@@ -312,6 +422,7 @@ static const MemoryListener vhost_vdpa_memory_listener = {
.region_del = vhost_vdpa_listener_region_del,
};
+
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
void *arg)
{
@@ -587,7 +698,6 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
v = dev->opaque;
trace_vhost_vdpa_cleanup(dev, v);
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
- memory_listener_unregister(&v->listener);
vhost_vdpa_svq_cleanup(dev);
dev->opaque = NULL;
@@ -1127,12 +1237,14 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
}
if (started) {
- memory_listener_register(&v->listener, &address_space_memory);
+ memory_listener_register(&v->listener, dev->vdev->dma_as);
+
return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
} else {
vhost_vdpa_reset_device(dev);
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER);
+
memory_listener_unregister(&v->listener);
return 0;
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index d10a89303e..64a46e37cb 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -41,8 +41,18 @@ typedef struct vhost_vdpa {
void *shadow_vq_ops_opaque;
struct vhost_dev *dev;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
+ QLIST_HEAD(, vdpa_iommu) iommu_list;
+ IOMMUNotifier n;
} VhostVDPA;
+struct vdpa_iommu {
+ struct vhost_vdpa *dev;
+ IOMMUMemoryRegion *iommu_mr;
+ hwaddr iommu_offset;
+ IOMMUNotifier n;
+ QLIST_ENTRY(vdpa_iommu) iommu_next;
+};
+
int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
void *vaddr, bool readonly);
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
--
2.34.3
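
For completeness, one way the vIOMMU path can be exercised end to end
(an illustrative invocation, assuming a vp_vdpa device already bound to
/dev/vhost-vdpa-0; caching-mode=on is needed on intel-iommu so that MAP
notifications reach the listener):

    qemu-system-x86_64 -M q35,accel=kvm,kernel-irqchip=split \
        -device intel-iommu,intremap=on,caching-mode=on,device-iotlb=on \
        -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
        -device virtio-net-pci,netdev=vdpa0,iommu_platform=on,ats=on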