From: Matthew Rosato <mjrosato@linux.ibm.com>
To: alex.williamson@redhat.com, cohuck@redhat.com
Cc: thuth@redhat.com, pmorel@linux.ibm.com, david@redhat.com,
schnelle@linux.ibm.com, qemu-devel@nongnu.org,
pasic@linux.ibm.com, borntraeger@de.ibm.com,
qemu-s390x@nongnu.org, rth@twiddle.net
Subject: [PATCH 1/3] vfio: Get DMA limit information
Date: Fri, 11 Sep 2020 12:49:05 -0400
Message-ID: <1599842947-4051-2-git-send-email-mjrosato@linux.ibm.com>
In-Reply-To: <1599842947-4051-1-git-send-email-mjrosato@linux.ibm.com>

The underlying host may limit the number of outstanding DMA mappings
for a Type1 IOMMU. Determine whether this is the case and, if so,
retrieve the number of outstanding mappings allowed.
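
For reference, here is a standalone sketch (not part of the diff below) of
the same two-pass VFIO_IOMMU_GET_INFO query followed by the capability-chain
walk, written against plain libc rather than QEMU's glib helpers. It assumes
a container fd that already has a Type1 IOMMU set and a kernel carrying the
corresponding kernel series; the capability ID and struct layout are defined
locally only for illustration and must really come from that kernel's
headers.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

#ifndef VFIO_IOMMU_TYPE1_INFO_DMA_LIMIT
/* Placeholder ID and layout for the sketch; the real definitions come
 * from the kernel headers of the matching kernel series. */
#define VFIO_IOMMU_TYPE1_INFO_DMA_LIMIT 2
struct vfio_iommu_type1_info_dma_limit {
    struct vfio_info_cap_header header;
    __u32 max;          /* maximum number of outstanding DMA mappings */
};
#endif

/* Return the host's DMA mapping limit, or 0 if none is reported. */
static unsigned int query_dma_limit(int container_fd)
{
    struct vfio_iommu_type1_info *info;
    struct vfio_info_cap_header *hdr;
    unsigned int limit = 0;
    uint32_t argsz = sizeof(*info);
    void *ptr;

    info = calloc(1, argsz);
    if (!info) {
        return 0;
    }
    info->argsz = argsz;

    /* First pass: the kernel raises argsz if the capability chain
     * does not fit in the buffer we supplied. */
    if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
        goto out;
    }
    if (info->argsz > argsz) {
        void *bigger = realloc(info, info->argsz);

        if (!bigger) {
            goto out;
        }
        info = bigger;
        argsz = info->argsz;
        memset(info, 0, argsz);
        info->argsz = argsz;
        /* Second pass: now large enough to hold every capability. */
        if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
            goto out;
        }
    }

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        goto out;       /* the kernel exposes no capability chain at all */
    }

    /* Walk the chain: cap_offset and next are byte offsets from info,
     * and a next of 0 points back at info itself, ending the loop. */
    ptr = info;
    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_LIMIT) {
            limit = ((struct vfio_iommu_type1_info_dma_limit *)hdr)->max;
            break;
        }
    }

out:
    free(info);
    return limit;
}

A caller would invoke query_dma_limit() once after VFIO_SET_IOMMU and treat
a return value of 0 as "no limit advertised", which is also how the QEMU
code below treats container->dma_limit.
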
Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
---
hw/vfio/common.c | 76 ++++++++++++++++++++++++++++++++++++++-----
include/hw/vfio/vfio-common.h | 1 +
2 files changed, 68 insertions(+), 9 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 3335714..84ac3be 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -844,6 +844,41 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
return NULL;
}
+static struct vfio_info_cap_header *
+vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
+{
+ struct vfio_info_cap_header *hdr;
+ void *ptr = info;
+
+ if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+ return NULL;
+ }
+
+ for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+ if (hdr->id == id) {
+ return hdr;
+ }
+ }
+
+ return NULL;
+}
+
+static unsigned int vfio_get_info_dma_limit(struct vfio_iommu_type1_info *info)
+{
+ struct vfio_info_cap_header *hdr;
+ struct vfio_iommu_type1_info_dma_limit *cap;
+
+ /* If the capability cannot be found, assume no DMA limiting */
+ hdr = vfio_get_iommu_type1_info_cap(info,
+ VFIO_IOMMU_TYPE1_INFO_DMA_LIMIT);
+ if (hdr == NULL) {
+ return 0;
+ }
+
+ cap = (void *) hdr;
+ return cap->max;
+}
+
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
struct vfio_region_info *info)
{
@@ -1285,7 +1320,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
case VFIO_TYPE1v2_IOMMU:
case VFIO_TYPE1_IOMMU:
{
- struct vfio_iommu_type1_info info;
+ struct vfio_iommu_type1_info *info;
+ uint32_t argsz;
/*
* FIXME: This assumes that a Type1 IOMMU can map any 64-bit
@@ -1294,15 +1330,37 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
* existing Type1 IOMMUs generally support any IOVA we're
* going to actually try in practice.
*/
- info.argsz = sizeof(info);
- ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
- /* Ignore errors */
- if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
- /* Assume 4k IOVA page size */
- info.iova_pgsizes = 4096;
+ argsz = sizeof(struct vfio_iommu_type1_info);
+ info = g_malloc0(argsz);
+ info->argsz = argsz;
+ /*
+ * If the specified argsz is not large enough to contain all
+ * capabilities it will be updated upon return. In this case
+ * use the updated value to get the entire capability chain.
+ */
+ ret = ioctl(fd, VFIO_IOMMU_GET_INFO, info);
+ if (argsz != info->argsz) {
+ argsz = info->argsz;
+ info = g_realloc(info, argsz);
+ info->argsz = argsz;
+ ret = ioctl(fd, VFIO_IOMMU_GET_INFO, info);
+ }
+ /* Set defaults on error */
+ if (ret) {
+ /* Assume 4k IOVA page size and no DMA limiting */
+ info->iova_pgsizes = 4096;
+ container->dma_limit = 0;
+ } else {
+ if (!(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
+ /* Assume 4k IOVA page size */
+ info->iova_pgsizes = 4096;
+ }
+ /* Obtain DMA limit from capability chain */
+ container->dma_limit = vfio_get_info_dma_limit(info);
}
- vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
- container->pgsizes = info.iova_pgsizes;
+ vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
+ container->pgsizes = info->iova_pgsizes;
+ g_free(info);
break;
}
case VFIO_SPAPR_TCE_v2_IOMMU:
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index c78f3ff..0ee4af0 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -74,6 +74,7 @@ typedef struct VFIOContainer {
Error *error;
bool initialized;
unsigned long pgsizes;
+ unsigned int dma_limit;
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
QLIST_HEAD(, VFIOGroup) group_list;
--
1.8.3.1