* [PATCH 0/3] s390x/pci: Accommodate vfio DMA limiting
From: Matthew Rosato @ 2020-09-11 16:49 UTC
  To: alex.williamson, cohuck
  Cc: thuth, pmorel, david, schnelle, qemu-devel, pasic, borntraeger,
	qemu-s390x, rth

Kernel commit 492855939bdb added a limit to the number of outstanding DMA
requests for a type1 vfio container.  However, lazy unmapping in s390 can
allow quite a large number of outstanding DMA requests to build up prior
to being purged, potentially covering the entire guest DMA space.  This
results in unexpected 'VFIO_MAP_DMA failed: No space left on device'
conditions seen in QEMU.
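
To illustrate the failure mode, here is a minimal userspace sketch (not
QEMU code; the type1 container fd is assumed to be already set up, and
the iova/vaddr arguments are placeholders):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Map a single 4k page into a type1 container.  After kernel commit
     * 492855939bdb, at most dma_entry_limit mappings (default 65535) may
     * be outstanding per container; the next map fails with ENOSPC, which
     * is the 'No space left on device' failure QEMU reports. */
    static int map_one_page(int container, __u64 iova, __u64 vaddr)
    {
        struct vfio_iommu_type1_dma_map map = {
            .argsz = sizeof(map),
            .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
            .vaddr = vaddr,
            .iova  = iova,
            .size  = 4096,
        };

        if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map) < 0) {
            fprintf(stderr, "VFIO_MAP_DMA failed: %s\n", strerror(errno));
            return -errno;
        }
        return 0;
    }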

This patchset adds support to QEMU for retrieving the DMA limit information
added to VFIO_IOMMU_GET_INFO.  The patches are separated into vfio bits,
which add support for reading VFIO_IOMMU_GET_INFO capability chains and
getting the per-container dma_limit, and s390 bits, which track DMA usage
on a per-container basis.

Associated kernel patch:
https://marc.info/?l=kvm&m=159984267812181&w=2

Matthew Rosato (3):
  vfio: Get DMA limit information
  s390x/pci: Honor DMA limits set by vfio
  vfio: Create shared routine for scanning info capabilities

 hw/s390x/s390-pci-bus.c       | 56 ++++++++++++++++++++++++---
 hw/s390x/s390-pci-bus.h       |  9 +++++
 hw/s390x/s390-pci-inst.c      | 29 +++++++++++---
 hw/s390x/s390-pci-inst.h      |  3 ++
 hw/vfio/common.c              | 89 ++++++++++++++++++++++++++++++++++---------
 include/hw/vfio/vfio-common.h |  1 +
 6 files changed, 159 insertions(+), 28 deletions(-)

-- 
1.8.3.1




* [PATCH 1/3] vfio: Get DMA limit information
From: Matthew Rosato @ 2020-09-11 16:49 UTC
  To: alex.williamson, cohuck
  Cc: thuth, pmorel, david, schnelle, qemu-devel, pasic, borntraeger,
	qemu-s390x, rth

The underlying host may be limiting the number of outstanding DMA
requests for a type1 IOMMU.  Determine whether this is the case and,
if so, retrieve the number of outstanding mappings allowed.

Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
---
 hw/vfio/common.c              | 76 ++++++++++++++++++++++++++++++++++++++-----
 include/hw/vfio/vfio-common.h |  1 +
 2 files changed, 68 insertions(+), 9 deletions(-)
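
For context, the capability the new helper searches for is chained past the
fixed part of struct vfio_iommu_type1_info via standard vfio_info_cap_header
entries; the patch also uses the usual two-call pattern for
VFIO_IOMMU_GET_INFO (query once with the fixed size, then reallocate and
re-query if the kernel reports a larger argsz).  The DMA-limit capability
itself comes from the associated, not-yet-merged kernel patch, so the exact
layout sketched below is an assumption based on this series:

    /* Standard uapi chaining header (linux/vfio.h) */
    struct vfio_info_cap_header {
        __u16 id;       /* e.g. VFIO_IOMMU_TYPE1_INFO_DMA_LIMIT */
        __u16 version;
        __u32 next;     /* offset of the next cap from the start of info;
                         * 0 ends the chain, which is why the walk stops
                         * when hdr wraps back to the info pointer */
    };

    /* Assumed layout, based on the associated kernel patch */
    struct vfio_iommu_type1_info_dma_limit {
        struct vfio_info_cap_header header;
        __u32 max;      /* maximum number of outstanding DMA mappings */
    };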

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 3335714..84ac3be 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -844,6 +844,41 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
     return NULL;
 }
 
+static struct vfio_info_cap_header *
+vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
+{
+    struct vfio_info_cap_header *hdr;
+    void *ptr = info;
+
+    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+        return NULL;
+    }
+
+    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+        if (hdr->id == id) {
+            return hdr;
+        }
+    }
+
+    return NULL;
+}
+
+static unsigned int vfio_get_info_dma_limit(struct vfio_iommu_type1_info *info)
+{
+    struct vfio_info_cap_header *hdr;
+    struct vfio_iommu_type1_info_dma_limit *cap;
+
+    /* If the capability cannot be found, assume no DMA limiting */
+    hdr = vfio_get_iommu_type1_info_cap(info,
+                                        VFIO_IOMMU_TYPE1_INFO_DMA_LIMIT);
+    if (hdr == NULL) {
+        return 0;
+    }
+
+    cap = (void *) hdr;
+    return cap->max;
+}
+
 static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                           struct vfio_region_info *info)
 {
@@ -1285,7 +1320,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     case VFIO_TYPE1v2_IOMMU:
     case VFIO_TYPE1_IOMMU:
     {
-        struct vfio_iommu_type1_info info;
+        struct vfio_iommu_type1_info *info;
+        uint32_t argsz;
 
         /*
          * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
@@ -1294,15 +1330,37 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
          * existing Type1 IOMMUs generally support any IOVA we're
          * going to actually try in practice.
          */
-        info.argsz = sizeof(info);
-        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
-        /* Ignore errors */
-        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
-            /* Assume 4k IOVA page size */
-            info.iova_pgsizes = 4096;
+        argsz = sizeof(struct vfio_iommu_type1_info);
+        info = g_malloc0(argsz);
+        info->argsz = argsz;
+        /*
+         * If the specified argsz is not large enough to contain all
+         * capabilities it will be updated upon return.  In this case
+         * use the updated value to get the entire capability chain.
+         */
+        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, info);
+        if (argsz != info->argsz) {
+            argsz = info->argsz;
+            info = g_realloc(info, argsz);
+            info->argsz = argsz;
+            ret = ioctl(fd, VFIO_IOMMU_GET_INFO, info);
+        }
+        /* Set defaults on error */
+        if (ret) {
+            /* Assume 4k IOVA page size and no DMA limiting */
+            info->iova_pgsizes = 4096;
+            container->dma_limit = 0;
+        } else {
+            if (!(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
+                /* Assume 4k IOVA page size */
+                info->iova_pgsizes = 4096;
+            }
+            /* Obtain DMA limit from capability chain */
+            container->dma_limit = vfio_get_info_dma_limit(info);
         }
-        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
-        container->pgsizes = info.iova_pgsizes;
+        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
+        container->pgsizes = info->iova_pgsizes;
+        g_free(info);
         break;
     }
     case VFIO_SPAPR_TCE_v2_IOMMU:
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index c78f3ff..0ee4af0 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -74,6 +74,7 @@ typedef struct VFIOContainer {
     Error *error;
     bool initialized;
     unsigned long pgsizes;
+    unsigned int dma_limit;
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
     QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
     QLIST_HEAD(, VFIOGroup) group_list;
-- 
1.8.3.1




* [PATCH 2/3] s390x/pci: Honor DMA limits set by vfio
From: Matthew Rosato @ 2020-09-11 16:49 UTC
  To: alex.williamson, cohuck
  Cc: thuth, pmorel, david, schnelle, qemu-devel, pasic, borntraeger,
	qemu-s390x, rth

When an s390 guest is using lazy unmapping, it can build up a very
large number of outstanding DMA requests, far beyond the default
limit configured for vfio.  Let's track DMA usage similarly to vfio
in the host, and trigger the guest to flush its DMA mappings
before vfio runs out.

Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
---
 hw/s390x/s390-pci-bus.c  | 56 +++++++++++++++++++++++++++++++++++++++++++-----
 hw/s390x/s390-pci-bus.h  |  9 ++++++++
 hw/s390x/s390-pci-inst.c | 29 +++++++++++++++++++------
 hw/s390x/s390-pci-inst.h |  3 +++
 4 files changed, 86 insertions(+), 11 deletions(-)
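
In outline, the accounting works as sketched below (the helper name here is
illustrative, not an actual QEMU symbol; the real logic lives in
s390_pci_update_iotlb() and rpcit_service_call() in the diff):

    /* One refcounted S390PCIDMACount exists per vfio container, keyed by
     * the container fd and shared by all zPCI devices in that container,
     * since the kernel enforces its limit per container. */
    static uint32_t zpci_account_one(S390PCIIOMMU *iommu, bool is_unmap)
    {
        if (is_unmap) {
            INC_DMA_AVAIL(iommu);   /* host unmap frees a mapping slot */
        } else {
            DEC_DMA_AVAIL(iommu);   /* host map consumes a mapping slot */
        }
        /* With no limit configured, report nonzero so RPCIT never stops */
        return iommu->dma_limit ? iommu->dma_limit->avail : 1;
    }

    /* When the count hits 0, rpcit_service_call() ends the refresh early
     * with cc 1 / ZPCI_RPCIT_ST_INSUFF_RES, and the guest reacts by
     * flushing its lazily-unmapped translations, raising avail again. */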

diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 92146a2..ab20d34 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -24,6 +24,8 @@
 #include "qemu/error-report.h"
 #include "qemu/module.h"
 
+#include "hw/vfio/pci.h"
+
 #ifndef DEBUG_S390PCI_BUS
 #define DEBUG_S390PCI_BUS  0
 #endif
@@ -737,6 +739,42 @@ static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
     object_unref(OBJECT(iommu));
 }
 
+static S390PCIDMACount *s390_start_dma_count(S390pciState *s, VFIODevice *vdev)
+{
+    int id = vdev->group->container->fd;
+    S390PCIDMACount *cnt;
+
+    if (vdev->group->container->dma_limit == 0) {
+        return NULL;
+    }
+
+    QTAILQ_FOREACH(cnt, &s->zpci_dma_limit, link) {
+        if (cnt->id  == id) {
+            cnt->users++;
+            return cnt;
+        }
+    }
+
+    cnt = g_new0(S390PCIDMACount, 1);
+    cnt->id = id;
+    cnt->users = 1;
+    cnt->avail = vdev->group->container->dma_limit;
+    QTAILQ_INSERT_TAIL(&s->zpci_dma_limit, cnt, link);
+    return cnt;
+}
+
+static void s390_end_dma_count(S390pciState *s, S390PCIDMACount *cnt)
+{
+    if (cnt == NULL) {
+        return;
+    }
+
+    cnt->users--;
+    if (cnt->users == 0) {
+        QTAILQ_REMOVE(&s->zpci_dma_limit, cnt, link);
+    }
+}
+
 static void s390_pcihost_realize(DeviceState *dev, Error **errp)
 {
     PCIBus *b;
@@ -764,6 +802,7 @@ static void s390_pcihost_realize(DeviceState *dev, Error **errp)
     s->bus_no = 0;
     QTAILQ_INIT(&s->pending_sei);
     QTAILQ_INIT(&s->zpci_devs);
+    QTAILQ_INIT(&s->zpci_dma_limit);
 
     css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                              S390_ADAPTER_SUPPRESSIBLE, errp);
@@ -902,6 +941,7 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
 {
     S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
     PCIDevice *pdev = NULL;
+    VFIOPCIDevice *vpdev = NULL;
     S390PCIBusDevice *pbdev = NULL;
 
     if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
@@ -941,17 +981,20 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
             }
         }
 
+        pbdev->pdev = pdev;
+        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
+        pbdev->iommu->pbdev = pbdev;
+        pbdev->state = ZPCI_FS_DISABLED;
+
         if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
             pbdev->fh |= FH_SHM_VFIO;
+            vpdev = container_of(pbdev->pdev, VFIOPCIDevice, pdev);
+            pbdev->iommu->dma_limit = s390_start_dma_count(s,
+                                                           &vpdev->vbasedev);
         } else {
             pbdev->fh |= FH_SHM_EMUL;
         }
 
-        pbdev->pdev = pdev;
-        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
-        pbdev->iommu->pbdev = pbdev;
-        pbdev->state = ZPCI_FS_DISABLED;
-
         if (s390_pci_msix_init(pbdev)) {
             error_setg(errp, "MSI-X support is mandatory "
                        "in the S390 architecture");
@@ -1004,6 +1047,9 @@ static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
         pbdev->fid = 0;
         QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
         g_hash_table_remove(s->zpci_table, &pbdev->idx);
+        if (pbdev->iommu->dma_limit) {
+            s390_end_dma_count(s, pbdev->iommu->dma_limit);
+        }
         qdev_unrealize(dev);
     }
 }
diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h
index 550f3cc..a588aa8 100644
--- a/hw/s390x/s390-pci-bus.h
+++ b/hw/s390x/s390-pci-bus.h
@@ -265,6 +265,13 @@ typedef struct S390IOTLBEntry {
     uint64_t perm;
 } S390IOTLBEntry;
 
+typedef struct S390PCIDMACount {
+    int id;
+    int users;
+    uint32_t avail;
+    QTAILQ_ENTRY(S390PCIDMACount) link;
+} S390PCIDMACount;
+
 typedef struct S390PCIBusDevice S390PCIBusDevice;
 typedef struct S390PCIIOMMU {
     Object parent_obj;
@@ -277,6 +284,7 @@ typedef struct S390PCIIOMMU {
     uint64_t pba;
     uint64_t pal;
     GHashTable *iotlb;
+    S390PCIDMACount *dma_limit;
 } S390PCIIOMMU;
 
 typedef struct S390PCIIOMMUTable {
@@ -352,6 +360,7 @@ typedef struct S390pciState {
     GHashTable *zpci_table;
     QTAILQ_HEAD(, SeiContainer) pending_sei;
     QTAILQ_HEAD(, S390PCIBusDevice) zpci_devs;
+    QTAILQ_HEAD(, S390PCIDMACount) zpci_dma_limit;
 } S390pciState;
 
 S390pciState *s390_get_phb(void);
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 2f7a7d7..6af9af4 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -32,6 +32,9 @@
         }                                                          \
     } while (0)
 
+#define INC_DMA_AVAIL(iommu) if (iommu->dma_limit) iommu->dma_limit->avail++;
+#define DEC_DMA_AVAIL(iommu) if (iommu->dma_limit) iommu->dma_limit->avail--;
+
 static void s390_set_status_code(CPUS390XState *env,
                                  uint8_t r, uint64_t status_code)
 {
@@ -572,7 +575,8 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
     return 0;
 }
 
-static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
+static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
+                                      S390IOTLBEntry *entry)
 {
     S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
     IOMMUTLBEntry notify = {
@@ -585,14 +589,15 @@ static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
 
     if (entry->perm == IOMMU_NONE) {
         if (!cache) {
-            return;
+            goto out;
         }
         g_hash_table_remove(iommu->iotlb, &entry->iova);
+        INC_DMA_AVAIL(iommu);
     } else {
         if (cache) {
             if (cache->perm == entry->perm &&
                 cache->translated_addr == entry->translated_addr) {
-                return;
+                goto out;
             }
 
             notify.perm = IOMMU_NONE;
@@ -606,9 +611,13 @@ static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
         cache->len = PAGE_SIZE;
         cache->perm = entry->perm;
         g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
+        DEC_DMA_AVAIL(iommu);
     }
 
     memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
+
+out:
+    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
 }
 
 int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
@@ -620,6 +629,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
     S390PCIIOMMU *iommu;
     S390IOTLBEntry entry;
     hwaddr start, end;
+    uint32_t dma_avail;
 
     if (env->psw.mask & PSW_MASK_PSTATE) {
         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
@@ -675,8 +685,9 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
         }
 
         start += entry.len;
-        while (entry.iova < start && entry.iova < end) {
-            s390_pci_update_iotlb(iommu, &entry);
+        dma_avail = 1; /* Assume non-zero dma_avail to start */
+        while (entry.iova < start && entry.iova < end && dma_avail > 0) {
+            dma_avail = s390_pci_update_iotlb(iommu, &entry);
             entry.iova += PAGE_SIZE;
             entry.translated_addr += PAGE_SIZE;
         }
@@ -689,7 +700,13 @@ err:
         s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
     } else {
         pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
-        setcc(cpu, ZPCI_PCI_LS_OK);
+        if (dma_avail > 0) {
+            setcc(cpu, ZPCI_PCI_LS_OK);
+        } else {
+            /* vfio DMA mappings are exhausted, trigger an RPCIT */
+            setcc(cpu, ZPCI_PCI_LS_ERR);
+            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
+        }
     }
     return 0;
 }
diff --git a/hw/s390x/s390-pci-inst.h b/hw/s390x/s390-pci-inst.h
index fa3bf8b..8ee3a3c 100644
--- a/hw/s390x/s390-pci-inst.h
+++ b/hw/s390x/s390-pci-inst.h
@@ -254,6 +254,9 @@ typedef struct ClpReqRspQueryPciGrp {
 #define ZPCI_STPCIFC_ST_INVAL_DMAAS   28
 #define ZPCI_STPCIFC_ST_ERROR_RECOVER 40
 
+/* Refresh PCI Translations status codes */
+#define ZPCI_RPCIT_ST_INSUFF_RES      16
+
 /* FIB function controls */
 #define ZPCI_FIB_FC_ENABLED     0x80
 #define ZPCI_FIB_FC_ERROR       0x40
-- 
1.8.3.1




* [PATCH 3/3] vfio: Create shared routine for scanning info capabilities
From: Matthew Rosato @ 2020-09-11 16:49 UTC
  To: alex.williamson, cohuck
  Cc: thuth, pmorel, david, schnelle, qemu-devel, pasic, borntraeger,
	qemu-s390x, rth

Rather than duplicating the same loop in multiple locations,
create a static function to do the work.

Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
---
 hw/vfio/common.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)
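
For a sense of how the shared routine gets used, here is a sketch of a
typical caller, modeled on the existing sparse-mmap lookup in this file
(the wrapping function is illustrative, not part of the patch):

    /* Find a region's sparse-mmap capability via the public wrapper,
     * which now delegates the chain walk to vfio_get_cap(). */
    static struct vfio_region_info_cap_sparse_mmap *
    find_sparse_mmap(struct vfio_region_info *info)
    {
        struct vfio_info_cap_header *hdr;

        hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
        if (!hdr) {
            return NULL;    /* region has no sparse-mmap capability */
        }
        return (struct vfio_region_info_cap_sparse_mmap *)hdr;
    }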

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 84ac3be..aac07bd 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -825,17 +825,12 @@ static void vfio_listener_release(VFIOContainer *container)
     }
 }
 
-struct vfio_info_cap_header *
-vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
+static struct vfio_info_cap_header *
+vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
 {
     struct vfio_info_cap_header *hdr;
-    void *ptr = info;
-
-    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
-        return NULL;
-    }
 
-    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
         if (hdr->id == id) {
             return hdr;
         }
@@ -844,23 +839,25 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
     return NULL;
 }
 
+
+struct vfio_info_cap_header *
+vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
+{
+    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
+        return NULL;
+    }
+
+    return vfio_get_cap((void *)info, info->cap_offset, id);
+}
+
 static struct vfio_info_cap_header *
 vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
 {
-    struct vfio_info_cap_header *hdr;
-    void *ptr = info;
-
     if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
         return NULL;
     }
 
-    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
-        if (hdr->id == id) {
-            return hdr;
-        }
-    }
-
-    return NULL;
+    return vfio_get_cap((void *)info, info->cap_offset, id);
 }
 
 static unsigned int vfio_get_info_dma_limit(struct vfio_iommu_type1_info *info)
-- 
1.8.3.1



