From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH v9 3/7] iommu: push use of type-safe DFN and MFN into iommu_ops
Date: Thu, 13 Sep 2018 16:21:23 +0100
Message-ID: <20180913152128.6114-4-paul.durrant@citrix.com>
In-Reply-To: <20180913152128.6114-1-paul.durrant@citrix.com>

This patch modifies the methods in struct iommu_ops to take type-safe
DFN and MFN parameters, following on from the prior patch that
converted the functions exported in xen/iommu.h.
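
For reference, the type safety in question comes from wrapping the raw
frame number in a single-member struct, so the compiler rejects code
that mixes DFN and MFN values. The following is a minimal sketch of
what Xen's TYPE_SAFE machinery expands to in debug builds (member and
helper naming is abbreviated here; in release builds the wrappers
typically collapse to plain integers, so the typing has no runtime
cost):

    /* Device frame number: a page frame in IOMMU (DMA) address space. */
    typedef struct { unsigned long dfn; } dfn_t;

    static inline dfn_t _dfn(unsigned long dfn)        /* raw -> typed */
    {
        return (dfn_t){ dfn };
    }

    static inline unsigned long dfn_x(dfn_t dfn)       /* typed -> raw */
    {
        return dfn.dfn;
    }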

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Roger Pau Monne <roger.pau@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@citrix.com>

v9:
 - Re-base.

v7:
 - Re-base and re-name BFN -> DFN.
 - Added Jan's A-b since re-naming was purely mechanical.

v6:
 - Re-base.

v3:
 - Remove some use of intermediate 'frame' variables.

v2:
 - Addressed comments from Jan.
 - Extend use of intermediate 'frame' variable to avoid directly
   encapsulating gfn values as bfns (now dfns) or vice versa.
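
   For illustration, the 'frame' pattern looks like this in the AMD
   unity-map hunk below: the raw frame is computed once and then
   wrapped independently as a DFN and as an MFN, rather than
   converting one typed value into the other:

     unsigned long frame = gfn + i;

     rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags);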
---
 xen/drivers/passthrough/amd/iommu_map.c       | 46 ++++++++++++++++-----------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  2 +-
 xen/drivers/passthrough/arm/smmu.c            | 16 +++++-----
 xen/drivers/passthrough/iommu.c               |  9 +++---
 xen/drivers/passthrough/vtd/iommu.c           | 26 +++++++--------
 xen/drivers/passthrough/x86/iommu.c           |  2 +-
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  8 ++---
 xen/include/xen/iommu.h                       | 13 +++++---
 8 files changed, 67 insertions(+), 55 deletions(-)
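
The VT-d hunks below also switch from __dfn_to_daddr() to the typed
dfn_to_daddr(), and use daddr_to_dfn() and mfn_to_maddr() for the
reverse and MFN conversions. As a hedged sketch of their semantics,
assuming the usual 4k page shift (the in-tree versions are thin
wrappers over the pre-existing __dfn_to_daddr()-style macros, and
mfn_to_maddr() is the analogous MFN-to-machine-address helper):

    static inline paddr_t dfn_to_daddr(dfn_t dfn)
    {
        /* DFN -> device (DMA) address. */
        return (paddr_t)dfn_x(dfn) << PAGE_SHIFT_4K;
    }

    static inline dfn_t daddr_to_dfn(paddr_t daddr)
    {
        /* Device (DMA) address -> DFN. */
        return _dfn((unsigned long)(daddr >> PAGE_SHIFT_4K));
    }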

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 61ade71850..c89c54fdb6 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -631,7 +631,7 @@ static int update_paging_mode(struct domain *d, unsigned long dfn)
     return 0;
 }
 
-int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
+int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
                        unsigned int flags)
 {
     bool_t need_flush = 0;
@@ -651,7 +651,8 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
     if ( rc )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %lx\n", dfn);
+        AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %"PRI_dfn"\n",
+                        dfn_x(dfn));
         domain_crash(d);
         return rc;
     }
@@ -660,25 +661,27 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
      * we might need a deeper page table for wider dfn now */
     if ( is_hvm_domain(d) )
     {
-        if ( update_paging_mode(d, dfn) )
+        if ( update_paging_mode(d, dfn_x(dfn)) )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed dfn = %lx\n", dfn);
+            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
+                            dfn_x(dfn));
             domain_crash(d);
             return -EFAULT;
         }
     }
 
-    if ( iommu_pde_from_dfn(d, dfn, pt_mfn) || (pt_mfn[1] == 0) )
+    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %lx\n", dfn);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
+                        dfn_x(dfn));
         domain_crash(d);
         return -EFAULT;
     }
 
     /* Install 4k mapping first */
-    need_flush = set_iommu_pte_present(pt_mfn[1], dfn, mfn,
+    need_flush = set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
                                        IOMMU_PAGING_MODE_LEVEL_1,
                                        !!(flags & IOMMUF_writable),
                                        !!(flags & IOMMUF_readable));
@@ -690,7 +693,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
     /* 4K mapping for PV guests never changes, 
      * no need to flush if we trust non-present bits */
     if ( is_hvm_domain(d) )
-        amd_iommu_flush_pages(d, dfn, 0);
+        amd_iommu_flush_pages(d, dfn_x(dfn), 0);
 
     for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
           merge_level <= hd->arch.paging_mode; merge_level++ )
@@ -698,15 +701,16 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
         if ( pt_mfn[merge_level] == 0 )
             break;
         if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
-                                     dfn, mfn, merge_level) )
+                                     dfn_x(dfn), mfn_x(mfn), merge_level) )
             break;
 
-        if ( iommu_merge_pages(d, pt_mfn[merge_level], dfn,
+        if ( iommu_merge_pages(d, pt_mfn[merge_level], dfn_x(dfn),
                                flags, merge_level) )
         {
             spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
-                            "dfn = %lx mfn = %lx\n", merge_level, dfn, mfn);
+                            "dfn = %"PRI_dfn" mfn = %"PRI_mfn"\n",
+                            merge_level, dfn_x(dfn), mfn_x(mfn));
             domain_crash(d);
             return -EFAULT;
         }
@@ -720,7 +724,7 @@ out:
     return 0;
 }
 
-int amd_iommu_unmap_page(struct domain *d, unsigned long dfn)
+int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
 {
     unsigned long pt_mfn[7];
     struct domain_iommu *hd = dom_iommu(d);
@@ -742,31 +746,33 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long dfn)
      * we might need a deeper page table for larger dfn now */
     if ( is_hvm_domain(d) )
     {
-        int rc = update_paging_mode(d, dfn);
+        int rc = update_paging_mode(d, dfn_x(dfn));
 
         if ( rc )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed dfn = %lx\n", dfn);
+            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
+                            dfn_x(dfn));
             if ( rc != -EADDRNOTAVAIL )
                 domain_crash(d);
             return rc;
         }
     }
 
-    if ( iommu_pde_from_dfn(d, dfn, pt_mfn) || (pt_mfn[1] == 0) )
+    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %lx\n", dfn);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
+                        dfn_x(dfn));
         domain_crash(d);
         return -EFAULT;
     }
 
     /* mark PTE as 'page not present' */
-    clear_iommu_pte_present(pt_mfn[1], dfn);
+    clear_iommu_pte_present(pt_mfn[1], dfn_x(dfn));
     spin_unlock(&hd->arch.mapping_lock);
 
-    amd_iommu_flush_pages(d, dfn, 0);
+    amd_iommu_flush_pages(d, dfn_x(dfn), 0);
 
     return 0;
 }
@@ -787,7 +793,9 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
     gfn = phys_addr >> PAGE_SHIFT;
     for ( i = 0; i < npages; i++ )
     {
-        rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags);
+        unsigned long frame = gfn + i;
+
+        rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags);
         if ( rt != 0 )
             return rt;
     }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index bcc47eb96c..2f38752ebe 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -278,7 +278,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
              */
             if ( mfn_valid(_mfn(pfn)) )
             {
-                int ret = amd_iommu_map_page(d, pfn, pfn,
+                int ret = amd_iommu_map_page(d, _dfn(pfn), _mfn(pfn),
                                              IOMMUF_readable|IOMMUF_writable);
 
                 if ( !rc )
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 1ff09bbb02..d66601b62e 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2550,8 +2550,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
 	return 0;
 }
 
-static int __must_check arm_smmu_iotlb_flush(struct domain *d,
-                                             unsigned long dfn,
+static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
                                              unsigned int page_count)
 {
 	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
@@ -2746,8 +2745,8 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
 	xfree(xen_domain);
 }
 
-static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
-			unsigned long mfn, unsigned int flags)
+static int __must_check arm_smmu_map_page(struct domain *d, dfn_t dfn,
+					  mfn_t mfn, unsigned int flags)
 {
 	p2m_type_t t;
 
@@ -2760,7 +2759,7 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
 	 * function should only be used by gnttab code with gfn == mfn == dfn.
 	 */
 	BUG_ON(!is_domain_direct_mapped(d));
-	BUG_ON(mfn != dfn);
+	BUG_ON(mfn_x(mfn) != dfn_x(dfn));
 
 	/* We only support readable and writable flags */
 	if (!(flags & (IOMMUF_readable | IOMMUF_writable)))
@@ -2772,10 +2771,11 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
 	 * The function guest_physmap_add_entry replaces the current mapping
 	 * if there is already one...
 	 */
-	return guest_physmap_add_entry(d, _gfn(dfn), _mfn(dfn), 0, t);
+	return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)),
+				       0, t);
 }
 
-static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long dfn)
+static int __must_check arm_smmu_unmap_page(struct domain *d, dfn_t dfn)
 {
 	/*
 	 * This function should only be used by gnttab code when the domain
@@ -2784,7 +2784,7 @@ static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long dfn)
 	if ( !is_domain_direct_mapped(d) )
 		return -EINVAL;
 
-	return guest_physmap_remove_page(d, _gfn(dfn), _mfn(dfn), 0);
+	return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0);
 }
 
 static const struct iommu_ops arm_smmu_iommu_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 9390b1366f..ac32a7022b 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -224,7 +224,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
 
-            ret = hd->platform_ops->map_page(d, dfn, mfn, mapping);
+            ret = hd->platform_ops->map_page(d, _dfn(dfn), _mfn(mfn),
+                                             mapping);
             if ( !rc )
                 rc = ret;
 
@@ -304,7 +305,7 @@ int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, dfn_x(dfn), mfn_x(mfn), flags);
+    rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -327,7 +328,7 @@ int iommu_unmap_page(struct domain *d, dfn_t dfn)
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, dfn_x(dfn));
+    rc = hd->platform_ops->unmap_page(d, dfn);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -367,7 +368,7 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;
 
-    rc = hd->platform_ops->iotlb_flush(d, dfn_x(dfn), page_count);
+    rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 88720d4406..63eabb3c1b 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -584,8 +584,7 @@ static int __must_check iommu_flush_all(void)
     return rc;
 }
 
-static int __must_check iommu_flush_iotlb(struct domain *d,
-                                          unsigned long dfn,
+static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
                                           bool_t dma_old_pte_present,
                                           unsigned int page_count)
 {
@@ -612,12 +611,12 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
         if ( iommu_domid == -1 )
             continue;
 
-        if ( page_count != 1 || dfn == dfn_x(INVALID_DFN) )
+        if ( page_count != 1 || dfn_eq(dfn, INVALID_DFN) )
             rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
                                        0, flush_dev_iotlb);
         else
             rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
-                                       __dfn_to_daddr(dfn),
+                                       dfn_to_daddr(dfn),
                                        PAGE_ORDER_4K,
                                        !dma_old_pte_present,
                                        flush_dev_iotlb);
@@ -633,7 +632,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
 }
 
 static int __must_check iommu_flush_iotlb_pages(struct domain *d,
-                                                unsigned long dfn,
+                                                dfn_t dfn,
                                                 unsigned int page_count)
 {
     return iommu_flush_iotlb(d, dfn, 1, page_count);
@@ -641,7 +640,7 @@ static int __must_check iommu_flush_iotlb_pages(struct domain *d,
 
 static int __must_check iommu_flush_iotlb_all(struct domain *d)
 {
-    return iommu_flush_iotlb(d, dfn_x(INVALID_DFN), 0, 0);
+    return iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
 }
 
 /* clear one page's page table */
@@ -676,7 +675,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
+        rc = iommu_flush_iotlb_pages(domain, daddr_to_dfn(addr), 1);
 
     unmap_vtd_domain_page(page);
 
@@ -1768,8 +1767,7 @@ static void iommu_domain_teardown(struct domain *d)
 }
 
 static int __must_check intel_iommu_map_page(struct domain *d,
-                                             unsigned long dfn,
-                                             unsigned long mfn,
+                                             dfn_t dfn, mfn_t mfn,
                                              unsigned int flags)
 {
     struct domain_iommu *hd = dom_iommu(d);
@@ -1787,16 +1785,16 @@ static int __must_check intel_iommu_map_page(struct domain *d,
 
     spin_lock(&hd->arch.mapping_lock);
 
-    pg_maddr = addr_to_dma_page_maddr(d, __dfn_to_daddr(dfn), 1);
+    pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), 1);
     if ( pg_maddr == 0 )
     {
         spin_unlock(&hd->arch.mapping_lock);
         return -ENOMEM;
     }
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
-    pte = page + (dfn & LEVEL_MASK);
+    pte = page + (dfn_x(dfn) & LEVEL_MASK);
     old = *pte;
-    dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
+    dma_set_pte_addr(new, mfn_to_maddr(mfn));
     dma_set_pte_prot(new,
                      ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
                      ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
@@ -1824,13 +1822,13 @@ static int __must_check intel_iommu_map_page(struct domain *d,
 }
 
 static int __must_check intel_iommu_unmap_page(struct domain *d,
-                                               unsigned long dfn)
+                                               dfn_t dfn)
 {
     /* Do nothing if hardware domain and iommu supports pass thru. */
     if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
         return 0;
 
-    return dma_pte_clear_one(d, __dfn_to_daddr(dfn));
+    return dma_pte_clear_one(d, dfn_to_daddr(dfn));
 }
 
 int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index d6cbeae5c9..70e79c20f7 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -68,7 +68,7 @@ int arch_iommu_populate_page_table(struct domain *d)
             {
                 ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                 BUG_ON(SHARED_M2P(gfn));
-                rc = hd->platform_ops->map_page(d, gfn, mfn,
+                rc = hd->platform_ops->map_page(d, _dfn(gfn), _mfn(mfn),
                                                 IOMMUF_readable |
                                                 IOMMUF_writable);
             }
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 99bc21c7b3..3083d625bd 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -52,9 +52,9 @@ int amd_iommu_init(void);
 int amd_iommu_update_ivrs_mapping_acpi(void);
 
 /* mapping functions */
-int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
-                                    unsigned long mfn, unsigned int flags);
-int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
+                                    mfn_t mfn, unsigned int flags);
+int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn);
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
@@ -77,7 +77,7 @@ void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
 
 /* send cmd to iommu */
 void amd_iommu_flush_all_pages(struct domain *d);
-void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
+void amd_iommu_flush_pages(struct domain *d, unsigned long dfn,
                            unsigned int order);
 void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                            uint64_t gaddr, unsigned int order);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index f9d86fc816..7313957c81 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -48,6 +48,11 @@ static inline dfn_t dfn_add(dfn_t dfn, unsigned long i)
     return _dfn(dfn_x(dfn) + i);
 }
 
+static inline bool_t dfn_eq(dfn_t x, dfn_t y)
+{
+    return dfn_x(x) == dfn_x(y);
+}
+
 extern bool_t iommu_enable, iommu_enabled;
 extern bool_t force_iommu, iommu_verbose;
 extern bool_t iommu_workaround_bios_bug, iommu_igfx;
@@ -174,9 +179,9 @@ struct iommu_ops {
 #endif /* HAS_PCI */
 
     void (*teardown)(struct domain *d);
-    int __must_check (*map_page)(struct domain *d, unsigned long dfn,
-                                 unsigned long mfn, unsigned int flags);
-    int __must_check (*unmap_page)(struct domain *d, unsigned long dfn);
+    int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
+                                 unsigned int flags);
+    int __must_check (*unmap_page)(struct domain *d, dfn_t dfn);
     void (*free_page_table)(struct page_info *);
 #ifdef CONFIG_X86
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
@@ -187,7 +192,7 @@ struct iommu_ops {
     void (*resume)(void);
     void (*share_p2m)(struct domain *d);
     void (*crash_shutdown)(void);
-    int __must_check (*iotlb_flush)(struct domain *d, unsigned long dfn,
+    int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                     unsigned int page_count);
     int __must_check (*iotlb_flush_all)(struct domain *d);
     int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
-- 
2.11.0


