From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
George Dunlap <george.dunlap@citrix.com>
Subject: [PATCH v6 03/14] iommu: push use of type-safe BFN and MFN into iommu_ops
Date: Thu, 23 Aug 2018 10:47:00 +0100
Message-ID: <20180823094711.21376-4-paul.durrant@citrix.com>
In-Reply-To: <20180823094711.21376-1-paul.durrant@citrix.com>
This patch modifies the methods in struct iommu_ops to use type-safe BFN
and MFN. This follows on from the prior patch that modified the functions
exported in xen/iommu.h.
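For reference, the type-safe wrappers behave roughly as follows (a minimal
sketch only; bfn_t, _bfn() and bfn_x() are assumed to come from the
TYPE_SAFE-style definitions introduced earlier in the series, and the struct
layout shown here is illustrative rather than the exact Xen definition):

  /* A bfn is wrapped in a struct so it cannot silently be mixed up with
   * a gfn or mfn; every conversion must go through _bfn()/bfn_x(). */
  typedef struct { unsigned long bfn; } bfn_t;

  static inline bfn_t _bfn(unsigned long bfn) { return (bfn_t){ bfn }; }
  static inline unsigned long bfn_x(bfn_t bfn) { return bfn.bfn; }

The effect of this patch is to push those explicit conversions down from the
common iommu_map_page()/iommu_unmap_page() wrappers into the vendor-specific
iommu_ops implementations.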
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@citrix.com>
v6:
- Re-base.
v3:
- Remove some use of intermediate 'frame' variables.
v2:
- Addressed comments from Jan.
- Extend use of intermediate 'frame' variable to avoid directly
encapsulating gfn values as bfns or vice versa (see the sketch below).
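A minimal sketch of that intermediate 'frame' variable, taken from the
amd_iommu_reserve_domain_unity_map() hunk below: when the same raw value is
needed in two roles it is kept as a plain unsigned long and wrapped
separately for each use,

  unsigned long frame = gfn + i;  /* raw frame number, not typed yet */

  /* Wrap explicitly for each role rather than converting bfn <-> mfn. */
  rt = amd_iommu_map_page(domain, _bfn(frame), _mfn(frame), flags);

so a gfn-typed value is never encapsulated directly as a bfn or mfn.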
---
xen/drivers/passthrough/amd/iommu_map.c | 46 ++++++++++++++++-----------
xen/drivers/passthrough/amd/pci_amd_iommu.c | 2 +-
xen/drivers/passthrough/arm/smmu.c | 16 +++++-----
xen/drivers/passthrough/iommu.c | 9 +++---
xen/drivers/passthrough/vtd/iommu.c | 26 +++++++--------
xen/drivers/passthrough/x86/iommu.c | 2 +-
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 8 ++---
xen/include/xen/iommu.h | 13 +++++---
8 files changed, 67 insertions(+), 55 deletions(-)
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 4deab9cd2f..5a9a0af320 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -631,7 +631,7 @@ static int update_paging_mode(struct domain *d, unsigned long bfn)
return 0;
}
-int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
+int amd_iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
unsigned int flags)
{
bool_t need_flush = 0;
@@ -651,7 +651,8 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Root table alloc failed, bfn = %lx\n", bfn);
+ AMD_IOMMU_DEBUG("Root table alloc failed, bfn = %"PRI_bfn"\n",
+ bfn_x(bfn));
domain_crash(d);
return rc;
}
@@ -660,25 +661,27 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
* we might need a deeper page table for wider bfn now */
if ( is_hvm_domain(d) )
{
- if ( update_paging_mode(d, bfn) )
+ if ( update_paging_mode(d, bfn_x(bfn)) )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Update page mode failed bfn = %lx\n", bfn);
+ AMD_IOMMU_DEBUG("Update page mode failed bfn = %"PRI_bfn"\n",
+ bfn_x(bfn));
domain_crash(d);
return -EFAULT;
}
}
- if ( iommu_pde_from_bfn(d, bfn, pt_mfn) || (pt_mfn[1] == 0) )
+ if ( iommu_pde_from_bfn(d, bfn_x(bfn), pt_mfn) || (pt_mfn[1] == 0) )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %lx\n", bfn);
+ AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %"PRI_bfn"\n",
+ bfn_x(bfn));
domain_crash(d);
return -EFAULT;
}
/* Install 4k mapping first */
- need_flush = set_iommu_pte_present(pt_mfn[1], bfn, mfn,
+ need_flush = set_iommu_pte_present(pt_mfn[1], bfn_x(bfn), mfn_x(mfn),
IOMMU_PAGING_MODE_LEVEL_1,
!!(flags & IOMMUF_writable),
!!(flags & IOMMUF_readable));
@@ -690,7 +693,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
/* 4K mapping for PV guests never changes,
* no need to flush if we trust non-present bits */
if ( is_hvm_domain(d) )
- amd_iommu_flush_pages(d, bfn, 0);
+ amd_iommu_flush_pages(d, bfn_x(bfn), 0);
for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
merge_level <= hd->arch.paging_mode; merge_level++ )
@@ -698,15 +701,16 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
if ( pt_mfn[merge_level] == 0 )
break;
if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
- bfn, mfn, merge_level) )
+ bfn_x(bfn), mfn_x(mfn), merge_level) )
break;
- if ( iommu_merge_pages(d, pt_mfn[merge_level], bfn,
+ if ( iommu_merge_pages(d, pt_mfn[merge_level], bfn_x(bfn),
flags, merge_level) )
{
spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
- "bfn = %lx mfn = %lx\n", merge_level, bfn, mfn);
+ "bfn = %"PRI_bfn" mfn = %"PRI_mfn"\n",
+ merge_level, bfn_x(bfn), mfn_x(mfn));
domain_crash(d);
return -EFAULT;
}
@@ -720,7 +724,7 @@ out:
return 0;
}
-int amd_iommu_unmap_page(struct domain *d, unsigned long bfn)
+int amd_iommu_unmap_page(struct domain *d, bfn_t bfn)
{
unsigned long pt_mfn[7];
struct domain_iommu *hd = dom_iommu(d);
@@ -742,31 +746,33 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long bfn)
* we might need a deeper page table for lager bfn now */
if ( is_hvm_domain(d) )
{
- int rc = update_paging_mode(d, bfn);
+ int rc = update_paging_mode(d, bfn_x(bfn));
if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Update page mode failed bfn = %lx\n", bfn);
+ AMD_IOMMU_DEBUG("Update page mode failed bfn = %"PRI_bfn"\n",
+ bfn_x(bfn));
if ( rc != -EADDRNOTAVAIL )
domain_crash(d);
return rc;
}
}
- if ( iommu_pde_from_bfn(d, bfn, pt_mfn) || (pt_mfn[1] == 0) )
+ if ( iommu_pde_from_bfn(d, bfn_x(bfn), pt_mfn) || (pt_mfn[1] == 0) )
{
spin_unlock(&hd->arch.mapping_lock);
- AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %lx\n", bfn);
+ AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %"PRI_bfn"\n",
+ bfn_x(bfn));
domain_crash(d);
return -EFAULT;
}
/* mark PTE as 'page not present' */
- clear_iommu_pte_present(pt_mfn[1], bfn);
+ clear_iommu_pte_present(pt_mfn[1], bfn_x(bfn));
spin_unlock(&hd->arch.mapping_lock);
- amd_iommu_flush_pages(d, bfn, 0);
+ amd_iommu_flush_pages(d, bfn_x(bfn), 0);
return 0;
}
@@ -787,7 +793,9 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
gfn = phys_addr >> PAGE_SHIFT;
for ( i = 0; i < npages; i++ )
{
- rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags);
+ unsigned long frame = gfn + i;
+
+ rt = amd_iommu_map_page(domain, _bfn(frame), _mfn(frame), flags);
if ( rt != 0 )
return rt;
}
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index d608631e6e..eea22c3d0d 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -271,7 +271,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
*/
if ( mfn_valid(_mfn(pfn)) )
{
- int ret = amd_iommu_map_page(d, pfn, pfn,
+ int ret = amd_iommu_map_page(d, _bfn(pfn), _mfn(pfn),
IOMMUF_readable|IOMMUF_writable);
if ( !rc )
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 1e4d561b47..221b62a59c 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2550,8 +2550,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
return 0;
}
-static int __must_check arm_smmu_iotlb_flush(struct domain *d,
- unsigned long bfn,
+static int __must_check arm_smmu_iotlb_flush(struct domain *d, bfn_t bfn,
unsigned int page_count)
{
/* ARM SMMU v1 doesn't have flush by VMA and VMID */
@@ -2737,8 +2736,8 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
xfree(xen_domain);
}
-static int __must_check arm_smmu_map_page(struct domain *d, unsigned long bfn,
- unsigned long mfn, unsigned int flags)
+static int __must_check arm_smmu_map_page(struct domain *d, bfn_t bfn,
+ mfn_t mfn, unsigned int flags)
{
p2m_type_t t;
@@ -2751,7 +2750,7 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long bfn,
* function should only be used by gnttab code with gfn == mfn == bfn.
*/
BUG_ON(!is_domain_direct_mapped(d));
- BUG_ON(mfn != bfn);
+ BUG_ON(mfn_x(mfn) != bfn_x(bfn));
/* We only support readable and writable flags */
if (!(flags & (IOMMUF_readable | IOMMUF_writable)))
@@ -2763,10 +2762,11 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long bfn,
* The function guest_physmap_add_entry replaces the current mapping
* if there is already one...
*/
- return guest_physmap_add_entry(d, _gfn(bfn), _mfn(bfn), 0, t);
+ return guest_physmap_add_entry(d, _gfn(bfn_x(bfn)), _mfn(bfn_x(bfn)),
+ 0, t);
}
-static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long bfn)
+static int __must_check arm_smmu_unmap_page(struct domain *d, bfn_t bfn)
{
/*
* This function should only be used by gnttab code when the domain
@@ -2775,7 +2775,7 @@ static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long bfn)
if ( !is_domain_direct_mapped(d) )
return -EINVAL;
- return guest_physmap_remove_page(d, _gfn(bfn), _mfn(bfn), 0);
+ return guest_physmap_remove_page(d, _gfn(bfn_x(bfn)), _mfn(bfn_x(bfn)), 0);
}
static const struct iommu_ops arm_smmu_iommu_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 42b53b15e9..d9ec667945 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -194,7 +194,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- ret = hd->platform_ops->map_page(d, bfn, mfn, mapping);
+ ret = hd->platform_ops->map_page(d, _bfn(bfn), _mfn(mfn),
+ mapping);
if ( !rc )
rc = ret;
@@ -264,7 +265,7 @@ int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, bfn_x(bfn), mfn_x(mfn), flags);
+ rc = hd->platform_ops->map_page(d, bfn, mfn, flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
@@ -287,7 +288,7 @@ int iommu_unmap_page(struct domain *d, bfn_t bfn)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, bfn_x(bfn));
+ rc = hd->platform_ops->unmap_page(d, bfn);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
@@ -327,7 +328,7 @@ int iommu_iotlb_flush(struct domain *d, bfn_t bfn, unsigned int page_count)
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
- rc = hd->platform_ops->iotlb_flush(d, bfn_x(bfn), page_count);
+ rc = hd->platform_ops->iotlb_flush(d, bfn, page_count);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 48f62e0e8d..c9f50f04ad 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -584,8 +584,7 @@ static int __must_check iommu_flush_all(void)
return rc;
}
-static int __must_check iommu_flush_iotlb(struct domain *d,
- unsigned long bfn,
+static int __must_check iommu_flush_iotlb(struct domain *d, bfn_t bfn,
bool_t dma_old_pte_present,
unsigned int page_count)
{
@@ -612,12 +611,12 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
if ( iommu_domid == -1 )
continue;
- if ( page_count != 1 || bfn == bfn_x(INVALID_BFN) )
+ if ( page_count != 1 || bfn_eq(bfn, INVALID_BFN) )
rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
0, flush_dev_iotlb);
else
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
- __bfn_to_baddr(bfn),
+ bfn_to_baddr(bfn),
PAGE_ORDER_4K,
!dma_old_pte_present,
flush_dev_iotlb);
@@ -633,7 +632,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
}
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
- unsigned long bfn,
+ bfn_t bfn,
unsigned int page_count)
{
return iommu_flush_iotlb(d, bfn, 1, page_count);
@@ -641,7 +640,7 @@ static int __must_check iommu_flush_iotlb_pages(struct domain *d,
static int __must_check iommu_flush_iotlb_all(struct domain *d)
{
- return iommu_flush_iotlb(d, bfn_x(INVALID_BFN), 0, 0);
+ return iommu_flush_iotlb(d, INVALID_BFN, 0, 0);
}
/* clear one page's page table */
@@ -676,7 +675,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
if ( !this_cpu(iommu_dont_flush_iotlb) )
- rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
+ rc = iommu_flush_iotlb_pages(domain, baddr_to_bfn(addr), 1);
unmap_vtd_domain_page(page);
@@ -1767,8 +1766,7 @@ static void iommu_domain_teardown(struct domain *d)
}
static int __must_check intel_iommu_map_page(struct domain *d,
- unsigned long bfn,
- unsigned long mfn,
+ bfn_t bfn, mfn_t mfn,
unsigned int flags)
{
struct domain_iommu *hd = dom_iommu(d);
@@ -1786,16 +1784,16 @@ static int __must_check intel_iommu_map_page(struct domain *d,
spin_lock(&hd->arch.mapping_lock);
- pg_maddr = addr_to_dma_page_maddr(d, __bfn_to_baddr(bfn), 1);
+ pg_maddr = addr_to_dma_page_maddr(d, bfn_to_baddr(bfn), 1);
if ( pg_maddr == 0 )
{
spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
- pte = page + (bfn & LEVEL_MASK);
+ pte = page + (bfn_x(bfn) & LEVEL_MASK);
old = *pte;
- dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
+ dma_set_pte_addr(new, mfn_to_maddr(mfn));
dma_set_pte_prot(new,
((flags & IOMMUF_readable) ? DMA_PTE_READ : 0) |
((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
@@ -1823,13 +1821,13 @@ static int __must_check intel_iommu_map_page(struct domain *d,
}
static int __must_check intel_iommu_unmap_page(struct domain *d,
- unsigned long bfn)
+ bfn_t bfn)
{
/* Do nothing if hardware domain and iommu supports pass thru. */
if ( iommu_passthrough && is_hardware_domain(d) )
return 0;
- return dma_pte_clear_one(d, __bfn_to_baddr(bfn));
+ return dma_pte_clear_one(d, bfn_to_baddr(bfn));
}
int iommu_pte_flush(struct domain *d, uint64_t bfn, uint64_t *pte,
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 68182afd91..379882c690 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -65,7 +65,7 @@ int arch_iommu_populate_page_table(struct domain *d)
{
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
BUG_ON(SHARED_M2P(gfn));
- rc = hd->platform_ops->map_page(d, gfn, mfn,
+ rc = hd->platform_ops->map_page(d, _bfn(gfn), _mfn(mfn),
IOMMUF_readable |
IOMMUF_writable);
}
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 99bc21c7b3..dce9ed6b83 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -52,9 +52,9 @@ int amd_iommu_init(void);
int amd_iommu_update_ivrs_mapping_acpi(void);
/* mapping functions */
-int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int flags);
-int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check amd_iommu_map_page(struct domain *d, bfn_t bfn,
+ mfn_t mfn, unsigned int flags);
+int __must_check amd_iommu_unmap_page(struct domain *d, bfn_t bfn);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
@@ -77,7 +77,7 @@ void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
/* send cmd to iommu */
void amd_iommu_flush_all_pages(struct domain *d);
-void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
+void amd_iommu_flush_pages(struct domain *d, unsigned long bfn,
unsigned int order);
void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
uint64_t gaddr, unsigned int order);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 3d4f3e7d26..6edc6e1a10 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -48,6 +48,11 @@ static inline bfn_t bfn_add(bfn_t bfn, unsigned long i)
return _bfn(bfn_x(bfn) + i);
}
+static inline bool_t bfn_eq(bfn_t x, bfn_t y)
+{
+ return bfn_x(x) == bfn_x(y);
+}
+
#define IOMMU_PAGE_SHIFT 12
#define IOMMU_PAGE_SIZE (_AC(1,L) << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))
@@ -182,9 +187,9 @@ struct iommu_ops {
#endif /* HAS_PCI */
void (*teardown)(struct domain *d);
- int __must_check (*map_page)(struct domain *d, unsigned long bfn,
- unsigned long mfn, unsigned int flags);
- int __must_check (*unmap_page)(struct domain *d, unsigned long bfn);
+ int __must_check (*map_page)(struct domain *d, bfn_t bfn, mfn_t mfn,
+ unsigned int flags);
+ int __must_check (*unmap_page)(struct domain *d, bfn_t bfn);
void (*free_page_table)(struct page_info *);
#ifdef CONFIG_X86
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
@@ -195,7 +200,7 @@ struct iommu_ops {
void (*resume)(void);
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
- int __must_check (*iotlb_flush)(struct domain *d, unsigned long bfn,
+ int __must_check (*iotlb_flush)(struct domain *d, bfn_t bfn,
unsigned int page_count);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
--
2.11.0