From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Kevin Tian <kevin.tian@intel.com>,
Stefano Stabellini <sstabellini@kernel.org>,
Wei Liu <wei.liu2@citrix.com>,
Jun Nakajima <jun.nakajima@intel.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Ian Jackson <ian.jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
Julien Grall <julien.grall@arm.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 03/13] iommu: make use of type-safe BFN and MFN in exported functions
Date: Sat, 7 Jul 2018 12:05:16 +0100 [thread overview]
Message-ID: <20180707110526.35822-4-paul.durrant@citrix.com> (raw)
In-Reply-To: <20180707110526.35822-1-paul.durrant@citrix.com>
This patch modifies the declarations of the entry points to the IOMMU
sub-system to use bfn_t and mfn_t in place of unsigned long. A subsequent
patch will similarly modify the methods in the iommu_ops structure.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
v2:
- Addressed comments from Jan.
- Use intermediate 'frame' variable to avoid directly encapsulating
mfn or gfn values as bfns.
---
xen/arch/arm/p2m.c | 6 ++++-
xen/arch/x86/mm.c | 11 ++++----
xen/arch/x86/mm/p2m-ept.c | 15 ++++++++---
xen/arch/x86/mm/p2m-pt.c | 49 +++++++++++++++++++++--------------
xen/arch/x86/mm/p2m.c | 24 ++++++++++++-----
xen/arch/x86/x86_64/mm.c | 5 ++--
xen/common/grant_table.c | 16 +++++++-----
xen/common/memory.c | 7 +++--
xen/drivers/passthrough/iommu.c | 25 +++++++++---------
xen/drivers/passthrough/vtd/x86/vtd.c | 3 ++-
xen/include/xen/iommu.h | 14 +++++++---
11 files changed, 110 insertions(+), 65 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index d43c3aa896..42c16da4dd 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -951,7 +951,11 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
if ( need_iommu(p2m->domain) &&
(lpae_valid(orig_pte) || lpae_valid(*entry)) )
- rc = iommu_iotlb_flush(p2m->domain, gfn_x(sgfn), 1UL << page_order);
+ {
+ unsigned long frame = gfn_x(sgfn);
+
+ rc = iommu_iotlb_flush(p2m->domain, _bfn(frame), 1UL << page_order);
+ }
else
rc = 0;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 4629bcaa47..4e66c6742d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2729,14 +2729,15 @@ static int _get_page_type(struct page_info *page, unsigned long type,
struct domain *d = page_get_owner(page);
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
- gfn_t gfn = _gfn(mfn_to_gmfn(d, mfn_x(page_to_mfn(page))));
+ mfn_t mfn = page_to_mfn(page);
+ unsigned long frame = mfn_to_gmfn(d, mfn_x(mfn));
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_ret = iommu_unmap_page(d, gfn_x(gfn));
+ iommu_ret = iommu_unmap_page(d, _bfn(frame));
else if ( type == PGT_writable_page )
- iommu_ret = iommu_map_page(d, gfn_x(gfn),
- mfn_x(page_to_mfn(page)),
- IOMMUF_readable|IOMMUF_writable);
+ iommu_ret = iommu_map_page(d, _bfn(frame), mfn,
+ IOMMUF_readable |
+ IOMMUF_writable);
}
}
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 14b593923b..4951234197 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -866,19 +866,26 @@ out:
if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) &&
need_modify_vtd_table )
{
+ unsigned long frame = gfn;
+
if ( iommu_hap_pt_share )
- rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
+ rc = iommu_pte_flush(d, frame, &ept_entry->epte, order,
+ vtd_pte_present);
else
{
+ bfn_t bfn = _bfn(frame);
+
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
{
- rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ rc = iommu_map_page(d, bfn_add(bfn, i),
+ mfn_add(mfn, i), iommu_flags);
if ( unlikely(rc) )
{
while ( i-- )
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ if ( iommu_unmap_page(p2m->domain,
+ bfn_add(bfn, i)) )
continue;
break;
@@ -887,7 +894,7 @@ out:
else
for ( i = 0; i < (1 << order); i++ )
{
- ret = iommu_unmap_page(d, gfn + i);
+ ret = iommu_unmap_page(d, bfn_add(bfn, i));
if ( !rc )
rc = ret;
}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b8c5d2ed26..467ac8cb76 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -680,36 +680,45 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
if ( iommu_enabled && need_iommu(p2m->domain) &&
(iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
{
+ unsigned long frame = gfn;
+
ASSERT(rc == 0);
if ( iommu_use_hap_pt(p2m->domain) )
{
if ( iommu_old_flags )
- amd_iommu_flush_pages(p2m->domain, gfn, page_order);
+ amd_iommu_flush_pages(p2m->domain, frame, page_order);
}
- else if ( iommu_pte_flags )
- for ( i = 0; i < (1UL << page_order); i++ )
- {
- rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
- if ( unlikely(rc) )
+ else
+ {
+ bfn_t bfn = _bfn(frame);
+
+ if ( iommu_pte_flags )
+ for ( i = 0; i < (1UL << page_order); i++ )
{
- while ( i-- )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain, gfn + i) )
- continue;
+ rc = iommu_map_page(p2m->domain, bfn_add(bfn, i),
+ mfn_add(mfn, i), iommu_pte_flags);
+ if ( unlikely(rc) )
+ {
+ while ( i-- )
+ /* If statement to satisfy __must_check. */
+ if ( iommu_unmap_page(p2m->domain,
+ bfn_add(bfn, i)) )
+ continue;
- break;
+ break;
+ }
}
- }
- else
- for ( i = 0; i < (1UL << page_order); i++ )
- {
- int ret = iommu_unmap_page(p2m->domain, gfn + i);
+ else
+ for ( i = 0; i < (1UL << page_order); i++ )
+ {
+ int ret = iommu_unmap_page(p2m->domain,
+ bfn_add(bfn, i));
- if ( !rc )
- rc = ret;
- }
+ if ( !rc )
+ rc = ret;
+ }
+ }
}
/*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c53cab44d9..ce12bcff42 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -714,9 +714,12 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
if ( need_iommu(p2m->domain) )
{
+ unsigned long frame = mfn;
+ bfn_t bfn = _bfn(frame);
+
for ( i = 0; i < (1 << page_order); i++ )
{
- int ret = iommu_unmap_page(p2m->domain, mfn + i);
+ int ret = iommu_unmap_page(p2m->domain, bfn_add(bfn, i));
if ( !rc )
rc = ret;
@@ -773,16 +776,18 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
{
if ( need_iommu(d) && t == p2m_ram_rw )
{
+ unsigned long frame = mfn_x(mfn);
+ bfn_t bfn = _bfn(frame);
+
for ( i = 0; i < (1 << page_order); i++ )
{
- rc = iommu_map_page(d, mfn_x(mfn_add(mfn, i)),
- mfn_x(mfn_add(mfn, i)),
+ rc = iommu_map_page(d, bfn_add(bfn, i), mfn_add(mfn, i),
IOMMUF_readable|IOMMUF_writable);
if ( rc != 0 )
{
while ( i-- > 0 )
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(d, mfn_x(mfn_add(mfn, i))) )
+ if ( iommu_unmap_page(d, bfn_add(bfn, i)) )
continue;
return rc;
@@ -1155,9 +1160,13 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
if ( !paging_mode_translate(p2m->domain) )
{
+ unsigned long frame = gfn_l;
+
if ( !need_iommu(d) )
return 0;
- return iommu_map_page(d, gfn_l, gfn_l, IOMMUF_readable|IOMMUF_writable);
+
+ return iommu_map_page(d, _bfn(frame), _mfn(frame),
+ IOMMUF_readable | IOMMUF_writable);
}
gfn_lock(p2m, gfn, 0);
@@ -1245,9 +1254,12 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
if ( !paging_mode_translate(d) )
{
+ unsigned long frame = gfn_l;
+
if ( !need_iommu(d) )
return 0;
- return iommu_unmap_page(d, gfn_l);
+
+ return iommu_unmap_page(d, _bfn(frame));
}
gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index cca4ae926e..cc58e4cef4 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1429,13 +1429,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+ if ( iommu_map_page(hardware_domain, _bfn(i), _mfn(i),
+ IOMMUF_readable | IOMMUF_writable) )
break;
if ( i != epfn )
{
while (i-- > old_max)
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(hardware_domain, i) )
+ if ( iommu_unmap_page(hardware_domain, _bfn(i)) )
continue;
goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index d2610e320c..d0926d13e0 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1132,6 +1132,8 @@ map_grant_ref(
need_iommu = gnttab_need_iommu_mapping(ld);
if ( need_iommu )
{
+ unsigned long frame = mfn_x(mfn);
+ bfn_t bfn = _bfn(frame);
unsigned int kind;
int err = 0;
@@ -1144,14 +1146,13 @@ map_grant_ref(
!(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
{
if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, mfn_x(mfn), mfn_x(mfn),
- IOMMUF_readable|IOMMUF_writable);
+ err = iommu_map_page(ld, bfn, mfn,
+ IOMMUF_readable | IOMMUF_writable);
}
else if ( act_pin && !old_pin )
{
if ( !kind )
- err = iommu_map_page(ld, mfn_x(mfn), mfn_x(mfn),
- IOMMUF_readable);
+ err = iommu_map_page(ld, bfn, mfn, IOMMUF_readable);
}
if ( err )
{
@@ -1406,6 +1407,8 @@ unmap_common(
if ( rc == GNTST_okay && gnttab_need_iommu_mapping(ld) )
{
+ unsigned long frame = mfn_x(op->mfn);
+ bfn_t bfn = _bfn(frame);
unsigned int kind;
int err = 0;
@@ -1413,10 +1416,9 @@ unmap_common(
kind = mapkind(lgt, rd, op->mfn);
if ( !kind )
- err = iommu_unmap_page(ld, mfn_x(op->mfn));
+ err = iommu_unmap_page(ld, bfn);
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, mfn_x(op->mfn),
- mfn_x(op->mfn), IOMMUF_readable);
+ err = iommu_map_page(ld, bfn, op->mfn, IOMMUF_readable);
double_gt_unlock(lgt, rgt);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 75010b78a5..e42da275c3 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -829,15 +829,18 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
#ifdef CONFIG_HAS_PASSTHROUGH
if ( need_iommu(d) )
{
+ unsigned long frame;
int ret;
this_cpu(iommu_dont_flush_iotlb) = 0;
- ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+ frame = xatp->idx - done;
+ ret = iommu_iotlb_flush(d, _bfn(frame), done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
- ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+ frame = xatp->gpfn - done;
+ ret = iommu_iotlb_flush(d, _bfn(frame), done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 9a3bb6a43e..2aa0c126d1 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -256,7 +256,7 @@ void iommu_domain_destroy(struct domain *d)
arch_iommu_domain_destroy(d);
}
-int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
+int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
@@ -265,13 +265,13 @@ int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, bfn, mfn, flags);
+ rc = hd->platform_ops->map_page(d, bfn_x(bfn), mfn_x(mfn), flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU mapping bfn %#lx to mfn %#lx failed: %d\n",
- d->domain_id, bfn, mfn, rc);
+ "d%d: IOMMU mapping bfn %"PRI_bfn" to mfn %"PRI_mfn" failed: %d\n",
+ d->domain_id, bfn_x(bfn), mfn_x(mfn), rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -280,7 +280,7 @@ int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
return rc;
}
-int iommu_unmap_page(struct domain *d, unsigned long bfn)
+int iommu_unmap_page(struct domain *d, bfn_t bfn)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -288,13 +288,13 @@ int iommu_unmap_page(struct domain *d, unsigned long bfn)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, bfn);
+ rc = hd->platform_ops->unmap_page(d, bfn_x(bfn));
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU unmapping bfn %#lx failed: %d\n",
- d->domain_id, bfn, rc);
+ "d%d: IOMMU unmapping bfn %"PRI_bfn" failed: %d\n",
+ d->domain_id, bfn_x(bfn), rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -320,8 +320,7 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(), &cpu_online_map));
}
-int iommu_iotlb_flush(struct domain *d, unsigned long bfn,
- unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, bfn_t bfn, unsigned int page_count)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -329,13 +328,13 @@ int iommu_iotlb_flush(struct domain *d, unsigned long bfn,
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
- rc = hd->platform_ops->iotlb_flush(d, bfn, page_count);
+ rc = hd->platform_ops->iotlb_flush(d, bfn_x(bfn), page_count);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU IOTLB flush failed: %d, bfn %#lx, page count %u\n",
- d->domain_id, rc, bfn, page_count);
+ "d%d: IOMMU IOTLB flush failed: %d, bfn %"PRI_bfn", page count %u\n",
+ d->domain_id, rc, bfn_x(bfn), page_count);
if ( !is_hardware_domain(d) )
domain_crash(d);
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index cc2bfea162..dc37dce4b6 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -155,7 +155,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
{
- int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+ int ret = iommu_map_page(d, _bfn(pfn * tmp + j),
+ _mfn(pfn * tmp + j),
IOMMUF_readable|IOMMUF_writable);
if ( !rc )
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 14ada0c14e..a3c36c1148 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -24,6 +24,7 @@
#include <xen/spinlock.h>
#include <xen/pci.h>
#include <xen/typesafe.h>
+#include <xen/mm.h>
#include <public/hvm/ioreq.h>
#include <public/domctl.h>
#include <asm/device.h>
@@ -42,6 +43,11 @@ TYPE_SAFE(uint64_t, bfn);
#undef bfn_x
#endif
+static inline bfn_t bfn_add(bfn_t bfn, unsigned long i)
+{
+ return _bfn(bfn_x(bfn) + i);
+}
+
#define IOMMU_PAGE_SHIFT 12
#define IOMMU_PAGE_SIZE (_AC(1,L) << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))
@@ -86,9 +92,9 @@ void iommu_teardown(struct domain *d);
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, unsigned long bfn,
- unsigned long mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, unsigned long bfn);
+int __must_check iommu_map_page(struct domain *d, bfn_t bfn,
+ mfn_t mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, bfn_t bfn);
enum iommu_feature
{
@@ -213,7 +219,7 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-int __must_check iommu_iotlb_flush(struct domain *d, unsigned long bfn,
+int __must_check iommu_iotlb_flush(struct domain *d, bfn_t bfn,
unsigned int page_count);
int __must_check iommu_iotlb_flush_all(struct domain *d);
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2018-07-07 11:05 UTC|newest]
Thread overview: 47+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-07-07 11:05 [PATCH v2 00/13] paravirtual IOMMU interface Paul Durrant
2018-07-07 11:05 ` [PATCH v2 01/13] grant_table: use term 'mfn' for machine frame numbers Paul Durrant
2018-07-10 13:19 ` George Dunlap
2018-07-11 8:31 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 02/13] iommu: introduce the concept of BFN Paul Durrant
2018-07-10 13:47 ` George Dunlap
2018-07-10 14:08 ` Paul Durrant
2018-07-10 14:18 ` Jan Beulich
2018-07-07 11:05 ` Paul Durrant [this message]
2018-07-10 14:00 ` [PATCH v2 03/13] iommu: make use of type-safe BFN and MFN in exported functions George Dunlap
2018-07-10 14:10 ` Paul Durrant
2018-07-10 14:28 ` Jan Beulich
2018-07-10 14:37 ` Paul Durrant
2018-07-10 16:13 ` George Dunlap
2018-07-10 16:18 ` Paul Durrant
2018-07-10 16:19 ` George Dunlap
2018-07-11 7:57 ` Jan Beulich
2018-07-11 7:59 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 04/13] iommu: push use of type-safe BFN and MFN into iommu_ops Paul Durrant
2018-07-10 16:38 ` George Dunlap
2018-07-07 11:05 ` [PATCH v2 05/13] iommu: don't domain_crash() inside iommu_map/unmap_page() Paul Durrant
2018-07-10 16:49 ` George Dunlap
2018-07-16 14:09 ` Wei Liu
2018-07-07 11:05 ` [PATCH v2 06/13] public / x86: introduce __HYPERCALL_iommu_op Paul Durrant
2018-07-11 9:09 ` George Dunlap
2018-07-16 10:00 ` Paul Durrant
2018-07-16 14:14 ` Wei Liu
2018-07-16 14:17 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 07/13] iommu: track reserved ranges using a rangeset Paul Durrant
2018-07-11 9:16 ` George Dunlap
2018-07-16 10:21 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 08/13] x86: add iommu_op to query reserved ranges Paul Durrant
2018-07-11 10:34 ` George Dunlap
2018-07-11 12:21 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 09/13] vtd: add lookup_page method to iommu_ops Paul Durrant
2018-07-11 10:51 ` George Dunlap
2018-07-11 12:25 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 10/13] x86: add iommu_op to enable modification of IOMMU mappings Paul Durrant
2018-07-07 11:05 ` [PATCH v2 11/13] memory: add get_paged_gfn() as a wrapper Paul Durrant
2018-07-11 11:24 ` George Dunlap
2018-07-11 12:31 ` Paul Durrant
2018-07-11 13:04 ` George Dunlap
2018-07-11 13:09 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 12/13] x86: add iommu_ops to modify and flush IOMMU mappings Paul Durrant
2018-07-11 11:46 ` George Dunlap
2018-07-11 12:36 ` Paul Durrant
2018-07-07 11:05 ` [PATCH v2 13/13] x86: extend the map and unmap iommu_ops to support grant references Paul Durrant
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180707110526.35822-4-paul.durrant@citrix.com \
--to=paul.durrant@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=george.dunlap@eu.citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=julien.grall@arm.com \
--cc=jun.nakajima@intel.com \
--cc=kevin.tian@intel.com \
--cc=sstabellini@kernel.org \
--cc=tim@xen.org \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).