From: Quan Xu <quan.xu@intel.com>
To: xen-devel@lists.xen.org
Cc: Kevin Tian <kevin.tian@intel.com>, Keir Fraser <keir@xen.org>,
Jan Beulich <jbeulich@suse.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
dario.faggioli@citrix.com, Jun Nakajima <jun.nakajima@intel.com>,
Quan Xu <quan.xu@intel.com>
Subject: [PATCH v5 03/10] IOMMU/MMU: enhance the call trees of IOMMU unmapping and mapping
Date: Wed, 18 May 2016 16:08:24 +0800
Message-ID: <1463558911-98187-4-git-send-email-quan.xu@intel.com>
In-Reply-To: <1463558911-98187-1-git-send-email-quan.xu@intel.com>

When IOMMU mapping fails, we issue a best effort rollback: stop
further IOMMU mapping, unmap the pages already mapped, and then
report the error up the call trees. When rollback is not feasible
for the hardware domain (in the early initialization phase, or as a
trade-off against complexity), we proceed on a best effort basis and
only log an error message. IOMMU unmapping, by contrast, continues
despite errors, doing best effort cleanup while still returning the
first error encountered.
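
The mapping hunks below all follow the same pattern, summarized by
this sketch (illustrative only, not code from this patch; map_range()
is a hypothetical helper, while iommu_map_page() and
iommu_unmap_page() are the real interfaces, used here with the same
signatures as in the hunks below):

    /* Map a gfn/mfn range; on failure, undo the maps done so far. */
    static int map_range(struct domain *d, unsigned long gfn,
                         unsigned long mfn, unsigned int count,
                         unsigned int flags)
    {
        unsigned int i;
        int rc = 0;

        for ( i = 0; i < count; i++ )
        {
            rc = iommu_map_page(d, gfn + i, mfn + i, flags);
            if ( unlikely(rc) )
            {
                /* Best effort rollback of the pages already mapped. */
                while ( i-- )
                    iommu_unmap_page(d, gfn + i);
                break;
            }
        }

        return rc;
    }

The unmapping counterpart instead records the first error and keeps
going, so that as much of the range as possible is still unmapped.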
Signed-off-by: Quan Xu <quan.xu@intel.com>
CC: Keir Fraser <keir@xen.org>
CC: Jan Beulich <jbeulich@suse.com>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
xen/arch/x86/mm.c | 18 +++++++++++-------
xen/arch/x86/mm/p2m-ept.c | 37 +++++++++++++++++++++++++++++--------
xen/arch/x86/mm/p2m-pt.c | 28 ++++++++++++++++++++++++----
xen/arch/x86/mm/p2m.c | 34 ++++++++++++++++++++++++++++------
xen/drivers/passthrough/iommu.c | 15 ++++++++++++++-
xen/include/asm-x86/p2m.h | 12 +++++++-----
6 files changed, 113 insertions(+), 31 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 03a4d5b..fee3b9d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2463,11 +2463,12 @@ static int __put_page_type(struct page_info *page,
}
-static int __get_page_type(struct page_info *page, unsigned long type,
- int preemptible)
+static int __must_check __get_page_type(struct page_info *page,
+ unsigned long type,
+ int preemptible)
{
unsigned long nx, x, y = page->u.inuse.type_info;
- int rc = 0;
+ int rc = 0, ret = 0;
ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
@@ -2578,11 +2579,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
else if ( type == PGT_writable_page )
- iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
}
}
@@ -2599,6 +2600,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( (x & PGT_partial) && !(nx & PGT_partial) )
put_page(page);
+ if ( !rc )
+ rc = ret;
+
return rc;
}
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1ed5b47..7d4809f 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -658,7 +658,7 @@ bool_t ept_handle_misconfig(uint64_t gpa)
*
* Returns: 0 for success, -errno for failure
*/
-static int
+static int __must_check
ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma,
int sve)
@@ -667,6 +667,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned long gfn_remainder = gfn;
unsigned int i, target = order / EPT_TABLE_ORDER;
int ret, rc = 0;
+ bool_t entry_written = 0;
bool_t direct_mmio = (p2mt == p2m_mmio_direct);
uint8_t ipat = 0;
bool_t need_modify_vtd_table = 1;
@@ -812,10 +813,15 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
rc = atomic_write_ept_entry(ept_entry, new_entry, target);
if ( unlikely(rc) )
old_entry.epte = 0;
- else if ( p2mt != p2m_invalid &&
- (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
- /* Track the highest gfn for which we have ever had a valid mapping */
- p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+ else
+ {
+ entry_written = 1;
+
+ if ( p2mt != p2m_invalid &&
+ (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+ /* Track the highest gfn for which we have ever had a valid mapping */
+ p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+ }
out:
if ( needs_sync )
@@ -831,10 +837,25 @@ out:
{
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
- iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ {
+ rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+
+ if ( unlikely(rc) )
+ {
+ while ( i-- )
+ iommu_unmap_page(p2m->domain, gfn + i);
+
+ break;
+ }
+ }
else
for ( i = 0; i < (1 << order); i++ )
- iommu_unmap_page(d, gfn + i);
+ {
+ ret = iommu_unmap_page(d, gfn + i);
+
+ if ( !rc && unlikely(ret) )
+ rc = ret;
+ }
}
}
@@ -847,7 +868,7 @@ out:
if ( is_epte_present(&old_entry) )
ept_free_entry(p2m, &old_entry, target);
- if ( rc == 0 && p2m_is_hostp2m(p2m) )
+ if ( entry_written && p2m_is_hostp2m(p2m) )
p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
return rc;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..b50bd01 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -485,7 +485,7 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
}
/* Returns: 0 for success, -errno for failure */
-static int
+static int __must_check
p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
int sve)
@@ -673,6 +673,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
if ( iommu_enabled && need_iommu(p2m->domain) &&
(iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
{
+ int ret;
+
if ( iommu_use_hap_pt(p2m->domain) )
{
if ( iommu_old_flags )
@@ -680,11 +682,29 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
}
else if ( iommu_pte_flags )
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ {
+ ret = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+ iommu_pte_flags);
+
+ if ( unlikely(ret) )
+ {
+ while ( i-- )
+ iommu_unmap_page(p2m->domain, gfn + i);
+
+ if ( !rc )
+ rc = ret;
+
+ break;
+ }
+ }
else
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_unmap_page(p2m->domain, gfn + i);
+ {
+ ret = iommu_unmap_page(p2m->domain, gfn + i);
+
+ if ( !rc && unlikely(ret) )
+ rc = ret;
+ }
}
/*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 94eabf6..6e33987 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -630,7 +630,7 @@ void p2m_final_teardown(struct domain *d)
}
-static int
+static int __must_check
p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
unsigned int page_order)
{
@@ -641,10 +641,22 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
if ( !paging_mode_translate(p2m->domain) )
{
+ int rc = 0;
+
if ( need_iommu(p2m->domain) )
+ {
for ( i = 0; i < (1 << page_order); i++ )
- iommu_unmap_page(p2m->domain, mfn + i);
- return 0;
+ {
+ int ret;
+
+ ret = iommu_unmap_page(p2m->domain, mfn + i);
+
+ if ( !rc && unlikely(ret) )
+ rc = ret;
+ }
+ }
+
+ return rc;
}
ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -798,7 +810,14 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
ogfn , mfn_x(omfn));
if ( mfn_x(omfn) == (mfn + i) )
- p2m_remove_page(p2m, ogfn, mfn + i, 0);
+ {
+ int ret;
+
+ ret = p2m_remove_page(p2m, ogfn, mfn + i, 0);
+
+ if ( !rc && unlikely(ret) )
+ rc = ret;
+ }
}
}
}
@@ -2513,9 +2532,12 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
if ( gfn_x(new_gfn) == INVALID_GFN )
{
- if ( mfn_valid(mfn) )
- p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
rc = 0;
+
+ if ( mfn_valid(mfn) )
+ rc = p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn),
+ PAGE_ORDER_4K);
+
goto out;
}
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 7c70306..e04db16 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -171,20 +171,33 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
{
struct page_info *page;
unsigned int i = 0;
+ int rc = 0;
+
page_list_for_each ( page, &d->page_list )
{
unsigned long mfn = page_to_mfn(page);
unsigned long gfn = mfn_to_gmfn(d, mfn);
unsigned int mapping = IOMMUF_readable;
+ int ret;
if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
((page->u.inuse.type_info & PGT_type_mask)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+ ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+ if ( unlikely(ret) )
+ rc = ret;
+
if ( !(i++ & 0xfffff) )
process_pending_softirqs();
}
+
+ if ( rc )
+ printk(XENLOG_WARNING
+ "d%d: IOMMU mapping failed for hardware domain: %d\n",
+ d->domain_id, rc);
}
return hd->platform_ops->hwdom_init(d);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 65675a2..dd51380 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -545,9 +545,11 @@ void p2m_teardown(struct p2m_domain *p2m);
void p2m_final_teardown(struct domain *d);
/* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int page_order,
- p2m_type_t t);
+int __must_check guest_physmap_add_entry(struct domain *d,
+ unsigned long gfn,
+ unsigned long mfn,
+ unsigned int page_order,
+ p2m_type_t t);
/* Untyped version for RAM only, for compatibility */
static inline int guest_physmap_add_page(struct domain *d,
@@ -808,8 +810,8 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx);
int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx);
/* Change a gfn->mfn mapping */
-int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
- gfn_t old_gfn, gfn_t new_gfn);
+int __must_check p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
+ gfn_t old_gfn, gfn_t new_gfn);
/* Propagate a host p2m change to all alternate p2m's */
void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
--
1.9.1