* [PATCH 0/7] Simplify and fix fetch()
@ 2010-07-11 15:42 Avi Kivity
2010-07-11 15:42 ` [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper Avi Kivity
` (6 more replies)
0 siblings, 7 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
fetch() has become a monster, with a zillion continues, breaks, and gotos.
Simplify it before Xiao adds even more.
Also fix the gpte validation race.
Avi Kivity (7):
KVM: MMU: Add link_shadow_page() helper
KVM: MMU: Use __set_spte to link shadow pages
KVM: MMU: Add drop_spte_if_large() helper
KVM: MMU: Add validate_direct_spte() helper
KVM: MMU: Add validate_indirect_spte() helper
KVM: MMU: Simplify spte fetch() function
KVM: MMU: Validate all gptes during fetch, not just those used for
new pages
arch/x86/kvm/mmu.c | 41 +++++++++++++
arch/x86/kvm/paging_tmpl.h | 135 +++++++++++++++++++++-----------------------
2 files changed, 105 insertions(+), 71 deletions(-)
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-12 4:58 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 2/7] KVM: MMU: Use __set_spte to link shadow pages Avi Kivity
` (5 subsequent siblings)
6 siblings, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
To simplify the process of fetching an spte, add a helper that links
a shadow page to an spte.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 10 ++++++++++
arch/x86/kvm/paging_tmpl.h | 7 ++-----
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b93b94f..ae35853 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1482,6 +1482,16 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
--iterator->level;
}
+static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
+{
+ u64 spte;
+
+ spte = __pa(sp->spt)
+ | PT_PRESENT_MASK | PT_ACCESSED_MASK
+ | PT_WRITABLE_MASK | PT_USER_MASK;
+ *sptep = spte;
+}
+
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6daeacf..4606c88 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -309,7 +309,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *sp;
- u64 spte, *sptep = NULL;
+ u64 *sptep = NULL;
int direct;
gfn_t table_gfn;
int r;
@@ -394,10 +394,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
}
}
- spte = __pa(sp->spt)
- | PT_PRESENT_MASK | PT_ACCESSED_MASK
- | PT_WRITABLE_MASK | PT_USER_MASK;
- *sptep = spte;
+ link_shadow_page(sptep, sp);
}
return sptep;
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH 2/7] KVM: MMU: Use __set_spte to link shadow pages
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
2010-07-11 15:42 ` [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-12 4:58 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 3/7] KVM: MMU: Add drop_spte_if_large() helper Avi Kivity
` (4 subsequent siblings)
6 siblings, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
To avoid split accesses to 64 bit sptes on i386, use __set_spte() to link
shadow pages together.
(not technically required since shadow pages are __GFP_ZERO, so upper 32
bits are always clear)
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ae35853..75cfb79 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1489,7 +1489,7 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
spte = __pa(sp->spt)
| PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
- *sptep = spte;
+ __set_spte(sptep, spte);
}
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH 3/7] KVM: MMU: Add drop_spte_if_large() helper
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
2010-07-11 15:42 ` [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper Avi Kivity
2010-07-11 15:42 ` [PATCH 2/7] KVM: MMU: Use __set_spte to link shadow pages Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-12 4:59 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 4/7] KVM: MMU: Add validate_direct_spte() helper Avi Kivity
` (3 subsequent siblings)
6 siblings, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
To clarify spte fetching code, move large spte handling into a helper.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 8 ++++++++
arch/x86/kvm/paging_tmpl.h | 5 +----
2 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 75cfb79..c02cbe1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1492,6 +1492,14 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
__set_spte(sptep, spte);
}
+static void drop_spte_if_large(struct kvm_vcpu *vcpu, u64 *sptep)
+{
+ if (is_large_pte(*sptep)) {
+ drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ }
+}
+
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4606c88..2befea7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -360,10 +360,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
kvm_flush_remote_tlbs(vcpu->kvm);
}
- if (is_large_pte(*sptep)) {
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
- kvm_flush_remote_tlbs(vcpu->kvm);
- }
+ drop_spte_if_large(vcpu, sptep);
if (level <= gw->level) {
direct = 1;
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH 4/7] KVM: MMU: Add validate_direct_spte() helper
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
` (2 preceding siblings ...)
2010-07-11 15:42 ` [PATCH 3/7] KVM: MMU: Add drop_spte_if_large() helper Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-12 5:00 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper Avi Kivity
` (2 subsequent siblings)
6 siblings, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
Add a helper to verify that a direct shadow page is valid wrt the required
access permissions; drop the page if it is not valid.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 23 +++++++++++++++++++++++
arch/x86/kvm/paging_tmpl.h | 27 ++++++---------------------
2 files changed, 29 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c02cbe1..e2dfdba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1500,6 +1500,29 @@ static void drop_spte_if_large(struct kvm_vcpu *vcpu, u64 *sptep)
}
}
+static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+ unsigned direct_access)
+{
+ if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
+ struct kvm_mmu_page *child;
+
+ /*
+ * For the direct sp, if the guest pte's dirty bit
+ * changed form clean to dirty, it will corrupt the
+ * sp's access: allow writable in the read-only sp,
+ * so we should update the spte at this point to get
+ * a new sp with the correct access.
+ */
+ child = page_header(*sptep & PT64_BASE_ADDR_MASK);
+ if (child->role.access == direct_access)
+ return;
+
+ mmu_page_remove_parent_pte(child, sptep);
+ __set_spte(sptep, shadow_trap_nonpresent_pte);
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ }
+}
+
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 2befea7..cacb4f2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -338,30 +338,15 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
break;
}
- if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
- struct kvm_mmu_page *child;
-
- if (level != gw->level)
- continue;
-
- /*
- * For the direct sp, if the guest pte's dirty bit
- * changed form clean to dirty, it will corrupt the
- * sp's access: allow writable in the read-only sp,
- * so we should update the spte at this point to get
- * a new sp with the correct access.
- */
- child = page_header(*sptep & PT64_BASE_ADDR_MASK);
- if (child->role.access == direct_access)
- continue;
-
- mmu_page_remove_parent_pte(child, sptep);
- __set_spte(sptep, shadow_trap_nonpresent_pte);
- kvm_flush_remote_tlbs(vcpu->kvm);
- }
+ if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
+ && level == gw->level)
+ validate_direct_spte(vcpu, sptep, direct_access);
drop_spte_if_large(vcpu, sptep);
+ if (is_shadow_present_pte(*sptep))
+ continue;
+
if (level <= gw->level) {
direct = 1;
access = direct_access;
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
` (3 preceding siblings ...)
2010-07-11 15:42 ` [PATCH 4/7] KVM: MMU: Add validate_direct_spte() helper Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-12 5:01 ` Xiao Guangrong
2010-07-12 5:12 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 6/7] KVM: MMU: Simplify spte fetch() function Avi Kivity
2010-07-11 15:42 ` [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages Avi Kivity
6 siblings, 2 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
Move the code to validate an indirect shadow page (by verifying that the gpte
has not changed since it was fetched) into a helper.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/paging_tmpl.h | 29 ++++++++++++++++++++---------
1 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index cacb4f2..72f54fe 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -299,6 +299,23 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
gpte_to_gfn(gpte), pfn, true, true);
}
+static bool FNAME(validate_indirect_spte)(struct kvm_vcpu *vcpu,
+ u64 *sptep, struct kvm_mmu_page *sp,
+ struct guest_walker *gw, int level)
+{
+ int r;
+ pt_element_t curr_pte;
+
+ r = kvm_read_guest_atomic(vcpu->kvm,
+ gw->pte_gpa[level - 2],
+ &curr_pte, sizeof(curr_pte));
+ if (r || curr_pte != gw->ptes[level - 2]) {
+ kvm_mmu_put_page(sp, sptep);
+ return false;
+ }
+ return true;
+}
+
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
*/
@@ -312,11 +329,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
u64 *sptep = NULL;
int direct;
gfn_t table_gfn;
- int r;
int level;
bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
unsigned direct_access;
- pt_element_t curr_pte;
struct kvm_shadow_walk_iterator iterator;
if (!is_present_gpte(gw->ptes[gw->level - 1]))
@@ -364,17 +379,13 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
}
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
direct, access, sptep);
- if (!direct) {
- r = kvm_read_guest_atomic(vcpu->kvm,
- gw->pte_gpa[level - 2],
- &curr_pte, sizeof(curr_pte));
- if (r || curr_pte != gw->ptes[level - 2]) {
- kvm_mmu_put_page(sp, sptep);
+ if (!direct)
+ if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
+ gw, level)) {
kvm_release_pfn_clean(pfn);
sptep = NULL;
break;
}
- }
link_shadow_page(sptep, sp);
}
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH 6/7] KVM: MMU: Simplify spte fetch() function
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
` (4 preceding siblings ...)
2010-07-11 15:42 ` [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-11 16:08 ` Avi Kivity
2010-07-12 5:08 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages Avi Kivity
6 siblings, 2 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
Partition the function into three sections:
- fetching indirect shadow pages (host_level > guest_level)
- fetching direct shadow pages (page_level < host_level <= guest_level)
- the final spte (page_level == host_level)
Instead of the current spaghetti.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/paging_tmpl.h | 77 ++++++++++++++++++++++---------------------
1 files changed, 39 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 72f54fe..a7f8295 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -327,9 +327,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
unsigned access = gw->pt_access;
struct kvm_mmu_page *sp;
u64 *sptep = NULL;
- int direct;
- gfn_t table_gfn;
- int level;
+ int uninitialized_var(level);
bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
unsigned direct_access;
struct kvm_shadow_walk_iterator iterator;
@@ -341,55 +339,58 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (!dirty)
direct_access &= ~ACC_WRITE_MASK;
- for_each_shadow_entry(vcpu, addr, iterator) {
+ for (shadow_walk_init(&iterator, vcpu, addr);
+ shadow_walk_okay(&iterator) && iterator.level > gw->level;
+ shadow_walk_next(&iterator)) {
+ gfn_t table_gfn;
+
level = iterator.level;
sptep = iterator.sptep;
- if (iterator.level == hlevel) {
- mmu_set_spte(vcpu, sptep, access,
- gw->pte_access & access,
- user_fault, write_fault,
- dirty, ptwrite, level,
- gw->gfn, pfn, false, true);
- break;
+
+ drop_spte_if_large(vcpu, sptep);
+
+ if (is_shadow_present_pte(*sptep))
+ continue;
+
+ table_gfn = gw->table_gfn[level - 2];
+ sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+ false, access, sptep);
+
+ if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
+ gw, level)) {
+ kvm_release_pfn_clean(pfn);
+ return NULL;
}
- if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
- && level == gw->level)
- validate_direct_spte(vcpu, sptep, direct_access);
+ link_shadow_page(sptep, sp);
+ }
+
+ for (;
+ shadow_walk_okay(&iterator) && iterator.level > hlevel;
+ shadow_walk_next(&iterator)) {
+ gfn_t direct_gfn;
+
+ level = iterator.level;
+ sptep = iterator.sptep;
drop_spte_if_large(vcpu, sptep);
if (is_shadow_present_pte(*sptep))
continue;
- if (level <= gw->level) {
- direct = 1;
- access = direct_access;
-
- /*
- * It is a large guest pages backed by small host pages,
- * So we set @direct(@sp->role.direct)=1, and set
- * @table_gfn(@sp->gfn)=the base page frame for linear
- * translations.
- */
- table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
- } else {
- direct = 0;
- table_gfn = gw->table_gfn[level - 2];
- }
- sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
- direct, access, sptep);
- if (!direct)
- if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
- gw, level)) {
- kvm_release_pfn_clean(pfn);
- sptep = NULL;
- break;
- }
+ validate_direct_spte(vcpu, sptep, direct_access);
+
+ direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+ sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
+ true, direct_access, sptep);
link_shadow_page(sptep, sp);
}
+ mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+ user_fault, write_fault, dirty, ptwrite, level,
+ gw->gfn, pfn, false, true);
+
return sptep;
}
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
` (5 preceding siblings ...)
2010-07-11 15:42 ` [PATCH 6/7] KVM: MMU: Simplify spte fetch() function Avi Kivity
@ 2010-07-11 15:42 ` Avi Kivity
2010-07-12 5:10 ` Xiao Guangrong
2010-07-12 8:58 ` Avi Kivity
6 siblings, 2 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 15:42 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
Currently, when we fetch an spte, we only verify that gptes match those that
the walker saw if we build new shadow pages for them.
However, this misses the following race:
vcpu1 vcpu2
walk
change gpte
walk
instantiate sp
fetch existing sp
Fix by validating every gpte, regardless of whether it is used for building
a new sp or not.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/paging_tmpl.h | 18 ++++++++++--------
1 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a7f8295..4bbd0c7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -325,7 +325,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
int *ptwrite, pfn_t pfn)
{
unsigned access = gw->pt_access;
- struct kvm_mmu_page *sp;
+ struct kvm_mmu_page *uninitialized_var(sp);
u64 *sptep = NULL;
int uninitialized_var(level);
bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
@@ -343,18 +343,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_okay(&iterator) && iterator.level > gw->level;
shadow_walk_next(&iterator)) {
gfn_t table_gfn;
+ bool new_page = false;
level = iterator.level;
sptep = iterator.sptep;
drop_spte_if_large(vcpu, sptep);
- if (is_shadow_present_pte(*sptep))
- continue;
-
- table_gfn = gw->table_gfn[level - 2];
- sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
- false, access, sptep);
+ if (!is_shadow_present_pte(*sptep)) {
+ table_gfn = gw->table_gfn[level - 2];
+ sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+ false, access, sptep);
+ new_page = true;
+ }
if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
gw, level)) {
@@ -362,7 +363,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
return NULL;
}
- link_shadow_page(sptep, sp);
+ if (new_page)
+ link_shadow_page(sptep, sp);
}
for (;
--
1.7.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [PATCH 6/7] KVM: MMU: Simplify spte fetch() function
2010-07-11 15:42 ` [PATCH 6/7] KVM: MMU: Simplify spte fetch() function Avi Kivity
@ 2010-07-11 16:08 ` Avi Kivity
2010-07-12 5:10 ` Xiao Guangrong
2010-07-12 5:08 ` Xiao Guangrong
1 sibling, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2010-07-11 16:08 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
On 07/11/2010 06:42 PM, Avi Kivity wrote:
> Partition the function into three sections:
>
> - fetching indirect shadow pages (host_level > guest_level)
> - fetching direct shadow pages (page_level < host_level <= guest_level)
> - the final spte (page_level == host_level)
>
> Instead of the current spaghetti.
>
> +
> + for (;
> + shadow_walk_okay(&iterator)&& iterator.level> hlevel;
> + shadow_walk_next(&iterator)) {
> + gfn_t direct_gfn;
> +
> + level = iterator.level;
> + sptep = iterator.sptep;
>
> drop_spte_if_large(vcpu, sptep);
>
> if (is_shadow_present_pte(*sptep))
> continue;
>
> - if (level<= gw->level) {
> - direct = 1;
> - access = direct_access;
> -
> - /*
> - * It is a large guest pages backed by small host pages,
> - * So we set @direct(@sp->role.direct)=1, and set
> - * @table_gfn(@sp->gfn)=the base page frame for linear
> - * translations.
> - */
> - table_gfn = gw->gfn& ~(KVM_PAGES_PER_HPAGE(level) - 1);
> - } else {
> - direct = 0;
> - table_gfn = gw->table_gfn[level - 2];
> - }
> - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
> - direct, access, sptep);
> - if (!direct)
> - if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
> - gw, level)) {
> - kvm_release_pfn_clean(pfn);
> - sptep = NULL;
> - break;
> - }
> + validate_direct_spte(vcpu, sptep, direct_access);
> +
> + direct_gfn = gw->gfn& ~(KVM_PAGES_PER_HPAGE(level) - 1);
>
> + sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
> + true, direct_access, sptep);
> link_shadow_page(sptep, sp);
> }
>
>
validate_direct_spte() should be before the 'continue' above, probably.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper
2010-07-11 15:42 ` [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper Avi Kivity
@ 2010-07-12 4:58 ` Xiao Guangrong
0 siblings, 0 replies; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 4:58 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> To simplify the process of fetching an spte, add a helper that links
> a shadow page to an spte.
>
Reviewed-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 2/7] KVM: MMU: Use __set_spte to link shadow pages
2010-07-11 15:42 ` [PATCH 2/7] KVM: MMU: Use __set_spte to link shadow pages Avi Kivity
@ 2010-07-12 4:58 ` Xiao Guangrong
0 siblings, 0 replies; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 4:58 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> To avoid split accesses to 64 bit sptes on i386, use __set_spte() to link
> shadow pages together.
>
> (not technically required since shadow pages are __GFP_ZERO, so upper 32
> bits are always clear)
>
Reviewed-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 3/7] KVM: MMU: Add drop_spte_if_large() helper
2010-07-11 15:42 ` [PATCH 3/7] KVM: MMU: Add drop_spte_if_large() helper Avi Kivity
@ 2010-07-12 4:59 ` Xiao Guangrong
0 siblings, 0 replies; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 4:59 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> To clarify spte fetching code, move large spte handling into a helper.
>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> ---
> arch/x86/kvm/mmu.c | 8 ++++++++
> arch/x86/kvm/paging_tmpl.h | 5 +----
> 2 files changed, 9 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 75cfb79..c02cbe1 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1492,6 +1492,14 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
> __set_spte(sptep, spte);
> }
>
> +static void drop_spte_if_large(struct kvm_vcpu *vcpu, u64 *sptep)
> +{
> + if (is_large_pte(*sptep)) {
> + drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
> + kvm_flush_remote_tlbs(vcpu->kvm);
> + }
> +}
> +
Maybe drop_large_spte() is better?
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 4/7] KVM: MMU: Add validate_direct_spte() helper
2010-07-11 15:42 ` [PATCH 4/7] KVM: MMU: Add validate_direct_spte() helper Avi Kivity
@ 2010-07-12 5:00 ` Xiao Guangrong
0 siblings, 0 replies; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 5:00 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> Add a helper to verify that a direct shadow page is valid wrt the required
> access permissions; drop the page if it is not valid.
>
Reviewed-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper
2010-07-11 15:42 ` [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper Avi Kivity
@ 2010-07-12 5:01 ` Xiao Guangrong
2010-07-12 5:12 ` Xiao Guangrong
1 sibling, 0 replies; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 5:01 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> Move the code to validate an indirect shadow page (by verifying that the gpte
> has not changed since it was fetched) into a helper.
>
Reviewed-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 6/7] KVM: MMU: Simplify spte fetch() function
2010-07-11 15:42 ` [PATCH 6/7] KVM: MMU: Simplify spte fetch() function Avi Kivity
2010-07-11 16:08 ` Avi Kivity
@ 2010-07-12 5:08 ` Xiao Guangrong
2010-07-12 8:53 ` Avi Kivity
1 sibling, 1 reply; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 5:08 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> - if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
> - && level == gw->level)
> - validate_direct_spte(vcpu, sptep, direct_access);
......
> + link_shadow_page(sptep, sp);
> + }
> +
> + for (;
> + shadow_walk_okay(&iterator) && iterator.level > hlevel;
> + shadow_walk_next(&iterator)) {
> + gfn_t direct_gfn;
> +
> + level = iterator.level;
> + sptep = iterator.sptep;
>
> drop_spte_if_large(vcpu, sptep);
>
> if (is_shadow_present_pte(*sptep))
> continue;
>
> - if (level <= gw->level) {
> - direct = 1;
> - access = direct_access;
> -
> - /*
> - * It is a large guest pages backed by small host pages,
> - * So we set @direct(@sp->role.direct)=1, and set
> - * @table_gfn(@sp->gfn)=the base page frame for linear
> - * translations.
> - */
> - table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
> - } else {
> - direct = 0;
> - table_gfn = gw->table_gfn[level - 2];
> - }
> - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
> - direct, access, sptep);
> - if (!direct)
> - if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
> - gw, level)) {
> - kvm_release_pfn_clean(pfn);
> - sptep = NULL;
> - break;
> - }
> + validate_direct_spte(vcpu, sptep, direct_access);
Need validate_direct_spte() only when 'level == gw->level'
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages
2010-07-11 15:42 ` [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages Avi Kivity
@ 2010-07-12 5:10 ` Xiao Guangrong
2010-07-12 8:58 ` Avi Kivity
1 sibling, 0 replies; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 5:10 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> Currently, when we fetch an spte, we only verify that gptes match those that
> the walker saw if we build new shadow pages for them.
>
> However, this misses the following race:
>
> vcpu1 vcpu2
>
> walk
> change gpte
> walk
> instantiate sp
>
> fetch existing sp
>
> Fix by validating every gpte, regardless of whether it is used for building
> a new sp or not.
>
Reviewed-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 6/7] KVM: MMU: Simplify spte fetch() function
2010-07-11 16:08 ` Avi Kivity
@ 2010-07-12 5:10 ` Xiao Guangrong
2010-07-12 8:52 ` Avi Kivity
0 siblings, 1 reply; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 5:10 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
>> - }
>> + validate_direct_spte(vcpu, sptep, direct_access);
>> +
>> + direct_gfn = gw->gfn& ~(KVM_PAGES_PER_HPAGE(level) - 1);
>>
>> + sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
>> + true, direct_access, sptep);
>> link_shadow_page(sptep, sp);
>> }
>>
>>
>
> validate_direct_spte() should be before the 'continue' above, probably.
>
Sure :-)
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper
2010-07-11 15:42 ` [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper Avi Kivity
2010-07-12 5:01 ` Xiao Guangrong
@ 2010-07-12 5:12 ` Xiao Guangrong
2010-07-12 8:50 ` Avi Kivity
1 sibling, 1 reply; 22+ messages in thread
From: Xiao Guangrong @ 2010-07-12 5:12 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
Avi Kivity wrote:
> Move the code to validate an indirect shadow page (by verifying that the gpte
> has not changed since it was fetched) into a helper.
>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> ---
> arch/x86/kvm/paging_tmpl.h | 29 ++++++++++++++++++++---------
> 1 files changed, 20 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index cacb4f2..72f54fe 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -299,6 +299,23 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> gpte_to_gfn(gpte), pfn, true, true);
> }
>
> +static bool FNAME(validate_indirect_spte)(struct kvm_vcpu *vcpu,
> + u64 *sptep, struct kvm_mmu_page *sp,
> + struct guest_walker *gw, int level)
> +{
> + int r;
> + pt_element_t curr_pte;
> +
> + r = kvm_read_guest_atomic(vcpu->kvm,
> + gw->pte_gpa[level - 2],
> + &curr_pte, sizeof(curr_pte));
> + if (r || curr_pte != gw->ptes[level - 2]) {
> + kvm_mmu_put_page(sp, sptep);
> + return false;
I think it's 'level - 1' here for checking 'level''s mapping
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper
2010-07-12 5:12 ` Xiao Guangrong
@ 2010-07-12 8:50 ` Avi Kivity
0 siblings, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-12 8:50 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, kvm
On 07/12/2010 08:12 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>> Move the code to validate an indirect shadow page (by verifying that the gpte
>> has not changed since it was fetched) into a helper.
>>
>> Signed-off-by: Avi Kivity<avi@redhat.com>
>> ---
>> arch/x86/kvm/paging_tmpl.h | 29 ++++++++++++++++++++---------
>> 1 files changed, 20 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
>> index cacb4f2..72f54fe 100644
>> --- a/arch/x86/kvm/paging_tmpl.h
>> +++ b/arch/x86/kvm/paging_tmpl.h
>> @@ -299,6 +299,23 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>> gpte_to_gfn(gpte), pfn, true, true);
>> }
>>
>> +static bool FNAME(validate_indirect_spte)(struct kvm_vcpu *vcpu,
>> + u64 *sptep, struct kvm_mmu_page *sp,
>> + struct guest_walker *gw, int level)
>> +{
>> + int r;
>> + pt_element_t curr_pte;
>> +
>> + r = kvm_read_guest_atomic(vcpu->kvm,
>> + gw->pte_gpa[level - 2],
>> + &curr_pte, sizeof(curr_pte));
>> + if (r || curr_pte != gw->ptes[level - 2]) {
>> + kvm_mmu_put_page(sp, sptep);
>> + return false;
>>
> I think it's 'level - 1' here for checking 'level''s mapping
>
Well, I'm just moving code around. If it needs to be fixed, that's
another patch.
I think the logic is, we just write-protected the shadow page for level
L-1, so we need to verify that the gpte we write protected hasn't
changed. That gpte is in level L - 1, and it is stored in
gw->gptes[level - 2].
Of course, that logic is wrong, we need to check all gptes, since
between the guest walk and FNAME(fetch)() the shadow pages and gptes may
have changed several times. I'll fix that later.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 6/7] KVM: MMU: Simplify spte fetch() function
2010-07-12 5:10 ` Xiao Guangrong
@ 2010-07-12 8:52 ` Avi Kivity
0 siblings, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-12 8:52 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, kvm
On 07/12/2010 08:10 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>
>>> - }
>>> + validate_direct_spte(vcpu, sptep, direct_access);
>>> +
>>> + direct_gfn = gw->gfn& ~(KVM_PAGES_PER_HPAGE(level) - 1);
>>>
>>> + sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
>>> + true, direct_access, sptep);
>>> link_shadow_page(sptep, sp);
>>> }
>>>
>>>
>>>
>> validate_direct_spte() should be before the 'continue' above, probably.
>>
>>
> Sure :-)
>
Actually, I think this matches current code. So I'll change it in a
separate patch.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 6/7] KVM: MMU: Simplify spte fetch() function
2010-07-12 5:08 ` Xiao Guangrong
@ 2010-07-12 8:53 ` Avi Kivity
0 siblings, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-12 8:53 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, kvm
On 07/12/2010 08:08 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>
>> - if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
>> - && level == gw->level)
>> - validate_direct_spte(vcpu, sptep, direct_access);
>>
> ......
>
>
>> + link_shadow_page(sptep, sp);
>> + }
>> +
>> + for (;
>> + shadow_walk_okay(&iterator) && iterator.level > hlevel;
>> + shadow_walk_next(&iterator)) {
>> + gfn_t direct_gfn;
>> +
>> + level = iterator.level;
>> + sptep = iterator.sptep;
>>
>> drop_spte_if_large(vcpu, sptep);
>>
>> if (is_shadow_present_pte(*sptep))
>> continue;
>>
>> - if (level <= gw->level) {
>> - direct = 1;
>> - access = direct_access;
>> -
>> - /*
>> - * It is a large guest pages backed by small host pages,
>> - * So we set @direct(@sp->role.direct)=1, and set
>> - * @table_gfn(@sp->gfn)=the base page frame for linear
>> - * translations.
>> - */
>> - table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
>> - } else {
>> - direct = 0;
>> - table_gfn = gw->table_gfn[level - 2];
>> - }
>> - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
>> - direct, access, sptep);
>> - if (!direct)
>> - if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
>> - gw, level)) {
>> - kvm_release_pfn_clean(pfn);
>> - sptep = NULL;
>> - break;
>> - }
>> + validate_direct_spte(vcpu, sptep, direct_access);
>>
> Need validate_direct_spte() only when 'level == gw->level'
>
>
That is true (and this is a change from the current code), but level <
gw->level will be very rare (1G pages backed by 4k pages) so I don't
mind the extra check.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages
2010-07-11 15:42 ` [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages Avi Kivity
2010-07-12 5:10 ` Xiao Guangrong
@ 2010-07-12 8:58 ` Avi Kivity
1 sibling, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2010-07-12 8:58 UTC (permalink / raw)
To: Xiao Guangrong, Marcelo Tosatti, kvm
On 07/11/2010 06:42 PM, Avi Kivity wrote:
> Currently, when we fetch an spte, we only verify that gptes match those that
> the walker saw if we build new shadow pages for them.
>
> However, this misses the following race:
>
> vcpu1 vcpu2
>
> walk
> change gpte
> walk
> instantiate sp
>
> fetch existing sp
>
> Fix by validating every gpte, regardless of whether it is used for building
> a new sp or not.
>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> ---
> arch/x86/kvm/paging_tmpl.h | 18 ++++++++++--------
> 1 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index a7f8295..4bbd0c7 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -325,7 +325,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
> int *ptwrite, pfn_t pfn)
> {
> unsigned access = gw->pt_access;
> - struct kvm_mmu_page *sp;
> + struct kvm_mmu_page *uninitialized_var(sp);
> u64 *sptep = NULL;
> int uninitialized_var(level);
> bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
> @@ -343,18 +343,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
> shadow_walk_okay(&iterator) && iterator.level > gw->level;
> shadow_walk_next(&iterator)) {
> gfn_t table_gfn;
> + bool new_page = false;
>
> level = iterator.level;
> sptep = iterator.sptep;
>
> drop_spte_if_large(vcpu, sptep);
>
> - if (is_shadow_present_pte(*sptep))
> - continue;
>
See, this gets dropped.
> -
> - table_gfn = gw->table_gfn[level - 2];
> - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
> - false, access, sptep);
> + if (!is_shadow_present_pte(*sptep)) {
> + table_gfn = gw->table_gfn[level - 2];
> + sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
> + false, access, sptep);
> + new_page = true;
> + }
>
> if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
> gw, level)) {
>
Now we need to change validate_indirect_spte() to account for all levels.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
end of thread, other threads:[~2010-07-12 8:58 UTC | newest]
Thread overview: 22+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-07-11 15:42 [PATCH 0/7] Simplify and fix fetch() Avi Kivity
2010-07-11 15:42 ` [PATCH 1/7] KVM: MMU: Add link_shadow_page() helper Avi Kivity
2010-07-12 4:58 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 2/7] KVM: MMU: Use __set_spte to link shadow pages Avi Kivity
2010-07-12 4:58 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 3/7] KVM: MMU: Add drop_spte_if_large() helper Avi Kivity
2010-07-12 4:59 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 4/7] KVM: MMU: Add validate_direct_spte() helper Avi Kivity
2010-07-12 5:00 ` Xiao Guangrong
2010-07-11 15:42 ` [PATCH 5/7] KVM: MMU: Add validate_indirect_spte() helper Avi Kivity
2010-07-12 5:01 ` Xiao Guangrong
2010-07-12 5:12 ` Xiao Guangrong
2010-07-12 8:50 ` Avi Kivity
2010-07-11 15:42 ` [PATCH 6/7] KVM: MMU: Simplify spte fetch() function Avi Kivity
2010-07-11 16:08 ` Avi Kivity
2010-07-12 5:10 ` Xiao Guangrong
2010-07-12 8:52 ` Avi Kivity
2010-07-12 5:08 ` Xiao Guangrong
2010-07-12 8:53 ` Avi Kivity
2010-07-11 15:42 ` [PATCH 7/7] KVM: MMU: Validate all gptes during fetch, not just those used for new pages Avi Kivity
2010-07-12 5:10 ` Xiao Guangrong
2010-07-12 8:58 ` Avi Kivity
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox