* [PATCH 1/3] KVM: MMU: split kvm_sync_page() function
From: Xiao Guangrong @ 2010-05-15 10:51 UTC
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Split kvm_sync_page() into kvm_sync_page() and kvm_sync_page_transient()
to clarify the code, addressing Avi's suggestion.

kvm_sync_page_transient() only updates the shadow page; it does not mark
the page sync and does not write-protect sp->gfn. It will be used by a
later patch.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 29 +++++++++++++++++++++++++----
1 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 48ed42e..549255d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1198,16 +1198,20 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ bool clear_unsync)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)) {
kvm_mmu_zap_page(vcpu->kvm, sp);
return 1;
}
- if (rmap_write_protect(vcpu->kvm, sp->gfn))
- kvm_flush_remote_tlbs(vcpu->kvm);
- kvm_unlink_unsync_page(vcpu->kvm, sp);
+ if (clear_unsync) {
+ if (rmap_write_protect(vcpu->kvm, sp->gfn))
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_unlink_unsync_page(vcpu->kvm, sp);
+ }
+
if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
kvm_mmu_zap_page(vcpu->kvm, sp);
return 1;
@@ -1217,6 +1221,23 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
return 0;
}
+static void mmu_convert_notrap(struct kvm_mmu_page *sp);
+static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *sp)
+{
+ int ret;
+
+ ret = __kvm_sync_page(vcpu, sp, false);
+ if (!ret)
+ mmu_convert_notrap(sp);
+ return ret;
+}
+
+static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+ return __kvm_sync_page(vcpu, sp, true);
+}
+
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
unsigned int idx[PT64_ROOT_LEVEL-1];
--
1.6.1.2
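
A minimal reconstruction of how the three helpers look with this patch
applied, pieced together from the hunks above (the unchanged context
between the two hunks is elided, so treat it as a sketch rather than the
exact file contents):

static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           bool clear_unsync)
{
        if (sp->role.cr4_pae != !!is_pae(vcpu)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }

        if (clear_unsync) {
                /* Stop further guest writes to the page table ... */
                if (rmap_write_protect(vcpu->kvm, sp->gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
                /* ... and take the page off the unsync list. */
                kvm_unlink_unsync_page(vcpu->kvm, sp);
        }

        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }

        /* (unchanged context between the two hunks elided) */
        return 0;
}

/* Transient sync: bring the shadow page up to date, but leave it unsync
 * and do not write-protect sp->gfn. */
static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
                                   struct kvm_mmu_page *sp)
{
        int ret;

        ret = __kvm_sync_page(vcpu, sp, false);
        if (!ret)
                mmu_convert_notrap(sp);
        return ret;
}

/* Full sync: additionally write-protect sp->gfn and clear sp->unsync. */
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        return __kvm_sync_page(vcpu, sp, true);
}
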
* [PATCH 2/3] KVM: MMU: don't write-protect if have new mapping to unsync page
From: Xiao Guangrong @ 2010-05-15 10:52 UTC
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Two cases can happen in kvm_mmu_get_page():

- The wanted sp is already in the cache. If that sp is unsync, we only
  need to update it so the mapping stays valid; there is no need to mark
  it sync or write-protect sp->gfn, since this does not break the unsync
  rule (one shadow page per gfn).

- The wanted sp does not exist yet, so a new sp is created for the gfn.
  The gfn may already have another (unsync) shadow page, so to keep the
  unsync rule we must sync (mark sync and write-protect) the gfn's unsync
  shadow page.

After multiple unsync shadow pages are enabled, we sync those shadow
pages only when the new sp is not allowed to become unsync (the unsync
rule then becomes: all last-level pte pages are allowed to become unsync).
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 18 ++++++++++++++----
1 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 549255d..97c5217 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1336,7 +1336,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
unsigned index;
unsigned quadrant;
struct hlist_head *bucket;
- struct kvm_mmu_page *sp;
+ struct kvm_mmu_page *sp, *unsync_sp = NULL;
struct hlist_node *node, *tmp;
role = vcpu->arch.mmu.base_role;
@@ -1355,20 +1355,30 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
if (sp->gfn == gfn) {
if (sp->unsync)
- if (kvm_sync_page(vcpu, sp))
- continue;
+ unsync_sp = sp;
if (sp->role.word != role.word)
continue;
+ if (!direct && unsync_sp &&
+ kvm_sync_page_transient(vcpu, unsync_sp)) {
+ unsync_sp = NULL;
+ break;
+ }
+
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
if (sp->unsync_children) {
set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
kvm_mmu_mark_parents_unsync(sp);
- }
+ } else if (sp->unsync)
+ kvm_mmu_mark_parents_unsync(sp);
+
trace_kvm_mmu_get_page(sp, false);
return sp;
}
+ if (!direct && unsync_sp)
+ kvm_sync_page(vcpu, unsync_sp);
+
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte);
if (!sp)
--
1.6.1.2
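
Putting the hunks together, the lookup loop in kvm_mmu_get_page() behaves
roughly as below with this patch applied (a sketch assembled from the
diff; the surrounding hash/bucket setup and the allocation path after the
cache miss are elided):

        hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
                if (sp->gfn == gfn) {
                        /* Remember the gfn's unsync page; whether it needs
                         * a transient or a full sync is decided below. */
                        if (sp->unsync)
                                unsync_sp = sp;

                        if (sp->role.word != role.word)
                                continue;

                        /* Case 1: the wanted sp is already cached.  Only
                         * update the unsync page; do not write-protect the
                         * gfn or clear its unsync state. */
                        if (!direct && unsync_sp &&
                              kvm_sync_page_transient(vcpu, unsync_sp)) {
                                unsync_sp = NULL;
                                break;
                        }

                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        if (sp->unsync_children) {
                                set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
                                kvm_mmu_mark_parents_unsync(sp);
                        } else if (sp->unsync)
                                kvm_mmu_mark_parents_unsync(sp);

                        trace_kvm_mmu_get_page(sp, false);
                        return sp;
                }

        /* Case 2: no matching sp, so a new one will be created for the
         * gfn.  To keep the unsync rule, the gfn's existing unsync page
         * must be fully synced (marked sync and write-protected). */
        if (!direct && unsync_sp)
                kvm_sync_page(vcpu, unsync_sp);

        ++vcpu->kvm->stat.mmu_cache_miss;
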
* [PATCH 3/3] KVM: MMU: only update unsync page in invlpg path
From: Xiao Guangrong @ 2010-05-15 10:53 UTC
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Only unsync pages need to be updated at invlpg time, since all other
shadow pages are write-protected.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/paging_tmpl.h | 8 ++++++--
1 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 71c73fe..dfbc8c7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -463,6 +463,7 @@ out_unlock:
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
+ struct kvm_mmu_page *sp;
gpa_t pte_gpa = -1;
int level;
u64 *sptep;
@@ -474,10 +475,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
level = iterator.level;
sptep = iterator.sptep;
+ sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
- struct kvm_mmu_page *sp = page_header(__pa(sptep));
int offset, shift;
+ if (!sp->unsync)
+ break;
+
shift = PAGE_SHIFT -
(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
offset = sp->role.quadrant << shift;
@@ -495,7 +499,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
break;
}
- if (!is_shadow_present_pte(*sptep))
+ if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
--
1.6.1.2
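
For context, the body of FNAME(invlpg)'s shadow walk looks roughly like
this once the patch is applied (a sketch assembled from the hunks; the
enclosing walk loop and the rest of the last-level handling are elided
and assumed unchanged):

        level = iterator.level;
        sptep = iterator.sptep;

        sp = page_header(__pa(sptep));
        if (is_last_spte(*sptep, level)) {
                int offset, shift;

                /* Write-protected shadow pages are kept up to date by the
                 * write-fault path; only unsync pages need their pte
                 * refreshed on invlpg. */
                if (!sp->unsync)
                        break;

                shift = PAGE_SHIFT -
                          (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                offset = sp->role.quadrant << shift;
                /* (unchanged context: pte_gpa is computed here) */
                break;
        }

        /* Only keep walking down subtrees that contain unsync children. */
        if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                break;
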
* Re: [PATCH 1/3] KVM: MMU: split kvm_sync_page() function
From: Avi Kivity @ 2010-05-19 16:19 UTC
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/15/2010 01:51 PM, Xiao Guangrong wrote:
> Split kvm_sync_page() into kvm_sync_page() and kvm_sync_page_transient()
> to clarify the code, addressing Avi's suggestion.
>
> kvm_sync_page_transient() only updates the shadow page; it does not mark
> the page sync and does not write-protect sp->gfn. It will be used by a
> later patch.
>
Applied all three, thanks.
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.