* [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time
@ 2010-05-23 12:14 Xiao Guangrong
2010-05-23 12:16 ` [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time Xiao Guangrong
` (2 more replies)
0 siblings, 3 replies; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-23 12:14 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
In the current code, a shadow page can become unsync only if it is the
sole shadow page for its gfn; this rule is too strict. In fact, we can
let every last-level mapping page (i.e. the PTE page) become unsync,
and sync them at invlpg or TLB-flush time.
This patch allows more pages to become unsync at gfn mapping time.
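As a self-contained illustration of the rule this introduces (the struct and
helper below are simplified stand-ins, not the kernel's kvm_mmu_page or the
functions in the diff that follows):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for kvm_mmu_page; not the kernel structure. */
struct sp_model {
	int level;	/* 1 == last-level (PTE) shadow page */
	bool direct;
	bool unsync;
};

/*
 * Mirrors the new mmu_need_write_protect()/kvm_unsync_pages() pair for
 * the shadow pages of a single gfn: any non-leaf shadow page forces
 * write protection; otherwise every leaf page may be marked unsync.
 */
static bool need_write_protect(struct sp_model *sp, int n,
			       bool can_unsync, bool oos_shadow)
{
	bool need_unsync = false;
	int i;

	for (i = 0; i < n; i++) {
		if (sp[i].direct)
			continue;
		if (sp[i].level != 1)		/* PT_PAGE_TABLE_LEVEL */
			return true;
		if (!need_unsync && !sp[i].unsync) {
			if (!can_unsync || !oos_shadow)
				return true;
			need_unsync = true;
		}
	}
	if (need_unsync)			/* models kvm_unsync_pages() */
		for (i = 0; i < n; i++)
			if (!sp[i].direct)
				sp[i].unsync = true;
	return false;
}

int main(void)
{
	/* two leaf shadow pages for the same gfn: both may now go unsync */
	struct sp_model sp[2] = { { 1, false, false }, { 1, false, false } };

	printf("write protect: %d\n",
	       need_write_protect(sp, 2, true, true));		/* prints 0 */
	printf("unsync: %d %d\n", sp[0].unsync, sp[1].unsync);	/* prints 1 1 */
	return 0;
}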
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 81 +++++++++++++++++++++++----------------------------
1 files changed, 37 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 97c5217..1c558ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1168,26 +1168,6 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
return __mmu_unsync_walk(sp, pvec);
}
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
-{
- unsigned index;
- struct hlist_head *bucket;
- struct kvm_mmu_page *sp;
- struct hlist_node *node;
-
- pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
- index = kvm_page_table_hashfn(gfn);
- bucket = &kvm->arch.mmu_page_hash[index];
- hlist_for_each_entry(sp, node, bucket, hash_link)
- if (sp->gfn == gfn && !sp->role.direct
- && !sp->role.invalid) {
- pgprintk("%s: found role %x\n",
- __func__, sp->role.word);
- return sp;
- }
- return NULL;
-}
-
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
@@ -1757,47 +1737,60 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
-static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+ trace_kvm_mmu_unsync_page(sp);
+ ++vcpu->kvm->stat.mmu_unsync;
+ sp->unsync = 1;
+
+ kvm_mmu_mark_parents_unsync(sp);
+ mmu_convert_notrap(sp);
+}
+
+static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
- unsigned index;
struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
+ unsigned index;
- index = kvm_page_table_hashfn(sp->gfn);
+ index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
- /* don't unsync if pagetable is shadowed with multiple roles */
+
hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
- if (s->gfn != sp->gfn || s->role.direct)
+ if (s->gfn != gfn || s->role.direct || s->unsync)
continue;
- if (s->role.word != sp->role.word)
- return 1;
+ WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+ __kvm_unsync_page(vcpu, s);
}
- trace_kvm_mmu_unsync_page(sp);
- ++vcpu->kvm->stat.mmu_unsync;
- sp->unsync = 1;
-
- kvm_mmu_mark_parents_unsync(sp);
-
- mmu_convert_notrap(sp);
- return 0;
}
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
bool can_unsync)
{
- struct kvm_mmu_page *shadow;
+ unsigned index;
+ struct hlist_head *bucket;
+ struct kvm_mmu_page *s;
+ struct hlist_node *node, *n;
+ bool need_unsync = false;
+
+ index = kvm_page_table_hashfn(gfn);
+ bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+ hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+ if (s->gfn != gfn || s->role.direct)
+ continue;
- shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
- if (shadow) {
- if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
+ if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
- if (shadow->unsync)
- return 0;
- if (can_unsync && oos_shadow)
- return kvm_unsync_page(vcpu, shadow);
- return 1;
+
+ if (!need_unsync && !s->unsync) {
+ if (!can_unsync || !oos_shadow)
+ return 1;
+ need_unsync = true;
+ }
}
+ if (need_unsync)
+ kvm_unsync_pages(vcpu, gfn);
return 0;
}
--
1.6.1.2
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time
2010-05-23 12:14 [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Xiao Guangrong
@ 2010-05-23 12:16 ` Xiao Guangrong
2010-05-23 14:11 ` Avi Kivity
2010-05-24 7:41 ` [PATCH 2/2 v2] " Xiao Guangrong
2010-05-23 13:51 ` [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Avi Kivity
2010-05-24 7:40 ` [PATCH 1/2 v2] " Xiao Guangrong
2 siblings, 2 replies; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-23 12:16 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Allow more pages to become unsync at kvm_mmu_get_page() time: if we need to
create a new shadow page for a gfn but that page is not allowed to be unsync
(level > 1), we should sync all of the gfn's unsync pages first.
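As an illustration only (simplified stand-ins, not the kernel code in the diff
below, and ignoring the zap-on-failure case), the sync step amounts to:

#include <stdbool.h>

/* Illustrative stand-in for kvm_mmu_page; not the kernel structure. */
struct sp_model {
	int level;	/* 1 == last-level (PTE) shadow page */
	bool unsync;
};

/*
 * Models kvm_sync_pages(): once the gfn has been write-protected, fold
 * every stale (unsync) leaf page of that gfn back into sync state; the
 * caller issues a single TLB flush if anything changed.
 */
static bool sync_pages_of_gfn(struct sp_model *sp, int n)
{
	bool flush = false;
	int i;

	for (i = 0; i < n; i++) {
		if (!sp[i].unsync)
			continue;
		/* only last-level pages ever become unsync */
		sp[i].unsync = false;
		flush = true;
	}
	return flush;
}

kvm_mmu_get_page() only needs this when it is about to create a level > 1
shadow page for the gfn and at least one of the gfn's pages is currently
unsync.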
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 47 +++++++++++++++++++++++++++++++++++++----------
1 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c558ba..fb788fe 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1218,6 +1218,35 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
return __kvm_sync_page(vcpu, sp, true);
}
+/* @gfn should be write-protected at the call site */
+static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct hlist_head *bucket;
+ struct kvm_mmu_page *s;
+ struct hlist_node *node, *n;
+ unsigned index;
+ bool flush = false;
+
+ index = kvm_page_table_hashfn(gfn);
+ bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+ hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+ if (s->gfn != gfn || !s->unsync)
+ continue;
+
+ WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+ if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
+ (vcpu->arch.mmu.sync_page(vcpu, s))) {
+ kvm_mmu_zap_page(vcpu->kvm, s);
+ continue;
+ }
+ kvm_unlink_unsync_page(vcpu->kvm, s);
+ flush = true;
+ }
+
+ if (flush)
+ kvm_mmu_flush_tlb(vcpu);
+}
+
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1316,8 +1345,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
unsigned index;
unsigned quadrant;
struct hlist_head *bucket;
- struct kvm_mmu_page *sp, *unsync_sp = NULL;
+ struct kvm_mmu_page *sp;
struct hlist_node *node, *tmp;
+ bool need_sync = false;
role = vcpu->arch.mmu.base_role;
role.level = level;
@@ -1334,17 +1364,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
if (sp->gfn == gfn) {
- if (sp->unsync)
- unsync_sp = sp;
+ if (!need_sync && sp->unsync)
+ need_sync = true;
if (sp->role.word != role.word)
continue;
- if (!direct && unsync_sp &&
- kvm_sync_page_transient(vcpu, unsync_sp)) {
- unsync_sp = NULL;
+ if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
break;
- }
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
if (sp->unsync_children) {
@@ -1356,9 +1383,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
trace_kvm_mmu_get_page(sp, false);
return sp;
}
- if (!direct && unsync_sp)
- kvm_sync_page(vcpu, unsync_sp);
-
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte);
if (!sp)
@@ -1369,6 +1393,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
if (!direct) {
if (rmap_write_protect(vcpu->kvm, gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
+ if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+ kvm_sync_pages(vcpu, gfn);
+
account_shadowed(vcpu->kvm, gfn);
}
if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
--
1.6.1.2
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time
2010-05-23 12:14 [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Xiao Guangrong
2010-05-23 12:16 ` [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time Xiao Guangrong
@ 2010-05-23 13:51 ` Avi Kivity
2010-05-24 2:03 ` Xiao Guangrong
2010-05-24 7:40 ` [PATCH 1/2 v2] " Xiao Guangrong
2 siblings, 1 reply; 12+ messages in thread
From: Avi Kivity @ 2010-05-23 13:51 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/23/2010 03:14 PM, Xiao Guangrong wrote:
> In the current code, a shadow page can become unsync only if it is the
> sole shadow page for its gfn; this rule is too strict. In fact, we can
> let every last-level mapping page (i.e. the PTE page) become unsync,
> and sync them at invlpg or TLB-flush time.
>
> This patch allows more pages to become unsync at gfn mapping time.
>
>
> +
> +static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
> {
> - unsigned index;
> struct hlist_head *bucket;
> struct kvm_mmu_page *s;
> struct hlist_node *node, *n;
> + unsigned index;
>
> - index = kvm_page_table_hashfn(sp->gfn);
> + index = kvm_page_table_hashfn(gfn);
> bucket = &vcpu->kvm->arch.mmu_page_hash[index];
> - /* don't unsync if pagetable is shadowed with multiple roles */
> +
> hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
> - if (s->gfn != sp->gfn || s->role.direct)
> + if (s->gfn != gfn || s->role.direct || s->unsync)
> continue;
>
role.invalid?
> - if (s->role.word != sp->role.word)
> - return 1;
> + WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
> + __kvm_unsync_page(vcpu, s);
> }
> - trace_kvm_mmu_unsync_page(sp);
> - ++vcpu->kvm->stat.mmu_unsync;
> - sp->unsync = 1;
> -
> - kvm_mmu_mark_parents_unsync(sp);
> -
> - mmu_convert_notrap(sp);
> - return 0;
> }
>
> static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
> bool can_unsync)
> {
> - struct kvm_mmu_page *shadow;
> + unsigned index;
> + struct hlist_head *bucket;
> + struct kvm_mmu_page *s;
> + struct hlist_node *node, *n;
> + bool need_unsync = false;
> +
> + index = kvm_page_table_hashfn(gfn);
> + bucket = &vcpu->kvm->arch.mmu_page_hash[index];
> + hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
> + if (s->gfn != gfn || s->role.direct)
> + continue;
>
> - shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
> - if (shadow) {
> - if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
> + if (s->role.level != PT_PAGE_TABLE_LEVEL)
> return 1;
>
role.invalid?
> - if (shadow->unsync)
> - return 0;
> - if (can_unsync && oos_shadow)
> - return kvm_unsync_page(vcpu, shadow);
> - return 1;
> +
> + if (!need_unsync && !s->unsync) {
> + if (!can_unsync || !oos_shadow)
> + return 1;
> + need_unsync = true;
> + }
> }
> + if (need_unsync)
> + kvm_unsync_pages(vcpu, gfn);
> return 0;
> }
>
>
Looks good, I'm just uncertain about role.invalid handling. What's the
reasoning here?
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time
2010-05-23 12:16 ` [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time Xiao Guangrong
@ 2010-05-23 14:11 ` Avi Kivity
2010-05-24 2:31 ` Xiao Guangrong
2010-05-24 7:41 ` [PATCH 2/2 v2] " Xiao Guangrong
1 sibling, 1 reply; 12+ messages in thread
From: Avi Kivity @ 2010-05-23 14:11 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/23/2010 03:16 PM, Xiao Guangrong wrote:
> Allow more pages to become unsync at kvm_mmu_get_page() time: if we need to
> create a new shadow page for a gfn but that page is not allowed to be unsync
> (level > 1), we should sync all of the gfn's unsync pages first.
>
>
>
> +/* @gfn should be write-protected at the call site */
> +static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
> +{
> + struct hlist_head *bucket;
> + struct kvm_mmu_page *s;
> + struct hlist_node *node, *n;
> + unsigned index;
> + bool flush = false;
> +
> + index = kvm_page_table_hashfn(gfn);
> + bucket = &vcpu->kvm->arch.mmu_page_hash[index];
> + hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
>
role.direct, role.invalid?
Well, role.direct cannot be unsync. But that's not something we want to
rely on.
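(Purely as an illustration of not relying on it, the loop in kvm_sync_pages()
could make the invariant explicit, for example:

	if (WARN_ON(s->role.direct || s->role.invalid))
		continue;

This is only a sketch, not something from the posted patch.)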
This patch looks good too.
Some completely unrelated ideas:
- replace mmu_zap_page() calls in __kvm_sync_page() by setting
role.invalid instead. This reduces problems with the hash list being
modified while we manipulate it.
- add a for_each_shadow_page_direct() { ... } and
for_each_shadow_page_indirect() { ... } to replace the
hlist_for_each_entry_safe()s.
- add kvm_tlb_gather() to reduce IPIs from kvm_mmu_zap_page()
- clear spte.accessed on speculative sptes (for example from invlpg) so
the swapper won't keep them in ram unnecessarily
Again, completely unrelated to this patch set; I'm just writing them down so I
don't forget them and to get your opinion.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time
2010-05-23 13:51 ` [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Avi Kivity
@ 2010-05-24 2:03 ` Xiao Guangrong
2010-05-24 6:24 ` Avi Kivity
0 siblings, 1 reply; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-24 2:03 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
>> + if (need_unsync)
>> + kvm_unsync_pages(vcpu, gfn);
>> return 0;
>> }
>>
>>
>
> Looks good, I'm just uncertain about role.invalid handling. What's the
> reasoning here?
>
Avi,
Thanks for your reply.
We don't need to worry about 'role.invalid' here, since we only allow PTE shadow
pages (role.level == 1) to become unsync, and in the current code 'role.invalid'
is only used for root shadow pages.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time
2010-05-23 14:11 ` Avi Kivity
@ 2010-05-24 2:31 ` Xiao Guangrong
2010-05-24 6:25 ` Avi Kivity
0 siblings, 1 reply; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-24 2:31 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
> On 05/23/2010 03:16 PM, Xiao Guangrong wrote:
>> Allow more pages to become unsync at kvm_mmu_get_page() time: if we need to
>> create a new shadow page for a gfn but that page is not allowed to be unsync
>> (level > 1), we should sync all of the gfn's unsync pages first.
>>
>>
>>
>> +/* @gfn should be write-protected at the call site */
>> +static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
>> +{
>> + struct hlist_head *bucket;
>> + struct kvm_mmu_page *s;
>> + struct hlist_node *node, *n;
>> + unsigned index;
>> + bool flush = false;
>> +
>> + index = kvm_page_table_hashfn(gfn);
>> + bucket = &vcpu->kvm->arch.mmu_page_hash[index];
>> + hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
>>
>
> role.direct, role.invalid?
We only handle unsync pages here, and 'role.direct' or 'role.invalid'
pages can't become unsync.
>
> Well, role.direct cannot be unsync. But that's not something we want to
> rely on.
When we mark a page unsync, we have already filtered out the 'role.direct' pages,
so I think we don't need to worry about 'role.direct' here. :-)
>
> This patch looks good too.
>
> Some completely unrelated ideas:
>
> - replace mmu_zap_page() calls in __kvm_sync_page() by setting
> role.invalid instead. This reduces problems with the hash list being
> modified while we manipulate it.
> - add a for_each_shadow_page_direct() { ... } and
> for_each_shadow_page_indirect() { ... } to replace the
> hlist_for_each_entry_safe()s.
Actually, I have introduced for_each_gfn_sp() to clean this up in my private
development tree. :-)
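(One possible shape for such a helper, shown only as a sketch and not the
actual macro from that tree:

	/* walk every shadow page hashed for @gfn, skipping the rest of the bucket */
	#define for_each_gfn_sp(kvm, sp, gfn, node, n)				\
		hlist_for_each_entry_safe(sp, node, n,				\
			&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], \
			hash_link)						\
			if ((sp)->gfn != (gfn)) {} else

The loops in kvm_unsync_pages() and mmu_need_write_protect() could then start
as "for_each_gfn_sp(vcpu->kvm, s, gfn, node, n) { ... }".)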
> - add kvm_tlb_gather() to reduce IPIs from kvm_mmu_zap_page()
> - clear spte.accessed on speculative sptes (for example from invlpg) so
> the swapper won't keep them in ram unnecessarily
I also noticed this problem
>
> Again, completely unrelated to this patch set; I'm just writing them down so I
> don't forget them and to get your opinion.
>
Your ideas are very valuable, and I'll work on them if you don't have the time. :-)
Thanks,
Xiao
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time
2010-05-24 2:03 ` Xiao Guangrong
@ 2010-05-24 6:24 ` Avi Kivity
2010-05-24 7:14 ` Xiao Guangrong
0 siblings, 1 reply; 12+ messages in thread
From: Avi Kivity @ 2010-05-24 6:24 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/24/2010 05:03 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>
>>> + if (need_unsync)
>>> + kvm_unsync_pages(vcpu, gfn);
>>> return 0;
>>> }
>>>
>>>
>>>
>> Looks good, I'm just uncertain about role.invalid handling. What's the
>> reasoning here?
>>
>>
> Avi,
>
> Thanks for your reply.
>
> We don't need to worry about 'role.invalid' here, since we only allow PTE shadow
> pages (role.level == 1) to become unsync, and in the current code 'role.invalid'
> is only used for root shadow pages.
>
Right, the invlpg change is not in yet. But I think it should be in
this patch; I don't like subtle dependencies, and it will make the
invlpg patch simpler.
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time
2010-05-24 2:31 ` Xiao Guangrong
@ 2010-05-24 6:25 ` Avi Kivity
0 siblings, 0 replies; 12+ messages in thread
From: Avi Kivity @ 2010-05-24 6:25 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/24/2010 05:31 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>> On 05/23/2010 03:16 PM, Xiao Guangrong wrote:
>>
>>> Allow more pages to become unsync at kvm_mmu_get_page() time: if we need to
>>> create a new shadow page for a gfn but that page is not allowed to be unsync
>>> (level > 1), we should sync all of the gfn's unsync pages first.
>>>
>>>
>>>
>>> +/* @gfn should be write-protected at the call site */
>>> +static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
>>> +{
>>> + struct hlist_head *bucket;
>>> + struct kvm_mmu_page *s;
>>> + struct hlist_node *node, *n;
>>> + unsigned index;
>>> + bool flush = false;
>>> +
>>> + index = kvm_page_table_hashfn(gfn);
>>> + bucket = &vcpu->kvm->arch.mmu_page_hash[index];
>>> + hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
>>>
>>>
>> role.direct, role.invalid?
>>
> We only handle unsync pages here, and 'role.direct' or 'role.invalid'
> pages can't become unsync.
>
They will be once the invlpg patch is in...
>
>> Well, role.direct cannot be unsync. But that's not something we want to
>> rely on.
>>
> When we mark a page unsync, we have already filtered out the 'role.direct' pages,
> so I think we don't need to worry about 'role.direct' here. :-)
>
Ok.
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time
2010-05-24 6:24 ` Avi Kivity
@ 2010-05-24 7:14 ` Xiao Guangrong
0 siblings, 0 replies; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-24 7:14 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
> On 05/24/2010 05:03 AM, Xiao Guangrong wrote:
>>
>> Avi Kivity wrote:
>>
>>
>>>> + if (need_unsync)
>>>> + kvm_unsync_pages(vcpu, gfn);
>>>> return 0;
>>>> }
>>>>
>>>>
>>>>
>>> Looks good, I'm just uncertain about role.invalid handling. What's the
>>> reasoning here?
>>>
>>>
>> Avi,
>>
>> Thanks for your reply.
>>
>> We don't need to worry about 'role.invalid' here, since we only allow PTE
>> shadow pages (role.level == 1) to become unsync, and in the current code
>> 'role.invalid' is only used for root shadow pages.
>>
>
> Right, the invlpg change is not in yet. But I think it should be in
> this patch; I don't like subtle dependencies, and it will make the
> invlpg patch simpler.
>
OK, I'll fix those two patches, thanks.
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 1/2 v2] KVM: MMU: allow more page become unsync at gfn mapping time
2010-05-23 12:14 [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Xiao Guangrong
2010-05-23 12:16 ` [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time Xiao Guangrong
2010-05-23 13:51 ` [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Avi Kivity
@ 2010-05-24 7:40 ` Xiao Guangrong
2 siblings, 0 replies; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-24 7:40 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
In the current code, a shadow page can become unsync only if it is the
sole shadow page for its gfn; this rule is too strict. In fact, we can
let every last-level mapping page (i.e. the PTE page) become unsync,
and sync them at invlpg or TLB-flush time.
This patch allows more pages to become unsync at gfn mapping time.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 82 ++++++++++++++++++++++++----------------------------
1 files changed, 38 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 97c5217..170c8f7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1168,26 +1168,6 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
return __mmu_unsync_walk(sp, pvec);
}
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
-{
- unsigned index;
- struct hlist_head *bucket;
- struct kvm_mmu_page *sp;
- struct hlist_node *node;
-
- pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
- index = kvm_page_table_hashfn(gfn);
- bucket = &kvm->arch.mmu_page_hash[index];
- hlist_for_each_entry(sp, node, bucket, hash_link)
- if (sp->gfn == gfn && !sp->role.direct
- && !sp->role.invalid) {
- pgprintk("%s: found role %x\n",
- __func__, sp->role.word);
- return sp;
- }
- return NULL;
-}
-
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
@@ -1757,47 +1737,61 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
-static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+ trace_kvm_mmu_unsync_page(sp);
+ ++vcpu->kvm->stat.mmu_unsync;
+ sp->unsync = 1;
+
+ kvm_mmu_mark_parents_unsync(sp);
+ mmu_convert_notrap(sp);
+}
+
+static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
- unsigned index;
struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
+ unsigned index;
- index = kvm_page_table_hashfn(sp->gfn);
+ index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
- /* don't unsync if pagetable is shadowed with multiple roles */
+
hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
- if (s->gfn != sp->gfn || s->role.direct)
+ if (s->gfn != gfn || s->role.direct || s->unsync ||
+ s->role.invalid)
continue;
- if (s->role.word != sp->role.word)
- return 1;
+ WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+ __kvm_unsync_page(vcpu, s);
}
- trace_kvm_mmu_unsync_page(sp);
- ++vcpu->kvm->stat.mmu_unsync;
- sp->unsync = 1;
-
- kvm_mmu_mark_parents_unsync(sp);
-
- mmu_convert_notrap(sp);
- return 0;
}
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
bool can_unsync)
{
- struct kvm_mmu_page *shadow;
+ unsigned index;
+ struct hlist_head *bucket;
+ struct kvm_mmu_page *s;
+ struct hlist_node *node, *n;
+ bool need_unsync = false;
+
+ index = kvm_page_table_hashfn(gfn);
+ bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+ hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+ if (s->gfn != gfn || s->role.direct || s->role.invalid)
+ continue;
- shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
- if (shadow) {
- if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
+ if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
- if (shadow->unsync)
- return 0;
- if (can_unsync && oos_shadow)
- return kvm_unsync_page(vcpu, shadow);
- return 1;
+
+ if (!need_unsync && !s->unsync) {
+ if (!can_unsync || !oos_shadow)
+ return 1;
+ need_unsync = true;
+ }
}
+ if (need_unsync)
+ kvm_unsync_pages(vcpu, gfn);
return 0;
}
--
1.6.1.2
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 2/2 v2] KVM: MMU: allow more page become unsync at getting sp time
2010-05-23 12:16 ` [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time Xiao Guangrong
2010-05-23 14:11 ` Avi Kivity
@ 2010-05-24 7:41 ` Xiao Guangrong
2010-05-25 9:32 ` Avi Kivity
1 sibling, 1 reply; 12+ messages in thread
From: Xiao Guangrong @ 2010-05-24 7:41 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Allow more pages to become unsync at kvm_mmu_get_page() time: if we need to
create a new shadow page for a gfn but that page is not allowed to be unsync
(level > 1), we should sync all of the gfn's unsync pages first.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 47 +++++++++++++++++++++++++++++++++++++----------
1 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 170c8f7..e3a0ddf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1218,6 +1218,35 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
return __kvm_sync_page(vcpu, sp, true);
}
+/* @gfn should be write-protected at the call site */
+static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct hlist_head *bucket;
+ struct kvm_mmu_page *s;
+ struct hlist_node *node, *n;
+ unsigned index;
+ bool flush = false;
+
+ index = kvm_page_table_hashfn(gfn);
+ bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+ hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+ if (s->gfn != gfn || !s->unsync || s->role.invalid)
+ continue;
+
+ WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+ if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
+ (vcpu->arch.mmu.sync_page(vcpu, s))) {
+ kvm_mmu_zap_page(vcpu->kvm, s);
+ continue;
+ }
+ kvm_unlink_unsync_page(vcpu->kvm, s);
+ flush = true;
+ }
+
+ if (flush)
+ kvm_mmu_flush_tlb(vcpu);
+}
+
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1316,8 +1345,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
unsigned index;
unsigned quadrant;
struct hlist_head *bucket;
- struct kvm_mmu_page *sp, *unsync_sp = NULL;
+ struct kvm_mmu_page *sp;
struct hlist_node *node, *tmp;
+ bool need_sync = false;
role = vcpu->arch.mmu.base_role;
role.level = level;
@@ -1334,17 +1364,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
if (sp->gfn == gfn) {
- if (sp->unsync)
- unsync_sp = sp;
+ if (!need_sync && sp->unsync)
+ need_sync = true;
if (sp->role.word != role.word)
continue;
- if (!direct && unsync_sp &&
- kvm_sync_page_transient(vcpu, unsync_sp)) {
- unsync_sp = NULL;
+ if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
break;
- }
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
if (sp->unsync_children) {
@@ -1356,9 +1383,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
trace_kvm_mmu_get_page(sp, false);
return sp;
}
- if (!direct && unsync_sp)
- kvm_sync_page(vcpu, unsync_sp);
-
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte);
if (!sp)
@@ -1369,6 +1393,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
if (!direct) {
if (rmap_write_protect(vcpu->kvm, gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
+ if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+ kvm_sync_pages(vcpu, gfn);
+
account_shadowed(vcpu->kvm, gfn);
}
if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
--
1.6.1.2
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH 2/2 v2] KVM: MMU: allow more page become unsync at getting sp time
2010-05-24 7:41 ` [PATCH 2/2 v2] " Xiao Guangrong
@ 2010-05-25 9:32 ` Avi Kivity
0 siblings, 0 replies; 12+ messages in thread
From: Avi Kivity @ 2010-05-25 9:32 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/24/2010 10:41 AM, Xiao Guangrong wrote:
> Allow more pages to become unsync at kvm_mmu_get_page() time: if we need to
> create a new shadow page for a gfn but that page is not allowed to be unsync
> (level > 1), we should sync all of the gfn's unsync pages first.
>
Both applied, thanks.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 12+ messages in thread
Thread overview: 12+ messages
2010-05-23 12:14 [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Xiao Guangrong
2010-05-23 12:16 ` [PATCH 2/2] KVM: MMU: allow more page become unsync at getting sp time Xiao Guangrong
2010-05-23 14:11 ` Avi Kivity
2010-05-24 2:31 ` Xiao Guangrong
2010-05-24 6:25 ` Avi Kivity
2010-05-24 7:41 ` [PATCH 2/2 v2] " Xiao Guangrong
2010-05-25 9:32 ` Avi Kivity
2010-05-23 13:51 ` [PATCH 1/2] KVM: MMU: allow more page become unsync at gfn mapping time Avi Kivity
2010-05-24 2:03 ` Xiao Guangrong
2010-05-24 6:24 ` Avi Kivity
2010-05-24 7:14 ` Xiao Guangrong
2010-05-24 7:40 ` [PATCH 1/2 v2] " Xiao Guangrong