From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
To: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
KVM list <kvm@vger.kernel.org>,
LKML <linux-kernel@vger.kernel.org>
Subject: [PATCH 6/6] KVM MMU: optimize synchronization shadow pages
Date: Mon, 12 Apr 2010 16:06:25 +0800 [thread overview]
Message-ID: <4BC2D481.9060101@cn.fujitsu.com> (raw)
In-Reply-To: <4BC2D2E2.1030604@cn.fujitsu.com>
- chain all unsync shadow pages so that we can fetch them quickly
- flush the local/remote TLB only after all shadow pages have been synced
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu.c | 82 ++++++++++++++++++---------------------
2 files changed, 39 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d463bc6..ae543fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -207,6 +207,7 @@ struct kvm_mmu_page {
#define MMU_PAGE_UNSYNC 0x2
unsigned int flags;
unsigned int unsync_children;
+ struct list_head unsync_link;
union {
u64 *parent_pte; /* !multimapped */
struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 18eceb2..fcb6299 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -177,6 +177,8 @@ typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
+static struct list_head unsync_mmu_page_list =
+ LIST_HEAD_INIT(unsync_mmu_page_list);
static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
@@ -950,6 +952,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
sp->flags = 0;
sp->parent_pte = parent_pte;
+ INIT_LIST_HEAD(&sp->unsync_link);
--vcpu->kvm->arch.n_free_mmu_pages;
return sp;
}
@@ -1200,12 +1203,14 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!mmu_page_is_unsync(sp));
mmu_page_clear_unsync(sp);
+ list_del(&sp->unsync_link);
--kvm->stat.mmu_unsync;
}
static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ bool *flush_local_tlb, bool *flush_remote_tlb)
{
if (sp->role.glevels != vcpu->arch.mmu.root_level) {
kvm_mmu_zap_page(vcpu->kvm, sp);
@@ -1214,17 +1219,31 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
trace_kvm_mmu_sync_page(sp);
if (rmap_write_protect(vcpu->kvm, sp->gfn))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ *flush_remote_tlb = true;
kvm_unlink_unsync_page(vcpu->kvm, sp);
if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
kvm_mmu_zap_page(vcpu->kvm, sp);
return 1;
}
- kvm_mmu_flush_tlb(vcpu);
+ *flush_local_tlb = true;
return 0;
}
+static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+ bool flush_local_tlb = false, flush_remote_tlb = false;
+ int ret;
+
+ ret = __kvm_sync_page(vcpu, sp, &flush_local_tlb, &flush_remote_tlb);
+ if (flush_local_tlb)
+ kvm_mmu_flush_tlb(vcpu);
+ if (flush_remote_tlb)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
+ return ret;
+}
+
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1284,31 +1303,24 @@ static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
pvec->nr = 0;
}
-static void mmu_sync_children(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *parent)
+static void mmu_sync_pages(struct kvm_vcpu *vcpu)
{
- int i;
- struct kvm_mmu_page *sp;
- struct mmu_page_path parents;
- struct kvm_mmu_pages pages;
-
- kvm_mmu_pages_init(parent, &parents, &pages);
- while (mmu_unsync_walk(parent, &pages)) {
- int protected = 0;
-
- for_each_sp(pages, sp, parents, i)
- protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
+ struct list_head *p, *next;
+ bool flush_local_tlb = false, flush_remote_tlb = false;
- if (protected)
- kvm_flush_remote_tlbs(vcpu->kvm);
+ if (list_empty(&unsync_mmu_page_list))
+ return;
- for_each_sp(pages, sp, parents, i) {
- kvm_sync_page(vcpu, sp);
- mmu_pages_clear_parents(&parents);
- }
- cond_resched_lock(&vcpu->kvm->mmu_lock);
- kvm_mmu_pages_init(parent, &parents, &pages);
+ list_for_each_safe(p, next, &unsync_mmu_page_list) {
+ struct kvm_mmu_page *sp;
+ sp = list_entry(p, struct kvm_mmu_page, unsync_link);
+ __kvm_sync_page(vcpu, sp, &flush_local_tlb, &flush_remote_tlb);
}
+
+ if (flush_local_tlb)
+ kvm_mmu_flush_tlb(vcpu);
+ if (flush_remote_tlb)
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
@@ -1762,6 +1774,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
}
++vcpu->kvm->stat.mmu_unsync;
mmu_page_mark_unsync(sp);
+ list_add(&sp->unsync_link, &unsync_mmu_page_list);
kvm_mmu_mark_parents_unsync(sp);
mmu_convert_notrap(sp);
@@ -2121,26 +2134,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
- int i;
- struct kvm_mmu_page *sp;
-
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
- return;
- if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
- sp = page_header(root);
- mmu_sync_children(vcpu, sp);
- return;
- }
- for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
-
- if (root && VALID_PAGE(root)) {
- root &= PT64_BASE_ADDR_MASK;
- sp = page_header(root);
- mmu_sync_children(vcpu, sp);
- }
- }
+ mmu_sync_pages(vcpu);
}
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
--
1.6.1.2
next prev parent reply other threads:[~2010-04-12 8:09 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-04-12 7:59 [PATCH 1/6] KVM MMU: remove unused struct Xiao Guangrong
2010-04-12 8:01 ` [PATCH 2/6] KVM MMU: fix kvm_mmu_zap_page() and its calling path Xiao Guangrong
2010-04-12 8:24 ` Avi Kivity
2010-04-12 8:53 ` Xiao Guangrong
2010-04-12 9:08 ` Avi Kivity
2010-04-12 9:22 ` Xiao Guangrong
2010-04-12 10:25 ` Avi Kivity
2010-04-12 12:22 ` Xiao Guangrong
2010-04-12 12:49 ` Avi Kivity
2010-04-12 17:10 ` Marcelo Tosatti
2010-04-13 1:34 ` Xiao Guangrong
2010-04-13 14:59 ` Marcelo Tosatti
2010-04-14 2:14 ` Xiao Guangrong
2010-04-14 16:31 ` Marcelo Tosatti
2010-04-12 8:02 ` [PATCH 3/6] KVM MMU: optimize/cleanup for marking parent unsync Xiao Guangrong
2010-04-12 8:32 ` Avi Kivity
2010-04-12 8:55 ` Xiao Guangrong
2010-04-12 17:12 ` Marcelo Tosatti
2010-04-13 1:53 ` Xiao Guangrong
2010-04-13 11:58 ` Avi Kivity
2010-04-13 15:01 ` Marcelo Tosatti
2010-04-14 3:23 ` Xiao Guangrong
2010-04-14 3:58 ` Xiao Guangrong
2010-04-14 16:35 ` Marcelo Tosatti
2010-04-12 8:03 ` [PATCH 4/6] KVM MMU: optimize for writing cr4 Xiao Guangrong
2010-04-12 8:34 ` Avi Kivity
2010-04-12 10:42 ` Xiao Guangrong
2010-04-12 11:22 ` Avi Kivity
2010-04-13 3:07 ` Xiao Guangrong
2010-04-13 6:42 ` Avi Kivity
2010-04-12 8:05 ` [PATCH 5/6] KVM MMU: reduce kvm_mmu_page size Xiao Guangrong
2010-04-12 8:36 ` Avi Kivity
2010-04-12 11:11 ` Xiao Guangrong
2010-04-12 8:06 ` Xiao Guangrong [this message]
2010-04-12 8:43 ` [PATCH 6/6] KVM MMU: optimize synchronization shadow pages Avi Kivity
2010-04-12 11:14 ` Xiao Guangrong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4BC2D481.9060101@cn.fujitsu.com \
--to=xiaoguangrong@cn.fujitsu.com \
--cc=avi@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox