From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
To: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: [PATCH v3 06/11] KVM: MMU: cleanup FNAME(invlpg)
Date: Tue, 30 Aug 2011 10:36:19 +0800 [thread overview]
Message-ID: <4E5C4CA3.6030808@cn.fujitsu.com> (raw)
In-Reply-To: <4E5C4C20.3000403@cn.fujitsu.com>
Directly use mmu_page_zap_pte to zap the spte in FNAME(invlpg); also remove the
code duplicated between FNAME(invlpg) and FNAME(sync_page)
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 16 ++++++++++------
arch/x86/kvm/paging_tmpl.h | 42 +++++++++++++++---------------------------
2 files changed, 25 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7ec2a6a..ed3e778 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1808,7 +1808,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
}
}
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *spte)
{
u64 pte;
@@ -1816,17 +1816,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
pte = *spte;
if (is_shadow_present_pte(pte)) {
- if (is_last_spte(pte, sp->role.level))
+ if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte);
- else {
+ if (is_large_pte(pte))
+ --kvm->stat.lpages;
+ } else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte);
}
- } else if (is_mmio_spte(pte))
+ return true;
+ }
+
+ if (is_mmio_spte(pte))
mmu_spte_clear_no_track(spte);
- if (is_large_pte(pte))
- --kvm->stat.lpages;
+ return false;
}
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9299410..7862c05 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -656,6 +656,16 @@ out_unlock:
return 0;
}
+static gpa_t FNAME(get_first_pte_gpa)(struct kvm_mmu_page *sp)
+{
+ int offset = 0;
+
+ if (PTTYPE == 32)
+ offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+ return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
@@ -663,7 +673,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t pte_gpa = -1;
int level;
u64 *sptep;
- int need_flush = 0;
vcpu_clear_mmio_info(vcpu, gva);
@@ -675,36 +684,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
- int offset, shift;
-
if (!sp->unsync)
break;
- shift = PAGE_SHIFT -
- (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
- offset = sp->role.quadrant << shift;
-
- pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+ pte_gpa = FNAME(get_first_pte_gpa)(sp);
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
- if (is_shadow_present_pte(*sptep)) {
- if (is_large_pte(*sptep))
- --vcpu->kvm->stat.lpages;
- drop_spte(vcpu->kvm, sptep);
- need_flush = 1;
- } else if (is_mmio_spte(*sptep))
- mmu_spte_clear_no_track(sptep);
-
- break;
+ if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
- if (need_flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
-
atomic_inc(&vcpu->kvm->arch.invlpg_counter);
spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +762,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
- int i, offset, nr_present;
+ int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
- offset = nr_present = 0;
-
/* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct);
- if (PTTYPE == 32)
- offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
- first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+ first_pte_gpa = FNAME(get_first_pte_gpa)(sp);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
--
1.7.5.4
next prev parent reply other threads:[~2011-08-30 2:34 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-08-30 2:34 [PATCH v3 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
2011-08-30 2:34 ` [PATCH v3 02/11] KVM: x86: tag the instructions which are used to write page table Xiao Guangrong
2011-08-30 2:35 ` [PATCH v3 03/11] KVM: x86: retry non-page-table writing instruction Xiao Guangrong
2011-09-13 10:47 ` Avi Kivity
2011-09-13 18:24 ` Xiao Guangrong
2011-09-14 9:53 ` Avi Kivity
2011-09-14 10:19 ` Xiao Guangrong
2011-09-15 4:56 ` Xiao Guangrong
2011-08-30 2:35 ` [PATCH v3 04/11] KVM: x86: cleanup port-in/port-out emulated Xiao Guangrong
2011-08-30 2:35 ` [PATCH v3 05/11] KVM: MMU: do not mark accessed bit on pte write path Xiao Guangrong
2011-09-13 10:53 ` Avi Kivity
2011-09-13 18:29 ` Xiao Guangrong
2011-09-14 9:55 ` Avi Kivity
2011-09-15 13:11 ` Marcelo Tosatti
2011-08-30 2:36 ` Xiao Guangrong [this message]
2011-09-13 11:00 ` [PATCH v3 06/11] KVM: MMU: cleanup FNAME(invlpg) Avi Kivity
2011-09-13 18:31 ` Xiao Guangrong
2011-09-14 9:57 ` Avi Kivity
2011-08-30 2:36 ` [PATCH v3 07/11] KVM: MMU: fast prefetch spte on invlpg path Xiao Guangrong
2011-08-30 2:37 ` [PATCH v3 08/11] KVM: MMU: remove unnecessary kvm_mmu_free_some_pages Xiao Guangrong
2011-08-30 2:37 ` [PATCH v3 09/11] KVM: MMU: split kvm_mmu_pte_write function Xiao Guangrong
2011-08-30 2:37 ` [PATCH v3 10/11] KVM: MMU: fix detecting misaligned accessed Xiao Guangrong
2011-08-30 2:38 ` [PATCH v3 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
2011-09-13 11:07 ` Avi Kivity
2011-09-13 19:19 ` Xiao Guangrong
2011-09-14 9:59 ` Avi Kivity
2011-09-13 9:51 ` [PATCH v3 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Avi Kivity
2011-09-13 10:24 ` Xiao Guangrong
2011-09-13 10:50 ` Avi Kivity
2011-09-13 19:31 ` Xiao Guangrong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4E5C4CA3.6030808@cn.fujitsu.com \
--to=xiaoguangrong@cn.fujitsu.com \
--cc=avi@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox