From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751114Ab0EGEBx (ORCPT ); Fri, 7 May 2010 00:01:53 -0400 Received: from cn.fujitsu.com ([222.73.24.84]:58701 "EHLO song.cn.fujitsu.com" rhost-flags-OK-FAIL-OK-OK) by vger.kernel.org with ESMTP id S1750773Ab0EGEBw (ORCPT ); Fri, 7 May 2010 00:01:52 -0400 Message-ID: <4BE38FF1.3030603@cn.fujitsu.com> Date: Fri, 07 May 2010 11:58:41 +0800 From: Xiao Guangrong User-Agent: Thunderbird 2.0.0.24 (Windows/20100228) MIME-Version: 1.0 To: Avi Kivity CC: Marcelo Tosatti , KVM list , LKML Subject: [PATCH v5 6/9] KVM MMU: support keeping sp live while it's out of protection References: <4BE2818A.5000301@cn.fujitsu.com> <4BE28C6B.8010505@cn.fujitsu.com> In-Reply-To: <4BE28C6B.8010505@cn.fujitsu.com> Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 7bit Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org If we want to keep sp live while it's out of kvm->mmu_lock protection, we can increase sp->active_count. Then, an invalid page is no longer only an active root but may also be an unsync sp, so we should filter those out when we make a page unsync. Also, move 'hlist_del(&sp->hash_link)' into kvm_mmu_free_page(), so that we can free an invalid unsync page by calling kvm_mmu_free_page() directly. 
Signed-off-by: Xiao Guangrong --- arch/x86/kvm/mmu.c | 11 +++++++---- 1 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4077a9c..2d3347c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -894,6 +894,7 @@ static int is_empty_shadow_page(u64 *spt) static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp) { ASSERT(is_empty_shadow_page(sp->spt)); + hlist_del(&sp->hash_link); list_del(&sp->link); __free_page(virt_to_page(sp->spt)); __free_page(virt_to_page(sp->gfns)); @@ -1542,12 +1543,13 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) if (!sp->active_count) { /* Count self */ ret++; - hlist_del(&sp->hash_link); kvm_mmu_free_page(kvm, sp); } else { sp->role.invalid = 1; list_move(&sp->link, &kvm->arch.active_mmu_pages); - kvm_reload_remote_mmus(kvm); + /* No need reload mmu if it's unsync page zapped */ + if (sp->role.level != PT_PAGE_TABLE_LEVEL) + kvm_reload_remote_mmus(kvm); } kvm_mmu_reset_last_pte_updated(kvm); return ret; @@ -1782,7 +1784,8 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) bucket = &vcpu->kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(s, node, n, bucket, hash_link) { - if (s->gfn != gfn || s->role.direct || s->unsync) + if (s->gfn != gfn || s->role.direct || s->unsync || + s->role.invalid) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); __kvm_unsync_page(vcpu, s); @@ -1807,7 +1810,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, if (s->role.level != PT_PAGE_TABLE_LEVEL) return 1; - if (!need_unsync && !s->unsync) { + if (!need_unsync && !s->unsync && !s->role.invalid) { if (!can_unsync || !oos_shadow) return 1; need_unsync = true; -- 1.6.1.2