From: Takuya Yoshikawa
Subject: [PATCH 2/4] KVM: MMU: Make common preparation code for zapping sp into a function
Date: Mon, 12 Dec 2011 07:24:58 +0900
Message-ID: <20111212072458.f289a43deec2f9ddceb768b9@gmail.com>
In-Reply-To: <20111212072242.8aaf64a3420608b8204702c7@gmail.com>
References: <20111212072242.8aaf64a3420608b8204702c7@gmail.com>
Mime-Version: 1.0
Content-Type: text/plain; charset=US-ASCII
Content-Transfer-Encoding: 7bit
To: avi@redhat.com, mtosatti@redhat.com
Cc: kvm@vger.kernel.org, yoshikawa.takuya@oss.ntt.co.jp

From: Takuya Yoshikawa

Move the common code that takes a shadow page from the tail of the
active_mmu_pages list and prepares it for being freed into one
function: pre_zap_one_sp().  While at it, use list_entry() instead of
container_of() for taking the shadow page from the list; see the
sketch after the patch for how the two macros relate.

Note: the return value of pre_zap_one_sp() will be used in a later
patch.

Signed-off-by: Takuya Yoshikawa
---
 arch/x86/kvm/mmu.c |   45 +++++++++++++++++++++++----------------------
 1 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 590f76b..b1e8270 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1930,6 +1930,26 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	return ret;
 }
 
+/**
+ * pre_zap_one_sp - make one shadow page ready for being freed
+ * @kvm: the kvm instance
+ * @invalid_list: the list to which we add shadow pages ready for being freed
+ *
+ * Take one shadow page from the tail of the active_mmu_pages list and make it
+ * ready for being freed, then put it into the @invalid_list.  Other pages,
+ * such as unsync children, may also be put into the @invalid_list.
+ *
+ * Return the number of shadow pages added to the @invalid_list this way.
+ */
+static int pre_zap_one_sp(struct kvm *kvm, struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	sp = list_entry(kvm->arch.active_mmu_pages.prev,
+			struct kvm_mmu_page, link);
+	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+}
+
 static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *sp;
@@ -1999,11 +2019,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
 		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 		       !list_empty(&kvm->arch.active_mmu_pages)) {
-			struct kvm_mmu_page *page;
-
-			page = container_of(kvm->arch.active_mmu_pages.prev,
-					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
+			pre_zap_one_sp(kvm, &invalid_list);
 		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
@@ -3719,11 +3735,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
 	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-		struct kvm_mmu_page *sp;
-
-		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-				  struct kvm_mmu_page, link);
-		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
+		pre_zap_one_sp(vcpu->kvm, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3890,16 +3902,6 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-						struct list_head *invalid_list)
-{
-	struct kvm_mmu_page *page;
-
-	page = container_of(kvm->arch.active_mmu_pages.prev,
-			    struct kvm_mmu_page, link);
-	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
@@ -3919,8 +3921,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		spin_lock(&kvm->mmu_lock);
 		if (!kvm_freed && nr_to_scan > 0 &&
 		    kvm->arch.n_used_mmu_pages > 0) {
-			kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-							    &invalid_list);
+			pre_zap_one_sp(kvm, &invalid_list);
 			kvm_freed = kvm;
 		}
 		nr_to_scan--;
-- 
1.7.5.4
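
P.S.: the substitution above is safe because list_entry() is defined in
include/linux/list.h as a thin wrapper around container_of(); it only
spells out the intent of mapping a struct list_head back to the
structure that embeds it.  The following is a minimal, self-contained
userspace sketch of that pattern (the simplified macros and the
page_like type are illustrative stand-ins, not the kernel's own code):

#include <stddef.h>
#include <stdio.h>

/* Simplified forms of the kernel macros; the real ones live in
 * include/linux/kernel.h and include/linux/list.h. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* list_entry() is container_of() spelled with list intent. */
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct list_head {
	struct list_head *next, *prev;
};

/* Stand-in for struct kvm_mmu_page: a payload embedding a list node. */
struct page_like {
	int id;
	struct list_head link;
};

int main(void)
{
	struct page_like pg = { .id = 42 };
	struct list_head *node = &pg.link;

	/* Recover the containing structure from its embedded list node,
	 * just as pre_zap_one_sp() does with active_mmu_pages.prev. */
	struct page_like *back = list_entry(node, struct page_like, link);

	printf("id = %d\n", back->id);
	return 0;
}

Built with gcc, this prints "id = 42": the address of pg is recovered
from &pg.link by subtracting the member's offset, which is all either
macro does.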