From mboxrd@z Thu Jan 1 00:00:00 1970 From: mtosatti@redhat.com Subject: [patch 5/5] KVM: MMU: pinned sps are not candidates for deletion. Date: Wed, 18 Jun 2014 20:12:08 -0300 Message-ID: <20140618231521.798438710@amt.cnet> References: <20140618231203.846608908@amt.cnet> Cc: pbonzini@redhat.com, xiaoguangrong@linux.vnet.ibm.com, gleb@kernel.org, avi@cloudius-systems.com, Marcelo Tosatti To: kvm@vger.kernel.org, ak@linux.intel.com Return-path: Received: from mx1.redhat.com ([209.132.183.28]:8333 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753396AbaFRXRD (ORCPT ); Wed, 18 Jun 2014 19:17:03 -0400 Content-Disposition: inline; filename=mmu-pinned-spte-skip Sender: kvm-owner@vger.kernel.org List-ID: Skip pinned shadow pages when selecting pages to zap. Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/mmu.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) Index: kvm/arch/x86/kvm/mmu.c =================================================================== --- kvm.orig/arch/x86/kvm/mmu.c +++ kvm/arch/x86/kvm/mmu.c @@ -2279,16 +2279,24 @@ static void kvm_mmu_commit_zap_page(stru static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, struct list_head *invalid_list) { - struct kvm_mmu_page *sp; - - if (list_empty(&kvm->arch.active_mmu_pages)) - return false; + struct kvm_mmu_page *sp, *nsp; + LIST_HEAD(pinned_list); - sp = list_entry(kvm->arch.active_mmu_pages.prev, - struct kvm_mmu_page, link); - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + list_for_each_entry_safe_reverse(sp, nsp, + &kvm->arch.active_mmu_pages, link) { + if (sp->pinned) { + list_move(&sp->link, &pinned_list); + continue; + } + if (!list_empty(&pinned_list)) + list_splice(&pinned_list, &kvm->arch.active_mmu_pages); + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + return true; + } - return true; + if (!list_empty(&pinned_list)) + list_splice(&pinned_list, &kvm->arch.active_mmu_pages); + return false; } /* @@ -4660,6 +4668,8 @@ void 
kvm_mmu_invalidate_zap_all_pages(st * Notify all vcpus to reload its shadow page table * and flush TLB. Then all vcpus will switch to new * shadow page table with the new mmu_valid_gen. + * The MMU reload request also forces the sptes for + * pinned ranges to be re-faulted. * * Note: we should do this under the protection of * mmu-lock, otherwise, vcpu would purge shadow page