From mboxrd@z Thu Jan 1 00:00:00 1970 From: mtosatti@redhat.com Subject: [patch 4/4] KVM: MMU: pinned sps are not candidates for deletion. Date: Wed, 09 Jul 2014 16:12:54 -0300 Message-ID: <20140709191611.349481802@amt.cnet> References: <20140709191250.408928362@amt.cnet> Cc: pbonzini@redhat.com, xiaoguangrong@linux.vnet.ibm.com, gleb@kernel.org, avi.kivity@gmail.com To: kvm@vger.kernel.org, ak@linux.intel.com Return-path: Received: from mx1.redhat.com ([209.132.183.28]:51459 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753923AbaGITRf (ORCPT ); Wed, 9 Jul 2014 15:17:35 -0400 Content-Disposition: inline; filename=mmu-pinned-spte-skip Sender: kvm-owner@vger.kernel.org List-ID: Skip pinned shadow pages when selecting pages to zap. Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/mmu.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) Index: kvm.pinned-sptes/arch/x86/kvm/mmu.c =================================================================== --- kvm.pinned-sptes.orig/arch/x86/kvm/mmu.c 2014-07-09 12:09:26.433674438 -0300 +++ kvm.pinned-sptes/arch/x86/kvm/mmu.c 2014-07-09 12:09:27.164672860 -0300 @@ -2267,16 +2267,24 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, struct list_head *invalid_list) { - struct kvm_mmu_page *sp; - - if (list_empty(&kvm->arch.active_mmu_pages)) - return false; + struct kvm_mmu_page *sp, *nsp; + LIST_HEAD(pinned_list); - sp = list_entry(kvm->arch.active_mmu_pages.prev, - struct kvm_mmu_page, link); - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + list_for_each_entry_safe_reverse(sp, nsp, + &kvm->arch.active_mmu_pages, link) { + if (sp->pinned) { + list_move(&sp->link, &pinned_list); + continue; + } + if (!list_empty(&pinned_list)) + list_splice(&pinned_list, &kvm->arch.active_mmu_pages); + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + return true; + } - return true; + if (!list_empty(&pinned_list)) + list_splice(&pinned_list, &kvm->arch.active_mmu_pages); + 
return false; } /* @@ -4679,6 +4687,8 @@ * Notify all vcpus to reload its shadow page table * and flush TLB. Then all vcpus will switch to new * shadow page table with the new mmu_valid_gen. + * MMU reload request also forces fault of + * sptes for pinned ranges. * * Note: we should do this under the protection of * mmu-lock, otherwise, vcpu would purge shadow page