From mboxrd@z Thu Jan 1 00:00:00 1970
From: yasker@gmail.com
Subject: [PATCH] KVM: MMU: Add shadow_accessed_shift
Date: Sun, 31 Aug 2008 00:18:18 +0800
Message-ID: <1220113098-25053-1-git-send-email-sheng.yang@intel.com>
Cc: kvm@vger.kernel.org, Sheng Yang
To: Avi Kivity
Return-path:
Received: from ti-out-0910.google.com ([209.85.142.187]:44058 "EHLO
	ti-out-0910.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1752326AbYH3QSU (ORCPT );
	Sat, 30 Aug 2008 12:18:20 -0400
Received: by ti-out-0910.google.com with SMTP id b6so722983tic.23
	for ; Sat, 30 Aug 2008 09:18:18 -0700 (PDT)
Sender: kvm-owner@vger.kernel.org
List-ID:

From: Sheng Yang

We use a "fake" A/D bit for EPT to keep epte behaviour consistent with
shadow sptes. But that scheme does not work well with the MMU notifier:
at present clear_flush_young() can only be expected to return young=0 in
most conditions. Also fix an improper check that relies on
shadow_accessed_mask being 0 for EPT.

Signed-off-by: Sheng Yang
---
 arch/x86/kvm/mmu.c |   10 ++++++----
 1 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9bc31fc..1e9f9b4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -158,6 +158,7 @@ static u64 __read_mostly shadow_nx_mask;
 static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
+static u16 __read_mostly shadow_accessed_shift;
 static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
@@ -178,6 +179,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
+	shadow_accessed_shift = find_first_bit((unsigned long *)&accessed_mask,
+				sizeof(accessed_mask) * 8);
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
@@ -716,10 +719,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
 		int _young;
 		u64 _spte = *spte;
 		BUG_ON(!(_spte & PT_PRESENT_MASK));
-		_young = _spte & PT_ACCESSED_MASK;
+		_young = _spte & shadow_accessed_mask;
 		if (_young) {
 			young = 1;
-			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+			clear_bit(shadow_accessed_shift, (unsigned long *)spte);
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
@@ -1789,10 +1792,9 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 	if (spte
 	    && vcpu->arch.last_pte_gfn == gfn
-	    && shadow_accessed_mask
 	    && !(*spte & shadow_accessed_mask)
 	    && is_shadow_present_pte(*spte))
-		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+		set_bit(shadow_accessed_shift, (unsigned long *)spte);
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-- 
1.5.3
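
For readers unfamiliar with the mask-to-shift trick used above, here is a
minimal user-space sketch (not part of the patch) of what
kvm_mmu_set_mask_ptes() now computes with find_first_bit(): deriving the
accessed-bit index from the accessed mask. __builtin_ctzll stands in for the
kernel helper, and the EPT bit position shown is only an assumed example.

/* Illustrative sketch only; not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

static unsigned int accessed_shift_from_mask(uint64_t accessed_mask)
{
	/* find_first_bit() semantics: index of the lowest set bit. */
	return accessed_mask ? (unsigned int)__builtin_ctzll(accessed_mask) : 64;
}

int main(void)
{
	/* Shadow paging: PT_ACCESSED_MASK is bit 5 (0x20), so the shift is 5. */
	printf("shadow: shift=%u\n", accessed_shift_from_mask(1ULL << 5));
	/* An accessed bit emulated at another position, e.g. bit 8 (assumed). */
	printf("ept:    shift=%u\n", accessed_shift_from_mask(1ULL << 8));
	return 0;
}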