From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
To: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: Re: [PATCH 05/15] KVM: MMU: optimize to handle dirty bit
Date: Wed, 08 Jun 2011 11:16:30 +0800 [thread overview]
Message-ID: <4DEEE98E.6090102@cn.fujitsu.com> (raw)
In-Reply-To: <4DEE210E.2000404@cn.fujitsu.com>
On 06/07/2011 09:01 PM, Xiao Guangrong wrote:
> If dirty bit is not set, we can make the pte access read-only to avoid handling
> dirty bit everywhere
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index b0c8184..67971da 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -106,6 +106,9 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
> unsigned access;
>
> access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> + if (!is_dirty_gpte(gpte))
> + access &= ~ACC_WRITE_MASK;
> +
Sorry, it can break something: if the gpte is not on the last level and the dirty bit
is set later. The patch below should fix it; I'll merge it into the next version.
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4287dc8..6ceb5fd 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,12 +101,13 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return (ret != orig_pte);
}
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
+static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
+ bool last)
{
unsigned access;
access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
- if (!is_dirty_gpte(gpte))
+ if (last && !is_dirty_gpte(gpte))
access &= ~ACC_WRITE_MASK;
#if PTTYPE == 64
@@ -230,8 +231,6 @@ walk:
pte |= PT_ACCESSED_MASK;
}
- pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
-
walker->ptes[walker->level - 1] = pte;
if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
@@ -266,7 +265,7 @@ walk:
break;
}
- pt_access = pte_access;
+ pt_access &= FNAME(gpte_access)(vcpu, pte, false);
--walker->level;
}
@@ -290,6 +289,7 @@ walk:
walker->ptes[walker->level - 1] = pte;
}
+ pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
@@ -369,7 +369,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
@@ -444,7 +444,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
continue;
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
+ true);
gfn = gpte_to_gfn(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
pte_access & ACC_WRITE_MASK);
@@ -790,7 +791,7 @@ static bool FNAME(sync_mmio_spte)(struct kvm_vcpu *vcpu,
if (unlikely(is_mmio_spte(*sptep))) {
gfn_t gfn = gpte_to_gfn(gpte);
unsigned access = sp->role.access & FNAME(gpte_access)(vcpu,
- gpte);
+ gpte, true);
if (gfn != get_mmio_spte_gfn(*sptep)) {
__set_spte(sptep, shadow_trap_nonpresent_pte);
@@ -868,7 +869,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
}
nr_present++;
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
+ true);
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
next prev parent reply other threads:[~2011-06-08 3:14 UTC|newest]
Thread overview: 54+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-06-07 12:58 [PATCH 0/15] KVM: optimize for MMIO handled Xiao Guangrong
2011-06-07 12:58 ` [PATCH 01/15] KVM: MMU: fix walking shadow page table Xiao Guangrong
2011-06-07 12:59 ` [PATCH 02/15] KVM: MMU: do not update slot bitmap if spte is nonpresent Xiao Guangrong
2011-06-20 16:28 ` Marcelo Tosatti
2011-06-20 18:32 ` Xiao Guangrong
2011-06-07 12:59 ` [PATCH 03/15] KVM: x86: avoid unnecessarily guest page table walking Xiao Guangrong
2011-06-09 6:59 ` Avi Kivity
2011-06-10 3:51 ` Xiao Guangrong
2011-06-07 13:00 ` [PATCH 04/15] KVM: MMU: cache mmio info on page fault path Xiao Guangrong
2011-06-08 8:22 ` Alexander Graf
2011-06-08 8:58 ` Xiao Guangrong
2011-06-08 9:18 ` Alexander Graf
2011-06-08 9:33 ` Xiao Guangrong
2011-06-08 9:39 ` Alexander Graf
2011-06-20 16:14 ` Marcelo Tosatti
2011-06-20 16:16 ` Marcelo Tosatti
2011-06-07 13:01 ` [PATCH 05/15] KVM: MMU: optimize to handle dirty bit Xiao Guangrong
2011-06-08 3:16 ` Xiao Guangrong [this message]
2011-06-07 13:01 ` [PATCH 06/15] KVM: MMU: cleanup for FNAME(fetch) Xiao Guangrong
2011-06-07 13:02 ` [PATCH 07/15] KVM: MMU: rename 'pt_write' to 'emulate' Xiao Guangrong
2011-06-07 13:02 ` [PATCH 08/15] KVM: MMU: count used shadow pages on preparing path Xiao Guangrong
2011-06-07 13:03 ` [PATCH 09/15] KVM: MMU: split kvm_mmu_free_page Xiao Guangrong
2011-06-09 7:07 ` Avi Kivity
2011-06-10 3:50 ` Xiao Guangrong
2011-06-12 8:33 ` Avi Kivity
2011-06-13 3:15 ` Xiao Guangrong
2011-06-07 13:04 ` [PATCH 10/15] KVM: MMU: lockless walking shadow page table Xiao Guangrong
2011-06-09 20:09 ` Paul E. McKenney
2011-06-10 4:23 ` Xiao Guangrong
2011-06-20 16:37 ` Marcelo Tosatti
2011-06-20 18:54 ` Xiao Guangrong
2011-06-07 13:05 ` [PATCH 11/15] KVM: MMU: filter out the mmio pfn from the fault pfn Xiao Guangrong
2011-06-07 13:05 ` [PATCH 12/15] KVM: MMU: abstract some functions to handle " Xiao Guangrong
2011-06-07 13:06 ` [PATCH 13/15] KVM: VMX: modify the default value of nontrap shadow pte Xiao Guangrong
2011-06-09 7:14 ` Avi Kivity
2011-06-07 13:07 ` [PATCH 14/15] KVM: MMU: mmio page fault support Xiao Guangrong
2011-06-09 7:28 ` Avi Kivity
2011-06-10 3:47 ` Xiao Guangrong
2011-06-12 8:38 ` Avi Kivity
2011-06-13 3:38 ` Xiao Guangrong
2011-06-13 8:10 ` Avi Kivity
2011-06-07 13:07 ` [PATCH 15/15] KVM: MMU: trace mmio page fault Xiao Guangrong
2011-06-08 3:11 ` [PATCH 0/15] KVM: optimize for MMIO handled Takuya Yoshikawa
2011-06-08 3:25 ` Xiao Guangrong
2011-06-08 3:32 ` Xiao Guangrong
2011-06-08 3:47 ` Takuya Yoshikawa
2011-06-08 5:16 ` Xiao Guangrong
2011-06-08 6:22 ` Xiao Guangrong
2011-06-08 8:33 ` Takuya Yoshikawa
2011-06-09 7:39 ` Avi Kivity
2011-06-10 4:05 ` Xiao Guangrong
2011-06-12 8:47 ` Avi Kivity
2011-06-13 4:46 ` Xiao Guangrong
2011-06-13 8:06 ` Avi Kivity
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4DEEE98E.6090102@cn.fujitsu.com \
--to=xiaoguangrong@cn.fujitsu.com \
--cc=avi@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox