From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Avi Kivity <avi@redhat.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: [PATCH v6 5/9] KVM: MMU: introduce SPTE_MMU_WRITEABLE bit
Date: Tue, 29 May 2012 14:49:14 +0800
Message-ID: <4FC4716A.8030304@linux.vnet.ibm.com>
In-Reply-To: <4FC470C7.5040700@linux.vnet.ibm.com>

This bit indicates whether the spte can be made writable on the MMU
side; that is, the corresponding gpte is writable and the corresponding
gfn is not write-protected by shadow-page protection.
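
For reference, a minimal userspace sketch of how the two software bits
combine (illustrative only: the shift value and the main() harness are
assumptions made for this sketch, while the kernel derives the real
masks from PT_FIRST_AVAIL_BITS_SHIFT in arch/x86/kvm/mmu.c, as the
first hunk below shows):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative shift; the kernel uses PT_FIRST_AVAIL_BITS_SHIFT. */
#define FIRST_AVAIL_SHIFT   10
#define SPTE_HOST_WRITEABLE (1ULL << FIRST_AVAIL_SHIFT)
#define SPTE_MMU_WRITEABLE  (1ULL << (FIRST_AVAIL_SHIFT + 1))

/*
 * An spte may be made writable only when BOTH bits are set: the host
 * mapping is writable (HOST) and shadow-page protection does not
 * currently forbid it (MMU).  ~spte & mask is zero iff every bit of
 * mask is set in spte.
 */
static bool spte_can_be_writable(uint64_t spte)
{
	return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE));
}

int main(void)
{
	/* host-writable but MMU-protected: not writable */
	printf("%d\n", spte_can_be_writable(SPTE_HOST_WRITEABLE));
	/* both bits set: writable */
	printf("%d\n", spte_can_be_writable(SPTE_HOST_WRITEABLE |
					    SPTE_MMU_WRITEABLE));
	return 0;
}

Note how the two write-protect paths differ in the hunks below:
shadow-page protection (pt_protect == true) also clears
SPTE_MMU_WRITEABLE so the spte cannot be made writable again, while
dirty logging passes false and clears only PT_WRITABLE_MASK.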
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
arch/x86/kvm/mmu.c | 41 +++++++++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4810992..150c5ad 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -145,7 +145,8 @@ module_param(dbg, bool, 0644);
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
-#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
@@ -1065,32 +1066,43 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
rmap_remove(kvm, sptep);
}
+static bool spte_can_be_writable(u64 spte)
+{
+ return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE));
+}
+
/* Return true if the spte is dropped. */
-static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
+static bool
+spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
{
u64 spte = *sptep;
- if (!is_writable_pte(spte))
+ if (!is_writable_pte(spte) &&
+ !(pt_protect && spte_can_be_writable(spte)))
return false;
rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
- *flush |= true;
if (is_large_pte(spte)) {
WARN_ON(page_header(__pa(sptep))->role.level ==
PT_PAGE_TABLE_LEVEL);
+
+ *flush |= true;
drop_spte(kvm, sptep);
--kvm->stat.lpages;
return true;
}
+ if (pt_protect)
+ spte &= ~SPTE_MMU_WRITEABLE;
spte = spte & ~PT_WRITABLE_MASK;
- mmu_spte_update(sptep, spte);
+
+	*flush |= mmu_spte_update(sptep, spte);
return false;
}
-static bool
-__rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
+static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
+ int level, bool pt_protect)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1098,7 +1110,7 @@ __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
BUG_ON(!(*sptep & PT_PRESENT_MASK));
- if (spte_write_protect(kvm, sptep, &flush)) {
+ if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
sptep = rmap_get_first(*rmapp, &iter);
continue;
}
@@ -1127,7 +1139,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
while (mask) {
rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
- __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
+ __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL, false);
/* clear the first set bit */
mask &= mask - 1;
@@ -1146,7 +1158,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
for (i = PT_PAGE_TABLE_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
rmapp = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmapp, i);
+ write_protected |= __rmap_write_protect(kvm, rmapp, i, true);
}
return write_protected;
@@ -2284,8 +2296,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= shadow_x_mask;
else
spte |= shadow_nx_mask;
+
if (pte_access & ACC_USER_MASK)
spte |= shadow_user_mask;
+
if (level > PT_PAGE_TABLE_LEVEL)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
@@ -2310,7 +2324,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
goto done;
}
- spte |= PT_WRITABLE_MASK;
+ spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
if (!vcpu->arch.mmu.direct_map
&& !(pte_access & ACC_WRITE_MASK)) {
@@ -2339,8 +2353,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
__func__, gfn);
ret = 1;
pte_access &= ~ACC_WRITE_MASK;
- if (is_writable_pte(spte))
- spte &= ~PT_WRITABLE_MASK;
+ spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
}
}
@@ -3921,7 +3934,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
!is_last_spte(pt[i], sp->role.level))
continue;
- spte_write_protect(kvm, &pt[i], &flush);
+ spte_write_protect(kvm, &pt[i], &flush, false);
}
}
kvm_flush_remote_tlbs(kvm);
--
1.7.7.6
Thread overview: 25+ messages
2012-05-29 6:46 [PATCH v6 0/9] KVM: MMU: fast page fault Xiao Guangrong
2012-05-29 6:47 ` [PATCH v6 1/9] KVM: MMU: return bool in __rmap_write_protect Xiao Guangrong
2012-05-29 6:47 ` [PATCH v6 2/9] KVM: MMU: abstract spte write-protect Xiao Guangrong
2012-05-29 6:48 ` [PATCH v6 3/9] KVM: VMX: export PFEC.P bit on ept Xiao Guangrong
2012-05-29 6:48 ` [PATCH v6 4/9] KVM: MMU: fold tlb flush judgement into mmu_spte_update Xiao Guangrong
2012-05-29 6:49 ` Xiao Guangrong [this message]
2012-06-11 23:32 ` [PATCH v6 5/9] KVM: MMU: introduce SPTE_MMU_WRITEABLE bit Marcelo Tosatti
2012-06-12 2:23 ` Xiao Guangrong
2012-06-13 2:01 ` Marcelo Tosatti
2012-06-13 3:11 ` Xiao Guangrong
2012-06-13 21:39 ` Marcelo Tosatti
2012-06-14 1:13 ` Takuya Yoshikawa
2012-06-14 2:41 ` Xiao Guangrong
2012-06-14 2:36 ` Xiao Guangrong
2012-05-29 6:50 ` [PATCH v6 6/9] KVM: MMU: fast path of handling guest page fault Xiao Guangrong
2012-06-13 22:40 ` Marcelo Tosatti
2012-06-14 1:22 ` Takuya Yoshikawa
2012-06-18 19:21 ` Marcelo Tosatti
2012-06-19 2:07 ` Takuya Yoshikawa
2012-06-14 3:00 ` Xiao Guangrong
2012-06-18 19:32 ` Marcelo Tosatti
2012-06-19 2:04 ` Xiao Guangrong
2012-05-29 6:51 ` [PATCH v6 7/9] KVM: MMU: trace fast " Xiao Guangrong
2012-05-29 6:51 ` [PATCH v6 8/9] KVM: MMU: fix kvm_mmu_pagetable_walk tracepoint Xiao Guangrong
2012-05-29 6:52 ` [PATCH v6 9/9] KVM: MMU: document mmu-lock and fast page fault Xiao Guangrong