public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v4 0/5] KVM: x86: improve reexecute_instruction
@ 2013-01-04 13:53 Xiao Guangrong
  2013-01-04 13:54 ` [PATCH v4 1/5] KVM: MMU: fix Dirty bit missed if CR0.WP = 0 Xiao Guangrong
                   ` (4 more replies)
  0 siblings, 5 replies; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-04 13:53 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Gleb Natapov, LKML, KVM

There are some changes from Gleb's review:
- pass the pointer of target_gfn_is_pt into FNAME(is_self_change_mapping)
  to make the return value clearer
- fold some changes of patch 5 into patch 4
- remove vcpu.arch.fault_addr

There are some test cases to trigger the bugs which are fixed in this patchset,
they can be found at:
http://marc.info/?l=kvm&m=135513072903953&w=2
http://marc.info/?l=kvm&m=135555502408253&w=2

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v4 1/5] KVM: MMU: fix Dirty bit missed if CR0.WP = 0
  2013-01-04 13:53 [PATCH v4 0/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
@ 2013-01-04 13:54 ` Xiao Guangrong
  2013-01-04 13:54 ` [PATCH v4 2/5] KVM: MMU: fix infinite fault access retry Xiao Guangrong
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-04 13:54 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, Gleb Natapov, LKML, KVM

If the write-fault access is from supervisor and CR0.WP is not set on the
vcpu, kvm will fix it by adjusting pte access - it sets the W bit on pte
and clears the U bit. This gives kvm the chance to change pte access from
read-only to writable

Unfortunately, the pte access is the access of 'direct' shadow page table,
means direct sp.role.access = pte_access, then we will create a writable
spte entry on the readonly shadow page table. It will cause the Dirty bit
not to be tracked when two guest ptes point to the same large page. Note, it
does not have other impact except Dirty bit since cr0.wp is encoded into
sp.role

It can be fixed by adjusting pte access before establishing shadow page
table. Also, after that, no mmu-specific code exists in the common function,
and two parameters of set_spte can be dropped

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |   47 ++++++++++++-------------------------------
 arch/x86/kvm/paging_tmpl.h |   30 +++++++++++++++++++++++----
 2 files changed, 38 insertions(+), 39 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01d7c2a..2a3c890 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2342,8 +2342,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 }

 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-		    unsigned pte_access, int user_fault,
-		    int write_fault, int level,
+		    unsigned pte_access, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
@@ -2378,9 +2377,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,

 	spte |= (u64)pfn << PAGE_SHIFT;

-	if ((pte_access & ACC_WRITE_MASK)
-	    || (!vcpu->arch.mmu.direct_map && write_fault
-		&& !is_write_protection(vcpu) && !user_fault)) {
+	if (pte_access & ACC_WRITE_MASK) {

 		/*
 		 * There are two cases:
@@ -2399,19 +2396,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,

 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

-		if (!vcpu->arch.mmu.direct_map
-		    && !(pte_access & ACC_WRITE_MASK)) {
-			spte &= ~PT_USER_MASK;
-			/*
-			 * If we converted a user page to a kernel page,
-			 * so that the kernel can write to it when cr0.wp=0,
-			 * then we should prevent the kernel from executing it
-			 * if SMEP is enabled.
-			 */
-			if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-				spte |= PT64_NX_MASK;
-		}
-
 		/*
 		 * Optimization: for pte sync, if spte was writable the hash
 		 * lookup is unnecessary (and expensive). Write protection
@@ -2442,18 +2426,15 @@ done:

 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
-			 int user_fault, int write_fault,
-			 int *emulate, int level, gfn_t gfn,
-			 pfn_t pfn, bool speculative,
-			 bool host_writable)
+			 int write_fault, int *emulate, int level, gfn_t gfn,
+			 pfn_t pfn, bool speculative, bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;

-	pgprintk("%s: spte %llx access %x write_fault %d"
-		 " user_fault %d gfn %llx\n",
+	pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n",
 		 __func__, *sptep, pt_access,
-		 write_fault, user_fault, gfn);
+		 write_fault, gfn);

 	if (is_rmap_spte(*sptep)) {
 		/*
@@ -2477,9 +2458,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			was_rmapped = 1;
 	}

-	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-		      level, gfn, pfn, speculative, true,
-		      host_writable)) {
+	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
+	      true, host_writable)) {
 		if (write_fault)
 			*emulate = 1;
 		kvm_mmu_flush_tlb(vcpu);
@@ -2571,10 +2551,9 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;

 	for (i = 0; i < ret; i++, gfn++, start++)
-		mmu_set_spte(vcpu, start, ACC_ALL,
-			     access, 0, 0, NULL,
-			     sp->role.level, gfn,
-			     page_to_pfn(pages[i]), true, true);
+		mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL,
+			     sp->role.level, gfn, page_to_pfn(pages[i]),
+			     true, true);

 	return 0;
 }
@@ -2636,8 +2615,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			unsigned pte_access = ACC_ALL;

 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
-				     0, write, &emulate,
-				     level, gfn, pfn, prefault, map_writable);
+				     write, &emulate, level, gfn, pfn,
+				     prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
 			++vcpu->stat.pf_fixed;
 			break;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 891eb6d..c1e01b6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -330,7 +330,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * we call mmu_set_spte() with host_writable = true because
 	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
-	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
+	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0,
 		     NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);

 	return true;
@@ -405,7 +405,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
  */
 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
-			 int user_fault, int write_fault, int hlevel,
+			 int write_fault, int hlevel,
 			 pfn_t pfn, bool map_writable, bool prefault)
 {
 	struct kvm_mmu_page *sp = NULL;
@@ -478,7 +478,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,

 	clear_sp_write_flooding_count(it.sptep);
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
-		     user_fault, write_fault, &emulate, it.level,
+		     write_fault, &emulate, it.level,
 		     gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

@@ -564,6 +564,26 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 				walker.gfn, pfn, walker.pte_access, &r))
 		return r;

+	/*
+	 * Do not change pte_access if the pfn is a mmio page, otherwise
+	 * we will cache the incorrect access into mmio spte.
+	 */
+	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
+	     !is_write_protection(vcpu) && !user_fault &&
+	      !is_noslot_pfn(pfn)) {
+		walker.pte_access |= ACC_WRITE_MASK;
+		walker.pte_access &= ~ACC_USER_MASK;
+
+		/*
+		 * If we converted a user page to a kernel page,
+		 * so that the kernel can write to it when cr0.wp=0,
+		 * then we should prevent the kernel from executing it
+		 * if SMEP is enabled.
+		 */
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+			walker.pte_access &= ~ACC_EXEC_MASK;
+	}
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
@@ -572,7 +592,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	kvm_mmu_free_some_pages(vcpu);
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
-	r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 			 level, pfn, map_writable, prefault);
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
@@ -747,7 +767,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)

 		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

-		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
+		set_spte(vcpu, &sp->spt[i], pte_access,
 			 PT_PAGE_TABLE_LEVEL, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false,
 			 host_writable);
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 2/5] KVM: MMU: fix infinite fault access retry
  2013-01-04 13:53 [PATCH v4 0/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
  2013-01-04 13:54 ` [PATCH v4 1/5] KVM: MMU: fix Dirty bit missed if CR0.WP = 0 Xiao Guangrong
@ 2013-01-04 13:54 ` Xiao Guangrong
  2013-01-04 13:55 ` [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction Xiao Guangrong
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-04 13:54 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, Gleb Natapov, LKML, KVM

We have two issues in current code:
- if the target gfn is used as its page table, the guest will refault, then kvm
  will use a small page size to map it. We need two #PFs to fix its shadow page
  table

- sometimes, say an exception is triggered during vm-exit caused by #PF
  (see handle_exception() in vmx.c), we remove all the shadow pages shadowed
  by the target gfn before going into the page fault path, it will cause infinite
  loop:
  delete shadow pages shadowed by the gfn -> try to use large page size to map
  the gfn -> retry the access ->...

To fix these, we can adjust page size early if the target gfn is used as page
table

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |   13 ++++---------
 arch/x86/kvm/paging_tmpl.h |   35 ++++++++++++++++++++++++++++++++++-
 2 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a3c890..54fc61e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2380,15 +2380,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (pte_access & ACC_WRITE_MASK) {

 		/*
-		 * There are two cases:
-		 * - the one is other vcpu creates new sp in the window
-		 *   between mapping_level() and acquiring mmu-lock.
-		 * - the another case is the new sp is created by itself
-		 *   (page-fault path) when guest uses the target gfn as
-		 *   its page table.
-		 * Both of these cases can be fixed by allowing guest to
-		 * retry the access, it will refault, then we can establish
-		 * the mapping by using small page.
+		 * Other vcpu creates new sp in the window between
+		 * mapping_level() and acquiring mmu-lock. We can
+		 * allow guest to retry the access, the mapping can
+		 * be fixed if guest refault.
 		 */
 		if (level > PT_PAGE_TABLE_LEVEL &&
 		    has_wrprotected_page(vcpu->kvm, gfn, level))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c1e01b6..0453fa0 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -491,6 +491,38 @@ out_gpte_changed:
 	return 0;
 }

+ /*
+ * To see whether the mapped gfn can write its page table in the current
+ * mapping.
+ *
+ * It is the helper function of FNAME(page_fault). When guest uses large page
+ * size to map the writable gfn which is used as current page table, we should
+ * force kvm to use small page size to map it because new shadow page will be
+ * created when kvm establishes shadow page table that stop kvm using large
+ * page size. Do it early can avoid unnecessary #PF and emulation.
+ *
+ * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok
+ * since the PDPT is always shadowed, that means, we can not use large page
+ * size to map the gfn which is used as PDPT.
+ */
+static bool
+FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
+			      struct guest_walker *walker, int user_fault)
+{
+	int level;
+	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+
+	if (!(walker->pte_access & ACC_WRITE_MASK ||
+	      (!is_write_protection(vcpu) && !user_fault)))
+		return false;
+
+	for (level = walker->level; level <= walker->max_level; level++)
+		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
+			return true;
+
+	return false;
+}
+
 /*
  * Page fault handler.  There are several causes for a page fault:
  *   - there is no shadow pte for the guest pte
@@ -545,7 +577,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	}

 	if (walker.level >= PT_DIRECTORY_LEVEL)
-		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
+		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
 	else
 		force_pt_level = 1;
 	if (!force_pt_level) {
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction
  2013-01-04 13:53 [PATCH v4 0/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
  2013-01-04 13:54 ` [PATCH v4 1/5] KVM: MMU: fix Dirty bit missed if CR0.WP = 0 Xiao Guangrong
  2013-01-04 13:54 ` [PATCH v4 2/5] KVM: MMU: fix infinite fault access retry Xiao Guangrong
@ 2013-01-04 13:55 ` Xiao Guangrong
  2013-01-04 22:21   ` Marcelo Tosatti
  2013-01-04 13:56 ` [PATCH v4 4/5] KVM: x86: let reexecute_instruction work for tdp Xiao Guangrong
  2013-01-04 13:56 ` [PATCH v4 5/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
  4 siblings, 1 reply; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-04 13:55 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, Gleb Natapov, LKML, KVM

Little cleanup for reexecute_instruction, also use gpa_to_gfn in
retry_instruction

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/x86.c |   13 ++++++-------
 1 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1c9c834..ad39018 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4761,19 +4761,18 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 	if (tdp_enabled)
 		return false;

+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+	if (gpa == UNMAPPED_GVA)
+		return true; /* let cpu generate fault */
+
 	/*
 	 * if emulation was due to access to shadowed page table
 	 * and it failed try to unshadow page and re-enter the
 	 * guest to let CPU execute the instruction.
 	 */
-	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
+	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
 		return true;

-	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
-
-	if (gpa == UNMAPPED_GVA)
-		return true; /* let cpu generate fault */
-
 	/*
 	 * Do not retry the unhandleable instruction if it faults on the
 	 * readonly host memory, otherwise it will goto a infinite loop:
@@ -4828,7 +4827,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (!vcpu->arch.mmu.direct_map)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

-	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

 	return true;
 }
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 4/5] KVM: x86: let reexecute_instruction work for tdp
  2013-01-04 13:53 [PATCH v4 0/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
                   ` (2 preceding siblings ...)
  2013-01-04 13:55 ` [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction Xiao Guangrong
@ 2013-01-04 13:56 ` Xiao Guangrong
  2013-01-04 13:56 ` [PATCH v4 5/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
  4 siblings, 0 replies; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-04 13:56 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, Gleb Natapov, LKML, KVM

Currently, reexecute_instruction refused to retry all instructions. If
nested npt is used, the emulation may be caused by a shadow page; it can
be fixed by dropping the shadow page

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/x86.c |   28 ++++++++++++++++++++++------
 1 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ad39018..b0a3678 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4753,17 +4753,33 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 	return r;
 }

-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 {
-	gpa_t gpa;
+	gpa_t gpa = cr2;
 	pfn_t pfn;
+	unsigned int indirect_shadow_pages;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
+	spin_unlock(&vcpu->kvm->mmu_lock);

-	if (tdp_enabled)
+	if (!indirect_shadow_pages)
 		return false;

-	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-	if (gpa == UNMAPPED_GVA)
-		return true; /* let cpu generate fault */
+	if (!vcpu->arch.mmu.direct_map) {
+		/*
+		 * Write permission should be allowed since only
+		 * write access need to be emulated.
+		 */
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+
+		/*
+		 * If the mapping is invalid in guest, let cpu retry
+		 * it to generate fault.
+		 */
+		if (gpa == UNMAPPED_GVA)
+			return true;
+	}

 	/*
 	 * if emulation was due to access to shadowed page table
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v4 5/5] KVM: x86: improve reexecute_instruction
  2013-01-04 13:53 [PATCH v4 0/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
                   ` (3 preceding siblings ...)
  2013-01-04 13:56 ` [PATCH v4 4/5] KVM: x86: let reexecute_instruction work for tdp Xiao Guangrong
@ 2013-01-04 13:56 ` Xiao Guangrong
  2013-01-04 22:44   ` Marcelo Tosatti
  4 siblings, 1 reply; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-04 13:56 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, Gleb Natapov, LKML, KVM

The current reexecute_instruction can not well detect the failed instruction
emulation. It allows the guest to retry all instructions except when it
accesses an error pfn

For example, some cases are nested-write-protect - if the page we want to
write is used as PDE but it chains to itself. Under this case, we should
stop the emulation and report the case to userspace

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/include/asm/kvm_host.h |    7 +++++
 arch/x86/kvm/paging_tmpl.h      |   24 +++++++++++++-----
 arch/x86/kvm/x86.c              |   50 ++++++++++++++++++++++++--------------
 3 files changed, 55 insertions(+), 26 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c431b33..de229e6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -502,6 +502,13 @@ struct kvm_vcpu_arch {
 		u64 msr_val;
 		struct gfn_to_hva_cache data;
 	} pv_eoi;
+
+	/*
+	 * Indicate whether the gfn is used as page table in guest which
+	 * is set when fix page fault and used to detect unhandeable
+	 * instruction.
+	 */
+	bool target_gfn_is_pt;
 };

 struct kvm_lpage_info {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0453fa0..ca1be75 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -507,20 +507,27 @@ out_gpte_changed:
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, int user_fault)
+			      struct guest_walker *walker, int user_fault,
+			      bool *target_gfn_is_pt)
 {
 	int level;
 	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+	bool self_changed = false;
+
+	*target_gfn_is_pt = false;

 	if (!(walker->pte_access & ACC_WRITE_MASK ||
 	      (!is_write_protection(vcpu) && !user_fault)))
 		return false;

-	for (level = walker->level; level <= walker->max_level; level++)
-		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
-			return true;
+	for (level = walker->level; level <= walker->max_level; level++) {
+		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
+
+		self_changed |= !(gfn & mask);
+		*target_gfn_is_pt |= !gfn;
+	}

-	return false;
+	return self_changed;
 }

 /*
@@ -548,7 +555,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int level = PT_PAGE_TABLE_LEVEL;
 	int force_pt_level;
 	unsigned long mmu_seq;
-	bool map_writable;
+	bool map_writable, is_self_change_mapping;

 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

@@ -576,9 +583,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 	}

+	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
+		      &walker, user_fault, &vcpu->arch.target_gfn_is_pt);
+
 	if (walker.level >= PT_DIRECTORY_LEVEL)
 		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
-		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
+		   || is_self_change_mapping;
 	else
 		force_pt_level = 1;
 	if (!force_pt_level) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b0a3678..44c6992 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4756,15 +4756,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 {
 	gpa_t gpa = cr2;
+	gfn_t gfn;
 	pfn_t pfn;
-	unsigned int indirect_shadow_pages;
-
-	spin_lock(&vcpu->kvm->mmu_lock);
-	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (!indirect_shadow_pages)
-		return false;

 	if (!vcpu->arch.mmu.direct_map) {
 		/*
@@ -4781,13 +4774,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 			return true;
 	}

-	/*
-	 * if emulation was due to access to shadowed page table
-	 * and it failed try to unshadow page and re-enter the
-	 * guest to let CPU execute the instruction.
-	 */
-	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
-		return true;
+	gfn = gpa_to_gfn(gpa);

 	/*
 	 * Do not retry the unhandleable instruction if it faults on the
@@ -4795,13 +4782,38 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 	 * retry instruction -> write #PF -> emulation fail -> retry
 	 * instruction -> ...
 	 */
-	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-	if (!is_error_noslot_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	/*
+	 * If the instruction failed on the error pfn, it can not be fixed,
+	 * report the error to userspace.
+	 */
+	if (is_error_noslot_pfn(pfn))
+		return false;
+
+	kvm_release_pfn_clean(pfn);
+
+	/* The instructions are well-emulated on direct mmu. */
+	if (vcpu->arch.mmu.direct_map) {
+		unsigned int indirect_shadow_pages;
+
+		spin_lock(&vcpu->kvm->mmu_lock);
+		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
+		spin_unlock(&vcpu->kvm->mmu_lock);
+
+		if (indirect_shadow_pages)
+			kvm_mmu_unprotect_page(vcpu->kvm, gfn);
+
 		return true;
 	}

-	return false;
+	kvm_mmu_unprotect_page(vcpu->kvm, gfn);
+
+	/* If the target gfn is used as page table, the fault can
+	 * not be avoided by unprotecting shadow page and it will
+	 * be reported to userspace.
+	 */
+	return !vcpu->arch.target_gfn_is_pt;
 }

 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
-- 
1.7.7.6


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction
  2013-01-04 13:55 ` [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction Xiao Guangrong
@ 2013-01-04 22:21   ` Marcelo Tosatti
  2013-01-05  7:20     ` Xiao Guangrong
  0 siblings, 1 reply; 11+ messages in thread
From: Marcelo Tosatti @ 2013-01-04 22:21 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Gleb Natapov, LKML, KVM

On Fri, Jan 04, 2013 at 09:55:40PM +0800, Xiao Guangrong wrote:
> Little cleanup for reexecute_instruction, also use gpa_to_gfn in
> retry_instruction
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/x86.c |   13 ++++++-------
>  1 files changed, 6 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 1c9c834..ad39018 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4761,19 +4761,18 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
>  	if (tdp_enabled)
>  		return false;
> 
> +	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
> +	if (gpa == UNMAPPED_GVA)
> +		return true; /* let cpu generate fault */
> +

Why change from _system to _read here? Purely cleanup patch should
have no logical changes.

BTW, there is not much logic in using reexecute_instruction() at
for x86_decode_insn (checks in reexecute_instruction() assume 
write to the cr2, for instance).
Fault propagation for x86_decode_insn seems completly broken
(which is perhaps why reexecute_instruction() there survived).

>  	/*
>  	 * if emulation was due to access to shadowed page table
>  	 * and it failed try to unshadow page and re-enter the
>  	 * guest to let CPU execute the instruction.
>  	 */
> -	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
> +	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
>  		return true;
> 
> -	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
> -
> -	if (gpa == UNMAPPED_GVA)
> -		return true; /* let cpu generate fault */
> -
>  	/*
>  	 * Do not retry the unhandleable instruction if it faults on the
>  	 * readonly host memory, otherwise it will goto a infinite loop:
> @@ -4828,7 +4827,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
>  	if (!vcpu->arch.mmu.direct_map)
>  		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
> 
> -	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
> +	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> 
>  	return true;
>  }
> -- 
> 1.7.7.6

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 5/5] KVM: x86: improve reexecute_instruction
  2013-01-04 13:56 ` [PATCH v4 5/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
@ 2013-01-04 22:44   ` Marcelo Tosatti
  2013-01-05  8:16     ` Xiao Guangrong
  0 siblings, 1 reply; 11+ messages in thread
From: Marcelo Tosatti @ 2013-01-04 22:44 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Gleb Natapov, LKML, KVM

On Fri, Jan 04, 2013 at 09:56:59PM +0800, Xiao Guangrong wrote:
> The current reexecute_instruction can not well detect the failed instruction
> emulation. It allows guest to retry all the instructions except it accesses
> on error pfn
> 
> For example, some cases are nested-write-protect - if the page we want to
> write is used as PDE but it chains to itself. Under this case, we should
> stop the emulation and report the case to userspace
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    7 +++++
>  arch/x86/kvm/paging_tmpl.h      |   24 +++++++++++++-----
>  arch/x86/kvm/x86.c              |   50 ++++++++++++++++++++++++--------------
>  3 files changed, 55 insertions(+), 26 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index c431b33..de229e6 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -502,6 +502,13 @@ struct kvm_vcpu_arch {
>  		u64 msr_val;
>  		struct gfn_to_hva_cache data;
>  	} pv_eoi;
> +
> +	/*
> +	 * Indicate whether the gfn is used as page table in guest which
> +	 * is set when fix page fault and used to detect unhandeable
> +	 * instruction.
> +	 */
> +	bool target_gfn_is_pt;
>  };
> 
>  struct kvm_lpage_info {
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 0453fa0..ca1be75 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -507,20 +507,27 @@ out_gpte_changed:
>   */
>  static bool
>  FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
> -			      struct guest_walker *walker, int user_fault)
> +			      struct guest_walker *walker, int user_fault,
> +			      bool *target_gfn_is_pt)
>  {
>  	int level;
>  	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
> +	bool self_changed = false;
> +
> +	*target_gfn_is_pt = false;
> 
>  	if (!(walker->pte_access & ACC_WRITE_MASK ||
>  	      (!is_write_protection(vcpu) && !user_fault)))
>  		return false;
> 
> -	for (level = walker->level; level <= walker->max_level; level++)
> -		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
> -			return true;
> +	for (level = walker->level; level <= walker->max_level; level++) {
> +		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
> +
> +		self_changed |= !(gfn & mask);
> +		*target_gfn_is_pt |= !gfn;
> +	}
> 
> -	return false;
> +	return self_changed;
>  }
> 
>  /*
> @@ -548,7 +555,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
>  	int level = PT_PAGE_TABLE_LEVEL;
>  	int force_pt_level;
>  	unsigned long mmu_seq;
> -	bool map_writable;
> +	bool map_writable, is_self_change_mapping;
> 
>  	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
> 
> @@ -576,9 +583,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
>  		return 0;
>  	}
> 
> +	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
> +		      &walker, user_fault, &vcpu->arch.target_gfn_is_pt);
> +
>  	if (walker.level >= PT_DIRECTORY_LEVEL)
>  		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
> -		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
> +		   || is_self_change_mapping;
>  	else
>  		force_pt_level = 1;
>  	if (!force_pt_level) {
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b0a3678..44c6992 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4756,15 +4756,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
>  static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>  {
>  	gpa_t gpa = cr2;
> +	gfn_t gfn;
>  	pfn_t pfn;
> -	unsigned int indirect_shadow_pages;
> -
> -	spin_lock(&vcpu->kvm->mmu_lock);
> -	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> -	spin_unlock(&vcpu->kvm->mmu_lock);
> -
> -	if (!indirect_shadow_pages)
> -		return false;

This renders the previous patch obsolete, pretty much (please fold).

>  	if (!vcpu->arch.mmu.direct_map) {
>  		/*
> @@ -4781,13 +4774,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>  			return true;
>  	}
> 
> -	/*
> -	 * if emulation was due to access to shadowed page table
> -	 * and it failed try to unshadow page and re-enter the
> -	 * guest to let CPU execute the instruction.
> -	 */
> -	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
> -		return true;
> +	gfn = gpa_to_gfn(gpa);
> 
>  	/*
>  	 * Do not retry the unhandleable instruction if it faults on the
> @@ -4795,13 +4782,38 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>  	 * retry instruction -> write #PF -> emulation fail -> retry
>  	 * instruction -> ...
>  	 */
> -	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
> -	if (!is_error_noslot_pfn(pfn)) {
> -		kvm_release_pfn_clean(pfn);
> +	pfn = gfn_to_pfn(vcpu->kvm, gfn);
> +
> +	/*
> +	 * If the instruction failed on the error pfn, it can not be fixed,
> +	 * report the error to userspace.
> +	 */
> +	if (is_error_noslot_pfn(pfn))
> +		return false;
> +
> +	kvm_release_pfn_clean(pfn);
> +
> +	/* The instructions are well-emulated on direct mmu. */
> +	if (vcpu->arch.mmu.direct_map) {

!direct_map?

> +		unsigned int indirect_shadow_pages;
> +
> +		spin_lock(&vcpu->kvm->mmu_lock);
> +		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> +		spin_unlock(&vcpu->kvm->mmu_lock);
> +
> +		if (indirect_shadow_pages)
> +			kvm_mmu_unprotect_page(vcpu->kvm, gfn);
> +
>  		return true;
>  	}
> 
> -	return false;
> +	kvm_mmu_unprotect_page(vcpu->kvm, gfn);
> +
> +	/* If the target gfn is used as page table, the fault can
> +	 * not be avoided by unprotecting shadow page and it will
> +	 * be reported to userspace.
> +	 */
> +	return !vcpu->arch.target_gfn_is_pt;
>  }

The idea was

How about recording the gfn number for shadow pages that have been
shadowed in the current pagefault run? (which is cheap, compared to
shadowing these pages).

If failed instruction emulation is write to one of these gfns, then
fail.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction
  2013-01-04 22:21   ` Marcelo Tosatti
@ 2013-01-05  7:20     ` Xiao Guangrong
  0 siblings, 0 replies; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-05  7:20 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Gleb Natapov, LKML, KVM

On 01/05/2013 06:21 AM, Marcelo Tosatti wrote:
> On Fri, Jan 04, 2013 at 09:55:40PM +0800, Xiao Guangrong wrote:
>> Little cleanup for reexecute_instruction, also use gpa_to_gfn in
>> retry_instruction
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
>> ---
>>  arch/x86/kvm/x86.c |   13 ++++++-------
>>  1 files changed, 6 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 1c9c834..ad39018 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -4761,19 +4761,18 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
>>  	if (tdp_enabled)
>>  		return false;
>>
>> +	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
>> +	if (gpa == UNMAPPED_GVA)
>> +		return true; /* let cpu generate fault */
>> +
> 
> Why change from _system to _read here? Purely cleanup patch should
> have no logical changes.

Ouch, my mistake, will drop this change.

> 
> BTW, there is not much logic in using reexecute_instruction() at
> for x86_decode_insn (checks in reexecute_instruction() assume 
> write to the cr2, for instance).
> Fault propagation for x86_decode_insn seems completly broken
> (which is perhaps why reexecute_instruction() there survived).

Currently, reexecute_instruction can work only if it is called on the page
fault path where cr2 is valid. On other paths, cr2 is 0, which is never
mapped in the guest since it is a NULL pointer, so reexecute_instruction
always retries the instruction.

Yes, as you point out, it would be better if the fault address could be
obtained from x86_decode_insn. I will consider it later.



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 5/5] KVM: x86: improve reexecute_instruction
  2013-01-04 22:44   ` Marcelo Tosatti
@ 2013-01-05  8:16     ` Xiao Guangrong
  2013-01-07 20:46       ` Marcelo Tosatti
  0 siblings, 1 reply; 11+ messages in thread
From: Xiao Guangrong @ 2013-01-05  8:16 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Gleb Natapov, LKML, KVM

On 01/05/2013 06:44 AM, Marcelo Tosatti wrote:

>> index b0a3678..44c6992 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -4756,15 +4756,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
>>  static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>>  {
>>  	gpa_t gpa = cr2;
>> +	gfn_t gfn;
>>  	pfn_t pfn;
>> -	unsigned int indirect_shadow_pages;
>> -
>> -	spin_lock(&vcpu->kvm->mmu_lock);
>> -	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
>> -	spin_unlock(&vcpu->kvm->mmu_lock);
>> -
>> -	if (!indirect_shadow_pages)
>> -		return false;
> 
> This renders the previous patch obsolete, pretty much (please fold).

Will try.

> 
>>  	if (!vcpu->arch.mmu.direct_map) {
>>  		/*
>> @@ -4781,13 +4774,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>>  			return true;
>>  	}
>>
>> -	/*
>> -	 * if emulation was due to access to shadowed page table
>> -	 * and it failed try to unshadow page and re-enter the
>> -	 * guest to let CPU execute the instruction.
>> -	 */
>> -	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
>> -		return true;
>> +	gfn = gpa_to_gfn(gpa);
>>
>>  	/*
>>  	 * Do not retry the unhandleable instruction if it faults on the
>> @@ -4795,13 +4782,38 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>>  	 * retry instruction -> write #PF -> emulation fail -> retry
>>  	 * instruction -> ...
>>  	 */
>> -	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
>> -	if (!is_error_noslot_pfn(pfn)) {
>> -		kvm_release_pfn_clean(pfn);
>> +	pfn = gfn_to_pfn(vcpu->kvm, gfn);
>> +
>> +	/*
>> +	 * If the instruction failed on the error pfn, it can not be fixed,
>> +	 * report the error to userspace.
>> +	 */
>> +	if (is_error_noslot_pfn(pfn))
>> +		return false;
>> +
>> +	kvm_release_pfn_clean(pfn);
>> +
>> +	/* The instructions are well-emulated on direct mmu. */
>> +	if (vcpu->arch.mmu.direct_map) {
> 
> !direct_map?

No. The logic is: if it is the direct mmu, we just unprotect the page shadowed
by the nested mmu, then let the guest retry the instruction; there is no need
to detect an unhandleable instruction.

> 
>> +		unsigned int indirect_shadow_pages;
>> +
>> +		spin_lock(&vcpu->kvm->mmu_lock);
>> +		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
>> +		spin_unlock(&vcpu->kvm->mmu_lock);
>> +
>> +		if (indirect_shadow_pages)
>> +			kvm_mmu_unprotect_page(vcpu->kvm, gfn);
>> +
>>  		return true;
>>  	}
>>
>> -	return false;
>> +	kvm_mmu_unprotect_page(vcpu->kvm, gfn);
>> +
>> +	/* If the target gfn is used as page table, the fault can
>> +	 * not be avoided by unprotecting shadow page and it will
>> +	 * be reported to userspace.
>> +	 */
>> +	return !vcpu->arch.target_gfn_is_pt;
>>  }
> 
> The idea was
> 
> How about recording the gfn number for shadow pages that have been
> shadowed in the current pagefault run? (which is cheap, compared to
> shadowing these pages).
> 
> If failed instruction emulation is write to one of these gfns, then
> fail.

If I understood correctly, I do not think it is simpler than the approach in
this patch.

There is the change to apply the idea:

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c431b33..2163de8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -502,6 +502,8 @@ struct kvm_vcpu_arch {
 		u64 msr_val;
 		struct gfn_to_hva_cache data;
 	} pv_eoi;
+
+	gfn_t pt_gfns[4];
 };

 struct kvm_lpage_info {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0453fa0..ac4210f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -523,6 +523,18 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 	return false;
 }

+static void FNAME(cache_pt_gfns)(struct kvm_vcpu *vcpu, struct guest_walker *walker)
+{
+	int level;
+
+	/* Reset all gfns to -1, then we can detect the levels which is not used in guest. */
+	for (level = 0; level < 4; level++)
+		vcpu->arch.pt_gfns[level] = (gfn_t)(-1);
+
+	for (level = walker->level; level <= walker->max_level; level++)
+		vcpu->arch.pt_gfns[level - 1] = walker->table_gfn[level - 1];
+}
+
 /*
  * Page fault handler.  There are several causes for a page fault:
  *   - there is no shadow pte for the guest pte
@@ -576,6 +588,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 	}

+	 FNAME(cache_pt_gfns)(vcpu, &walker);
+
 	if (walker.level >= PT_DIRECTORY_LEVEL)
 		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
 		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b0a3678..b86ee24 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4753,18 +4753,25 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 	return r;
 }

+static bool is_gfn_used_as_pt(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	int level;
+
+	for (level = 0; level < 4; level++) {
+		if (vcpu->arch.pt_gfns[level] == (gfn_t)-1)
+			continue;
+		if (gfn == vcpu->arch.pt_gfns[level])
+			return true;
+	}
+
+	return false;
+}
+
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 {
 	gpa_t gpa = cr2;
+	gfn_t gfn;
 	pfn_t pfn;
-	unsigned int indirect_shadow_pages;
-
-	spin_lock(&vcpu->kvm->mmu_lock);
-	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (!indirect_shadow_pages)
-		return false;

 	if (!vcpu->arch.mmu.direct_map) {
 		/*
@@ -4781,13 +4788,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 			return true;
 	}

-	/*
-	 * if emulation was due to access to shadowed page table
-	 * and it failed try to unshadow page and re-enter the
-	 * guest to let CPU execute the instruction.
-	 */
-	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
-		return true;
+	gfn = gpa_to_gfn(gpa);

 	/*
 	 * Do not retry the unhandleable instruction if it faults on the
@@ -4795,13 +4796,38 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
 	 * retry instruction -> write #PF -> emulation fail -> retry
 	 * instruction -> ...
 	 */
-	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-	if (!is_error_noslot_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	/*
+	 * If the instruction failed on the error pfn, it can not be fixed,
+	 * report the error to userspace.
+	 */
+	if (is_error_noslot_pfn(pfn))
+		return false;
+
+	kvm_release_pfn_clean(pfn);
+
+	/* The instructions are well-emulated on direct mmu. */
+	if (vcpu->arch.mmu.direct_map) {
+		unsigned int indirect_shadow_pages;
+
+		spin_lock(&vcpu->kvm->mmu_lock);
+		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
+		spin_unlock(&vcpu->kvm->mmu_lock);
+
+		if (indirect_shadow_pages)
+			kvm_mmu_unprotect_page(vcpu->kvm, gfn);
+
 		return true;
 	}

-	return false;
+	kvm_mmu_unprotect_page(vcpu->kvm, gfn);
+
+	/* If the target gfn is used as page table, the fault can
+	 * not be avoided by unprotecting shadow page and it will
+	 * be reported to userspace.
+	 */
+	return !is_gfn_used_as_pt(vcpu, gfn);
 }

 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,


You can see we need to record more things in the vcpu struct (bool vs. gfn_t [4]),
and my patch can fold is_gfn_used_as_pt into an existing function, FNAME(is_self_change_mapping).

Hmm?

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v4 5/5] KVM: x86: improve reexecute_instruction
  2013-01-05  8:16     ` Xiao Guangrong
@ 2013-01-07 20:46       ` Marcelo Tosatti
  0 siblings, 0 replies; 11+ messages in thread
From: Marcelo Tosatti @ 2013-01-07 20:46 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Gleb Natapov, LKML, KVM

On Sat, Jan 05, 2013 at 04:16:37PM +0800, Xiao Guangrong wrote:
> On 01/05/2013 06:44 AM, Marcelo Tosatti wrote:
> 
> >> index b0a3678..44c6992 100644
> >> --- a/arch/x86/kvm/x86.c
> >> +++ b/arch/x86/kvm/x86.c
> >> @@ -4756,15 +4756,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
> >>  static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
> >>  {
> >>  	gpa_t gpa = cr2;
> >> +	gfn_t gfn;
> >>  	pfn_t pfn;
> >> -	unsigned int indirect_shadow_pages;
> >> -
> >> -	spin_lock(&vcpu->kvm->mmu_lock);
> >> -	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> >> -	spin_unlock(&vcpu->kvm->mmu_lock);
> >> -
> >> -	if (!indirect_shadow_pages)
> >> -		return false;
> > 
> > This renders the previous patch obsolete, pretty much (please fold).
> 
> Will try.
> 
> > 
> >>  	if (!vcpu->arch.mmu.direct_map) {
> >>  		/*
> >> @@ -4781,13 +4774,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
> >>  			return true;
> >>  	}
> >>
> >> -	/*
> >> -	 * if emulation was due to access to shadowed page table
> >> -	 * and it failed try to unshadow page and re-enter the
> >> -	 * guest to let CPU execute the instruction.
> >> -	 */
> >> -	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
> >> -		return true;
> >> +	gfn = gpa_to_gfn(gpa);
> >>
> >>  	/*
> >>  	 * Do not retry the unhandleable instruction if it faults on the
> >> @@ -4795,13 +4782,38 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
> >>  	 * retry instruction -> write #PF -> emulation fail -> retry
> >>  	 * instruction -> ...
> >>  	 */
> >> -	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
> >> -	if (!is_error_noslot_pfn(pfn)) {
> >> -		kvm_release_pfn_clean(pfn);
> >> +	pfn = gfn_to_pfn(vcpu->kvm, gfn);
> >> +
> >> +	/*
> >> +	 * If the instruction failed on the error pfn, it can not be fixed,
> >> +	 * report the error to userspace.
> >> +	 */
> >> +	if (is_error_noslot_pfn(pfn))
> >> +		return false;
> >> +
> >> +	kvm_release_pfn_clean(pfn);
> >> +
> >> +	/* The instructions are well-emulated on direct mmu. */
> >> +	if (vcpu->arch.mmu.direct_map) {
> > 
> > !direct_map?
> 
> No. This logic is, if it is direct mmu, we just unprotect the page shadowed by
> nested mmu, then let guest retry the instruction, no need to detect unhandlable
> instruction.
> 
> > 
> >> +		unsigned int indirect_shadow_pages;
> >> +
> >> +		spin_lock(&vcpu->kvm->mmu_lock);
> >> +		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> >> +		spin_unlock(&vcpu->kvm->mmu_lock);
> >> +
> >> +		if (indirect_shadow_pages)
> >> +			kvm_mmu_unprotect_page(vcpu->kvm, gfn);
> >> +
> >>  		return true;
> >>  	}
> >>
> >> -	return false;
> >> +	kvm_mmu_unprotect_page(vcpu->kvm, gfn);
> >> +
> >> +	/* If the target gfn is used as page table, the fault can
> >> +	 * not be avoided by unprotecting shadow page and it will
> >> +	 * be reported to userspace.
> >> +	 */
> >> +	return !vcpu->arch.target_gfn_is_pt;
> >>  }
> > 
> > The idea was
> > 
> > How about recording the gfn number for shadow pages that have been
> > shadowed in the current pagefault run? (which is cheap, compared to
> > shadowing these pages).
> > 
> > If failed instruction emulation is write to one of these gfns, then
> > fail.
> 
> If i understood correctly, i do not think it is simpler than the way in this
> patch.
> 
> There is the change to apply the idea:
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index c431b33..2163de8 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -502,6 +502,8 @@ struct kvm_vcpu_arch {
>  		u64 msr_val;
>  		struct gfn_to_hva_cache data;
>  	} pv_eoi;
> +
> +	gfn_t pt_gfns[4];
>  };
> 
>  struct kvm_lpage_info {
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 0453fa0..ac4210f 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -523,6 +523,18 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
>  	return false;
>  }
> 
> +static void FNAME(cache_pt_gfns)(struct kvm_vcpu *vcpu, struct guest_walker *walker)
> +{
> +	int level;
> +
> +	/* Reset all gfns to -1, then we can detect the levels which is not used in guest. */
> +	for (level = 0; level < 4; level++)
> +		vcpu->arch.pt_gfns[level] = (gfn_t)(-1);
> +
> +	for (level = walker->level; level <= walker->max_level; level++)
> +		vcpu->arch.pt_gfns[level - 1] = walker->table_gfn[level - 1];
> +}
> +
>  /*
>   * Page fault handler.  There are several causes for a page fault:
>   *   - there is no shadow pte for the guest pte
> @@ -576,6 +588,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
>  		return 0;
>  	}
> 
> +	 FNAME(cache_pt_gfns)(vcpu, &walker);
> +
>  	if (walker.level >= PT_DIRECTORY_LEVEL)
>  		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
>  		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b0a3678..b86ee24 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4753,18 +4753,25 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
>  	return r;
>  }
> 
> +static bool is_gfn_used_as_pt(struct kvm_vcpu *vcpu, gfn_t gfn)
> +{
> +	int level;
> +
> +	for (level = 0; level < 4; level++) {
> +		if (vcpu->arch.pt_gfns[level] == (gfn_t)-1)
> +			continue;
> +		if (gfn == vcpu->arch.pt_gfns[level])
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
>  static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>  {
>  	gpa_t gpa = cr2;
> +	gfn_t gfn;
>  	pfn_t pfn;
> -	unsigned int indirect_shadow_pages;
> -
> -	spin_lock(&vcpu->kvm->mmu_lock);
> -	indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> -	spin_unlock(&vcpu->kvm->mmu_lock);
> -
> -	if (!indirect_shadow_pages)
> -		return false;
> 
>  	if (!vcpu->arch.mmu.direct_map) {
>  		/*
> @@ -4781,13 +4788,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>  			return true;
>  	}
> 
> -	/*
> -	 * if emulation was due to access to shadowed page table
> -	 * and it failed try to unshadow page and re-enter the
> -	 * guest to let CPU execute the instruction.
> -	 */
> -	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
> -		return true;
> +	gfn = gpa_to_gfn(gpa);
> 
>  	/*
>  	 * Do not retry the unhandleable instruction if it faults on the
> @@ -4795,13 +4796,38 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, unsigned long cr2)
>  	 * retry instruction -> write #PF -> emulation fail -> retry
>  	 * instruction -> ...
>  	 */
> -	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
> -	if (!is_error_noslot_pfn(pfn)) {
> -		kvm_release_pfn_clean(pfn);
> +	pfn = gfn_to_pfn(vcpu->kvm, gfn);
> +
> +	/*
> +	 * If the instruction failed on the error pfn, it can not be fixed,
> +	 * report the error to userspace.
> +	 */
> +	if (is_error_noslot_pfn(pfn))
> +		return false;
> +
> +	kvm_release_pfn_clean(pfn);
> +
> +	/* The instructions are well-emulated on direct mmu. */
> +	if (vcpu->arch.mmu.direct_map) {
> +		unsigned int indirect_shadow_pages;
> +
> +		spin_lock(&vcpu->kvm->mmu_lock);
> +		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> +		spin_unlock(&vcpu->kvm->mmu_lock);
> +
> +		if (indirect_shadow_pages)
> +			kvm_mmu_unprotect_page(vcpu->kvm, gfn);
> +
>  		return true;
>  	}
> 
> -	return false;
> +	kvm_mmu_unprotect_page(vcpu->kvm, gfn);
> +
> +	/* If the target gfn is used as page table, the fault can
> +	 * not be avoided by unprotecting shadow page and it will
> +	 * be reported to userspace.
> +	 */
> +	return !is_gfn_used_as_pt(vcpu, gfn);
>  }
> 
>  static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
> 
> 
> You can see we need to record more things in the vcpu struct (bool vs. gfn_t [4])
> and my patch can fold is_gfn_used_as_pt into a existed function FNAME(is_self_change_mapping).
> 
> Hmm?

Yes, it's not needed. But it's not clear where target_gfn_is_pt is reset.
Also please use a more descriptive name, such as
"bool write_fault_to_shadow_pgtable".

Please use coding style which is easier for humans to parse, overall.

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2013-01-07 20:46 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-01-04 13:53 [PATCH v4 0/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
2013-01-04 13:54 ` [PATCH v4 1/5] KVM: MMU: fix Dirty bit missed if CR0.WP = 0 Xiao Guangrong
2013-01-04 13:54 ` [PATCH v4 2/5] KVM: MMU: fix infinite fault access retry Xiao Guangrong
2013-01-04 13:55 ` [PATCH v4 3/5] KVM: x86: clean up reexecute_instruction Xiao Guangrong
2013-01-04 22:21   ` Marcelo Tosatti
2013-01-05  7:20     ` Xiao Guangrong
2013-01-04 13:56 ` [PATCH v4 4/5] KVM: x86: let reexecute_instruction work for tdp Xiao Guangrong
2013-01-04 13:56 ` [PATCH v4 5/5] KVM: x86: improve reexecute_instruction Xiao Guangrong
2013-01-04 22:44   ` Marcelo Tosatti
2013-01-05  8:16     ` Xiao Guangrong
2013-01-07 20:46       ` Marcelo Tosatti

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox