public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr
@ 2008-09-01 11:41 yasker
  2008-09-01 12:03 ` Avi Kivity
  0 siblings, 1 reply; 2+ messages in thread
From: yasker @ 2008-09-01 11:41 UTC (permalink / raw)
  To: Avi Kivity, kvm; +Cc: Sheng Yang

From: Sheng Yang <sheng.yang@intel.com>

EPT is 4-level by default in 32-bit PAE mode (48 bits), but the addr
parameter of kvm_shadow_walk->entry() only accepts an unsigned long as
the virtual address, which is 32 bits wide in 32-bit PAE mode. This
results in a SHADOW_PT_INDEX() overflow when trying to fetch the level
4 index.

Fix it by extending kvm_shadow_walk->entry() to accept a 64-bit addr
parameter.

Signed-off-by: Sheng Yang <sheng.yang@intel.com>
---
 arch/x86/kvm/mmu.c         |   10 +++++-----
 arch/x86/kvm/paging_tmpl.h |    4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f4ab99f..3062650 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -144,7 +144,7 @@ struct kvm_rmap_desc {
 
 struct kvm_shadow_walk {
 	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
-		     gva_t addr, u64 *spte, int level);
+		     u64 addr, u64 *spte, int level);
 };
 
 static struct kmem_cache *pte_chain_cache;
@@ -937,7 +937,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 }
 
 static int walk_shadow(struct kvm_shadow_walk *walker,
-		       struct kvm_vcpu *vcpu, gva_t addr)
+		       struct kvm_vcpu *vcpu, u64 addr)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -1267,7 +1267,7 @@ struct direct_shadow_walk {
 
 static int direct_map_entry(struct kvm_shadow_walk *_walk,
 			    struct kvm_vcpu *vcpu,
-			    gva_t addr, u64 *sptep, int level)
+			    u64 addr, u64 *sptep, int level)
 {
 	struct direct_shadow_walk *walk =
 		container_of(_walk, struct direct_shadow_walk, walker);
@@ -1286,7 +1286,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
 
 	if (*sptep == shadow_trap_nonpresent_pte) {
 		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
+		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
 				      1, ACC_ALL, sptep);
 		if (!sp) {
 			pgprintk("nonpaging_map: ENOMEM\n");
@@ -1314,7 +1314,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		.pt_write = 0,
 	};
 
-	r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
+	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
 	if (r < 0)
 		return r;
 	return walker.pt_write;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b7064e1..b671f61 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -286,7 +286,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, gva_t addr,
+				    struct kvm_vcpu *vcpu, u64 addr,
 				    u64 *sptep, int level)
 {
 	struct shadow_walker *sw =
@@ -326,7 +326,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		metaphysical = 0;
 		table_gfn = gw->table_gfn[level - 2];
 	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
 				       metaphysical, access, sptep);
 	if (!metaphysical) {
 		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
-- 
1.5.3


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr
  2008-09-01 11:41 [PATCH] KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr yasker
@ 2008-09-01 12:03 ` Avi Kivity
  0 siblings, 0 replies; 2+ messages in thread
From: Avi Kivity @ 2008-09-01 12:03 UTC (permalink / raw)
  To: yasker; +Cc: kvm, Sheng Yang

yasker@gmail.com wrote:
> From: Sheng Yang <sheng.yang@intel.com>
>
> EPT is 4-level by default in 32-bit PAE mode (48 bits), but the addr
> parameter of kvm_shadow_walk->entry() only accepts an unsigned long as
> the virtual address, which is 32 bits wide in 32-bit PAE mode. This
> results in a SHADOW_PT_INDEX() overflow when trying to fetch the level
> 4 index.
>
> Fix it by extending kvm_shadow_walk->entry() to accept a 64-bit addr
> parameter.
>   

Applied, thanks.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2008-09-01 12:03 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-09-01 11:41 [PATCH] KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr yasker
2008-09-01 12:03 ` Avi Kivity

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox