public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic()
@ 2011-06-14 17:00 Takuya Yoshikawa
  2011-06-14 17:02 ` [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic() Takuya Yoshikawa
  2011-06-14 17:03 ` [PATCH 3/3] KVM: MMU: Use helpers to clean up walk_addr_generic() Takuya Yoshikawa
  0 siblings, 2 replies; 4+ messages in thread
From: Takuya Yoshikawa @ 2011-06-14 17:00 UTC (permalink / raw)
  To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo

From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>

Avoid a two-step jump to the error handling code.  This eliminates the use
of the variables present and rsvd_fault.

We also use the const type qualifier to show that write/user/fetch_fault
do not change in the function.

Both of these were suggested by Ingo Molnar.

Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
 v2: Rebased against next.

 arch/x86/kvm/paging_tmpl.h |   64 +++++++++++++++++++------------------------
 1 files changed, 28 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1caeb4d..137aa45 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
-	bool eperm, present, rsvd_fault;
-	int offset, write_fault, user_fault, fetch_fault;
-
-	write_fault = access & PFERR_WRITE_MASK;
-	user_fault = access & PFERR_USER_MASK;
-	fetch_fault = access & PFERR_FETCH_MASK;
+	bool eperm;
+	int offset;
+	const int write_fault = access & PFERR_WRITE_MASK;
+	const int user_fault  = access & PFERR_USER_MASK;
+	const int fetch_fault = access & PFERR_FETCH_MASK;
+	u16 errcode = 0;
 
 	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
 				     fetch_fault);
 walk:
-	present = true;
-	eperm = rsvd_fault = false;
+	eperm = false;
 	walker->level = mmu->root_level;
 	pte           = mmu->get_cr3(vcpu);
 
@@ -145,7 +144,7 @@ walk:
 		pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
 		if (!is_present_gpte(pte)) {
-			present = false;
+			errcode |= PFERR_PRESENT_MASK;
 			goto error;
 		}
 		--walker->level;
@@ -171,34 +170,34 @@ walk:
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
 					      PFERR_USER_MASK|PFERR_WRITE_MASK);
 		if (unlikely(real_gfn == UNMAPPED_GVA)) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 		real_gfn = gpa_to_gfn(real_gfn);
 
 		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
 		if (unlikely(kvm_is_error_hva(host_addr))) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
 		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
 		if (unlikely(!is_present_gpte(pte))) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
 					      walker->level))) {
-			rsvd_fault = true;
-			break;
+			errcode |= PFERR_RSVD_MASK;
+			goto error;
 		}
 
 		if (unlikely(write_fault && !is_writable_pte(pte)
@@ -213,16 +212,15 @@ walk:
 			eperm = true;
 #endif
 
-		if (!eperm && !rsvd_fault
-		    && unlikely(!(pte & PT_ACCESSED_MASK))) {
+		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
 			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
 						  pte, pte|PT_ACCESSED_MASK);
 			if (unlikely(ret < 0)) {
-				present = false;
-				break;
+				errcode |= PFERR_PRESENT_MASK;
+				goto error;
 			} else if (ret)
 				goto walk;
 
@@ -276,7 +274,7 @@ walk:
 		--walker->level;
 	}
 
-	if (unlikely(!present || eperm || rsvd_fault))
+	if (unlikely(eperm))
 		goto error;
 
 	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
@@ -286,7 +284,7 @@ walk:
 		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
 					  pte, pte|PT_DIRTY_MASK);
 		if (unlikely(ret < 0)) {
-			present = false;
+			errcode |= PFERR_PRESENT_MASK;
 			goto error;
 		} else if (ret)
 			goto walk;
@@ -303,20 +301,14 @@ walk:
 	return 1;
 
 error:
-	walker->fault.vector = PF_VECTOR;
-	walker->fault.error_code_valid = true;
-	walker->fault.error_code = 0;
-	if (present)
-		walker->fault.error_code |= PFERR_PRESENT_MASK;
-
-	walker->fault.error_code |= write_fault | user_fault;
-
+	errcode |= write_fault | user_fault;
 	if (fetch_fault && (mmu->nx ||
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
-		walker->fault.error_code |= PFERR_FETCH_MASK;
-	if (rsvd_fault)
-		walker->fault.error_code |= PFERR_RSVD_MASK;
+		errcode |= PFERR_FETCH_MASK;
 
+	walker->fault.vector = PF_VECTOR;
+	walker->fault.error_code_valid = true;
+	walker->fault.error_code = errcode;
 	walker->fault.address = addr;
 	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
-- 
1.7.4.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic()
  2011-06-14 17:00 [PATCH v2 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
@ 2011-06-14 17:02 ` Takuya Yoshikawa
  2011-06-14 17:03 ` [PATCH 3/3] KVM: MMU: Use helpers to clean up walk_addr_generic() Takuya Yoshikawa
  1 sibling, 0 replies; 4+ messages in thread
From: Takuya Yoshikawa @ 2011-06-14 17:02 UTC (permalink / raw)
  To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo

From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>

The current name does not explain the meaning well.  So give it a better
name "retry_walk" to show that we are retrying the walk.

This was suggested by Ingo Molnar.

Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
 arch/x86/kvm/paging_tmpl.h |    6 +++---
 1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 137aa45..92fe275 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -134,7 +134,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
 				     fetch_fault);
-walk:
+retry_walk:
 	eperm = false;
 	walker->level = mmu->root_level;
 	pte           = mmu->get_cr3(vcpu);
@@ -222,7 +222,7 @@ walk:
 				errcode |= PFERR_PRESENT_MASK;
 				goto error;
 			} else if (ret)
-				goto walk;
+				goto retry_walk;
 
 			mark_page_dirty(vcpu->kvm, table_gfn);
 			pte |= PT_ACCESSED_MASK;
@@ -287,7 +287,7 @@ walk:
 			errcode |= PFERR_PRESENT_MASK;
 			goto error;
 		} else if (ret)
-			goto walk;
+			goto retry_walk;
 
 		mark_page_dirty(vcpu->kvm, table_gfn);
 		pte |= PT_DIRTY_MASK;
-- 
1.7.4.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 3/3] KVM: MMU: Use helpers to clean up walk_addr_generic()
  2011-06-14 17:00 [PATCH v2 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
  2011-06-14 17:02 ` [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic() Takuya Yoshikawa
@ 2011-06-14 17:03 ` Takuya Yoshikawa
  2011-06-20 12:02   ` Avi Kivity
  1 sibling, 1 reply; 4+ messages in thread
From: Takuya Yoshikawa @ 2011-06-14 17:03 UTC (permalink / raw)
  To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo

From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>

Introduce two new helpers: set_accessed_bit() and is_last_gpte().

These names were suggested by Ingo and Avi.

Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
 arch/x86/kvm/paging_tmpl.h |   57 ++++++++++++++++++++++++++++++++-----------
 1 files changed, 42 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92fe275..d655a4b6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -113,6 +113,43 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 	return access;
 }
 
+static int FNAME(set_accessed_bit)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				   gfn_t table_gfn, unsigned index,
+				   pt_element_t __user *ptep_user,
+				   pt_element_t *ptep)
+{
+	int ret;
+
+	trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(*ptep));
+	ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+				  *ptep, *ptep|PT_ACCESSED_MASK);
+	if (unlikely(ret))
+		return ret;
+
+	mark_page_dirty(vcpu->kvm, table_gfn);
+	*ptep |= PT_ACCESSED_MASK;
+
+	return 0;
+}
+
+static bool FNAME(is_last_gpte)(struct guest_walker *walker,
+				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				pt_element_t gpte)
+{
+	if (walker->level == PT_PAGE_TABLE_LEVEL)
+		return true;
+
+	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
+	    (PTTYPE == 64 || is_pse(vcpu)))
+		return true;
+
+	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
+	    (mmu->root_level == PT64_ROOT_LEVEL))
+		return true;
+
+	return false;
+}
+
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -214,31 +251,21 @@ retry_walk:
 
 		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
-			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
-						       sizeof(pte));
-			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
-						  pte, pte|PT_ACCESSED_MASK);
-			if (unlikely(ret < 0)) {
+
+			ret = FNAME(set_accessed_bit)(vcpu, mmu, table_gfn,
+						      index, ptep_user, &pte);
+			if (ret < 0) {
 				errcode |= PFERR_PRESENT_MASK;
 				goto error;
 			} else if (ret)
 				goto retry_walk;
-
-			mark_page_dirty(vcpu->kvm, table_gfn);
-			pte |= PT_ACCESSED_MASK;
 		}
 
 		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 
 		walker->ptes[walker->level - 1] = pte;
 
-		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
-		    ((walker->level == PT_DIRECTORY_LEVEL) &&
-				is_large_pte(pte) &&
-				(PTTYPE == 64 || is_pse(vcpu))) ||
-		    ((walker->level == PT_PDPE_LEVEL) &&
-				is_large_pte(pte) &&
-				mmu->root_level == PT64_ROOT_LEVEL)) {
+		if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
 			int lvl = walker->level;
 			gpa_t real_gpa;
 			gfn_t gfn;
-- 
1.7.4.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 3/3] KVM: MMU: Use helpers to clean up walk_addr_generic()
  2011-06-14 17:03 ` [PATCH 3/3] KVM: MMU: Use helpers to clean up walk_addr_generic() Takuya Yoshikawa
@ 2011-06-20 12:02   ` Avi Kivity
  0 siblings, 0 replies; 4+ messages in thread
From: Avi Kivity @ 2011-06-20 12:02 UTC (permalink / raw)
  To: Takuya Yoshikawa; +Cc: mtosatti, kvm, yoshikawa.takuya, mingo

On 06/14/2011 08:03 PM, Takuya Yoshikawa wrote:
> From: Takuya Yoshikawa<yoshikawa.takuya@oss.ntt.co.jp>
>
> Introduce two new helpers: set_accessed_bit() and is_last_gpte().
>
> These names were suggested by Ingo and Avi.
>
> Cc: Ingo Molnar<mingo@elte.hu>
> Signed-off-by: Takuya Yoshikawa<yoshikawa.takuya@oss.ntt.co.jp>
> ---
>   arch/x86/kvm/paging_tmpl.h |   57 ++++++++++++++++++++++++++++++++-----------
>   1 files changed, 42 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 92fe275..d655a4b6 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -113,6 +113,43 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
>   	return access;
>   }
>
> +static int FNAME(set_accessed_bit)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> +				   gfn_t table_gfn, unsigned index,
> +				   pt_element_t __user *ptep_user,
> +				   pt_element_t *ptep)
> +{
> +	int ret;
> +
> +	trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(*ptep));
> +	ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
> +				  *ptep, *ptep|PT_ACCESSED_MASK);
> +	if (unlikely(ret))
> +		return ret;
> +
> +	mark_page_dirty(vcpu->kvm, table_gfn);
> +	*ptep |= PT_ACCESSED_MASK;
> +
> +	return 0;
> +}


I don't think this one is worthwhile, it takes 7 parameters!  If there's 
so much communication between caller and callee, it means they are too 
heavily tied up.


> +
> +static bool FNAME(is_last_gpte)(struct guest_walker *walker,
> +				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> +				pt_element_t gpte)
> +{
> +	if (walker->level == PT_PAGE_TABLE_LEVEL)
> +		return true;
> +
> +	if ((walker->level == PT_DIRECTORY_LEVEL)&&  is_large_pte(gpte)&&
> +	    (PTTYPE == 64 || is_pse(vcpu)))
> +		return true;
> +
> +	if ((walker->level == PT_PDPE_LEVEL)&&  is_large_pte(gpte)&&
> +	    (mmu->root_level == PT64_ROOT_LEVEL))
> +		return true;
> +
> +	return false;
> +}
> +

This one is much better.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2011-06-20 12:02 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-06-14 17:00 [PATCH v2 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
2011-06-14 17:02 ` [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic() Takuya Yoshikawa
2011-06-14 17:03 ` [PATCH 3/3] KVM: MMU: Use helpers to clean up walk_addr_generic() Takuya Yoshikawa
2011-06-20 12:02   ` Avi Kivity

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox