* [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4
@ 2011-06-30 16:33 Takuya Yoshikawa
2011-06-30 16:34 ` [PATCH 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: Takuya Yoshikawa @ 2011-06-30 16:33 UTC (permalink / raw)
To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo
This is the v4 of my walk_addr_generic() cleanup.
Changelog v3 -> v4: fixed v3's reversed present mask.
This time, I have tested the patch set with kvm-unit-tests/x86/access.flat.
npt=0
---
enabling apic
starting test
run
1572866 tests, 0 failures
---
npt=1
---
enabling apic
starting test
run
1572866 tests, 0 failures
---
Thanks,
Takuya
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic()
2011-06-30 16:33 [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Takuya Yoshikawa
@ 2011-06-30 16:34 ` Takuya Yoshikawa
2011-06-30 16:36 ` [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic() Takuya Yoshikawa
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Takuya Yoshikawa @ 2011-06-30 16:34 UTC (permalink / raw)
To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo
From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Avoid a two-step jump to the error handling part. This eliminates the use
of the variables present and rsvd_fault.
We also use the const type qualifier to show that write/user/fetch_fault
do not change in the function.
Both of these were suggested by Ingo Molnar.
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
arch/x86/kvm/paging_tmpl.h | 82 +++++++++++++++++---------------------------
1 files changed, 32 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1caeb4d..f0746d2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
- bool eperm, present, rsvd_fault;
- int offset, write_fault, user_fault, fetch_fault;
-
- write_fault = access & PFERR_WRITE_MASK;
- user_fault = access & PFERR_USER_MASK;
- fetch_fault = access & PFERR_FETCH_MASK;
+ bool eperm;
+ int offset;
+ const int write_fault = access & PFERR_WRITE_MASK;
+ const int user_fault = access & PFERR_USER_MASK;
+ const int fetch_fault = access & PFERR_FETCH_MASK;
+ u16 errcode = 0;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
walk:
- present = true;
- eperm = rsvd_fault = false;
+ eperm = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
@@ -144,10 +143,8 @@ walk:
if (walker->level == PT32E_ROOT_LEVEL) {
pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
- if (!is_present_gpte(pte)) {
- present = false;
+ if (!is_present_gpte(pte))
goto error;
- }
--walker->level;
}
#endif
@@ -170,35 +167,27 @@ walk:
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
PFERR_USER_MASK|PFERR_WRITE_MASK);
- if (unlikely(real_gfn == UNMAPPED_GVA)) {
- present = false;
- break;
- }
+ if (unlikely(real_gfn == UNMAPPED_GVA))
+ goto error;
real_gfn = gpa_to_gfn(real_gfn);
host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
- if (unlikely(kvm_is_error_hva(host_addr))) {
- present = false;
- break;
- }
+ if (unlikely(kvm_is_error_hva(host_addr)))
+ goto error;
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
- if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
- present = false;
- break;
- }
+ if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+ goto error;
trace_kvm_mmu_paging_element(pte, walker->level);
- if (unlikely(!is_present_gpte(pte))) {
- present = false;
- break;
- }
+ if (unlikely(!is_present_gpte(pte)))
+ goto error;
if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
walker->level))) {
- rsvd_fault = true;
- break;
+ errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+ goto error;
}
if (unlikely(write_fault && !is_writable_pte(pte)
@@ -213,17 +202,15 @@ walk:
eperm = true;
#endif
- if (!eperm && !rsvd_fault
- && unlikely(!(pte & PT_ACCESSED_MASK))) {
+ if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
int ret;
trace_kvm_mmu_set_accessed_bit(table_gfn, index,
sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_ACCESSED_MASK);
- if (unlikely(ret < 0)) {
- present = false;
- break;
- } else if (ret)
+ if (unlikely(ret < 0))
+ goto error;
+ else if (ret)
goto walk;
mark_page_dirty(vcpu->kvm, table_gfn);
@@ -276,8 +263,10 @@ walk:
--walker->level;
}
- if (unlikely(!present || eperm || rsvd_fault))
+ if (unlikely(eperm)) {
+ errcode |= PFERR_PRESENT_MASK;
goto error;
+ }
if (write_fault && unlikely(!is_dirty_gpte(pte))) {
int ret;
@@ -285,10 +274,9 @@ walk:
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_DIRTY_MASK);
- if (unlikely(ret < 0)) {
- present = false;
+ if (unlikely(ret < 0))
goto error;
- } else if (ret)
+ else if (ret)
goto walk;
mark_page_dirty(vcpu->kvm, table_gfn);
@@ -303,20 +291,14 @@ walk:
return 1;
error:
- walker->fault.vector = PF_VECTOR;
- walker->fault.error_code_valid = true;
- walker->fault.error_code = 0;
- if (present)
- walker->fault.error_code |= PFERR_PRESENT_MASK;
-
- walker->fault.error_code |= write_fault | user_fault;
-
+ errcode |= write_fault | user_fault;
if (fetch_fault && (mmu->nx ||
kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
- walker->fault.error_code |= PFERR_FETCH_MASK;
- if (rsvd_fault)
- walker->fault.error_code |= PFERR_RSVD_MASK;
+ errcode |= PFERR_FETCH_MASK;
+ walker->fault.vector = PF_VECTOR;
+ walker->fault.error_code_valid = true;
+ walker->fault.error_code = errcode;
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
--
1.7.4.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic()
2011-06-30 16:33 [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Takuya Yoshikawa
2011-06-30 16:34 ` [PATCH 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
@ 2011-06-30 16:36 ` Takuya Yoshikawa
2011-06-30 16:37 ` [PATCH 3/3] KVM: MMU: Introduce is_last_gpte() to clean up walk_addr_generic() Takuya Yoshikawa
2011-07-07 9:16 ` [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Marcelo Tosatti
3 siblings, 0 replies; 5+ messages in thread
From: Takuya Yoshikawa @ 2011-06-30 16:36 UTC (permalink / raw)
To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo
From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
The current name does not explain the meaning well. So give it a better
name "retry_walk" to show that we are trying the walk again.
This was suggested by Ingo Molnar.
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
arch/x86/kvm/paging_tmpl.h | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index f0746d2..9c0afba 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -134,7 +134,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
-walk:
+retry_walk:
eperm = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
@@ -211,7 +211,7 @@ walk:
if (unlikely(ret < 0))
goto error;
else if (ret)
- goto walk;
+ goto retry_walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_ACCESSED_MASK;
@@ -277,7 +277,7 @@ walk:
if (unlikely(ret < 0))
goto error;
else if (ret)
- goto walk;
+ goto retry_walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_DIRTY_MASK;
--
1.7.4.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 3/3] KVM: MMU: Introduce is_last_gpte() to clean up walk_addr_generic()
2011-06-30 16:33 [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Takuya Yoshikawa
2011-06-30 16:34 ` [PATCH 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
2011-06-30 16:36 ` [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic() Takuya Yoshikawa
@ 2011-06-30 16:37 ` Takuya Yoshikawa
2011-07-07 9:16 ` [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Marcelo Tosatti
3 siblings, 0 replies; 5+ messages in thread
From: Takuya Yoshikawa @ 2011-06-30 16:37 UTC (permalink / raw)
To: avi, mtosatti; +Cc: kvm, yoshikawa.takuya, mingo
From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Suggested by Ingo and Avi.
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
arch/x86/kvm/paging_tmpl.h | 26 +++++++++++++++++++-------
1 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9c0afba..1e1c244 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -113,6 +113,24 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
return access;
}
+static bool FNAME(is_last_gpte)(struct guest_walker *walker,
+ struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ pt_element_t gpte)
+{
+ if (walker->level == PT_PAGE_TABLE_LEVEL)
+ return true;
+
+ if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
+ (PTTYPE == 64 || is_pse(vcpu)))
+ return true;
+
+ if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
+ (mmu->root_level == PT64_ROOT_LEVEL))
+ return true;
+
+ return false;
+}
+
/*
* Fetch a guest pte for a guest virtual address
*/
@@ -221,13 +239,7 @@ retry_walk:
walker->ptes[walker->level - 1] = pte;
- if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
- ((walker->level == PT_DIRECTORY_LEVEL) &&
- is_large_pte(pte) &&
- (PTTYPE == 64 || is_pse(vcpu))) ||
- ((walker->level == PT_PDPE_LEVEL) &&
- is_large_pte(pte) &&
- mmu->root_level == PT64_ROOT_LEVEL)) {
+ if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
int lvl = walker->level;
gpa_t real_gpa;
gfn_t gfn;
--
1.7.4.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4
2011-06-30 16:33 [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Takuya Yoshikawa
` (2 preceding siblings ...)
2011-06-30 16:37 ` [PATCH 3/3] KVM: MMU: Introduce is_last_gpte() to clean up walk_addr_generic() Takuya Yoshikawa
@ 2011-07-07 9:16 ` Marcelo Tosatti
3 siblings, 0 replies; 5+ messages in thread
From: Marcelo Tosatti @ 2011-07-07 9:16 UTC (permalink / raw)
To: Takuya Yoshikawa; +Cc: avi, kvm, yoshikawa.takuya, mingo
On Fri, Jul 01, 2011 at 01:33:20AM +0900, Takuya Yoshikawa wrote:
> This is the v4 of my walk_addr_generic() cleanup.
>
> Changelog v3-v4: fixed v3's reversed present mask.
>
> This time, I have tested the patch set with kvm-unit-tests/x86/access.flat.
Applied, thanks.
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2011-07-07 10:59 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-06-30 16:33 [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Takuya Yoshikawa
2011-06-30 16:34 ` [PATCH 1/3] KVM: MMU: Clean up the error handling of walk_addr_generic() Takuya Yoshikawa
2011-06-30 16:36 ` [PATCH 2/3] KVM: MMU: Rename the walk label in walk_addr_generic() Takuya Yoshikawa
2011-06-30 16:37 ` [PATCH 3/3] KVM: MMU: Introduce is_last_gpte() to clean up walk_addr_generic() Takuya Yoshikawa
2011-07-07 9:16 ` [PATCH 0/3] KVM: MMU: Clean up walk_addr_generic() v4 Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox