* [PATCH 0/5] for_each_shadow_entry
From: Avi Kivity @ 2008-12-25 13:23 UTC
To: kvm; +Cc: Marcelo Tosatti
This patchset replaces walk_shadow(), which calls a callback for each
shadow pte that maps a guest virtual address, with an equivalent for_each
style construct. The benefits are fewer thunks and smaller code.
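For illustration, the shape of the conversion (a minimal sketch; frob() and
its state field are hypothetical stand-ins for the real walker users
converted later in the series):

/* Before: each user wraps its state and a callback around the walker. */
struct frob_walk {
	struct kvm_shadow_walk walker;	/* embedded callback table */
	int state;			/* per-walk state */
};

static int frob_entry(struct kvm_shadow_walk *_w, struct kvm_vcpu *vcpu,
		      u64 addr, u64 *sptep, int level)
{
	struct frob_walk *w = container_of(_w, struct frob_walk, walker);

	/* ... act on sptep at this level; a nonzero return stops the walk ... */
	return 0;
}

/* After: a plain loop; state lives in locals and early exit is a break. */
static void frob(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;

	for_each_shadow_entry(vcpu, addr, iterator) {
		/* ... act on iterator.sptep at iterator.level ... */
	}
}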
Please review.
Avi Kivity (5):
KVM: MMU: Add for_each_shadow_entry(), a simpler alternative to
walk_shadow()
KVM: MMU: Use for_each_shadow_entry() in __direct_map()
KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in fetch()
KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in
invlpg()
KVM: MMU: Drop walk_shadow()
arch/x86/kvm/mmu.c | 150 ++++++++++++++-----------------
arch/x86/kvm/paging_tmpl.h | 209 +++++++++++++++++++-------------------------
2 files changed, 157 insertions(+), 202 deletions(-)
* [PATCH 1/5] KVM: MMU: Add for_each_shadow_entry(), a simpler alternative to walk_shadow()
From: Avi Kivity @ 2008-12-25 13:23 UTC
To: kvm; +Cc: Marcelo Tosatti
Using a for_each loop style removes the need to write callbacks and
nasty casts.
Implement walk_shadow() using for_each_shadow_entry().
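As a sketch of the contract (using the helpers added below), the macro
expands to an ordinary for loop:

	struct kvm_shadow_walk_iterator iterator;

	for (shadow_walk_init(&iterator, vcpu, addr);	/* start at the shadow root */
	     shadow_walk_okay(&iterator);	/* false once below PT_PAGE_TABLE_LEVEL */
	     shadow_walk_next(&iterator)) {	/* descend through *iterator.sptep */
		/* iterator.sptep points at the spte mapping addr at iterator.level */
	}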
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 69 +++++++++++++++++++++++++++++++++++++---------------
1 files changed, 49 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3b86df6..3248a3e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -150,6 +150,20 @@ struct kvm_shadow_walk {
u64 addr, u64 *spte, int level);
};
+struct kvm_shadow_walk_iterator {
+ u64 addr;
+ hpa_t shadow_addr;
+ int level;
+ u64 *sptep;
+ unsigned index;
+};
+
+#define for_each_shadow_entry(_vcpu, _addr, _walker) \
+ for (shadow_walk_init(&(_walker), _vcpu, _addr); \
+ shadow_walk_okay(&(_walker)); \
+ shadow_walk_next(&(_walker)))
+
+
struct kvm_unsync_walk {
int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};
@@ -1254,33 +1268,48 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
return sp;
}
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+ struct kvm_vcpu *vcpu, u64 addr)
+{
+ iterator->addr = addr;
+ iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+ iterator->level = vcpu->arch.mmu.shadow_root_level;
+ if (iterator->level == PT32E_ROOT_LEVEL) {
+ iterator->shadow_addr
+ = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+ iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
+ --iterator->level;
+ if (!iterator->shadow_addr)
+ iterator->level = 0;
+ }
+}
+
+static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
+{
+ if (iterator->level < PT_PAGE_TABLE_LEVEL)
+ return false;
+ iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
+ iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
+ return true;
+}
+
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+ iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+ --iterator->level;
+}
+
static int walk_shadow(struct kvm_shadow_walk *walker,
struct kvm_vcpu *vcpu, u64 addr)
{
- hpa_t shadow_addr;
- int level;
+ struct kvm_shadow_walk_iterator iterator;
int r;
- u64 *sptep;
- unsigned index;
-
- shadow_addr = vcpu->arch.mmu.root_hpa;
- level = vcpu->arch.mmu.shadow_root_level;
- if (level == PT32E_ROOT_LEVEL) {
- shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
- shadow_addr &= PT64_BASE_ADDR_MASK;
- if (!shadow_addr)
- return 1;
- --level;
- }
- while (level >= PT_PAGE_TABLE_LEVEL) {
- index = SHADOW_PT_INDEX(addr, level);
- sptep = ((u64 *)__va(shadow_addr)) + index;
- r = walker->entry(walker, vcpu, addr, sptep, level);
+ for_each_shadow_entry(vcpu, addr, iterator) {
+ r = walker->entry(walker, vcpu, addr,
+ iterator.sptep, iterator.level);
if (r)
return r;
- shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
- --level;
}
return 0;
}
--
1.6.0.6
* [PATCH 2/5] KVM: MMU: Use for_each_shadow_entry() in __direct_map()
From: Avi Kivity @ 2008-12-25 13:23 UTC
To: kvm; +Cc: Marcelo Tosatti
This eliminates a callback and a useless wrapper structure.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 83 ++++++++++++++++++---------------------------------
1 files changed, 29 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3248a3e..b4b79b0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1841,67 +1841,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
-struct direct_shadow_walk {
- struct kvm_shadow_walk walker;
- pfn_t pfn;
- int write;
- int largepage;
- int pt_write;
-};
-
-static int direct_map_entry(struct kvm_shadow_walk *_walk,
- struct kvm_vcpu *vcpu,
- u64 addr, u64 *sptep, int level)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+ int largepage, gfn_t gfn, pfn_t pfn)
{
- struct direct_shadow_walk *walk =
- container_of(_walk, struct direct_shadow_walk, walker);
+ struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
+ int pt_write = 0;
gfn_t pseudo_gfn;
- gfn_t gfn = addr >> PAGE_SHIFT;
-
- if (level == PT_PAGE_TABLE_LEVEL
- || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
- mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
- 0, walk->write, 1, &walk->pt_write,
- walk->largepage, 0, gfn, walk->pfn, false);
- ++vcpu->stat.pf_fixed;
- return 1;
- }
- if (*sptep == shadow_trap_nonpresent_pte) {
- pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
- sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
- 1, ACC_ALL, sptep);
- if (!sp) {
- pgprintk("nonpaging_map: ENOMEM\n");
- kvm_release_pfn_clean(walk->pfn);
- return -ENOMEM;
+ for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+ if (iterator.level == PT_PAGE_TABLE_LEVEL
+ || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+ mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+ 0, write, 1, &pt_write,
+ largepage, 0, gfn, pfn, false);
+ ++vcpu->stat.pf_fixed;
+ break;
}
- set_shadow_pte(sptep,
- __pa(sp->spt)
- | PT_PRESENT_MASK | PT_WRITABLE_MASK
- | shadow_user_mask | shadow_x_mask);
- }
- return 0;
-}
+ if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+ pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+ sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
+ iterator.level - 1,
+ 1, ACC_ALL, iterator.sptep);
+ if (!sp) {
+ pgprintk("nonpaging_map: ENOMEM\n");
+ kvm_release_pfn_clean(pfn);
+ return -ENOMEM;
+ }
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
- int largepage, gfn_t gfn, pfn_t pfn)
-{
- int r;
- struct direct_shadow_walk walker = {
- .walker = { .entry = direct_map_entry, },
- .pfn = pfn,
- .largepage = largepage,
- .write = write,
- .pt_write = 0,
- };
-
- r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
- if (r < 0)
- return r;
- return walker.pt_write;
+ set_shadow_pte(iterator.sptep,
+ __pa(sp->spt)
+ | PT_PRESENT_MASK | PT_WRITABLE_MASK
+ | shadow_user_mask | shadow_x_mask);
+ }
+ }
+ return pt_write;
}
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
--
1.6.0.6
* [PATCH 3/5] KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in fetch()
From: Avi Kivity @ 2008-12-25 13:23 UTC
To: kvm; +Cc: Marcelo Tosatti
This effectively reverts to the pre-walk_shadow() version, but now
using the reusable for_each_shadow_entry().
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/paging_tmpl.h | 128 ++++++++++++++++++++------------------------
1 files changed, 58 insertions(+), 70 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9fd78b6..69c7e33 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -283,91 +283,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
*/
-static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
- struct kvm_vcpu *vcpu, u64 addr,
- u64 *sptep, int level)
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+ struct guest_walker *gw,
+ int user_fault, int write_fault, int largepage,
+ int *ptwrite, pfn_t pfn)
{
- struct shadow_walker *sw =
- container_of(_sw, struct shadow_walker, walker);
- struct guest_walker *gw = sw->guest_walker;
unsigned access = gw->pt_access;
struct kvm_mmu_page *shadow_page;
- u64 spte;
+ u64 spte, *sptep;
int metaphysical;
gfn_t table_gfn;
int r;
+ int level;
pt_element_t curr_pte;
+ struct kvm_shadow_walk_iterator iterator;
- if (level == PT_PAGE_TABLE_LEVEL
- || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
- mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
- sw->user_fault, sw->write_fault,
- gw->ptes[gw->level-1] & PT_DIRTY_MASK,
- sw->ptwrite, sw->largepage,
- gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
- gw->gfn, sw->pfn, false);
- sw->sptep = sptep;
- return 1;
- }
-
- if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
- return 0;
-
- if (is_large_pte(*sptep)) {
- set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
- kvm_flush_remote_tlbs(vcpu->kvm);
- rmap_remove(vcpu->kvm, sptep);
- }
+ if (!is_present_pte(gw->ptes[gw->level - 1]))
+ return NULL;
- if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
- metaphysical = 1;
- if (!is_dirty_pte(gw->ptes[level - 1]))
- access &= ~ACC_WRITE_MASK;
- table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
- } else {
- metaphysical = 0;
- table_gfn = gw->table_gfn[level - 2];
- }
- shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
- metaphysical, access, sptep);
- if (!metaphysical) {
- r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
- &curr_pte, sizeof(curr_pte));
- if (r || curr_pte != gw->ptes[level - 2]) {
- kvm_mmu_put_page(shadow_page, sptep);
- kvm_release_pfn_clean(sw->pfn);
- sw->sptep = NULL;
- return 1;
+ for_each_shadow_entry(vcpu, addr, iterator) {
+ level = iterator.level;
+ sptep = iterator.sptep;
+ if (level == PT_PAGE_TABLE_LEVEL
+ || (largepage && level == PT_DIRECTORY_LEVEL)) {
+ mmu_set_spte(vcpu, sptep, access,
+ gw->pte_access & access,
+ user_fault, write_fault,
+ gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+ ptwrite, largepage,
+ gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
+ gw->gfn, pfn, false);
+ break;
}
- }
- spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
- | PT_WRITABLE_MASK | PT_USER_MASK;
- *sptep = spte;
- return 0;
-}
+ if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+ continue;
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
- struct guest_walker *guest_walker,
- int user_fault, int write_fault, int largepage,
- int *ptwrite, pfn_t pfn)
-{
- struct shadow_walker walker = {
- .walker = { .entry = FNAME(shadow_walk_entry), },
- .guest_walker = guest_walker,
- .user_fault = user_fault,
- .write_fault = write_fault,
- .largepage = largepage,
- .ptwrite = ptwrite,
- .pfn = pfn,
- };
+ if (is_large_pte(*sptep)) {
+ set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ rmap_remove(vcpu->kvm, sptep);
+ }
- if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
- return NULL;
+ if (level == PT_DIRECTORY_LEVEL
+ && gw->level == PT_DIRECTORY_LEVEL) {
+ metaphysical = 1;
+ if (!is_dirty_pte(gw->ptes[level - 1]))
+ access &= ~ACC_WRITE_MASK;
+ table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+ } else {
+ metaphysical = 0;
+ table_gfn = gw->table_gfn[level - 2];
+ }
+ shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+ metaphysical, access, sptep);
+ if (!metaphysical) {
+ r = kvm_read_guest_atomic(vcpu->kvm,
+ gw->pte_gpa[level - 2],
+ &curr_pte, sizeof(curr_pte));
+ if (r || curr_pte != gw->ptes[level - 2]) {
+ kvm_mmu_put_page(shadow_page, sptep);
+ kvm_release_pfn_clean(pfn);
+ sptep = NULL;
+ break;
+ }
+ }
- walk_shadow(&walker.walker, vcpu, addr);
+ spte = __pa(shadow_page->spt)
+ | PT_PRESENT_MASK | PT_ACCESSED_MASK
+ | PT_WRITABLE_MASK | PT_USER_MASK;
+ *sptep = spte;
+ }
- return walker.sptep;
+ return sptep;
}
/*
--
1.6.0.6
* [PATCH 4/5] KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in invlpg()
From: Avi Kivity @ 2008-12-25 13:23 UTC
To: kvm; +Cc: Marcelo Tosatti
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/paging_tmpl.h | 81 +++++++++++++++++--------------------------
1 files changed, 32 insertions(+), 49 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 69c7e33..46b68f9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -25,7 +25,6 @@
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
- #define shadow_walker shadow_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -42,7 +41,6 @@
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
- #define shadow_walker shadow_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -73,18 +71,6 @@ struct guest_walker {
u32 error_code;
};
-struct shadow_walker {
- struct kvm_shadow_walk walker;
- struct guest_walker *guest_walker;
- int user_fault;
- int write_fault;
- int largepage;
- int *ptwrite;
- pfn_t pfn;
- u64 *sptep;
- gpa_t pte_gpa;
-};
-
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -453,54 +439,52 @@ out_unlock:
return 0;
}
-static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
- struct kvm_vcpu *vcpu, u64 addr,
- u64 *sptep, int level)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
- struct shadow_walker *sw =
- container_of(_sw, struct shadow_walker, walker);
+ struct kvm_shadow_walk_iterator iterator;
+ pt_element_t gpte;
+ gpa_t pte_gpa = -1;
+ int level;
+ u64 *sptep;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
- /* FIXME: properly handle invlpg on large guest pages */
- if (level == PT_PAGE_TABLE_LEVEL ||
- ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
- struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ for_each_shadow_entry(vcpu, gva, iterator) {
+ level = iterator.level;
+ sptep = iterator.sptep;
- sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
- sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+ /* FIXME: properly handle invlpg on large guest pages */
+ if (level == PT_PAGE_TABLE_LEVEL ||
+ ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
- if (is_shadow_present_pte(*sptep)) {
- rmap_remove(vcpu->kvm, sptep);
- if (is_large_pte(*sptep))
- --vcpu->kvm->stat.lpages;
+ pte_gpa = (sp->gfn << PAGE_SHIFT);
+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
+ if (is_shadow_present_pte(*sptep)) {
+ rmap_remove(vcpu->kvm, sptep);
+ if (is_large_pte(*sptep))
+ --vcpu->kvm->stat.lpages;
+ }
+ set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+ break;
}
- set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
- return 1;
- }
- if (!is_shadow_present_pte(*sptep))
- return 1;
- return 0;
-}
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
-{
- pt_element_t gpte;
- struct shadow_walker walker = {
- .walker = { .entry = FNAME(shadow_invlpg_entry), },
- .pte_gpa = -1,
- };
+ if (!is_shadow_present_pte(*sptep))
+ break;
+ }
- spin_lock(&vcpu->kvm->mmu_lock);
- walk_shadow(&walker.walker, vcpu, gva);
spin_unlock(&vcpu->kvm->mmu_lock);
- if (walker.pte_gpa == -1)
+
+ if (pte_gpa == -1)
return;
- if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+ if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
return;
if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
if (mmu_topup_memory_caches(vcpu))
return;
- kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+ kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
sizeof(pt_element_t), 0);
}
}
@@ -607,7 +591,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
#undef pt_element_t
#undef guest_walker
-#undef shadow_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
--
1.6.0.6
* [PATCH 5/5] KVM: MMU: Drop walk_shadow()
From: Avi Kivity @ 2008-12-25 13:23 UTC
To: kvm; +Cc: Marcelo Tosatti
No longer used.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/kvm/mmu.c | 20 --------------------
1 files changed, 0 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b4b79b0..31ebe69 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -145,11 +145,6 @@ struct kvm_rmap_desc {
struct kvm_rmap_desc *more;
};
-struct kvm_shadow_walk {
- int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
- u64 addr, u64 *spte, int level);
-};
-
struct kvm_shadow_walk_iterator {
u64 addr;
hpa_t shadow_addr;
@@ -1299,21 +1294,6 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
--iterator->level;
}
-static int walk_shadow(struct kvm_shadow_walk *walker,
- struct kvm_vcpu *vcpu, u64 addr)
-{
- struct kvm_shadow_walk_iterator iterator;
- int r;
-
- for_each_shadow_entry(vcpu, addr, iterator) {
- r = walker->entry(walker, vcpu, addr,
- iterator.sptep, iterator.level);
- if (r)
- return r;
- }
- return 0;
-}
-
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
--
1.6.0.6
* Re: [PATCH 0/5] for_each_shadow_entry
From: Marcelo Tosatti @ 2008-12-27 23:25 UTC
To: Avi Kivity; +Cc: kvm
On Thu, Dec 25, 2008 at 03:23:34PM +0200, Avi Kivity wrote:
> This patchset replaces walk_shadow(), which calls a callback for each
> shadow pte that maps a guest virtual address, with an equivalent for_each
> style construct. The benefits are fewer thunks and smaller code.
>
> Please review.
Looks good.
* Re: [PATCH 0/5] for_each_shadow_entry
From: Avi Kivity @ 2008-12-28 9:00 UTC
To: Marcelo Tosatti; +Cc: kvm
Marcelo Tosatti wrote:
> On Thu, Dec 25, 2008 at 03:23:34PM +0200, Avi Kivity wrote:
>
>> This patchset replaces walk_shadow(), which calls a callback for each
>> shadow pte that maps a guest virtual address, with an equivalent for_each
>> style construct. The benefits are fewer thunks and smaller code.
>>
>> Please review.
>>
>
> Looks good.
>
Merged; thanks for the review.
--
error compiling committee.c: too many arguments to function