* [PATCH 0/5] KVM: x86: MMU: Eliminate extra memory slot searches in page fault handlers
From: Takuya Yoshikawa @ 2015-10-15 10:39 UTC
To: pbonzini; +Cc: kvm, linux-kernel
In the page fault handlers, both mapping_level_dirty_bitmap() and mapping_level()
perform a memory slot search, a binary search, through kvm_vcpu_gfn_to_memslot(),
whose cost may not be negligible, especially for virtual machines with many
memory slots. With a bit of cleanup, this patch set reduces that overhead.
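For reference, the lookup inside kvm_vcpu_gfn_to_memslot() amounts to a binary
search over the memslot array, which is kept sorted by base gfn. A minimal,
self-contained sketch of the idea (simplified types modeled on the kernel's
search_memslots(); not the actual KVM code):

#include <stddef.h>

struct memslot {
	unsigned long base_gfn;	/* first guest frame number covered */
	unsigned long npages;	/* number of pages in the slot */
};

/*
 * Slots are sorted by base_gfn in descending order, so the covering
 * slot (if any) is the first entry with base_gfn <= gfn.
 */
static struct memslot *find_slot(struct memslot *slots, int used_slots,
				 unsigned long gfn)
{
	int lo = 0, hi = used_slots;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (gfn >= slots[mid].base_gfn)
			hi = mid;
		else
			lo = mid + 1;
	}

	if (lo < used_slots && gfn >= slots[lo].base_gfn &&
	    gfn < slots[lo].base_gfn + slots[lo].npages)
		return &slots[lo];

	return NULL;	/* no slot covers this gfn */
}

A large-page-capable fault currently goes through this search more than once;
the series reduces it to a single lookup.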
[PATCH 1/5] KVM: x86: MMU: Make force_pt_level bool
[PATCH 2/5] KVM: x86: MMU: Simplify force_pt_level calculation code in FNAME(page_fault)()
[PATCH 3/5] KVM: x86: MMU: Merge mapping_level_dirty_bitmap() into mapping_level()
[PATCH 4/5] KVM: x86: MMU: Remove mapping_level_dirty_bitmap()
[PATCH 5/5] KVM: x86: MMU: Eliminate an extra memory slot search in mapping_level()
Takuya
* [PATCH 1/5] KVM: x86: MMU: Make force_pt_level bool
From: Takuya Yoshikawa @ 2015-10-15 10:40 UTC
To: pbonzini; +Cc: kvm, linux-kernel
force_pt_level will be passed to a function later in this series.
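(For context: patch 3 of this series changes mapping_level() to take this flag
by pointer, per its diff, hence the conversion to bool here:)

	static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
				 bool *force_pt_level);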
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
arch/x86/kvm/mmu.c | 8 ++++----
arch/x86/kvm/paging_tmpl.h | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b8482c0..2262728 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2962,7 +2962,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
{
int r;
int level;
- int force_pt_level;
+ bool force_pt_level;
pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
@@ -3476,7 +3476,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
pfn_t pfn;
int r;
int level;
- int force_pt_level;
+ bool force_pt_level;
gfn_t gfn = gpa >> PAGE_SHIFT;
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
@@ -3497,9 +3497,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (mapping_level_dirty_bitmap(vcpu, gfn) ||
!check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL))
- force_pt_level = 1;
+ force_pt_level = true;
else
- force_pt_level = 0;
+ force_pt_level = false;
if (likely(!force_pt_level)) {
level = mapping_level(vcpu, gfn);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 736e6ab..07f1a4e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -698,7 +698,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
- int force_pt_level;
+ bool force_pt_level;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
@@ -747,7 +747,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
|| is_self_change_mapping;
else
- force_pt_level = 1;
+ force_pt_level = true;
if (!force_pt_level) {
level = min(walker.level, mapping_level(vcpu, walker.gfn));
walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
--
1.7.9.5
* [PATCH 2/5] KVM: x86: MMU: Simplify force_pt_level calculation code in FNAME(page_fault)()
From: Takuya Yoshikawa @ 2015-10-15 10:41 UTC
To: pbonzini; +Cc: kvm, linux-kernel
As a bonus, an extra memory slot search can be eliminated when
is_self_change_mapping is true.
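(The reason: in the old code, the || ordering meant mapping_level_dirty_bitmap(),
and with it the slot search, ran before is_self_change_mapping was even
consulted. The rewrite below tests the cheap flag first:)

	/* before: the slot search runs even when is_self_change_mapping is true */
	force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
			 || is_self_change_mapping;

	/* after: the flag short-circuits the search entirely */
	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);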
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
arch/x86/kvm/paging_tmpl.h | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 07f1a4e..8ebc3a5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -743,15 +743,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
&walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
- if (walker.level >= PT_DIRECTORY_LEVEL)
- force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
- || is_self_change_mapping;
- else
+ if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
+ force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+ if (!force_pt_level) {
+ level = min(walker.level, mapping_level(vcpu, walker.gfn));
+ walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+ }
+ } else
force_pt_level = true;
- if (!force_pt_level) {
- level = min(walker.level, mapping_level(vcpu, walker.gfn));
- walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
- }
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
--
1.7.9.5
* [PATCH 3/5] KVM: x86: MMU: Merge mapping_level_dirty_bitmap() into mapping_level()
From: Takuya Yoshikawa @ 2015-10-15 10:42 UTC
To: pbonzini; +Cc: kvm, linux-kernel
This is necessary to eliminate an extra memory slot search later.
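(A sketch of the resulting calling convention, reconstructed from the hunks
below: the caller may preset the flag, mapping_level() can raise it during its
own dirty-bitmap check, and PT_PAGE_TABLE_LEVEL is returned whenever it ends
up set:)

	bool force_pt_level = false;
	int level;

	level = mapping_level(vcpu, gfn, &force_pt_level);
	if (likely(!force_pt_level)) {
		/* level is safe to use for a large-page mapping */
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}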
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
arch/x86/kvm/mmu.c | 29 ++++++++++++++---------------
arch/x86/kvm/paging_tmpl.h | 6 +++---
2 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2262728..890cd69 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -870,10 +870,16 @@ static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
}
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
+ bool *force_pt_level)
{
int host_level, level, max_level;
+ if (likely(!*force_pt_level))
+ *force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
+ if (unlikely(*force_pt_level))
+ return PT_PAGE_TABLE_LEVEL;
+
host_level = host_mapping_level(vcpu->kvm, large_gfn);
if (host_level == PT_PAGE_TABLE_LEVEL)
@@ -2962,14 +2968,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
{
int r;
int level;
- bool force_pt_level;
+ bool force_pt_level = false;
pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
- force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+ level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
- level = mapping_level(vcpu, gfn);
/*
* This path builds a PAE pagetable - so we can map
* 2mb pages at maximum. Therefore check if the level
@@ -2979,8 +2984,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
level = PT_DIRECTORY_LEVEL;
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
- } else
- level = PT_PAGE_TABLE_LEVEL;
+ }
if (fast_page_fault(vcpu, v, level, error_code))
return 0;
@@ -3495,20 +3499,15 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (r)
return r;
- if (mapping_level_dirty_bitmap(vcpu, gfn) ||
- !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL))
- force_pt_level = true;
- else
- force_pt_level = false;
-
+ force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
+ PT_DIRECTORY_LEVEL);
+ level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
- level = mapping_level(vcpu, gfn);
if (level > PT_DIRECTORY_LEVEL &&
!check_hugepage_cache_consistency(vcpu, gfn, level))
level = PT_DIRECTORY_LEVEL;
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
- } else
- level = PT_PAGE_TABLE_LEVEL;
+ }
if (fast_page_fault(vcpu, gpa, level, error_code))
return 0;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 8ebc3a5..bf39d0f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -744,9 +744,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
&walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
- force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
- if (!force_pt_level) {
- level = min(walker.level, mapping_level(vcpu, walker.gfn));
+ level = mapping_level(vcpu, walker.gfn, &force_pt_level);
+ if (likely(!force_pt_level)) {
+ level = min(walker.level, level);
walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
}
} else
--
1.7.9.5
* [PATCH 4/5] KVM: x86: MMU: Remove mapping_level_dirty_bitmap()
From: Takuya Yoshikawa @ 2015-10-15 10:43 UTC
To: pbonzini; +Cc: kvm, linux-kernel
Now that it has only one caller, and its name is not very helpful to
readers, just remove it.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
arch/x86/kvm/mmu.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 890cd69..78a3d08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -851,6 +851,14 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
return ret;
}
+static inline bool memslot_invalid(struct kvm_memory_slot *slot)
+{
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+ return true;
+
+ return false;
+}
+
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
@@ -858,25 +866,22 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
- (no_dirty_log && slot->dirty_bitmap))
+ if (memslot_invalid(slot) || (no_dirty_log && slot->dirty_bitmap))
slot = NULL;
return slot;
}
-static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
-{
- return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
-}
-
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
bool *force_pt_level)
{
int host_level, level, max_level;
+ struct kvm_memory_slot *slot;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
if (likely(!*force_pt_level))
- *force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
+ *force_pt_level = memslot_invalid(slot) || slot->dirty_bitmap;
if (unlikely(*force_pt_level))
return PT_PAGE_TABLE_LEVEL;
--
1.7.9.5
* [PATCH 5/5] KVM: x86: MMU: Eliminate an extra memory slot search in mapping_level()
From: Takuya Yoshikawa @ 2015-10-15 10:44 UTC
To: pbonzini; +Cc: kvm, linux-kernel
Calling kvm_vcpu_gfn_to_memslot() twice in mapping_level() should be
avoided, since the cost of getting a slot by binary search may not be
negligible, especially for virtual machines with many memory slots.
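(Combined with patch 4, mapping_level() now does a single slot lookup and
reuses the result for both the dirty-bitmap check and the write-protection
scan; roughly, per the v1 diffs:)

	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);	/* the only search */

	if (likely(!*force_pt_level))
		*force_pt_level = memslot_invalid(slot) || slot->dirty_bitmap;
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;
	...
	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (__has_wrprotected_page(large_gfn, level, slot))
			break;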
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
arch/x86/kvm/mmu.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 78a3d08..8d285dc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -818,14 +818,11 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm->arch.indirect_shadow_pages--;
}
-static int has_wrprotected_page(struct kvm_vcpu *vcpu,
- gfn_t gfn,
- int level)
+static int __has_wrprotected_page(gfn_t gfn, int level,
+ struct kvm_memory_slot *slot)
{
- struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
if (slot) {
linfo = lpage_info_slot(gfn, slot, level);
return linfo->write_count;
@@ -834,6 +831,14 @@ static int has_wrprotected_page(struct kvm_vcpu *vcpu,
return 1;
}
+static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ return __has_wrprotected_page(gfn, level, slot);
+}
+
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
unsigned long page_size;
@@ -893,7 +898,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
- if (has_wrprotected_page(vcpu, large_gfn, level))
+ if (__has_wrprotected_page(large_gfn, level, slot))
break;
return level - 1;
--
1.7.9.5
* Re: [PATCH 4/5] KVM: x86: MMU: Remove mapping_level_dirty_bitmap()
From: Paolo Bonzini @ 2015-10-15 15:35 UTC
To: Takuya Yoshikawa; +Cc: kvm, linux-kernel
On 15/10/2015 12:43, Takuya Yoshikawa wrote:
> +static inline bool memslot_invalid(struct kvm_memory_slot *slot)
Can you make this function memslot_valid_for_gpte(struct kvm_memory_slot
*slot, bool no_dirty_log), and have it return
slot && !(slot->flags & KVM_MEMSLOT_INVALID) &&
(!no_dirty_log || !slot->dirty_bitmap)
? If gfn_to_memslot_dirty_bitmap and mapping_level call the same
function, it helps highlight the similarity between them. Your
optimization loses that similarity in the name, but I think we can bring
it back somehow.
Otherwise, the patches are great. Thanks!
Paolo
> +{
> + if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
> + return true;
> +
> + return false;
> +}
> +
> static struct kvm_memory_slot *
> gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
> bool no_dirty_log)
> @@ -858,25 +866,22 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
> struct kvm_memory_slot *slot;
>
> slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
> - if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
> - (no_dirty_log && slot->dirty_bitmap))
> + if (memslot_invalid(slot) || (no_dirty_log && slot->dirty_bitmap))
> slot = NULL;
>
> return slot;
> }
>
> -static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
> -{
> - return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
> -}
> -
> static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
> bool *force_pt_level)
> {
> int host_level, level, max_level;
> + struct kvm_memory_slot *slot;
> +
> + slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
>
> if (likely(!*force_pt_level))
> - *force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
> + *force_pt_level = memslot_invalid(slot) || slot->dirty_bitmap;
* [PATCH 4/5] KVM: x86: MMU: Remove mapping_level_dirty_bitmap()
From: Takuya Yoshikawa @ 2015-10-16 8:07 UTC
To: pbonzini; +Cc: kvm, linux-kernel
Now that it has only one caller, and its name is not very helpful to
readers, remove it. Instead, the new memslot_valid_for_gpte() function
makes it possible to share the common code.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
arch/x86/kvm/mmu.c | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 890cd69..09833b0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -851,6 +851,17 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
return ret;
}
+static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
+ bool no_dirty_log)
+{
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+ return false;
+ if (no_dirty_log && slot->dirty_bitmap)
+ return false;
+
+ return true;
+}
+
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
@@ -858,25 +869,22 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
- (no_dirty_log && slot->dirty_bitmap))
+ if (!memslot_valid_for_gpte(slot, no_dirty_log))
slot = NULL;
return slot;
}
-static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
-{
- return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
-}
-
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
bool *force_pt_level)
{
int host_level, level, max_level;
+ struct kvm_memory_slot *slot;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
if (likely(!*force_pt_level))
- *force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
+ *force_pt_level = !memslot_valid_for_gpte(slot, true);
if (unlikely(*force_pt_level))
return PT_PAGE_TABLE_LEVEL;
--
2.1.0