* KVM: x86: handle invalid root_hpa everywhere
From: Marcelo Tosatti @ 2014-01-03 19:09 UTC
To: kvm-devel; +Cc: Paolo Bonzini
Rom Freiman <rom@stratoscale.com> notes other code paths vulnerable to the
bug fixed by commit 989c6b34f6a9480e397b.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
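The same guard is used in every path touched below. As a rough sketch of the
pattern (illustration only, not one of the hunks: "example_lockless_walk" is a
hypothetical name, the other identifiers are the existing mmu.c helpers, and
root_hpa is assumed to be reset to INVALID_PAGE when the shadow roots are
freed):

	/*
	 * Sketch: bail out before any shadow page-table walk if the root has
	 * been torn down concurrently, instead of walking from a stale root.
	 */
	static u64 example_lockless_walk(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct kvm_shadow_walk_iterator iterator;
		u64 spte = 0ull;

		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return spte;		/* nothing to walk */

		walk_shadow_page_lockless_begin(vcpu);
		for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
			if (!is_shadow_present_pte(spte))
				break;
		walk_shadow_page_lockless_end(vcpu);

		return spte;
	}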
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 31a5702..e50425d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
bool ret = false;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return false;
+
if (!page_fault_can_be_fast(error_code))
return false;
@@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
struct kvm_shadow_walk_iterator iterator;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return spte;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
if (!is_shadow_present_pte(spte))
@@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
u64 spte;
int nr_sptes = 0;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return nr_sptes;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
sptes[iterator.level-1] = spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad75d77..cba218a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ goto out_gpte_changed;
+
for (shadow_walk_init(&it, vcpu, addr);
shadow_walk_okay(&it) && it.level > gw->level;
shadow_walk_next(&it)) {
@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
*/
mmu_topup_memory_caches(vcpu);
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+ WARN_ON(1);
+ return;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
* Re: KVM: x86: handle invalid root_hpa everywhere
From: Paolo Bonzini @ 2014-01-15 11:16 UTC
To: Marcelo Tosatti; +Cc: kvm-devel
On 03/01/2014 20:09, Marcelo Tosatti wrote:
>
> Rom Freiman <rom@stratoscale.com> notes other code paths vulnerable to the
> bug fixed by commit 989c6b34f6a9480e397b.
>
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
>
> [...]
Applied to kvm/queue, thanks.
Paolo