From mboxrd@z Thu Jan  1 00:00:00 1970
From: Avi Kivity
Subject: [PATCH] KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
Date: Tue, 4 May 2010 13:03:50 +0300
Message-ID: <1272967430-6191-1-git-send-email-avi@redhat.com>
To: kvm@vger.kernel.org, Marcelo Tosatti, Joerg Roedel
Return-path:
Received: from mx1.redhat.com ([209.132.183.28]:25801 "EHLO mx1.redhat.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1752383Ab0EDKDx (ORCPT ); Tue, 4 May 2010 06:03:53 -0400
Sender: kvm-owner@vger.kernel.org
List-ID:

On svm, kvm_read_pdptr() may require reading guest memory, which can sleep.

Push the spinlock into mmu_alloc_roots(), and only take it after we've read
the pdptr.

Signed-off-by: Avi Kivity
---

Marcelo, dropping and re-acquiring the lock before mmu_sync_roots() is fine,
yes?

 arch/x86/kvm/mmu.c |    7 +++++++
 1 files changed, 7 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51eb6d6..de99638 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2065,11 +2065,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			direct = 1;
 			root_gfn = 0;
 		}
+		spin_lock(&vcpu->kvm->mmu_lock);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
+		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = root;
 		return 0;
 	}
@@ -2093,11 +2095,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			direct = 1;
 			root_gfn = i << 30;
 		}
+		spin_lock(&vcpu->kvm->mmu_lock);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
+		spin_unlock(&vcpu->kvm->mmu_lock);
+
 		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2466,7 +2471,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 		goto out;
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	r = mmu_alloc_roots(vcpu);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	mmu_sync_roots(vcpu);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (r)
--
1.7.0.4