From mboxrd@z Thu Jan 1 00:00:00 1970 From: Marcelo Tosatti Subject: KVM: x86: move vapic page handling out of fast path Date: Thu, 19 Jun 2008 14:43:47 -0300 Message-ID: <20080619174347.GA9236@dmt.cnet> Mime-Version: 1.0 Content-Type: text/plain; charset=us-ascii Cc: kvm-devel To: Avi Kivity Return-path: Received: from mx1.redhat.com ([66.187.233.31]:48833 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1759873AbYFSRoF (ORCPT ); Thu, 19 Jun 2008 13:44:05 -0400 Content-Disposition: inline Sender: kvm-owner@vger.kernel.org List-ID: I fail to see the point of handling the vapic page grab and ref counting in __vcpu_run's heavyweight enter/exit path. So move it to kvm_lapic_set_vapic_addr / kvm_free_lapic time. Other than the obvious improvement for non-Flexpriority case, this kills a down_read/up_read pair in heavy exits and reduces code size. Signed-off-by: Marcelo Tosatti Index: kvm/arch/x86/kvm/lapic.c =================================================================== --- kvm.orig/arch/x86/kvm/lapic.c +++ kvm/arch/x86/kvm/lapic.c @@ -800,6 +800,34 @@ static int apic_mmio_range(struct kvm_io return ret; } +static void vapic_get_page(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + struct page *page; + + if (!apic || !apic->vapic_addr) + return; + + down_read(&vcpu->kvm->slots_lock); + down_read(&current->mm->mmap_sem); + page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); + up_read(&current->mm->mmap_sem); + up_read(&vcpu->kvm->slots_lock); + + vcpu->arch.apic->vapic_page = page; +} + +static void vapic_put_page(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (!irqchip_in_kernel(vcpu->kvm) || !apic || !apic->vapic_addr) + return; + + kvm_release_page_dirty(apic->vapic_page); + mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); +} + void kvm_free_lapic(struct kvm_vcpu *vcpu) { if (!vcpu->arch.apic) @@ -810,6 +838,9 @@ void kvm_free_lapic(struct kvm_vcp if 
(vcpu->arch.apic->regs_page) __free_page(vcpu->arch.apic->regs_page); + if (vcpu->arch.apic->vapic_page) + vapic_put_page(vcpu); + kfree(vcpu->arch.apic); } @@ -1172,5 +1203,9 @@ void kvm_lapic_set_vapic_addr(struct kvm if (!irqchip_in_kernel(vcpu->kvm)) return; + if (vcpu->arch.apic->vapic_page) + vapic_put_page(vcpu); + vcpu->arch.apic->vapic_addr = vapic_addr; + vapic_get_page(vcpu); } Index: kvm/arch/x86/kvm/x86.c =================================================================== --- kvm.orig/arch/x86/kvm/x86.c +++ kvm/arch/x86/kvm/x86.c @@ -2737,32 +2737,6 @@ static void post_kvm_run_save(struct kvm vcpu->arch.irq_summary == 0); } -static void vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return; - - down_read(&current->mm->mmap_sem); - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - up_read(&current->mm->mmap_sem); - - vcpu->arch.apic->vapic_page = page; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - - if (!apic || !apic->vapic_addr) - return; - - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); -} - static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; @@ -2778,7 +2752,6 @@ static int __vcpu_run(struct kvm_vcpu *v } down_read(&vcpu->kvm->slots_lock); - vapic_enter(vcpu); preempted: if (vcpu->guest_debug.enabled) @@ -2916,10 +2889,6 @@ out: post_kvm_run_save(vcpu, kvm_run); - down_read(&vcpu->kvm->slots_lock); - vapic_exit(vcpu); - up_read(&vcpu->kvm->slots_lock); - return r; }